From 1a576e9ab0930c77c531d674f8df09ddc1640c1c Mon Sep 17 00:00:00 2001 From: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com> Date: Mon, 20 Dec 2021 09:10:22 -0800 Subject: [PATCH 001/152] update to 0.2.0 --- react/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/react/package.json b/react/package.json index 52d3482f..9ec00bf3 100644 --- a/react/package.json +++ b/react/package.json @@ -1,6 +1,6 @@ { "name": "app", - "version": "0.1.0", + "version": "0.2.0", "private": true, "dependencies": { "@emotion/react": "^11.6.0", From 5ab888a0680fb7361edbebe38e62ce874107f5b6 Mon Sep 17 00:00:00 2001 From: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com> Date: Mon, 20 Dec 2021 09:55:19 -0800 Subject: [PATCH 002/152] Superset changes (#74) --- .pipeline/lib/config.js | 2 +- .../metabase-postgresql/metabase-bc.yaml | 4 +- openshift/templates/superset/Dockerfile | 7 - .../redis-dc.yaml} | 2 +- openshift/templates/superset/superset-bc.yaml | 49 ------- .../templates/superset/superset/Dockerfile | 14 +- .../superset/scripts/docker-bootstrap.sh | 2 +- ...rset-bc-superset.yaml => superset-bc.yaml} | 12 +- .../superset/superset/superset-dc-beat.yaml | 135 ++++++++++++++++++ ...rset-dc.yaml => superset-dc-superset.yaml} | 115 ++++++++------- .../superset/superset/superset-dc-worker.yaml | 132 +++++++++++++++++ 11 files changed, 348 insertions(+), 126 deletions(-) delete mode 100644 openshift/templates/superset/Dockerfile rename openshift/templates/superset/{superset-dc-redis.yaml => redis/redis-dc.yaml} (99%) delete mode 100644 openshift/templates/superset/superset-bc.yaml rename openshift/templates/superset/superset/{superset-bc-superset.yaml => superset-bc.yaml} (85%) create mode 100644 openshift/templates/superset/superset/superset-dc-beat.yaml rename openshift/templates/superset/superset/{superset-dc.yaml => superset-dc-superset.yaml} (74%) create mode 100644 openshift/templates/superset/superset/superset-dc-worker.yaml diff --git a/.pipeline/lib/config.js b/.pipeline/lib/config.js index 4fadbb89..293bd4bd 100644 --- a/.pipeline/lib/config.js +++ b/.pipeline/lib/config.js @@ -51,7 +51,7 @@ const phases = { instance: `${name}-test`, version:`${version}`, tag:`test-${version}`, host: `cthub-test.${ocpName}.gov.bc.ca`, djangoDebug: 'False', logoutHostName: 'logontest.gov.bc.ca', metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500M', metabaseMemoryLimit: '2G', metabaseReplicas: 1, - frontendCpuRequest: '100m', frontendCpuLimit: '200m', frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, frontendMinReplicas: 1, frontendMaxReplicas: 3, + frontendCpuRequest: '400m', frontendCpuLimit: '800m', frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, frontendMinReplicas: 1, frontendMaxReplicas: 3, backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendReplicas: 1, backendMinReplicas: 1, backendMaxReplicas: 3, backendHost: `cthub-backend-test.${ocpName}.gov.bc.ca`, minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3G', schemaspyCpuRequest: '20m', schemaspyCpuLimit: '200m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160, diff --git a/openshift/templates/metabase-postgresql/metabase-bc.yaml b/openshift/templates/metabase-postgresql/metabase-bc.yaml index 3e345cd0..7dfdc696 100644 
--- a/openshift/templates/metabase-postgresql/metabase-bc.yaml +++ b/openshift/templates/metabase-postgresql/metabase-bc.yaml @@ -25,7 +25,7 @@ objects: output: to: kind: ImageStreamTag - name: metabase:v0.41.1 + name: metabase:v0.41.5 namespace: 30b186-tools postCommit: {} resources: {} @@ -33,7 +33,7 @@ objects: source: contextDir: openshift/templates/metabase git: - ref: metabase-docker-0.1.0 + ref: release-0.2.0 uri: https://github.com/bcgov/cthub.git type: Git strategy: diff --git a/openshift/templates/superset/Dockerfile b/openshift/templates/superset/Dockerfile deleted file mode 100644 index 2baf0606..00000000 --- a/openshift/templates/superset/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM artifacts.developer.gov.bc.ca/docker-remote/apache/superset -USER root -RUN pip install psycopg2-binary sqlalchemy-redshift \ - && chgrp -R root /app/superset_home \ - && chmod -R g+w /app/superset_home -COPY ./scripts /app/docker -USER superset \ No newline at end of file diff --git a/openshift/templates/superset/superset-dc-redis.yaml b/openshift/templates/superset/redis/redis-dc.yaml similarity index 99% rename from openshift/templates/superset/superset-dc-redis.yaml rename to openshift/templates/superset/redis/redis-dc.yaml index aa6f7e8e..3a4ebc3d 100644 --- a/openshift/templates/superset/superset-dc-redis.yaml +++ b/openshift/templates/superset/redis/redis-dc.yaml @@ -123,4 +123,4 @@ objects: observedGeneration: 0 replicas: 0 unavailableReplicas: 0 - updatedReplicas: 0 + updatedReplicas: 0 \ No newline at end of file diff --git a/openshift/templates/superset/superset-bc.yaml b/openshift/templates/superset/superset-bc.yaml deleted file mode 100644 index 60bf3916..00000000 --- a/openshift/templates/superset/superset-bc.yaml +++ /dev/null @@ -1,49 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: null - name: superset-bc -objects: -- apiVersion: image.openshift.io/v1 - kind: ImageStream - metadata: - annotations: - description: Keeps track of changes in the metabase image - creationTimestamp: null - name: superset - spec: - lookupPolicy: - local: false -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - creationTimestamp: null - name: superset - spec: - failedBuildsHistoryLimit: 5 - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: superset:v1.0 - namespace: 30b186-tools - postCommit: {} - resources: {} - runPolicy: Serial - source: - contextDir: openshift/templates/superset - git: - ref: superset-0.1.0 - uri: https://github.com/bcgov/cthub.git - type: Git - strategy: - dockerStrategy: - forcePull: true - noCache: true - pullSecret: - name: artifacts-pull-default-idxprm - type: Docker - successfulBuildsHistoryLimit: 5 - triggers: [] - status: - lastVersion: 0 diff --git a/openshift/templates/superset/superset/Dockerfile b/openshift/templates/superset/superset/Dockerfile index b4b05d3a..ed2a08ee 100644 --- a/openshift/templates/superset/superset/Dockerfile +++ b/openshift/templates/superset/superset/Dockerfile @@ -1,4 +1,10 @@ -FROM artifacts.developer.gov.bc.ca/docker-remote/apache/superset:latest-dev -ARG SUPERSET_COMMAND -USER 1001 -ENTRYPOINT ["/app/docker/docker-bootstrap.sh", "$SUPERSET_COMMAND"] \ No newline at end of file +FROM artifacts.developer.gov.bc.ca/docker-remote/apache/superset:latest +USER root +# RUN pip install --upgrade pip \ +# pip install psycopg2-binary sqlalchemy-redshift +RUN chgrp -R root /app/superset_home \ + && chmod -R g+w /app/superset_home +RUN mkdir /app/docker +COPY ./scripts 
/app/docker +ENTRYPOINT ["/app/docker/docker-bootstrap.sh"] +CMD ["app-gunicorn"] \ No newline at end of file diff --git a/openshift/templates/superset/superset/scripts/docker-bootstrap.sh b/openshift/templates/superset/superset/scripts/docker-bootstrap.sh index 67e5294b..6f6ed6c9 100755 --- a/openshift/templates/superset/superset/scripts/docker-bootstrap.sh +++ b/openshift/templates/superset/superset/scripts/docker-bootstrap.sh @@ -38,7 +38,7 @@ fi if [[ "${1}" == "worker" ]]; then echo "Starting Celery worker..." - celery --app=superset.tasks.celery_app:app worker -Ofair -l INFO + celery --app=superset.tasks.celery_app:app worker -O fair -l INFO elif [[ "${1}" == "beat" ]]; then echo "Starting Celery beat..." celery --app=superset.tasks.celery_app:app beat --pidfile /tmp/celerybeat.pid -l INFO -s "${SUPERSET_HOME}"/celerybeat-schedule diff --git a/openshift/templates/superset/superset/superset-bc-superset.yaml b/openshift/templates/superset/superset/superset-bc.yaml similarity index 85% rename from openshift/templates/superset/superset/superset-bc-superset.yaml rename to openshift/templates/superset/superset/superset-bc.yaml index 42af6810..e1bf5e48 100644 --- a/openshift/templates/superset/superset/superset-bc-superset.yaml +++ b/openshift/templates/superset/superset/superset-bc.yaml @@ -2,7 +2,7 @@ apiVersion: template.openshift.io/v1 kind: Template metadata: creationTimestamp: null - name: superset-superset-bc + name: superset-bc objects: - apiVersion: image.openshift.io/v1 kind: ImageStream @@ -28,12 +28,18 @@ objects: name: superset:20211213 namespace: 30b186-tools postCommit: {} - resources: {} + resources: + limits: + cpu: '1' + memory: 500Mi + requests: + cpu: 500m + memory: 250Mi runPolicy: Serial source: contextDir: openshift/templates/superset/superset git: - ref: superset-0.1.0 + ref: superset2-0.1.0 uri: https://github.com/bcgov/cthub.git type: Git strategy: diff --git a/openshift/templates/superset/superset/superset-dc-beat.yaml b/openshift/templates/superset/superset/superset-dc-beat.yaml new file mode 100644 index 00000000..473f9ab1 --- /dev/null +++ b/openshift/templates/superset/superset/superset-dc-beat.yaml @@ -0,0 +1,135 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + creationTimestamp: null + name: superset-dc-beat +parameters: + - name: ENV_NAME + required: true + - name: CPU_REQUEST + value: 40m + displayName: Requested CPU + description: Requested CPU + required: true + - name: CPU_LIMIT + value: 80m + displayName: CPU upper limit + description: CPU upper limit + required: true + - name: MEMORY_REQUEST + value: 200Mi + displayName: Requested memory + description: Requested memory + required: true + - name: MEMORY_LIMIT + value: 400Mi + displayName: Memory upper limit + description: Memory upper limit + required: true + - name: REPLICAS + description: | + The number of replicas to use. 
+ displayName: REPLICAS + value: "1" +objects: +- apiVersion: apps.openshift.io/v1 + kind: DeploymentConfig + metadata: + creationTimestamp: null + labels: + app: superset-beat + name: superset-beat + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 600 + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ConfigChange + replicas: 1 + revisionHistoryLimit: 10 + test: false + selector: + app: superset-beat + template: + metadata: + creationTimestamp: null + labels: + app: superset-beat + spec: + volumes: + - name: superset-home + persistentVolumeClaim: + claimName: superset-home + containers: + - name: superset-beat + image: >- + image-registry.openshift-image-registry.svc:5000/30b186-tools/superset:20211213 + args: ["beat"] + resources: + limits: + cpu: ${CPU_LIMIT} + memory: ${MEMORY_LIMIT} + requests: + cpu: ${CPU_REQUEST} + memory: ${MEMORY_REQUEST} + env: + - name: COMPOSE_PROJECT_NAME + value: superset + - name: DATABASE_DB + valueFrom: + secretKeyRef: + key: superset-db-name + name: patroni-creds-${ENV_NAME} + - name: DATABASE_HOST + value: patroni-master-test + - name: DATABASE_USER + valueFrom: + secretKeyRef: + key: superset-db-username + name: patroni-creds-${ENV_NAME} + - name: DATABASE_PASSWORD + valueFrom: + secretKeyRef: + key: superset-db-password + name: patroni-creds-${ENV_NAME} + - name: DATABASE_PORT + value: '5432' + - name: DATABASE_DIALECT + value: postgresql + - name: PYTHONPATH + value: '/app/pythonpath:/app/docker/pythonpath_dev' + - name: REDIS_HOST + value: redis + - name: REDIS_PORT + value: '6379' + - name: FLASK_ENV + value: production + - name: SUPERSET_ENV + value: production + - name: SUPERSET_LOAD_EXAMPLES + value: 'no' + - name: CYPRESS_CONFIG + value: 'false' + - name: SUPERSET_PORT + value: '8088' + volumeMounts: + - name: superset-home + mountPath: /app/superset_home + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + status: + availableReplicas: 0 + latestVersion: 0 + observedGeneration: 0 + replicas: 0 + unavailableReplicas: 0 + updatedReplicas: 0 diff --git a/openshift/templates/superset/superset/superset-dc.yaml b/openshift/templates/superset/superset/superset-dc-superset.yaml similarity index 74% rename from openshift/templates/superset/superset/superset-dc.yaml rename to openshift/templates/superset/superset/superset-dc-superset.yaml index 3dd95918..3e825186 100644 --- a/openshift/templates/superset/superset/superset-dc.yaml +++ b/openshift/templates/superset/superset/superset-dc-superset.yaml @@ -8,22 +8,22 @@ parameters: value: test required: true - name: CPU_REQUEST - value: 50M + value: 200m displayName: Requested CPU description: Requested CPU required: true - name: CPU_LIMIT - value: 200M + value: 400m displayName: CPU upper limit description: CPU upper limit required: true - name: MEMORY_REQUEST - value: 50M + value: 700Mi displayName: Requested memory description: Requested memory required: true - name: MEMORY_LIMIT - value: 500M + value: 1400Mi displayName: Memory upper limit description: Memory upper limit required: true @@ -33,56 +33,49 @@ parameters: displayName: REPLICAS value: "1" objects: -# - apiVersion: v1 -# kind: ServiceAccount -# metadata: -# name: cthub-superset -# labels: -# app: cthub-superset -# service: cthub-superset -# - apiVersion: v1 -# kind: Service -# metadata: -# annotations: -# 
openshift.io/generated-by: OpenShiftWebConsole -# creationTimestamp: null -# labels: -# app: cthub-superset -# name: cthub-superset -# spec: -# ports: -# - name: superset-web -# port: 8080 -# protocol: TCP -# targetPort: 8080 -# selector: -# deploymentconfig: cthub-superset -# sessionAffinity: None -# type: ClusterIP -# status: -# loadBalancer: {} -# - apiVersion: route.openshift.io/v1 -# kind: Route -# metadata: -# annotations: -# openshift.io/host.generated: "true" -# creationTimestamp: null -# labels: -# app: cthub-superset -# name: cthub-superset -# spec: -# host: superset-${ENV_NAME}.apps.silver.devops.gov.bc.ca -# path: / -# port: -# targetPort: superset-web -# tls: -# insecureEdgeTerminationPolicy: Redirect -# termination: edge -# to: -# kind: Service -# name: cthub-superset -# weight: 100 -# wildcardPolicy: None +- apiVersion: v1 + kind: Service + metadata: + annotations: + openshift.io/generated-by: OpenShiftWebConsole + creationTimestamp: null + labels: + app: superset + name: superset + spec: + ports: + - name: superset-web + port: 8088 + protocol: TCP + targetPort: 8088 + selector: + app: superset + sessionAffinity: None + type: ClusterIP + status: + loadBalancer: {} +- apiVersion: route.openshift.io/v1 + kind: Route + metadata: + annotations: + openshift.io/host.generated: "true" + creationTimestamp: null + labels: + app: superset + name: superset + spec: + host: superset-${ENV_NAME}.apps.silver.devops.gov.bc.ca + path: / + port: + targetPort: superset-web + tls: + insecureEdgeTerminationPolicy: Redirect + termination: edge + to: + kind: Service + name: superset + weight: 100 + wildcardPolicy: None - kind: PersistentVolumeClaim apiVersion: v1 metadata: @@ -112,7 +105,7 @@ objects: activeDeadlineSeconds: 21600 triggers: - type: ConfigChange - replicas: 1 + replicas: ${{REPLICAS}} revisionHistoryLimit: 10 test: false selector: @@ -128,13 +121,19 @@ objects: persistentVolumeClaim: claimName: superset-home containers: - - name: superset + - name: superset-app-gunicorn image: >- image-registry.openshift-image-registry.svc:5000/30b186-tools/superset:20211213 ports: - containerPort: 8088 protocol: TCP - resources: {} + resources: + limits: + cpu: ${CPU_LIMIT} + memory: ${MEMORY_LIMIT} + requests: + cpu: ${CPU_REQUEST} + memory: ${MEMORY_REQUEST} env: - name: COMPOSE_PROJECT_NAME value: superset @@ -162,7 +161,7 @@ objects: - name: PYTHONPATH value: '/app/pythonpath:/app/docker/pythonpath_dev' - name: REDIS_HOST - value: localhost + value: redis - name: REDIS_PORT value: '6379' - name: FLASK_ENV diff --git a/openshift/templates/superset/superset/superset-dc-worker.yaml b/openshift/templates/superset/superset/superset-dc-worker.yaml new file mode 100644 index 00000000..54193111 --- /dev/null +++ b/openshift/templates/superset/superset/superset-dc-worker.yaml @@ -0,0 +1,132 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + creationTimestamp: null + name: superset-dc-worker +parameters: + - name: ENV_NAME + required: true + - name: CPU_REQUEST + value: 100m + displayName: Requested CPU + description: Requested CPU + required: true + - name: CPU_LIMIT + value: 200m + displayName: CPU upper limit + description: CPU upper limit + required: true + - name: MEMORY_REQUEST + value: 2200Mi + displayName: Requested memory + description: Requested memory + required: true + - name: MEMORY_LIMIT + value: 4400Mi + displayName: Memory upper limit + description: Memory upper limit + required: true + - name: REPLICAS + description: | + The number of replicas to use. 
+ displayName: REPLICAS + value: "1" +objects: +- apiVersion: apps.openshift.io/v1 + kind: DeploymentConfig + metadata: + creationTimestamp: null + labels: + app: superset-worker + name: superset-worker + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 600 + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ConfigChange + replicas: 1 + revisionHistoryLimit: 10 + test: false + selector: + app: superset-worker + template: + metadata: + creationTimestamp: null + labels: + app: superset-worker + spec: + volumes: + - name: superset-home + persistentVolumeClaim: + claimName: superset-home + containers: + - name: superset-worker + image: >- + image-registry.openshift-image-registry.svc:5000/30b186-tools/superset:20211213 + args: ["worker"] + resources: + limits: + cpu: ${CPU_LIMIT} + memory: ${MEMORY_LIMIT} + requests: + cpu: ${CPU_REQUEST} + memory: ${MEMORY_REQUEST} + env: + - name: COMPOSE_PROJECT_NAME + value: superset + - name: DATABASE_DB + valueFrom: + secretKeyRef: + key: superset-db-name + name: patroni-creds-${ENV_NAME} + - name: DATABASE_HOST + value: patroni-master-test + - name: DATABASE_USER + valueFrom: + secretKeyRef: + key: superset-db-username + name: patroni-creds-${ENV_NAME} + - name: DATABASE_PASSWORD + valueFrom: + secretKeyRef: + key: superset-db-password + name: patroni-creds-${ENV_NAME} + - name: DATABASE_PORT + value: '5432' + - name: DATABASE_DIALECT + value: postgresql + - name: PYTHONPATH + value: '/app/pythonpath:/app/docker/pythonpath_dev' + - name: REDIS_HOST + value: redis + - name: REDIS_PORT + value: '6379' + - name: FLASK_ENV + value: production + - name: SUPERSET_ENV + value: production + - name: SUPERSET_LOAD_EXAMPLES + value: 'no' + - name: CYPRESS_CONFIG + value: 'false' + - name: SUPERSET_PORT + value: '8088' + volumeMounts: + - name: superset-home + mountPath: /app/superset_home + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + status: + availableReplicas: 0 + latestVersion: 0 + observedGeneration: 0 + replicas: 0 + unavailableReplicas: 0 + updatedReplicas: 0 From fd87a5cb461500a76bbe1f3b52628562765626f7 Mon Sep 17 00:00:00 2001 From: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com> Date: Mon, 20 Dec 2021 14:13:44 -0800 Subject: [PATCH 003/152] add sysdig (#76) --- openshift/templates/sysdig/sysdig-team.yaml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 openshift/templates/sysdig/sysdig-team.yaml diff --git a/openshift/templates/sysdig/sysdig-team.yaml b/openshift/templates/sysdig/sysdig-team.yaml new file mode 100644 index 00000000..a598d439 --- /dev/null +++ b/openshift/templates/sysdig/sysdig-team.yaml @@ -0,0 +1,19 @@ +apiVersion: ops.gov.bc.ca/v1alpha1 +kind: SysdigTeam +metadata: + name: 30b186-sysdigteam + namespace: 30b186-tools +spec: + team: + description: The Sysdig Team for 30b186-tools + users: + - name: emailAddress + role: ROLE_TEAM_MANAGER + - name: emailAddress + role: ROLE_TEAM_EDIT + - name: emailAddress + role: ROLE_TEAM_STANDARD + - name: emailAddress + role: ROLE_TEAM_READ + - name: emailAddress + role: ROLE_TEAM_READ From 4e33026d574e113e1ad940c6f25785dd5d0ffe21 Mon Sep 17 00:00:00 2001 From: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com> Date: Mon, 20 Dec 2021 15:44:42 -0800 Subject: [PATCH 004/152] resolve log4j issue --- openshift/templates/metabase/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/openshift/templates/metabase/Dockerfile b/openshift/templates/metabase/Dockerfile index bef189c5..1e016607 100644 --- a/openshift/templates/metabase/Dockerfile +++ b/openshift/templates/metabase/Dockerfile @@ -1,2 +1,2 @@ -FROM metabase/metabase:v0.41.1 -EXPOSE 3000 \ No newline at end of file +FROM metabase/metabase:v0.41.5 +EXPOSE 3000 From 9c8888a0aa27bf139c1f7a890661d1e136f3265f Mon Sep 17 00:00:00 2001 From: Jamie Popkin Date: Tue, 21 Dec 2021 10:48:13 -0800 Subject: [PATCH 005/152] Hydrogen Fleet Model, Migration & Loader (#75) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * pipeline * pipeline * Initial commit * update pipeline * add s2i for frontend * add backend s2i * update s2i * update backend build * add minio * Changed migration and data to reflect zeva * update minio * update minio * update minio dc * update templates * update dev * Committing some missing files * Re-enabled metabase in docker-compose. Fixed the name for ICBC Table * Added more use of environment variables (#10) * Removed hardcoded variables for keycloak * Removed database url * Test Login (#12) * Removed hardcoded variables for keycloak * Removed database url * Temporary test for logging in * Add gunicorn and update backend template (#11) * Temp change to cthub-9 (#14) * Reorganizing so it loads our application (#16) * CTHUB-17: Set-up Login Page (#15) * CTHUB-17: Set-up Login Page * change hint to 'idir' * remove extra styles Co-authored-by: Richard * ctub 37 - set up jest (#13) * -adds jest to package json under dev dependencies and scripts * -removes coverage files * -adds coverage to git ignore * Major Refactor for keycloak (#17) * update backend dc (#18) * Development Settings for Webpack (#19) * remove .s2i/bin (#21) * Fixed hot keyword in webpack (#22) * update ALLOWED_HOSTS (#23) * Changed how we refresh tokens (#25) * add gunicorn (#24) * update websocket port * update websocket port * Update all package with the latest versions and updated some deprecated functions. (#20) * Update all package with the latest versions and updated some depricated functions. * Added node version to fix docker issue * Auto-logout when refreshing the token doesn't work (#26) * Removed hardcoded variables for keycloak * Removed database url * Temporary test for logging in * Changed to cthub-9 * Reorganizing the code * Removed ReactKeycloakProvider * keycloak.json checks on host now * Added a check to toggle development settings for webpack * Moved hot into devServer * Removed home.js * Some cleanup * Changed how refreshing the token works * Minor comments * Fixed the automatic logout on failing to refresh the token * Fixed merge conflicts * Fixed merge conflicts * use artifactory (#27) * roll back (#29) * Metabase build (#28) * update frontend build (#30) * Allow environment variable for keycloak.json (#32) * CHUB-56 - model for ldv rebates (#31) * adds model for ldv rebates * -removes db comments * -adds migration for ldv rebates * -adds company (dealer) city to ldv rebates table * Cthub 59/60: Vin decoder function and saving decoded data to the DB table. 
(#33) * VIN Decoder Function * Created command file to run decoder function * CTHUB-60: VIN Decoder Function - Save Data * Fix migration conflict * Cthub 57 - ldv rebates service (#35) * -adds service for parsing ldv rebates * -creates service for parsing ldv rebates -migration to update fields so they fit everything from ldv rebates -adds pandas to requirements * -removes conflicting migration * -adds new migration for ldv rebates * Cthub 57 - linting (#36) * -adds service for parsing ldv rebates * -creates service for parsing ldv rebates -migration to update fields so they fit everything from ldv rebates -adds pandas to requirements * -removes conflicting migration * -adds new migration for ldv rebates * lints, updates comments in import_ldv_rebates file * Allowed multiple pages for parsing (#37) * Redirect fix (#38) * Fixed how redirect works * Removed test code * Removed unused code * Made router file so its a bit more organized * Added tracking of VIN for vin decoding information (#39) * Model and Import function for SUVI Data (#40) * Model and Import function for SUVI Data * Check if fleet or indivisual columns contain string then put fleet or individual into applicant type column, else leave it blank * Metabase 0.1.0 (#34) * Patroni 0.1.0 (#42) * update (#43) * update (#44) * Fix tags 0.1.0 (#45) * update config * create metabase service for backupcontainer * update metabase service name * CTHUB-81: Create models and migration - LCFS Credit Transfers (RLCF-017) (#41) * CTHUB-81: Create models and migration - LCFS Credit Transfers (RLCF-017) * Removed managers and other properties that are not needed for importing * Removed unnecessary declaration AUTH_USER_MODEL in settings Co-authored-by: Richard Tan * cleanup (#46) * update clean up script * Cthub 92 - frontend for upload (#47) * -adds upload container * -adds file drop area files, -adds routes for upload, -adds dropzone to package.json -adds css files -adds file size utility * -lints -aligns everything properly including button and table -adds proptypes -fixes capitization -removes extra imports -removes unused props * -removes unused link syle * -removes unused svg style * CTHUB-107: Added Upload to Minio viewset (#48) * Added minio to docker * File cleanup. Installed minio for python * Added upload functionality * cthub 107 - backend upload (#49) * -adds material ui icons, updates to use icons, adds css * -adds backend viewset for uploading spreadsheet (grabs minio url and parses and inserts to database) -some linting * -passes dataset selected from frontend * Code cleanup (#50) * Added minio to docker * File cleanup. 
Installed minio for python * Added upload functionality * Code cleanup * Fixed merge conflicts * Further cleanup * Tekton 0.1.0 (#51) * CTHUB - Add dataset Table, Dropdown (#52) * -adds material ui icons, updates to use icons, adds css * -adds backend viewset for uploading spreadsheet (grabs minio url and parses and inserts to database) -some linting * -passes dataset selected from frontend * -adds model for datasets table to contain names of datasets that users will be able to import data into -adds fixtures for datasets table -adds serializer and viewset to retrieve a list of datasets -displays list of datasets in frontend on a dropdown * -fixes tabbing for "if done" * -adds suvi dataset * Setup prod (#54) * Cthub 113 - import suvi data (#53) * -imports function to parse suvi data * -adds alias for import ldv -initializes done variable * update hostname * Metabase resource 0.1.0 (#55) * update metabase * Update doc (#56) * add condition to build * Cthub 114 - replace or add to data (#58) * -adds ability to replace existing ldv file * -adds modal if records are being replaced * -makes replacedata proptypes string or bool because radio button is inconsistant * -renames alert file so it is less vague -adds title to alertdialog props * Charger Model and Migration (#59) * Charger model * The include * Migration running Documented process as the README was looking sad and lonely.😢 * Fixed duplicate column * Set condition from artifactory (#57) * Cthub 96/78 - import public charging data (#61) * -adds model for public charging * -adds migration for public charging * -adds public charging service -adds public charging fixture -adds public charging to upload viewset so service gets used if it is selected * -changes id for new fixture * Tekton4 0.1.0 (#63) * update assemble * CTHUB-31: Trial VIN Decoder from DataOne (#62) * Different vendor for the decode api * Created a copy of the old vin decoder * Added superset as part of docker compose (#64) * CTHUB-106 login redirect (#65) * -adds dashboard container and router -adds dashboard container to app.js that redirects to /upload -updates icbc router so it renders when user goes to /icbc * -removes extra empty line * moves dashboard switch to last place * -switches redirect to usehistory * Charger model * The include * Fixed duplicate column * No date fields * Removing migration that I'm re-writting * Fixed migration * Fixed migration again * Import is running I think * Working for real now * Documentation * Alternative method of accessing the spreadsheet. * Rolling back charger rebate migration * Charger Rebate table, model & migration (#67) * Charger model * The include * Fixed duplicate column * No date fields * Removing migration that I'm re-writting * Fixed migration * Fixed migration again * Import is running I think * Working for real now * Documentation * Alternative method of accessing the spreadsheet. * Rolling back charger rebate migration * use minio secret * CTHUB-95 - add option for uploading ev charging rebates (#66) * -adds charger rebates to dropdown -needs function still in the viewset (currently using import_ldv * -adds import charger rebates service to upload viewset * Fixed columns * Superset 0.1.0 (#68) * Fixed garbled columns (#69) * Charger model * The include * Fixed duplicate column * No date fields * Removing migration that I'm re-writting * Fixed migration * Fixed migration again * Import is running I think * Working for real now * Documentation * Alternative method of accessing the spreadsheet. 
* Rolling back charger rebate migration * Fixed columns * -adds migration for charger rebates model changes (#70) * Hydrogen model * Hydrogen loader function * Hydrogen loading script * Cthub 65/123 - hydrogen fueling model (#71) * -adds decoder to git ignore * Removing ignored files * -adds hydrogen fueling model * -adds migration for hydrogen fueling * -updates hydrogen fueling model to add boolean fields for bar 700 and 350 * -adds function to parse spreadsheet and insert to hydrogen fueling table -adds ability to call hydrogen fueling and public charging parsers from command line -adds viewset for calling hydrogen fueling -fixture for adding hydrogen fueling to dropdown * Hydrogen migration and loading script Co-authored-by: Kuan Fan Co-authored-by: Richard Tan Co-authored-by: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com> Co-authored-by: NavpreetGrewal <65915811+NavpreetGrewal@users.noreply.github.com> Co-authored-by: Emily <44536222+emi-hi@users.noreply.github.com> --- .../api/management/import_hydrogen_fleets.py | 40 ++++ django/api/migrations/0013_hydrogenfleets.py | 41 ++++ django/api/models/__init__.py | 4 +- django/api/models/hydrogen_fleets.py | 109 ++++++++++ django/api/services/hydrogen_fleets.py | 60 ++++++ openshift/templates/metabase/Dockerfile | 2 +- openshift/templates/superset/Dockerfile | 7 + openshift/templates/superset/superset-bc.yaml | 49 +++++ .../templates/superset/superset-dc-redis.yaml | 126 +++++++++++ .../superset/superset-bc-superset.yaml | 49 +++++ .../superset/superset/superset-dc.yaml | 195 ++++++++++++++++++ 11 files changed, 680 insertions(+), 2 deletions(-) create mode 100644 django/api/management/import_hydrogen_fleets.py create mode 100644 django/api/migrations/0013_hydrogenfleets.py create mode 100644 django/api/models/hydrogen_fleets.py create mode 100644 django/api/services/hydrogen_fleets.py create mode 100644 openshift/templates/superset/Dockerfile create mode 100644 openshift/templates/superset/superset-bc.yaml create mode 100644 openshift/templates/superset/superset-dc-redis.yaml create mode 100644 openshift/templates/superset/superset/superset-bc-superset.yaml create mode 100644 openshift/templates/superset/superset/superset-dc.yaml diff --git a/django/api/management/import_hydrogen_fleets.py b/django/api/management/import_hydrogen_fleets.py new file mode 100644 index 00000000..24b4c237 --- /dev/null +++ b/django/api/management/import_hydrogen_fleets.py @@ -0,0 +1,40 @@ +import json +from os import path +from django.core.management import BaseCommand + +from api.services.hydrogen_fleets import import_from_xls + + +class Command(BaseCommand): + """ + This command takes in an excel file and will parse and create records + """ + help = 'Loads file into the hydrogen fleets table' + + def add_arguments(self, parser): + """ + Currently only takes in an excel file as a required argument + """ + parser.add_argument( + 'xls_file', help='Filename of the xls being imported' + ) + + def handle(self, *args, **options): + """ + Function to parse the file and pass it to the import + service + """ + xls_file = options.get('xls_file') + + if not path.exists(xls_file): + self.stdout.write(self.style.ERROR( + 'Cannot find {file}. '
+ 'Please make sure the filename is correct.'.format( + file=xls_file + ) + )) + return False + import_from_xls(xls_file) + self.stdout.write(self.style.SUCCESS( + 'Import complete' + )) diff --git a/django/api/migrations/0013_hydrogenfleets.py b/django/api/migrations/0013_hydrogenfleets.py new file mode 100644 index 00000000..b6fc7f9c --- /dev/null +++ b/django/api/migrations/0013_hydrogenfleets.py @@ -0,0 +1,41 @@ +# Generated by Django 3.1.6 on 2021-12-17 23:56 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0012_hydrogrenfueling'), + ] + + operations = [ + migrations.CreateModel( + name='HydrogenFleets', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), + ('create_user', models.CharField(default='SYSTEM', max_length=130)), + ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), + ('update_user', models.CharField(max_length=130, null=True)), + ('application_number', models.IntegerField(blank=True, null=True)), + ('fleet_number', models.IntegerField(blank=True, null=True)), + ('application_date', models.CharField(blank=True, max_length=100, null=True)), + ('organization_name', models.CharField(blank=True, max_length=250, null=True)), + ('fleet_name', models.CharField(blank=True, max_length=250, null=True)), + ('street_address', models.CharField(blank=True, max_length=250, null=True)), + ('city', models.CharField(blank=True, max_length=100, null=True)), + ('postal_code', models.CharField(blank=True, max_length=10, null=True)), + ('vin', models.CharField(blank=True, max_length=20, null=True)), + ('make', models.CharField(blank=True, max_length=100, null=True)), + ('model', models.CharField(blank=True, max_length=100, null=True)), + ('year', models.CharField(blank=True, max_length=100, null=True)), + ('purchase_date', models.CharField(blank=True, max_length=100, null=True)), + ('dealer_name', models.CharField(blank=True, max_length=250, null=True)), + ('rebate_amount', models.CharField(blank=True, max_length=250, null=True)), + ], + options={ + 'db_table': 'hydrogen_fleets', + }, + ), + ] diff --git a/django/api/models/__init__.py b/django/api/models/__init__.py index 86ec9083..b61842ce 100644 --- a/django/api/models/__init__.py +++ b/django/api/models/__init__.py @@ -13,4 +13,6 @@ from . import datasets from . import charger_rebates from . import public_charging -from . import hydrogen_fueling \ No newline at end of file +from . import hydrogen_fueling +from . import hydrogen_fleets +
diff --git a/django/api/models/hydrogen_fleets.py b/django/api/models/hydrogen_fleets.py new file mode 100644 index 00000000..462aea79 --- /dev/null +++ b/django/api/models/hydrogen_fleets.py @@ -0,0 +1,109 @@ +from auditable.models import Auditable +from django.db import models + + +class HydrogenFleets(Auditable): + + application_number = models.IntegerField( + blank=True, + null=True + ) + + fleet_number = models.IntegerField( + blank=True, + null=True + ) + + application_date = models.CharField( + blank=True, + null=True, + max_length=100, + unique=False + ) + + organization_name = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + fleet_name = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + street_address = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + city = models.CharField( + blank=True, + null=True, + max_length=100, + unique=False + ) + + postal_code = models.CharField( + blank=True, + null=True, + max_length=10, + unique=False + ) + + vin = models.CharField( + blank=True, + null=True, + max_length=20, + unique=False + ) + + make = models.CharField( + blank=True, + null=True, + max_length=100, + unique=False + ) + + model = models.CharField( + blank=True, + null=True, + max_length=100, + unique=False + ) + + year = models.CharField( + blank=True, + null=True, + max_length=100, + unique=False + ) + + purchase_date = models.CharField( + blank=True, + null=True, + max_length=100, + unique=False + ) + + dealer_name = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + rebate_amount = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + class Meta: + db_table = "hydrogen_fleets" diff --git a/django/api/services/hydrogen_fleets.py b/django/api/services/hydrogen_fleets.py new file mode 100644 index 00000000..ee36eda7 --- /dev/null +++ b/django/api/services/hydrogen_fleets.py @@ -0,0 +1,60 @@ +import pandas as pd +from api.models.hydrogen_fleets import HydrogenFleets + + +def trim_all_columns(df): + """ + Trim whitespace from ends of each value across all series in dataframe + """ + trim_strings = lambda x: x.strip() if isinstance(x, str) else x + return df.applymap(trim_strings) + + +def import_from_xls(excel_file): + df = pd.read_excel(excel_file, 'Fleets') + df.drop(df.columns.difference([ + "Application #", + "Fleet #", + "Application Date", + "Organization Name", + "Fleet Name", + "Street Address", + "City", + "Postal Code", + "VIN", + "Make", + "Model", + "Year", + "Purchase Date", + "Dealer Name", + "Rebate Amount" + ]), 1, inplace=True) + df = trim_all_columns(df) + df = df.applymap(lambda s: s.upper() if type(s) == str else s) + + # df.fillna('') + df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) + + for _, row in df.iterrows(): + try: + HydrogenFleets.objects.create( + application_number=row["Application #"], + fleet_number=row["Fleet #"], + application_date=row["Application Date"], + organization_name=row["Organization Name"], + fleet_name=row["Fleet Name"], + street_address=row["Street Address"], + city=row["City"], + postal_code=row["Postal Code"], + vin=row["VIN"], + make=row["Make"], + model=row["Model"], + year=row["Year"], + purchase_date=row["Purchase Date"], + dealer_name=row["Dealer Name"], + rebate_amount=row["Rebate Amount"] + ) + except Exception as error: + print(error) + print(row) + return True diff --git
a/openshift/templates/metabase/Dockerfile b/openshift/templates/metabase/Dockerfile index 1e016607..98411e9c 100644 --- a/openshift/templates/metabase/Dockerfile +++ b/openshift/templates/metabase/Dockerfile @@ -1,2 +1,2 @@ FROM metabase/metabase:v0.41.5 -EXPOSE 3000 +EXPOSE 3000 \ No newline at end of file diff --git a/openshift/templates/superset/Dockerfile b/openshift/templates/superset/Dockerfile new file mode 100644 index 00000000..2baf0606 --- /dev/null +++ b/openshift/templates/superset/Dockerfile @@ -0,0 +1,7 @@ +FROM artifacts.developer.gov.bc.ca/docker-remote/apache/superset +USER root +RUN pip install psycopg2-binary sqlalchemy-redshift \ + && chgrp -R root /app/superset_home \ + && chmod -R g+w /app/superset_home +COPY ./scripts /app/docker +USER superset \ No newline at end of file diff --git a/openshift/templates/superset/superset-bc.yaml b/openshift/templates/superset/superset-bc.yaml new file mode 100644 index 00000000..60bf3916 --- /dev/null +++ b/openshift/templates/superset/superset-bc.yaml @@ -0,0 +1,49 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + creationTimestamp: null + name: superset-bc +objects: +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the metabase image + creationTimestamp: null + name: superset + spec: + lookupPolicy: + local: false +- apiVersion: build.openshift.io/v1 + kind: BuildConfig + metadata: + creationTimestamp: null + name: superset + spec: + failedBuildsHistoryLimit: 5 + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: superset:v1.0 + namespace: 30b186-tools + postCommit: {} + resources: {} + runPolicy: Serial + source: + contextDir: openshift/templates/superset + git: + ref: superset-0.1.0 + uri: https://github.com/bcgov/cthub.git + type: Git + strategy: + dockerStrategy: + forcePull: true + noCache: true + pullSecret: + name: artifacts-pull-default-idxprm + type: Docker + successfulBuildsHistoryLimit: 5 + triggers: [] + status: + lastVersion: 0 diff --git a/openshift/templates/superset/superset-dc-redis.yaml b/openshift/templates/superset/superset-dc-redis.yaml new file mode 100644 index 00000000..aa6f7e8e --- /dev/null +++ b/openshift/templates/superset/superset-dc-redis.yaml @@ -0,0 +1,126 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + creationTimestamp: null + name: redis-dc +parameters: + - name: ENV_NAME + value: test + required: true + - name: CPU_REQUEST + value: 50M + displayName: Requested CPU + description: Requested CPU + required: true + - name: CPU_LIMIT + value: 200M + displayName: CPU upper limit + description: CPU upper limit + required: true + - name: MEMORY_REQUEST + value: 50M + displayName: Requested memory + description: Requested memory + required: true + - name: MEMORY_LIMIT + value: 500M + displayName: Memory upper limit + description: Memory upper limit + required: true + - name: REPLICAS + description: | + The number of replicas to use. 
+ displayName: REPLICAS + value: "1" +objects: +- apiVersion: v1 + kind: Service + metadata: + annotations: + openshift.io/generated-by: OpenShiftWebConsole + creationTimestamp: null + labels: + app: redis + name: redis + spec: + ports: + - name: redis + port: 6379 + protocol: TCP + targetPort: 6379 + selector: + app: redis + sessionAffinity: None + type: ClusterIP + status: + loadBalancer: {} +- kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: redis + annotations: + volume.beta.kubernetes.io/storage-class: netapp-block-standard + template.openshift.io.bcgov/create: 'true' + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi +- apiVersion: apps.openshift.io/v1 + kind: DeploymentConfig + metadata: + creationTimestamp: null + labels: + app: redis + name: redis + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 600 + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ConfigChange + replicas: 1 + revisionHistoryLimit: 10 + test: false + selector: + app: redis + template: + metadata: + creationTimestamp: null + labels: + app: redis + spec: + volumes: + - name: redis + persistentVolumeClaim: + claimName: redis + containers: + - name: redis + image: >- + image-registry.openshift-image-registry.svc:5000/30b186-tools/redis:6.2.6 + ports: + - containerPort: 6379 + protocol: TCP + resources: {} + volumeMounts: + - name: redis + mountPath: /data + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + status: + availableReplicas: 0 + latestVersion: 0 + observedGeneration: 0 + replicas: 0 + unavailableReplicas: 0 + updatedReplicas: 0 diff --git a/openshift/templates/superset/superset/superset-bc-superset.yaml b/openshift/templates/superset/superset/superset-bc-superset.yaml new file mode 100644 index 00000000..42af6810 --- /dev/null +++ b/openshift/templates/superset/superset/superset-bc-superset.yaml @@ -0,0 +1,49 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + creationTimestamp: null + name: superset-superset-bc +objects: + - apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the superset image + creationTimestamp: null + name: superset + spec: + lookupPolicy: + local: false + - apiVersion: build.openshift.io/v1 + kind: BuildConfig + metadata: + creationTimestamp: null + name: superset + spec: + failedBuildsHistoryLimit: 5 + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: superset:20211213 + namespace: 30b186-tools + postCommit: {} + resources: {} + runPolicy: Serial + source: + contextDir: openshift/templates/superset/superset + git: + ref: superset-0.1.0 + uri: https://github.com/bcgov/cthub.git + type: Git + strategy: + dockerStrategy: + forcePull: true + noCache: true + pullSecret: + name: artifacts-pull-default-idxprm + type: Docker + successfulBuildsHistoryLimit: 5 + triggers: [] + status: + lastVersion: 0 diff --git a/openshift/templates/superset/superset/superset-dc.yaml b/openshift/templates/superset/superset/superset-dc.yaml new file mode 100644 index 00000000..3dd95918 --- /dev/null +++ b/openshift/templates/superset/superset/superset-dc.yaml @@ -0,0 +1,195 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + creationTimestamp: null + name: superset-dc +parameters: + - name: ENV_NAME + value: 
test + required: true + - name: CPU_REQUEST + value: 50M + displayName: Requested CPU + description: Requested CPU + required: true + - name: CPU_LIMIT + value: 200M + displayName: CPU upper limit + description: CPU upper limit + required: true + - name: MEMORY_REQUEST + value: 50M + displayName: Requested memory + description: Requested memory + required: true + - name: MEMORY_LIMIT + value: 500M + displayName: Memory upper limit + description: Memory upper limit + required: true + - name: REPLICAS + description: | + The number of replicas to use. + displayName: REPLICAS + value: "1" +objects: +# - apiVersion: v1 +# kind: ServiceAccount +# metadata: +# name: cthub-superset +# labels: +# app: cthub-superset +# service: cthub-superset +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# openshift.io/generated-by: OpenShiftWebConsole +# creationTimestamp: null +# labels: +# app: cthub-superset +# name: cthub-superset +# spec: +# ports: +# - name: superset-web +# port: 8080 +# protocol: TCP +# targetPort: 8080 +# selector: +# deploymentconfig: cthub-superset +# sessionAffinity: None +# type: ClusterIP +# status: +# loadBalancer: {} +# - apiVersion: route.openshift.io/v1 +# kind: Route +# metadata: +# annotations: +# openshift.io/host.generated: "true" +# creationTimestamp: null +# labels: +# app: cthub-superset +# name: cthub-superset +# spec: +# host: superset-${ENV_NAME}.apps.silver.devops.gov.bc.ca +# path: / +# port: +# targetPort: superset-web +# tls: +# insecureEdgeTerminationPolicy: Redirect +# termination: edge +# to: +# kind: Service +# name: cthub-superset +# weight: 100 +# wildcardPolicy: None +- kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: superset-home + annotations: + volume.beta.kubernetes.io/storage-class: netapp-file-standard + template.openshift.io.bcgov/create: 'true' + spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 2Gi +- apiVersion: apps.openshift.io/v1 + kind: DeploymentConfig + metadata: + creationTimestamp: null + labels: + app: superset + name: superset + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 600 + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ConfigChange + replicas: 1 + revisionHistoryLimit: 10 + test: false + selector: + app: superset + template: + metadata: + creationTimestamp: null + labels: + app: superset + spec: + volumes: + - name: superset-home + persistentVolumeClaim: + claimName: superset-home + containers: + - name: superset + image: >- + image-registry.openshift-image-registry.svc:5000/30b186-tools/superset:20211213 + ports: + - containerPort: 8088 + protocol: TCP + resources: {} + env: + - name: COMPOSE_PROJECT_NAME + value: superset + - name: DATABASE_DB + valueFrom: + secretKeyRef: + key: superset-db-name + name: patroni-creds-${ENV_NAME} + - name: DATABASE_HOST + value: patroni-master-test + - name: DATABASE_USER + valueFrom: + secretKeyRef: + key: superset-db-username + name: patroni-creds-${ENV_NAME} + - name: DATABASE_PASSWORD + valueFrom: + secretKeyRef: + key: superset-db-password + name: patroni-creds-${ENV_NAME} + - name: DATABASE_PORT + value: '5432' + - name: DATABASE_DIALECT + value: postgresql + - name: PYTHONPATH + value: '/app/pythonpath:/app/docker/pythonpath_dev' + - name: REDIS_HOST + value: localhost + - name: REDIS_PORT + value: '6379' + - name: FLASK_ENV + value: production + - name: SUPERSET_ENV + value: production + - name: SUPERSET_LOAD_EXAMPLES + value: 'no' + - name: CYPRESS_CONFIG + value: 'false' + - name: 
SUPERSET_PORT + value: '8088' + volumeMounts: + - name: superset-home + mountPath: /app/superset_home + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + status: + availableReplicas: 0 + latestVersion: 0 + observedGeneration: 0 + replicas: 0 + unavailableReplicas: 0 + updatedReplicas: 0 From 3297f251a323177d10d2985fe5857855187868a6 Mon Sep 17 00:00:00 2001 From: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com> Date: Tue, 21 Dec 2021 14:00:33 -0800 Subject: [PATCH 006/152] update backup container (#77) --- .../openshift/backup-config.yaml | 1 + .../openshift/backup-deploy.yaml | 10 ++++++++++ openshift/templates/metabase/metabase-dc.yaml | 2 +- openshift/templates/superset/README.md | 11 ++++++++--- .../superset/superset-dc-superset.yaml | 19 +++++++++++++++++++ 5 files changed, 39 insertions(+), 4 deletions(-) diff --git a/openshift/templates/backup-container-2.3.3/openshift/backup-config.yaml b/openshift/templates/backup-container-2.3.3/openshift/backup-config.yaml index 39901d18..7620f5da 100644 --- a/openshift/templates/backup-container-2.3.3/openshift/backup-config.yaml +++ b/openshift/templates/backup-container-2.3.3/openshift/backup-config.yaml @@ -19,5 +19,6 @@ objects: backup.conf: | postgres=patroni-master-${ENV_NAME}:5432/cthub postgres=patroni-master-${ENV_NAME}-metabase:5432/metabase + postgres=patroni-master-${ENV_NAME}-superset:5432/superset 0 19 * * * default ./backup.sh -s 0 22 * * * default ./backup.sh -s -v all \ No newline at end of file diff --git a/openshift/templates/backup-container-2.3.3/openshift/backup-deploy.yaml b/openshift/templates/backup-container-2.3.3/openshift/backup-deploy.yaml index f97e0ffc..8a98aa2e 100644 --- a/openshift/templates/backup-container-2.3.3/openshift/backup-deploy.yaml +++ b/openshift/templates/backup-container-2.3.3/openshift/backup-deploy.yaml @@ -107,6 +107,16 @@ objects: secretKeyRef: name: patroni-creds-${ENV_NAME_LOWERCASE} key: metabase-db-password + - name: PATRONI_MASTER_${ENV_NAME_UPPERCASE}_SUPERSET_USER + valueFrom: + secretKeyRef: + name: patroni-creds-${ENV_NAME_LOWERCASE} + key: superset-db-username + - name: PATRONI_MASTER_${ENV_NAME_UPPERCASE}_SUPERSET_PASSWORD + valueFrom: + secretKeyRef: + name: patroni-creds-${ENV_NAME_LOWERCASE} + key: superset-db-password resources: requests: cpu: ${CPU_REQUEST} diff --git a/openshift/templates/metabase/metabase-dc.yaml b/openshift/templates/metabase/metabase-dc.yaml index 28350125..7901d065 100644 --- a/openshift/templates/metabase/metabase-dc.yaml +++ b/openshift/templates/metabase/metabase-dc.yaml @@ -163,7 +163,7 @@ objects: - metabase from: kind: ImageStreamTag - name: metabase:v0.41.1 + name: metabase:v0.41.5 namespace: 30b186-tools lastTriggeredImage: type: ImageChange diff --git a/openshift/templates/superset/README.md b/openshift/templates/superset/README.md index fd269a8b..7541692f 100644 --- a/openshift/templates/superset/README.md +++ b/openshift/templates/superset/README.md @@ -1,5 +1,10 @@ -oc process -f ./superset-dc.yaml ENV_NAME=test SUFFIX=-test \ -CPU_REQUEST=300m CPU_LIMIT=500m MEMORY_REQUEST=500M MEMORY_LIMIT=2G REPLICAS=1 | \ -oc apply -f - -n 30b186-test +## Create Superset +* superset/scripts folder, will be copied to /app/docker +* redis folder, build and deploy redis +* Dockerfile, the Dockerfile for the superset image +* superset-bc.yaml, build superset image; the
superset, worker and beat all use this image +* superset-dc-beat.yaml deploy superset beat +* superset-dc-worker.yaml deploy superset worker +* superset-dc-superset.yaml deploy superset diff --git a/openshift/templates/superset/superset/superset-dc-superset.yaml b/openshift/templates/superset/superset/superset-dc-superset.yaml index 3e825186..80da7675 100644 --- a/openshift/templates/superset/superset/superset-dc-superset.yaml +++ b/openshift/templates/superset/superset/superset-dc-superset.yaml @@ -54,6 +54,25 @@ objects: type: ClusterIP status: loadBalancer: {} +- apiVersion: v1 + kind: Service + metadata: + creationTimestamp: null + labels: + cluster-name: patroni-${ENV_NAME} + name: patroni-master-${ENV_NAME}-superset + spec: + ports: + - port: 5432 + protocol: TCP + targetPort: 5432 + selector: + cluster-name: patroni-${ENV_NAME} + role: master + app.kubernetes.io/name: patroni + sessionAffinity: None + type: ClusterIP + status: - apiVersion: route.openshift.io/v1 kind: Route metadata: From 63b7d5a42d3a87ae2ee776375d2032fde008f561 Mon Sep 17 00:00:00 2001 From: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com> Date: Tue, 21 Dec 2021 15:49:51 -0800 Subject: [PATCH 007/152] Update the value of minio endpoint (#78) --- openshift/templates/backend/backend-dc.yaml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/openshift/templates/backend/backend-dc.yaml b/openshift/templates/backend/backend-dc.yaml index 978239a8..24869325 100644 --- a/openshift/templates/backend/backend-dc.yaml +++ b/openshift/templates/backend/backend-dc.yaml @@ -226,10 +226,7 @@ objects: - name: LOCAL_DEV value: "false" - name: MINIO_ENDPOINT - valueFrom: - secretKeyRef: - name: ${NAME}-minio-${ENV_NAME} - key: MINIO_ENDPOINT + value: ${NAME}-minio-${ENV_NAME}.apps.silver.devops.gov.bc.ca - name: MINIO_USE_SSL value: "true" - name: MINIO_ROOT_USER From 9e283daad251cd285d6e5b91eb61d32fa7525387 Mon Sep 17 00:00:00 2001 From: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com> Date: Tue, 21 Dec 2021 16:22:07 -0800 Subject: [PATCH 008/152] Dev resource 0.2.0 (#79) --- .pipeline/lib/config.js | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/.pipeline/lib/config.js b/.pipeline/lib/config.js index 293bd4bd..97f84876 100644 --- a/.pipeline/lib/config.js +++ b/.pipeline/lib/config.js @@ -38,37 +38,37 @@ const phases = { ssoName:'dev.oidc.gov.bc.ca', phase: 'dev' , changeId:`${changeId}`, suffix: `-dev-${changeId}`, instance: `${name}-dev-${changeId}` , version:`${version}-${changeId}`, tag:`dev-${version}-${changeId}`, host: `cthub-dev-${changeId}.${ocpName}.gov.bc.ca`, djangoDebug: 'True', logoutHostName: 'logontest.gov.bc.ca', - metabaseCpuRequest: '300m', metabaseCpuLimit: '500m', metabaseMemoryRequest: '500Mi', metabaseMemoryLimit: '2Gi', metabaseReplicas: 1, - frontendCpuRequest: '100m', frontendCpuLimit: '700m', frontendMemoryRequest: '300M', frontendMemoryLimit: '4G', frontendReplicas: 1, - backendCpuRequest: '300m', backendCpuLimit: '600m', backendMemoryRequest: '1G', backendMemoryLimit: '2G', backendHealthCheckDelay: 30, backendHost: `cthub-backend-dev-${changeId}.${ocpName}.gov.bc.ca`, backendReplicas: 1, - minioCpuRequest: '100m', minioCpuLimit: '200m', minioMemoryRequest: '200M', minioMemoryLimit: '500M', minioPvcSize: '1G', + metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500Mi', metabaseMemoryLimit: '2Gi', metabaseReplicas: 1, + frontendCpuRequest: '400m', frontendCpuLimit: '800m', 
frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, + backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendHost: `cthub-backend-dev-${changeId}.${ocpName}.gov.bc.ca`, backendReplicas: 1, + minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3Gi', schemaspyCpuRequest: '50m', schemaspyCpuLimit: '200m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160, rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '700m', rabbitmqMemoryRequest: '500M', rabbitmqMemoryLimit: '1G', rabbitmqPvcSize: '1G', rabbitmqReplica: 1, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard', - patroniCpuRequest: '200m', patroniCpuLimit: '400m', patroniMemoryRequest: '250M', patroniMemoryLimit: '500M', patroniPvcSize: '2G', patroniReplica: 2, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`}, + patroniCpuRequest: '200m', patroniCpuLimit: '400m', patroniMemoryRequest: '250Mi', patroniMemoryLimit: '500Mi', patroniPvcSize: '2G', patroniReplica: 2, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`}, test: {namespace:'30b186-test', name: `${name}`, ssoSuffix:'-test', ssoName:'test.oidc.gov.bc.ca', phase: 'test' , changeId:`${changeId}`, suffix: `-test`, instance: `${name}-test`, version:`${version}`, tag:`test-${version}`, host: `cthub-test.${ocpName}.gov.bc.ca`, djangoDebug: 'False', logoutHostName: 'logontest.gov.bc.ca', - metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500M', metabaseMemoryLimit: '2G', metabaseReplicas: 1, + metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500Mi', metabaseMemoryLimit: '2Gi', metabaseReplicas: 1, frontendCpuRequest: '400m', frontendCpuLimit: '800m', frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, frontendMinReplicas: 1, frontendMaxReplicas: 3, backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendReplicas: 1, backendMinReplicas: 1, backendMaxReplicas: 3, backendHost: `cthub-backend-test.${ocpName}.gov.bc.ca`, minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3G', schemaspyCpuRequest: '20m', schemaspyCpuLimit: '200m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160, rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '700m', rabbitmqMemoryRequest: '500M', rabbitmqMemoryLimit: '700M', rabbitmqPvcSize: '1G', rabbitmqReplica: 2, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard', - patroniCpuRequest: '200m', patroniCpuLimit: '400m', patroniMemoryRequest: '250M', patroniMemoryLimit: '500M', patroniPvcSize: '5G', patroniReplica: 2, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`}, + patroniCpuRequest: '200m', patroniCpuLimit: '400m', patroniMemoryRequest: '250Mi', patroniMemoryLimit: '500Mi', patroniPvcSize: '5G', patroniReplica: 2, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`}, prod: {namespace:'30b186-prod', name: `${name}`, ssoSuffix:'', ssoName:'oidc.gov.bc.ca', phase: 'prod' , changeId:`${changeId}`, suffix: `-prod`, instance: `${name}-prod`, version:`${version}`, tag:`prod-${version}`, - metabaseCpuRequest: '200m', metabaseCpuLimit: '400m', metabaseMemoryRequest: '750Mi', metabaseMemoryLimit: '2Gi', 
metabaseReplicas: 1, + metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500Mi', metabaseMemoryLimit: '2Gi', metabaseReplicas: 1, host: `cthub-prod.${ocpName}.gov.bc.ca`, djangoDebug: 'False', logoutHostName: 'logon7.gov.bc.ca', - frontendCpuRequest: '300m', frontendCpuLimit: '600m', frontendMemoryRequest: '1G', frontendMemoryLimit: '2G', frontendReplicas: 2, frontendMinReplicas: 2, frontendMaxReplicas: 5, - backendCpuRequest: '200m', backendCpuLimit: '700m', backendMemoryRequest: '1G', backendMemoryLimit: '2G', backendHealthCheckDelay: 30, backendReplicas: 2, backendMinReplicas: 2, backendMaxReplicas: 5, backendHost: `cthub-backend-prod.${ocpName}.gov.bc.ca`, - minioCpuRequest: '100m', minioCpuLimit: '300m', minioMemoryRequest: '500M', minioMemoryLimit: '700M', minioPvcSize: '3G', + frontendCpuRequest: '400m', frontendCpuLimit: '800m', frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, frontendMinReplicas: 1, frontendMaxReplicas: 3, + backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendReplicas: 1, backendMinReplicas: 1, backendMaxReplicas: 3, backendHost: `cthub-backend-prod.${ocpName}.gov.bc.ca`, + minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3G', schemaspyCpuRequest: '50m', schemaspyCpuLimit: '400m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160, rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '700m', rabbitmqMemoryRequest: '500M', rabbitmqMemoryLimit: '1G', rabbitmqPvcSize: '5G', rabbitmqReplica: 2, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard', - patroniCpuRequest: '200m', patroniCpuLimit: '500m', patroniMemoryRequest: '300M', patroniMemoryLimit: '800M', patroniPvcSize: '8G', patroniReplica: 3, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`} + patroniCpuRequest: '200m', patroniCpuLimit: '400m', patroniMemoryRequest: '250Mi', patroniMemoryLimit: '500Mi', patroniPvcSize: '8G', patroniReplica: 3, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`} }; From 73e7f99d890d8d8d42feb6025802809f65b097e1 Mon Sep 17 00:00:00 2001 From: Richard Date: Thu, 6 Jan 2022 11:31:26 -0800 Subject: [PATCH 009/152] Fixed the routes so they don't do multi-loads (#81) --- react/src/App.js | 14 ++++++++++---- react/src/dashboard/DashboardContainer.js | 13 +++---------- react/src/dashboard/router.js | 1 - react/src/icbc_data/router.js | 21 ++++++++++----------- react/src/uploads/router.js | 21 ++++++++++----------- 5 files changed, 33 insertions(+), 37 deletions(-) diff --git a/react/src/App.js b/react/src/App.js index 31e89131..5066453d 100644 --- a/react/src/App.js +++ b/react/src/App.js @@ -4,12 +4,14 @@ import React from 'react'; import { Redirect, BrowserRouter as Router, + Route, + Switch, } from 'react-router-dom'; import settings from './app/settings'; import IcbcDataRouter from './icbc_data/router'; import UploadRouter from './uploads/router'; -import DashboardRouter from './dashboard/router'; +import DashboardContainer from './dashboard/DashboardContainer'; const { API_BASE } = settings; @@ -37,9 +39,13 @@ const App = () => { )} - - - + + {IcbcDataRouter()} + {UploadRouter()} + + + + diff --git a/react/src/dashboard/DashboardContainer.js b/react/src/dashboard/DashboardContainer.js index 9dcca644..f49ec102 100644 --- a/react/src/dashboard/DashboardContainer.js +++ 
b/react/src/dashboard/DashboardContainer.js @@ -1,18 +1,11 @@ -import React from 'react'; import { withRouter, useHistory } from 'react-router-dom'; const DashboardContainer = () => { - const { location } = window; - const { pathname } = location; const history = useHistory(); - return ( -
-    <div>
-      {pathname === '/' && history.push('/upload')}
-    </div>
-  </div>
- ); + history.push('/upload'); + + return null; }; export default withRouter(DashboardContainer); diff --git a/react/src/dashboard/router.js b/react/src/dashboard/router.js index 0939e88d..7d740664 100644 --- a/react/src/dashboard/router.js +++ b/react/src/dashboard/router.js @@ -6,7 +6,6 @@ import DashboardContainer from './DashboardContainer'; const Router = () => ( <> diff --git a/react/src/icbc_data/router.js b/react/src/icbc_data/router.js index c9aebd00..e159da47 100644 --- a/react/src/icbc_data/router.js +++ b/react/src/icbc_data/router.js @@ -1,17 +1,16 @@ import React from 'react'; -import { Route, Switch } from 'react-router-dom'; +import { Route } from 'react-router-dom'; import IcbcDataContainer from './IcbcDataContainer'; -const Router = () => ( - - - - - -); +const Router = () => ([ + + + , +]); export default Router; diff --git a/react/src/uploads/router.js b/react/src/uploads/router.js index 877eff4a..3575a73d 100644 --- a/react/src/uploads/router.js +++ b/react/src/uploads/router.js @@ -1,17 +1,16 @@ import React from 'react'; -import { Route, Switch } from 'react-router-dom'; +import { Route } from 'react-router-dom'; import UploadContainer from './UploadContainer'; -const Router = () => ( - - - - - -); +const Router = () => ([ + + + , +]); export default Router; From 9f878616fcdbc7c3321c640091fb605d5e37a655 Mon Sep 17 00:00:00 2001 From: Richard Date: Thu, 6 Jan 2022 11:31:59 -0800 Subject: [PATCH 010/152] Combined the superset containers into one (#82) --- docker-compose.yml | 35 +---------------------------------- superset.env | 3 --- superset/run.sh | 7 +++++++ 3 files changed, 8 insertions(+), 37 deletions(-) create mode 100755 superset/run.sh diff --git a/docker-compose.yml b/docker-compose.yml index 2684809f..c4df8510 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -42,7 +42,7 @@ services: superset: env_file: ./superset.env image: apache/superset:latest-dev - command: ["/app/docker/docker-bootstrap.sh", "app-gunicorn"] + command: ["/app/docker/run.sh"] user: "root" ports: - 8088:8088 @@ -52,39 +52,6 @@ services: volumes: - ./superset:/app/docker - superset_home:/app/superset_home - superset-init: - image: apache/superset:latest-dev - command: ["/app/docker/docker-init.sh"] - env_file: ./superset.env - depends_on: - - db - - redis - user: "root" - volumes: - - ./superset:/app/docker - - superset_home:/app/superset_home - superset-worker: - image: apache/superset:latest-dev - command: ["/app/docker/docker-bootstrap.sh", "worker"] - env_file: ./superset.env - depends_on: - - db - - redis - user: "root" - volumes: - - ./superset:/app/docker - - superset_home:/app/superset_home - superset-worker-beat: - image: apache/superset:latest-dev - command: ["/app/docker/docker-bootstrap.sh", "beat"] - env_file: ./superset.env - depends_on: - - db - - redis - user: "root" - volumes: - - ./superset:/app/docker - - superset_home:/app/superset_home api: build: ./django command: > diff --git a/superset.env b/superset.env index 5e300f62..705779aa 100644 --- a/superset.env +++ b/superset.env @@ -7,9 +7,6 @@ DATABASE_USER=postgres DATABASE_PORT=5432 DATABASE_DIALECT=postgresql -POSTGRES_DB=superset -POSTGRES_USER=postgres -POSTGRES_PASSWORD=postgres # Add the mapped in /app/pythonpath_docker which allows devs to override stuff PYTHONPATH=/app/pythonpath:/app/docker/pythonpath_dev diff --git a/superset/run.sh b/superset/run.sh new file mode 100755 index 00000000..2693369c --- /dev/null +++ b/superset/run.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +/app/docker/docker-init.sh & + 
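+# docker-init.sh above performs the one-time bootstrap that the separate superset-init container used to run; the worker and beat schedulers below are backgrounded, and gunicorn stays in the foreground to keep the single container alive.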
+/app/docker/docker-bootstrap.sh worker & +/app/docker/docker-bootstrap.sh beat & + +/app/docker/docker-bootstrap.sh app-gunicorn From a6bde69022db477b0b9253f17feabfd31b91de48 Mon Sep 17 00:00:00 2001 From: Jamie Popkin Date: Fri, 7 Jan 2022 13:48:26 -0800 Subject: [PATCH 011/152] Data Fleets Table (#83) * Model for Data Fleets * Remaining columns for loader script * New fixtures * Data Fleets table migration and import * Arc Project Tracking model --- django/README.md | 2 +- .../fixtures/0005_add_hydrogen_fleets.json | 1 + django/api/fixtures/0006_add_data_fleets.json | 1 + .../management/commands/import_data_fleets.py | 40 ++++ .../{ => commands}/import_hydrogen_fleets.py | 2 +- django/api/migrations/0014_datafleets.py | 52 ++++++ django/api/models/__init__.py | 1 + django/api/models/arc_project_tracking.py | 104 +++++++++++ django/api/models/data_fleets.py | 174 ++++++++++++++++++ django/api/services/data_fleets.py | 82 +++++++++ 10 files changed, 457 insertions(+), 2 deletions(-) create mode 100644 django/api/fixtures/0005_add_hydrogen_fleets.json create mode 100644 django/api/fixtures/0006_add_data_fleets.json create mode 100644 django/api/management/commands/import_data_fleets.py rename django/api/management/{ => commands}/import_hydrogen_fleets.py (95%) create mode 100644 django/api/migrations/0014_datafleets.py create mode 100644 django/api/models/arc_project_tracking.py create mode 100644 django/api/models/data_fleets.py create mode 100644 django/api/services/data_fleets.py diff --git a/django/README.md b/django/README.md index c4a2e231..0f3a3d5c 100644 --- a/django/README.md +++ b/django/README.md @@ -13,7 +13,7 @@ python manage.py makemigrations python manage.py migrate # Log into database to inspect the new table -docker-compose exec db psql -U postgre +docker-compose exec db psql -U postgres # run this: \d ``` diff --git a/django/api/fixtures/0005_add_hydrogen_fleets.json b/django/api/fixtures/0005_add_hydrogen_fleets.json new file mode 100644 index 00000000..0582a591 --- /dev/null +++ b/django/api/fixtures/0005_add_hydrogen_fleets.json @@ -0,0 +1 @@ +[{"model": "api.datasets", "pk": 6, "fields": {"create_timestamp": "2022-01-04T00:00:00Z", "create_user": "user", "update_timestamp": null, "update_user": null, "name": "Hydrogen Fleets"}}] diff --git a/django/api/fixtures/0006_add_data_fleets.json b/django/api/fixtures/0006_add_data_fleets.json new file mode 100644 index 00000000..a6a1759f --- /dev/null +++ b/django/api/fixtures/0006_add_data_fleets.json @@ -0,0 +1 @@ +[{"model": "api.datasets", "pk": 7, "fields": {"create_timestamp": "2022-01-04T00:00:00Z", "create_user": "user", "update_timestamp": null, "update_user": null, "name": "Data Fleets"}}] diff --git a/django/api/management/commands/import_data_fleets.py b/django/api/management/commands/import_data_fleets.py new file mode 100644 index 00000000..33306804 --- /dev/null +++ b/django/api/management/commands/import_data_fleets.py @@ -0,0 +1,40 @@ +import json +from os import path +from django.core.management import BaseCommand + +from api.services.data_fleets import import_from_xls + + +class Command(BaseCommand): + """ + This command takes in an excel file and will parse and create records + """ + help = 'Loads file into the data fleets table' + + def add_arguments(self, parser): + """ + Currently only takes in an excel file as a required argument + """ + parser.add_argument( + 'xls_file', help='Filename of the xls being imported' + ) + + def handle(self, *args, **options): + """ + Function to parse the file and pass it 
to the import + service + """ + xls_file = options.get('xls_file') + + if not path.exists(xls_file): + self.stdout.write(self.style.ERROR( + 'Cannot find {file}. ' + 'Please make sure the filename is correct.'.format( + file=xls_file + ) + )) + return False + import_from_xls(xls_file) + self.stdout.write(self.style.SUCCESS( + 'Import complete' + )) diff --git a/django/api/management/import_hydrogen_fleets.py b/django/api/management/commands/import_hydrogen_fleets.py similarity index 95% rename from django/api/management/import_hydrogen_fleets.py rename to django/api/management/commands/import_hydrogen_fleets.py index 24b4c237..67a8df28 100644 --- a/django/api/management/import_hydrogen_fleets.py +++ b/django/api/management/commands/import_hydrogen_fleets.py @@ -9,7 +9,7 @@ class Command(BaseCommand): """ This command takes in an excel file and will parse and create records """ - help = 'Loads file into the ldv rebates table' + help = 'Loads file into the hydrogen fleets table' def add_arguments(self, parser): """ diff --git a/django/api/migrations/0014_datafleets.py b/django/api/migrations/0014_datafleets.py new file mode 100644 index 00000000..7afa757c --- /dev/null +++ b/django/api/migrations/0014_datafleets.py @@ -0,0 +1,52 @@ +# Generated by Django 3.1.6 on 2022-01-05 20:17 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0013_hydrogenfleets'), + ] + + operations = [ + migrations.CreateModel( + name='DataFleets', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), + ('create_user', models.CharField(default='SYSTEM', max_length=130)), + ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), + ('update_user', models.CharField(max_length=130, null=True)), + ('current_stage', models.CharField(blank=True, max_length=250, null=True)), + ('rebate_value', models.CharField(blank=True, max_length=100, null=True)), + ('legal_name_of_organization_fleet', models.CharField(blank=True, max_length=500, null=True)), + ('business_category', models.CharField(blank=True, max_length=250, null=True)), + ('city', models.CharField(blank=True, max_length=250, null=True)), + ('postal_code', models.CharField(blank=True, max_length=100, null=True)), + ('applicant_first_name', models.CharField(blank=True, max_length=100, null=True)), + ('applicant_last_name', models.CharField(blank=True, max_length=100, null=True)), + ('email_address', models.CharField(blank=True, max_length=100, null=True)), + ('fleet_size_all', models.IntegerField(blank=True, null=True)), + ('fleet_size_light_duty', models.IntegerField(blank=True, null=True)), + ('total_number_of_evs', models.IntegerField(blank=True, null=True)), + ('total_number_of_light_duty_evs', models.IntegerField(blank=True, null=True)), + ('phev', models.IntegerField(blank=True, null=True)), + ('evse', models.IntegerField(blank=True, null=True)), + ('average_daily_travel_distance', models.CharField(blank=True, max_length=100, null=True)), + ('component_being_applyied_for', models.CharField(blank=True, max_length=250, null=True)), + ('estimated_cost', models.CharField(blank=True, max_length=100, null=True)), + ('type_of_charger_being_installing', models.CharField(blank=True, max_length=250, null=True)), + ('number_of_Level_2_Charging_Stations_being_applying_for', models.IntegerField(blank=True, null=True)), + 
('number_of_level_3_dc_fast_charging_stations_being_applying_for', models.IntegerField(blank=True, null=True)), + ('application_form_fleets_completion_date_time', models.CharField(blank=True, max_length=100, null=True)), + ('pre_approval_date', models.CharField(blank=True, max_length=100, null=True)), + ('deadline', models.CharField(blank=True, max_length=250, null=True)), + ('application_number', models.CharField(blank=True, max_length=250, null=True)), + ('potential_rebate', models.CharField(blank=True, max_length=100, null=True)), + ], + options={ + 'db_table': 'data_fleets', + }, + ), + ] diff --git a/django/api/models/__init__.py b/django/api/models/__init__.py index b61842ce..de2f0820 100644 --- a/django/api/models/__init__.py +++ b/django/api/models/__init__.py @@ -15,4 +15,5 @@ from . import public_charging from . import hydrogen_fueling from . import hydrogen_fleets +from . import data_fleets diff --git a/django/api/models/arc_project_tracking.py b/django/api/models/arc_project_tracking.py new file mode 100644 index 00000000..ef547e5d --- /dev/null +++ b/django/api/models/arc_project_tracking.py @@ -0,0 +1,104 @@ +from auditable.models import Auditable +from django.db import models + + +class ARCProjectTracking(Auditable): + + funding_call = models.CharField( + blank=True, + null=True, + max_length=50, + unique=False + ) + + proponent = models.CharField( + blank=True, + null=False, + max_length=500, + unique=False + ) + + reference_number = models.CharField( + blank=True, + null=True, + max_length=50, + unique=False + ) + + project_title = models.CharField( + blank=True, + null=True, + max_length=500, + unique=False + ) + + primary_location = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + status = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + arc_funding = models.IntegerField( + blank=True, + null=True + ) + + funds_issued = models.IntegerField( + blank=True, + null=True + ) + + start_date = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + completion_date = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + total_project_value = models.IntegerField( + blank=True, + null=True + ) + + zev_sub_sector = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + on_road_off_road = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + fuel_type = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + publicly_announced = models.BooleanField( + default=False + ) + + class Meta: + db_table = "arc_project_tracking" \ No newline at end of file diff --git a/django/api/models/data_fleets.py b/django/api/models/data_fleets.py new file mode 100644 index 00000000..586fcdc1 --- /dev/null +++ b/django/api/models/data_fleets.py @@ -0,0 +1,174 @@ +from auditable.models import Auditable +from django.db import models + + +class DataFleets(Auditable): + current_stage = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + rebate_value = models.CharField( + blank=True, + null=True, + max_length=100, + unique=False + ) + + legal_name_of_organization_fleet = models.CharField( + blank=True, + null=True, + max_length=500, + unique=False + ) + + business_category = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + city = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + 
postal_code = models.CharField( + blank=True, + null=True, + max_length=100, + unique=False + ) + + applicant_first_name = models.CharField( + blank=True, + null=True, + max_length=100, + unique=False + ) + + applicant_last_name = models.CharField( + blank=True, + null=True, + max_length=100, + unique=False + ) + + email_address = models.CharField( + blank=True, + null=True, + max_length=100, + unique=False + ) + + fleet_size_all = models.IntegerField( + blank=True, + null=True + ) + + fleet_size_light_duty = models.IntegerField( + blank=True, + null=True + ) + + total_number_of_evs = models.IntegerField( + blank=True, + null=True + ) + + total_number_of_light_duty_evs = models.IntegerField( + blank=True, + null=True + ) + + phev = models.IntegerField( + blank=True, + null=True + ) + + evse = models.IntegerField( + blank=True, + null=True + ) + + average_daily_travel_distance = models.CharField( + blank=True, + null=True, + max_length=100, + unique=False + ) + + component_being_applyied_for = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + estimated_cost = models.CharField( + blank=True, + null=True, + max_length=100, + unique=False + ) + + type_of_charger_being_installing = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + number_of_Level_2_Charging_Stations_being_applying_for = models.IntegerField( + blank=True, + null=True + ) + + number_of_level_3_dc_fast_charging_stations_being_applying_for = models.IntegerField( + blank=True, + null=True + ) + + application_form_fleets_completion_date_time = models.CharField( + blank=True, + null=True, + max_length=100, + unique=False + ) + + pre_approval_date = models.CharField( + blank=True, + null=True, + max_length=100, + unique=False + ) + + deadline = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + application_number = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + potential_rebate = models.CharField( + blank=True, + null=True, + max_length=100, + unique=False + ) + + + class Meta: + db_table = "data_fleets" diff --git a/django/api/services/data_fleets.py b/django/api/services/data_fleets.py new file mode 100644 index 00000000..33b10ed3 --- /dev/null +++ b/django/api/services/data_fleets.py @@ -0,0 +1,82 @@ +import pandas as pd +from api.models.data_fleets import DataFleets + + +def trim_all_columns(df): + """ + Trim whitespace from ends of each value across all series in dataframe + """ + trim_strings = lambda x: x.strip() if isinstance(x, str) else x + return df.applymap(trim_strings) + + +def import_from_xls(excel_file): + df = pd.read_excel(excel_file, 'Data Fleets') + df.drop(df.columns.difference([ + "Current Stage", + "Rebate Value", + "Legal Name of your Organization/Fleet: ", + "Your business Category", + "City:*", + "Postal Code:*", + "Applicant First Name", + "Applicant Last Name", + "Email Address:*", + "Fleet Size All", + "Fleet Size Light-duty", + "Total number of EVs?", + "Total number of light-duty EVs?", + "PHEV's", + "EVSE's?", + "Average daily travel distance?", + "Which component are you applying for?*", + "Estimated cost", + "Which type of charger are you installing?", + "How many Level 2 Charging Stations are you applying for", + "How many Level 3/DC Fast Charging Stations are you applying for", + '"Application Form Fleets" completion date/time', + "Pre-Approval Date", + "Deadline", + "Application Number", + "Potential Rebate" + ]), 1, inplace=True) + df = 
trim_all_columns(df) + df = df.applymap(lambda s: s.upper() if type(s) == str else s) + + # df.fillna('') + df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) + + for _, row in df.iterrows(): + try: + DataFleets.objects.create( + current_stage=row["Current Stage"], + rebate_value=row["Rebate Value"], + legal_name_of_organization_fleet =row["Legal Name of your Organization/Fleet: "], + business_category=row["Your business Category"], + city=row["City:*"], + postal_code=row["Postal Code:*"], + applicant_first_name=row["Applicant First Name"], + applicant_last_name =row["Applicant Last Name"], + email_address=row["Email Address:*"], + fleet_size_all=row["Fleet Size All"], + fleet_size_light_duty=row["Fleet Size Light-duty"], + total_number_of_evs=row["Total number of EVs?"], + total_number_of_light_duty_evs=row["Total number of light-duty EVs?"], + phev=row["PHEV's"], + evse=row["EVSE's?"], + average_daily_travel_distance=row["Average daily travel distance?"], + component_being_applyied_for=row["Which component are you applying for?*"], + estimated_cost=row["Estimated cost"], + type_of_charger_being_installing=row["Which type of charger are you installing?"], + number_of_Level_2_Charging_Stations_being_applying_for=row["How many Level 2 Charging Stations are you applying for"], + number_of_level_3_dc_fast_charging_stations_being_applying_for=row["How many Level 3/DC Fast Charging Stations are you applying for"], + application_form_fleets_completion_date_time=row['"Application Form Fleets" completion date/time'], + pre_approval_date=row["Pre-Approval Date"], + deadline=row["Deadline"], + application_number=row["Application Number"], + potential_rebate=row["Potential Rebate"] + ) + except Exception as error: + print(error) + print(row) + return True From ef6044613b9c77c4abef17e4b88e66c2ee9e2348 Mon Sep 17 00:00:00 2001 From: Jamie Popkin Date: Wed, 12 Jan 2022 14:42:52 -0800 Subject: [PATCH 012/152] ARC Project Tracking (#84) * Model for Data Fleets * Remaining columns for loader script * New fixtures * Data Fleets table migration and import * Arc Project Tracking model * ARC table migration/service/command * Data is loading * Lost this file for some reason * Scrap it model * Scrap it service * Data import working * The dropdown for all new data imports --- .../0007_add_arc_project_tracking.json | 1 + django/api/fixtures/0008_add_scrap_it.json | 1 + .../commands/import_arc_project_tracking.py | 40 +++++++++ .../management/commands/import_scrap_it.py | 40 +++++++++ .../api/migrations/0015_arcprojecttracking.py | 41 +++++++++ django/api/migrations/0016_scrapit.py | 37 ++++++++ django/api/models/__init__.py | 3 +- django/api/models/arc_project_tracking.py | 2 +- django/api/models/scrap_it.py | 85 +++++++++++++++++++ django/api/services/arc_project_tracking.py | 74 ++++++++++++++++ django/api/services/scrap_it.py | 40 +++++++++ django/api/viewsets/upload.py | 24 ++++++ 12 files changed, 386 insertions(+), 2 deletions(-) create mode 100644 django/api/fixtures/0007_add_arc_project_tracking.json create mode 100644 django/api/fixtures/0008_add_scrap_it.json create mode 100644 django/api/management/commands/import_arc_project_tracking.py create mode 100644 django/api/management/commands/import_scrap_it.py create mode 100644 django/api/migrations/0015_arcprojecttracking.py create mode 100644 django/api/migrations/0016_scrapit.py create mode 100644 django/api/models/scrap_it.py create mode 100644 django/api/services/arc_project_tracking.py create mode 100644 
django/api/services/scrap_it.py diff --git a/django/api/fixtures/0007_add_arc_project_tracking.json b/django/api/fixtures/0007_add_arc_project_tracking.json new file mode 100644 index 00000000..96d8bde9 --- /dev/null +++ b/django/api/fixtures/0007_add_arc_project_tracking.json @@ -0,0 +1 @@ +[{"model": "api.datasets", "pk": 8, "fields": {"create_timestamp": "2022-01-09T00:00:00Z", "create_user": "user", "update_timestamp": null, "update_user": null, "name": "ARC Project Tracking"}}] diff --git a/django/api/fixtures/0008_add_scrap_it.json b/django/api/fixtures/0008_add_scrap_it.json new file mode 100644 index 00000000..95fe81fa --- /dev/null +++ b/django/api/fixtures/0008_add_scrap_it.json @@ -0,0 +1 @@ +[{"model": "api.datasets", "pk": 9, "fields": {"create_timestamp": "2022-01-11T00:00:00Z", "create_user": "user", "update_timestamp": null, "update_user": null, "name": "Scrap It"}}] diff --git a/django/api/management/commands/import_arc_project_tracking.py b/django/api/management/commands/import_arc_project_tracking.py new file mode 100644 index 00000000..e6cd574c --- /dev/null +++ b/django/api/management/commands/import_arc_project_tracking.py @@ -0,0 +1,40 @@ +import json +from os import path +from django.core.management import BaseCommand + +from api.services.arc_project_tracking import import_from_xls + + +class Command(BaseCommand): + """ + This command takes in an excel file and will parse and create records + """ + help = 'Loads file into the arc project tracking table' + + def add_arguments(self, parser): + """ + Currently only takes in an excel file as a required argument + """ + parser.add_argument( + 'xls_file', help='Filename of the xls being imported' + ) + + def handle(self, *args, **options): + """ + Function to parse the file and pass it to the import + service + """ + xls_file = options.get('xls_file') + + if not path.exists(xls_file): + self.stdout.write(self.style.ERROR( + 'Cannot find {file}. ' + 'Please make sure the filename is correct.'.format( + file=xls_file + ) + )) + return False + import_from_xls(xls_file) + self.stdout.write(self.style.SUCCESS( + 'Import complete' + )) diff --git a/django/api/management/commands/import_scrap_it.py b/django/api/management/commands/import_scrap_it.py new file mode 100644 index 00000000..56e424dc --- /dev/null +++ b/django/api/management/commands/import_scrap_it.py @@ -0,0 +1,40 @@ +import json +from os import path +from django.core.management import BaseCommand + +from api.services.scrap_it import import_from_xls + + +class Command(BaseCommand): + """ + This command takes in an excel file and will parse and create records + """ + help = 'Loads file into the scrap it table' + + def add_arguments(self, parser): + """ + Currently only takes in an excel file as a required argument + """ + parser.add_argument( + 'xls_file', help='Filename of the xls being imported' + ) + + def handle(self, *args, **options): + """ + Function to parse the file and pass it to the import + service + """ + xls_file = options.get('xls_file') + + if not path.exists(xls_file): + self.stdout.write(self.style.ERROR( + 'Cannot find {file}. 
' + 'Please make sure the filename is correct.'.format( + file=xls_file + ) + )) + return False + import_from_xls(xls_file) + self.stdout.write(self.style.SUCCESS( + 'Import complete' + )) diff --git a/django/api/migrations/0015_arcprojecttracking.py b/django/api/migrations/0015_arcprojecttracking.py new file mode 100644 index 00000000..490e02f5 --- /dev/null +++ b/django/api/migrations/0015_arcprojecttracking.py @@ -0,0 +1,41 @@ +# Generated by Django 3.1.6 on 2022-01-07 23:49 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0014_datafleets'), + ] + + operations = [ + migrations.CreateModel( + name='ARCProjectTracking', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), + ('create_user', models.CharField(default='SYSTEM', max_length=130)), + ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), + ('update_user', models.CharField(max_length=130, null=True)), + ('funding_call', models.CharField(blank=True, max_length=50, null=True)), + ('proponent', models.CharField(blank=True, max_length=500, null=True)), + ('reference_number', models.CharField(blank=True, max_length=50, null=True)), + ('project_title', models.CharField(blank=True, max_length=500, null=True)), + ('primary_location', models.CharField(blank=True, max_length=250, null=True)), + ('status', models.CharField(blank=True, max_length=250, null=True)), + ('arc_funding', models.IntegerField(blank=True, null=True)), + ('funds_issued', models.IntegerField(blank=True, null=True)), + ('start_date', models.CharField(blank=True, max_length=250, null=True)), + ('completion_date', models.CharField(blank=True, max_length=250, null=True)), + ('total_project_value', models.IntegerField(blank=True, null=True)), + ('zev_sub_sector', models.CharField(blank=True, max_length=250, null=True)), + ('on_road_off_road', models.CharField(blank=True, max_length=250, null=True)), + ('fuel_type', models.CharField(blank=True, max_length=250, null=True)), + ('publicly_announced', models.BooleanField(default=False)), + ], + options={ + 'db_table': 'arc_project_tracking', + }, + ), + ] diff --git a/django/api/migrations/0016_scrapit.py b/django/api/migrations/0016_scrapit.py new file mode 100644 index 00000000..2cfaa3fc --- /dev/null +++ b/django/api/migrations/0016_scrapit.py @@ -0,0 +1,37 @@ +# Generated by Django 3.1.6 on 2022-01-11 19:58 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0015_arcprojecttracking'), + ] + + operations = [ + migrations.CreateModel( + name='ScrapIt', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), + ('create_user', models.CharField(default='SYSTEM', max_length=130)), + ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), + ('update_user', models.CharField(max_length=130, null=True)), + ('approval_number', models.IntegerField(blank=True, null=True)), + ('application_received_date', models.CharField(blank=True, max_length=250, null=True)), + ('completion_date', models.CharField(blank=True, max_length=250, null=True)), + ('postal_code', models.CharField(blank=True, max_length=250, null=True)), + ('vin', models.CharField(blank=True, max_length=250, null=True)), + ('application_city_fuel', 
models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)), + ('incentive_type', models.CharField(blank=True, max_length=250, null=True)), + ('incentive_cost', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)), + ('cheque_number', models.CharField(blank=True, max_length=250, null=True)), + ('budget_code', models.CharField(blank=True, max_length=250, null=True)), + ('scrap_date', models.CharField(blank=True, max_length=250, null=True)), + ], + options={ + 'db_table': 'scrap_it', + }, + ), + ] diff --git a/django/api/models/__init__.py b/django/api/models/__init__.py index de2f0820..01276ddc 100644 --- a/django/api/models/__init__.py +++ b/django/api/models/__init__.py @@ -16,4 +16,5 @@ from . import hydrogen_fueling from . import hydrogen_fleets from . import data_fleets - +from . import arc_project_tracking +from . import scrap_it diff --git a/django/api/models/arc_project_tracking.py b/django/api/models/arc_project_tracking.py index ef547e5d..5047f1e7 100644 --- a/django/api/models/arc_project_tracking.py +++ b/django/api/models/arc_project_tracking.py @@ -13,7 +13,7 @@ class ARCProjectTracking(Auditable): proponent = models.CharField( blank=True, - null=False, + null=True, max_length=500, unique=False ) diff --git a/django/api/models/scrap_it.py b/django/api/models/scrap_it.py new file mode 100644 index 00000000..409f3753 --- /dev/null +++ b/django/api/models/scrap_it.py @@ -0,0 +1,85 @@ +from auditable.models import Auditable +from django.db import models + + +class ScrapIt(Auditable): + + approval_number = models.IntegerField( + blank=True, + null=True + ) + + application_received_date = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + completion_date = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + postal_code = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + vin = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + application_city_fuel = models.DecimalField( + blank=True, + null=True, + max_digits=10, + decimal_places=2, + unique=False + ) + + incentive_type = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + incentive_cost = models.DecimalField( + blank=True, + null=True, + max_digits=10, + decimal_places=2, + unique=False + ) + + cheque_number = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + budget_code = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + scrap_date = models.CharField( + blank=True, + null=True, + max_length=250, + unique=False + ) + + class Meta: + db_table = "scrap_it" \ No newline at end of file diff --git a/django/api/services/arc_project_tracking.py b/django/api/services/arc_project_tracking.py new file mode 100644 index 00000000..900af593 --- /dev/null +++ b/django/api/services/arc_project_tracking.py @@ -0,0 +1,74 @@ +import pandas as pd +from api.models.arc_project_tracking import ARCProjectTracking + + +def trim_all_columns(df): + """ + Trim whitespace from ends of each value across all series in dataframe + """ + trim_strings = lambda x: x.strip() if isinstance(x, str) else x + return df.applymap(trim_strings) + + +def import_from_xls(excel_file): + df = pd.read_excel(excel_file, 'Project_Tracking') + + df.drop(df.columns.difference([ + "Funding Call", + "Proponent", + "Ref #", + "Project Title", + "Primary Location", + "Status", + "ARC Funding", + "Funds 
Issued", + "Start Date", + "Completion Date", + "Total Project Value", + "ZEV Sub-Sector", + "On-Road/Off-Road", + "Fuel Type", + "Publicly Announced" + ]), 1, inplace=True) + + df['Publicly Announced'].replace( + to_replace=['No', 'N'], + value=False, + inplace=True + ) + + df['Publicly Announced'].replace( + to_replace=['Yes', 'Y'], + value=False, + inplace=True + ) + + df = trim_all_columns(df) + df = df.applymap(lambda s: s.upper() if type(s) == str else s) + + df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) + + for _, row in df.iterrows(): + if row["Publicly Announced"] == '': continue # Skip rows without this field + try: + ARCProjectTracking.objects.create( + funding_call=row["Funding Call"], + proponent=row["Proponent"], + reference_number=row["Ref #"], + project_title=row["Project Title"], + primary_location=row["Primary Location"], + status=row["Status"], + arc_funding=row["ARC Funding"], + funds_issued=row["Funds Issued"], + start_date=row["Start Date"], + completion_date=row["Completion Date"], + total_project_value=row["Total Project Value"], + zev_sub_sector=row["ZEV Sub-Sector"], + on_road_off_road=row["On-Road/Off-Road"], + fuel_type=row["Fuel Type"], + publicly_announced=row["Publicly Announced"] + ) + except Exception as error: + print(error) + print(row) + return True diff --git a/django/api/services/scrap_it.py b/django/api/services/scrap_it.py new file mode 100644 index 00000000..6c8f4da0 --- /dev/null +++ b/django/api/services/scrap_it.py @@ -0,0 +1,40 @@ +import pandas as pd +from api.models.scrap_it import ScrapIt + + +def trim_all_columns(df): + """ + Trim whitespace from ends of each value across all series in dataframe + """ + trim_strings = lambda x: x.strip() if isinstance(x, str) else x + return df.applymap(trim_strings) + + +def import_from_xls(excel_file): + df = pd.read_excel(excel_file, 'TOP OTHER TRANSACTIONS', header=5) + + df = trim_all_columns(df) + df = df.applymap(lambda s: s.upper() if type(s) == str else s) + + df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) + + for _, row in df.iterrows(): + if row["VIN"] == '': continue # Skip rows without this field + try: + ScrapIt.objects.create( + approval_number=row["Approval Num"], + application_received_date=row["App Recv'd Date"], + completion_date=row["Completion Date"], + postal_code=row["Postal Code"], + vin=row["VIN"], + application_city_fuel=row["App City Fuel"], + incentive_type=row["Incentive Type"], + incentive_cost=row["Incentive Cost"], + cheque_number=row["Cheque #"], + budget_code=row["Budget Code"], + scrap_date=row["Scrap Date"] + ) + except Exception as error: + print(error) + print(row) + return True diff --git a/django/api/viewsets/upload.py b/django/api/viewsets/upload.py index 529011c8..027657ac 100644 --- a/django/api/viewsets/upload.py +++ b/django/api/viewsets/upload.py @@ -14,12 +14,24 @@ from api.models.speciality_use_vehicle_incentives import \ SpecialityUseVehicleIncentives from api.models.hydrogen_fueling import HydrogrenFueling +from api.models.scrap_it import ScrapIt +from api.models.arc_project_tracking import ARCProjectTracking +from api.models.data_fleets import DataFleets +from api.models.hydrogen_fleets import HydrogenFleets from api.serializers.datasets import DatasetsSerializer from api.services.ldv_rebates import import_from_xls as import_ldv from api.services.hydrogen_fueling import import_from_xls as \ import_hydrogen_fueling from api.services.charger_rebates import import_from_xls as \ 
import_charger_rebates +from api.services.scrap_it import import_from_xls as \ + import_scrap_it +from api.services.arc_project_tracking import import_from_xls as \ + import_arc_project_tracking +from api.services.data_fleets import import_from_xls as \ + import_data_fleets +from api.services.hydrogen_fleets import import_from_xls as \ + import_hydrogen_fleets from api.services.minio import minio_get_object, minio_remove_object from api.services.public_charging import import_from_xls as \ import_public_charging @@ -63,6 +75,18 @@ def import_data(self, request): if dataset_selected == 'Public Charging': import_func = import_public_charging model = PublicCharging + if dataset_selected == 'Scrap It': + import_func = import_scrap_it + model = ScrapIt + if dataset_selected == 'ARC Project Tracking': + import_func = import_arc_project_tracking + model = ARCProjectTracking + if dataset_selected == 'Data Fleets': + import_func = import_data_fleets + model = DataFleets + if dataset_selected == 'Hydrogen Fleets': + import_func = import_hydrogen_fleets + model = HydrogenFleets if replace_data: model.objects.all().delete() done = import_func(filename) From 1893595d4eb314e80059d7701d7897a66f640358 Mon Sep 17 00:00:00 2001 From: Emily <44536222+emi-hi@users.noreply.github.com> Date: Wed, 12 Jan 2022 15:55:53 -0800 Subject: [PATCH 013/152] Cthub 128 - whitelisted users (#85) * -adds model for whitelisted users * -adds migration for whitelisted users * filters for usernames matching current user and gives 403 if not found * -adds some frontend stuff for upload alert (unfinished) * -fixes issue where post error wasnt getting caught, is now getting caught and updating alert state so error message appears * -removes uneccessary line break * -adds alert if upload succeeds * -creates whitelisted users decorator, calls it for both minio and upload functions -updates frontend to match new response from whitelisted_users * -removes uneeded imports * -fixes migration conflict --- django/api/decorators/whitelisted_users.py | 15 +++++++++++ django/api/keycloak_authentication.py | 2 +- .../api/migrations/0017_whitelistedusers.py | 27 +++++++++++++++++++ django/api/models/__init__.py | 1 + django/api/models/whitelisted_users.py | 13 +++++++++ django/api/viewsets/minio.py | 4 ++- django/api/viewsets/upload.py | 6 ++--- react/src/uploads/UploadContainer.js | 26 ++++++++++++++---- 8 files changed, 84 insertions(+), 10 deletions(-) create mode 100644 django/api/decorators/whitelisted_users.py create mode 100644 django/api/migrations/0017_whitelistedusers.py create mode 100644 django/api/models/whitelisted_users.py diff --git a/django/api/decorators/whitelisted_users.py b/django/api/decorators/whitelisted_users.py new file mode 100644 index 00000000..35efa337 --- /dev/null +++ b/django/api/decorators/whitelisted_users.py @@ -0,0 +1,15 @@ +from rest_framework import exceptions +from api.models.whitelisted_users import WhitelistedUsers + +def check_whitelist(): + def wrapper(func): + def wrapped(request, *args, **kwargs): + user = request.user + whitelisted_users = WhitelistedUsers.objects.filter(user=user) + if not whitelisted_users: + raise exceptions.PermissionDenied( + 'You do not have permission to upload data.' 
+ ) + return func(request, *args, **kwargs) + return wrapped + return wrapper diff --git a/django/api/keycloak_authentication.py b/django/api/keycloak_authentication.py index 9f4c8186..77779d7e 100644 --- a/django/api/keycloak_authentication.py +++ b/django/api/keycloak_authentication.py @@ -55,9 +55,9 @@ def authenticate(self, request): raise exceptions.AuthenticationFailed( 'Invalid Token' ) + return user_info.get('preferred_username'), None # user = None - # if 'user_id' not in user_info: # # try email # if 'email' in user_info: diff --git a/django/api/migrations/0017_whitelistedusers.py b/django/api/migrations/0017_whitelistedusers.py new file mode 100644 index 00000000..e58314d5 --- /dev/null +++ b/django/api/migrations/0017_whitelistedusers.py @@ -0,0 +1,27 @@ +# Generated by Django 3.1.6 on 2022-01-12 23:35 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0016_scrapit'), + ] + + operations = [ + migrations.CreateModel( + name='WhitelistedUsers', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), + ('create_user', models.CharField(default='SYSTEM', max_length=130)), + ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), + ('update_user', models.CharField(max_length=130, null=True)), + ('user', models.CharField(max_length=100, unique=True)), + ], + options={ + 'db_table': 'whitelisted_users', + }, + ), + ] diff --git a/django/api/models/__init__.py b/django/api/models/__init__.py index 01276ddc..d8c3a3e8 100644 --- a/django/api/models/__init__.py +++ b/django/api/models/__init__.py @@ -16,5 +16,6 @@ from . import hydrogen_fueling from . import hydrogen_fleets from . import data_fleets +from . import whitelisted_users from . import arc_project_tracking from . 
import scrap_it diff --git a/django/api/models/whitelisted_users.py b/django/api/models/whitelisted_users.py new file mode 100644 index 00000000..eaee419c --- /dev/null +++ b/django/api/models/whitelisted_users.py @@ -0,0 +1,13 @@ +from django.db import models +from auditable.models import Auditable + +class WhitelistedUsers(Auditable): + user = models.CharField( + blank=False, + null=False, + unique=True, + max_length=100, + ) + + class Meta: + db_table = 'whitelisted_users' diff --git a/django/api/viewsets/minio.py b/django/api/viewsets/minio.py index b3532a53..a6cfca1c 100644 --- a/django/api/viewsets/minio.py +++ b/django/api/viewsets/minio.py @@ -4,7 +4,8 @@ from rest_framework.permissions import AllowAny from rest_framework.response import Response from rest_framework.viewsets import GenericViewSet - +from django.utils.decorators import method_decorator +from api.decorators.whitelisted_users import check_whitelist from api.services.minio import minio_put_object @@ -13,6 +14,7 @@ class MinioViewSet(GenericViewSet): http_method_names = ['get'] @action(detail=False, methods=['get']) + @method_decorator(check_whitelist()) def put(self, request): object_name = uuid.uuid4().hex url = minio_put_object(object_name) diff --git a/django/api/viewsets/upload.py b/django/api/viewsets/upload.py index 027657ac..f4fb2693 100644 --- a/django/api/viewsets/upload.py +++ b/django/api/viewsets/upload.py @@ -1,12 +1,12 @@ import urllib.request import os - from rest_framework import status from rest_framework.decorators import action from rest_framework.permissions import AllowAny from rest_framework.response import Response from rest_framework.viewsets import GenericViewSet - +from django.utils.decorators import method_decorator +from api.decorators.whitelisted_users import check_whitelist from api.models.datasets import Datasets from api.models.ldv_rebates import LdvRebates from api.models.public_charging import PublicCharging @@ -49,6 +49,7 @@ def datasets_list(self, request): return Response(serializer.data) @action(detail=False, methods=['post']) + @method_decorator(check_whitelist()) def import_data(self, request): filename = request.data.get('filename') dataset_selected = request.data.get('datasetSelected') @@ -98,5 +99,4 @@ def import_data(self, request): print('!!!!! 
error !!!!!!') print(error) return Response(status=400) - return Response('success!', status=status.HTTP_201_CREATED) diff --git a/react/src/uploads/UploadContainer.js b/react/src/uploads/UploadContainer.js index c67ea426..51bff237 100644 --- a/react/src/uploads/UploadContainer.js +++ b/react/src/uploads/UploadContainer.js @@ -1,6 +1,7 @@ import { withRouter } from 'react-router-dom'; import axios from 'axios'; import CircularProgress from '@mui/material/CircularProgress'; +import Alert from '@mui/material/Alert'; import React, { useState, useEffect } from 'react'; import ROUTES_UPLOAD from './routes'; import UploadPage from './components/UploadPage'; @@ -12,6 +13,9 @@ const UploadContainer = () => { const [loading, setLoading] = useState(false); const [datasetSelected, setDatasetSelected] = useState(''); // string identifying which dataset is being uploaded const [replaceData, setReplaceData] = useState('false'); // if true, we will replace all + const [alertContent, setAlertContent] = useState(); + const [alert, setAlert] = useState(false); + const [alertSeverity, setAlertSeverity] = useState(''); // existing data with what is being uploaded const [open, setOpen] = useState(false); const dialogue = 'Selecting replace will delete all previously uploaded records for this dataset'; @@ -33,6 +37,13 @@ const UploadContainer = () => { }); }; + const showError = (error) => { + const { response: errorResponse } = error; + setAlertContent(errorResponse.data.detail); + setAlertSeverity('error'); + setAlert(true); + }; + const doUpload = () => uploadFiles.forEach((file) => { axios.get(ROUTES_UPLOAD.MINIO_URL).then((response) => { const { url: uploadUrl, minio_object_name: filename } = response.data; @@ -49,16 +60,19 @@ const UploadContainer = () => { filename, datasetSelected, replace, + }).then(() => { + setAlertContent('Data has been successfully uploaded.'); + setAlertSeverity('success'); + setAlert(true); + }).catch((error) => { + showError(error); }); - }).catch((error) => { - console.error(error); - const { response: errorResponse } = error; - console.log(errorResponse.data); }).finally(() => { setUploadFiles([]); }); }).catch((error) => { - console.error(error); + const { response: errorResponse } = error; + showError(error); }); }); @@ -77,6 +91,8 @@ const UploadContainer = () => { return (
+ {alert && alertContent && alertSeverity + && {alertContent}} {open && ( Date: Wed, 19 Jan 2022 13:45:32 -0800 Subject: [PATCH 014/152] Made secret key as an environment variable (#86) --- django/api/settings.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/django/api/settings.py b/django/api/settings.py index 2ab6c0c7..352956df 100644 --- a/django/api/settings.py +++ b/django/api/settings.py @@ -21,7 +21,11 @@ # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! -SECRET_KEY = '#8+m(ba_(ra1=lo+-7jyp#x49l27guk*i4)w@xp7j9b9umkwh^' +SECRET_KEY = os.getenv( + 'DJANGO_SECRET_KEY', + '#8+m(ba_(ra1=lo+-7jyp#x49l27guk*i4)w@xp7j9b9umkwh^' +) + # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True From f59d78533c6dc83e287f879434ddf65331494803 Mon Sep 17 00:00:00 2001 From: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com> Date: Thu, 18 Aug 2022 15:35:04 -0700 Subject: [PATCH 015/152] Rebuild metabase on prod 0.2.0 (#88) * update for metabase * update metabase build contact * increate build resource * redeploy on prod --- .../templates/metabase-postgresql/Dockerfile | 2 +- .../metabase-postgresql/metabase-bc.yaml | 14 +- .../metabase-dc-redeploy-prod.yaml | 132 ++++++++++++++++++ .../metabase-postgresql/metabase-dc.yaml | 2 +- 4 files changed, 144 insertions(+), 6 deletions(-) create mode 100644 openshift/templates/metabase-postgresql/metabase-dc-redeploy-prod.yaml diff --git a/openshift/templates/metabase-postgresql/Dockerfile b/openshift/templates/metabase-postgresql/Dockerfile index bef189c5..f903836f 100644 --- a/openshift/templates/metabase-postgresql/Dockerfile +++ b/openshift/templates/metabase-postgresql/Dockerfile @@ -1,2 +1,2 @@ -FROM metabase/metabase:v0.41.1 +FROM metabase/metabase:v0.44.0 EXPOSE 3000 \ No newline at end of file diff --git a/openshift/templates/metabase-postgresql/metabase-bc.yaml b/openshift/templates/metabase-postgresql/metabase-bc.yaml index 7dfdc696..540ae7dd 100644 --- a/openshift/templates/metabase-postgresql/metabase-bc.yaml +++ b/openshift/templates/metabase-postgresql/metabase-bc.yaml @@ -25,15 +25,21 @@ objects: output: to: kind: ImageStreamTag - name: metabase:v0.41.5 + name: metabase:v0.44.0 namespace: 30b186-tools postCommit: {} - resources: {} + resources: + limits: + cpu: '2' + memory: 2Gi + requests: + cpu: 500m + memory: 200Mi runPolicy: Serial source: - contextDir: openshift/templates/metabase + contextDir: openshift/templates/metabase-postgresql git: - ref: release-0.2.0 + ref: rebuid-metabase-on-prod-0.2.0 uri: https://github.com/bcgov/cthub.git type: Git strategy: diff --git a/openshift/templates/metabase-postgresql/metabase-dc-redeploy-prod.yaml b/openshift/templates/metabase-postgresql/metabase-dc-redeploy-prod.yaml new file mode 100644 index 00000000..c9e5bbb5 --- /dev/null +++ b/openshift/templates/metabase-postgresql/metabase-dc-redeploy-prod.yaml @@ -0,0 +1,132 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + creationTimestamp: null + name: metabase-dc +parameters: + - name: CPU_REQUEST + value: 130m + displayName: Requested CPU + description: Requested CPU + required: true + - name: CPU_LIMIT + value: 260m + displayName: CPU upper limit + description: CPU upper limit + required: true + - name: MEMORY_REQUEST + value: 820Mi + displayName: Requested memory + description: Requested memory + required: true + - name: MEMORY_LIMIT + value: 1640Mi + displayName: Memory upper limit + 
description: Memory upper limit + required: true + - name: REPLICAS + description: The number of replicas to use + displayName: REPLICAS + value: "1" +objects: +- apiVersion: apps.openshift.io/v1 + kind: DeploymentConfig + metadata: + annotations: + openshift.io/generated-by: OpenShiftWebConsole + creationTimestamp: null + labels: + app: metabase + name: metabase + spec: + replicas: ${{REPLICAS}} + revisionHistoryLimit: 10 + selector: + app: metabase + deploymentconfig: metabase + strategy: + activeDeadlineSeconds: 600 + recreateParams: + timeoutSeconds: 300 + type: Recreate + template: + metadata: + annotations: + openshift.io/generated-by: OpenShiftWebConsole + creationTimestamp: null + labels: + app: metabase + deploymentconfig: metabase + spec: + containers: + - name: metabase + image: + imagePullPolicy: Always + env: + - name: MB_DB_TYPE + value: postgres + - name: MB_DB_DBNAME + valueFrom: + secretKeyRef: + key: metabase-db-name + name: patroni-creds-prod + - name: MB_DB_PORT + value: '5432' + - name: MB_DB_USER + valueFrom: + secretKeyRef: + key: metabase-db-username + name: patroni-creds-prod + - name: MB_DB_PASS + valueFrom: + secretKeyRef: + key: metabase-db-password + name: patroni-creds-prod + - name: MB_DB_HOST + value: patroni-master-prod-metabase + resources: + limits: + cpu: ${CPU_LIMIT} + memory: ${MEMORY_LIMIT} + requests: + cpu: ${CPU_REQUEST} + memory: ${MEMORY_REQUEST} + readinessProbe: + httpGet: + path: /api/health + port: 3000 + scheme: HTTP + initialDelaySeconds: 180 + timeoutSeconds: 5 + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 10 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: metabase + serviceAccountName: metabase + terminationGracePeriodSeconds: 30 + test: false + triggers: + - imageChangeParams: + automatic: true + containerNames: + - metabase + from: + kind: ImageStreamTag + name: metabase:v0.44.0 + namespace: 30b186-tools + lastTriggeredImage: + type: ImageChange + - type: ConfigChange + status: + availableReplicas: 0 + latestVersion: 0 + observedGeneration: 0 + replicas: 0 + unavailableReplicas: 0 + updatedReplicas: 0 diff --git a/openshift/templates/metabase-postgresql/metabase-dc.yaml b/openshift/templates/metabase-postgresql/metabase-dc.yaml index 6e287b0c..9c101cd1 100644 --- a/openshift/templates/metabase-postgresql/metabase-dc.yaml +++ b/openshift/templates/metabase-postgresql/metabase-dc.yaml @@ -195,7 +195,7 @@ objects: - metabase from: kind: ImageStreamTag - name: metabase:v0.41.1 + name: metabase:v0.44.0 namespace: 30b186-tools lastTriggeredImage: type: ImageChange From 7c614b17063e35af6895fde4ec4b9d10261c27c4 Mon Sep 17 00:00:00 2001 From: kuanfandevops Date: Tue, 13 Dec 2022 16:49:23 -0800 Subject: [PATCH 016/152] update for metabase --- openshift/templates/metabase-postgresql/README.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/openshift/templates/metabase-postgresql/README.md b/openshift/templates/metabase-postgresql/README.md index d469dac4..93a1229a 100644 --- a/openshift/templates/metabase-postgresql/README.md +++ b/openshift/templates/metabase-postgresql/README.md @@ -6,18 +6,20 @@ ## Metabase to TFRS and ZEVA database access The network policy allow-patroni-accepts-cthub-metabase-test in both TFRS and ZEVA open the access from the Metabase in CTHUB. 
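For reference, a policy of the shape described above might look roughly like the sketch below; the namespace, selector labels, and port are illustrative assumptions rather than values taken from the TFRS or ZEVA repos:
```
# Hypothetical sketch only - all labels and namespaces below are assumed
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-patroni-accepts-cthub-metabase-test
  namespace: 0ab226-test            # assumed ZEVA test namespace (see curl example below)
spec:
  policyTypes:
    - Ingress
  podSelector:
    matchLabels:
      cluster-name: patroni-test    # assumed label on the patroni pods
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: 30b186-test   # CTHUB test namespace
          podSelector:
            matchLabels:
              app: metabase
      ports:
        - protocol: TCP
          port: 5432
```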
-## Create read only user metabaseuser in both TFRS and ZEVA for Metabase connection from CTHUB +## Create read only user metabaseuser in TFRS, ZEVA and ITVR for Metabase connection from CTHUB ```//login zeva database as postgres user, psql zeva CREATE USER metabaseuser WITH PASSWORD 'xxxxxx'; -GRANT CONNECT ON DATABASE zeva TO metabaseuser; +GRANT CONNECT ON DATABASE [tfrs/zeva/itvr] TO metabaseuser; GRANT USAGE ON SCHEMA public TO metabaseuser; GRANT SELECT ON ALL TABLES IN SCHEMA public TO metabaseuser; ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO metabaseuser; -// verify permissions are granted. select * from information_schema.role_table_grants where grantee='metabaseuser'; +// verify permissions are granted. +// select * from information_schema.role_table_grants where grantee='metabaseuser'; ``` Notes: replace zeva with tfrs when run on the TFRS project Log in to the metabase pod and test the connection to the tfrs and zeva databases +Remember to store the metabaseuser password in a secret +When creating a database connection in the Metabase console, use the patroni master service, otherwise the tables will not be shown ``` -curl patroni-master-[env].e52f12-[env].svc.cluster.local:5432 -curl patroni-master-[env].0ab226-[env].svc.cluster.local:5432 +curl [patroni master service name].e52f12-[env].svc.cluster.local:5432 ``` \ No newline at end of file From a7ccaf8e7aad74cecb3ed1f08a94831ee172b3f8 Mon Sep 17 00:00:00 2001 From: kcabhar <117694594+kcabhar@users.noreply.github.com> Date: Wed, 4 Jan 2023 13:57:00 -0800 Subject: [PATCH 017/152] Update issue templates (#89) --- .github/ISSUE_TEMPLATE/bug.md | 28 ++++++++++++++++++++++++++++ .github/ISSUE_TEMPLATE/spike.md | 26 ++++++++++++++++++++++++++ .github/ISSUE_TEMPLATE/task.md | 23 +++++++++++++++++++++++ .github/ISSUE_TEMPLATE/user-story.md | 28 ++++++++++++++++++++++++++++ 4 files changed, 105 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug.md create mode 100644 .github/ISSUE_TEMPLATE/spike.md create mode 100644 .github/ISSUE_TEMPLATE/task.md create mode 100644 .github/ISSUE_TEMPLATE/user-story.md diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md new file mode 100644 index 00000000..afa680ef --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -0,0 +1,28 @@ +--- +name: Bug +about: An undesirable behaviour that needs correction +title: Bug +labels: '' +assignees: '' + +--- + +**Describe the Bug** +A clear and concise description of what the bug is. + +**Expected Behaviour** +A clear and concise description of what you expected to happen. + +**Actual Behaviour** +A clear and concise description of what actually happens. + +**Implications** +A clear and concise description of any implications. + +**Steps To Reproduce** +Steps to reproduce the behaviour: +User/Role: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error diff --git a/.github/ISSUE_TEMPLATE/spike.md b/.github/ISSUE_TEMPLATE/spike.md new file mode 100644 index 00000000..f5917b39 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/spike.md @@ -0,0 +1,26 @@ +--- +name: Spike +about: Research options prior to development work +title: Spike +labels: '' +assignees: '' + +--- + +**Problem Description** +**In order to** (achieve some goal), (a system or persona) **needs to** (some action). + +**Solution Needs** +- Enter the non-negotiables of the solution (what are the needs vs. what are the wants) + +**Timebox** +- How much effort are we committing to this research? 
+ +**Outcome** +Details describing the outcome of the research +- Was it successful? What direction should the work go? +- Was it unsuccessful? Discuss next steps with team + +**Additional Context** +- enter text here +- enter text here diff --git a/.github/ISSUE_TEMPLATE/task.md b/.github/ISSUE_TEMPLATE/task.md new file mode 100644 index 00000000..e0f7fcd1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/task.md @@ -0,0 +1,23 @@ +--- +name: Task +about: Any work that does not directly impact the user +title: Task +labels: '' +assignees: '' + +--- + +**Describe the task** +A clear and concise description of what the task is. + +**Purpose** +The reason why this task is needed and/or what value it adds. + +**Acceptance Criteria** +- [ ] first +- [ ] second +- [ ] third + +**Additional context** +- Add any other context about the task here. +- Or here diff --git a/.github/ISSUE_TEMPLATE/user-story.md b/.github/ISSUE_TEMPLATE/user-story.md new file mode 100644 index 00000000..fa1ceaf5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/user-story.md @@ -0,0 +1,28 @@ +--- +name: User Story +about: This template is to be used when describing a feature from the user's perspective +title: User Story +labels: '' +assignees: '' + +--- + +**Title:** + +**Description:** + +**Wireframe:** + +**Purpose and benefit to user:** + +**Acceptance Criteria:** + +[] Given I am a , When I am , then +[] Given I am a , When I am , then + +**Development Checklist:** +[] A +[] B +[] C + +**Notes:** From 11cabc71c302c0b292e3beaadb4ddde99a163d46 Mon Sep 17 00:00:00 2001 From: kcabhar <117694594+kcabhar@users.noreply.github.com> Date: Wed, 18 Jan 2023 11:13:57 -0800 Subject: [PATCH 018/152] Update issue templates (#90) Co-authored-by: Alex Zorkin <47334977+AlexZorkin@users.noreply.github.com> --- .github/ISSUE_TEMPLATE/user-story.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/user-story.md b/.github/ISSUE_TEMPLATE/user-story.md index fa1ceaf5..98ef72cc 100644 --- a/.github/ISSUE_TEMPLATE/user-story.md +++ b/.github/ISSUE_TEMPLATE/user-story.md @@ -17,12 +17,12 @@ assignees: '' **Acceptance Criteria:** -[] Given I am a , When I am , then -[] Given I am a , When I am , then +- [ ] Given I am a , When I am , then +- [ ] Given I am a , When I am , then **Development Checklist:** -[] A -[] B -[] C +- [ ] A +- [ ] B +- [ ] C **Notes:** From 7c18b424109a2f75410a74b567ce6eeb8e5317d9 Mon Sep 17 00:00:00 2001 From: kuanfandevops Date: Fri, 20 Jan 2023 09:10:14 -0800 Subject: [PATCH 019/152] update logoutHostName --- .pipeline/lib/config.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pipeline/lib/config.js b/.pipeline/lib/config.js index 97f84876..21b48e5d 100644 --- a/.pipeline/lib/config.js +++ b/.pipeline/lib/config.js @@ -37,7 +37,7 @@ const phases = { dev: {namespace:'30b186-dev', transient:true, name: `${name}`, ssoSuffix:'-dev', ssoName:'dev.oidc.gov.bc.ca', phase: 'dev' , changeId:`${changeId}`, suffix: `-dev-${changeId}`, instance: `${name}-dev-${changeId}` , version:`${version}-${changeId}`, tag:`dev-${version}-${changeId}`, - host: `cthub-dev-${changeId}.${ocpName}.gov.bc.ca`, djangoDebug: 'True', logoutHostName: 'logontest.gov.bc.ca', + host: `cthub-dev-${changeId}.${ocpName}.gov.bc.ca`, djangoDebug: 'True', logoutHostName: 'logontest7.gov.bc.ca', metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500Mi', metabaseMemoryLimit: '2Gi', metabaseReplicas: 1, frontendCpuRequest: '400m', frontendCpuLimit: '800m', 
frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendHost: `cthub-backend-dev-${changeId}.${ocpName}.gov.bc.ca`, backendReplicas: 1, @@ -49,7 +49,7 @@ const phases = { test: {namespace:'30b186-test', name: `${name}`, ssoSuffix:'-test', ssoName:'test.oidc.gov.bc.ca', phase: 'test' , changeId:`${changeId}`, suffix: `-test`, instance: `${name}-test`, version:`${version}`, tag:`test-${version}`, - host: `cthub-test.${ocpName}.gov.bc.ca`, djangoDebug: 'False', logoutHostName: 'logontest.gov.bc.ca', + host: `cthub-test.${ocpName}.gov.bc.ca`, djangoDebug: 'False', logoutHostName: 'logontest7.gov.bc.ca', metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500Mi', metabaseMemoryLimit: '2Gi', metabaseReplicas: 1, frontendCpuRequest: '400m', frontendCpuLimit: '800m', frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, frontendMinReplicas: 1, frontendMaxReplicas: 3, backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendReplicas: 1, backendMinReplicas: 1, backendMaxReplicas: 3, backendHost: `cthub-backend-test.${ocpName}.gov.bc.ca`, From d04936b625bdc8dc249c4a0939f83a3e425c6672 Mon Sep 17 00:00:00 2001 From: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com> Date: Mon, 23 Jan 2023 15:23:04 -0800 Subject: [PATCH 020/152] release based pipeline (#91) --- .github/workflows/build-release.yaml | 150 +++++++++++ .pipeline/lib/config.js | 8 +- charts/cthub-spilo/.helmignore | 23 ++ charts/cthub-spilo/Chart.lock | 6 + charts/cthub-spilo/Chart.yaml | 31 +++ charts/cthub-spilo/Readme.md | 82 ++++++ charts/cthub-spilo/charts/spilo-0.2.0.tgz | Bin 0 -> 381815 bytes charts/cthub-spilo/templates/_helpers.tpl | 62 +++++ charts/cthub-spilo/values-dev.yaml | 49 ++++ charts/cthub-spilo/values-prod.yaml | 50 ++++ charts/cthub-spilo/values-test.yaml | 50 ++++ charts/spilo/Chart.yaml | 9 + charts/spilo/docs/restore.md | 87 ++++++ charts/spilo/docs/s3.png | Bin 0 -> 377834 bytes charts/spilo/templates/_helpers.tpl | 63 +++++ charts/spilo/templates/archive-pvc.yaml | 14 + charts/spilo/templates/networkpolicy.yaml | 14 + .../spilo/templates/poddisruptionbudget.yaml | 12 + charts/spilo/templates/role.yaml | 44 +++ charts/spilo/templates/rolebinding.yaml | 14 + charts/spilo/templates/secret.yaml | 30 +++ charts/spilo/templates/serviceaccount.yaml | 7 + charts/spilo/templates/services.yaml | 29 ++ charts/spilo/templates/statefulset.yaml | 251 ++++++++++++++++++ charts/spilo/values.yaml | 112 ++++++++ .../templates/metabase-postgresql/README.md | 6 +- .../metabase-dc-spilo.yaml | 203 ++++++++++++++ .../patroni-2.1.1/templates/deploy.yaml | 10 - .../templates/secret-template.yaml | 2 - openshift/templates/spilo/s3-secret.yaml | 22 ++ .../templates/spilo/tfrs-patroni-admin.yaml | 30 +++ .../templates/spilo/tfrs-patroni-app.yaml | 26 ++ 32 files changed, 1479 insertions(+), 17 deletions(-) create mode 100644 .github/workflows/build-release.yaml create mode 100644 charts/cthub-spilo/.helmignore create mode 100644 charts/cthub-spilo/Chart.lock create mode 100644 charts/cthub-spilo/Chart.yaml create mode 100644 charts/cthub-spilo/Readme.md create mode 100644 charts/cthub-spilo/charts/spilo-0.2.0.tgz create mode 100644 charts/cthub-spilo/templates/_helpers.tpl create mode 100644 
charts/cthub-spilo/values-dev.yaml
 create mode 100644 charts/cthub-spilo/values-prod.yaml
 create mode 100644 charts/cthub-spilo/values-test.yaml
 create mode 100644 charts/spilo/Chart.yaml
 create mode 100644 charts/spilo/docs/restore.md
 create mode 100644 charts/spilo/docs/s3.png
 create mode 100644 charts/spilo/templates/_helpers.tpl
 create mode 100644 charts/spilo/templates/archive-pvc.yaml
 create mode 100644 charts/spilo/templates/networkpolicy.yaml
 create mode 100644 charts/spilo/templates/poddisruptionbudget.yaml
 create mode 100644 charts/spilo/templates/role.yaml
 create mode 100644 charts/spilo/templates/rolebinding.yaml
 create mode 100644 charts/spilo/templates/secret.yaml
 create mode 100644 charts/spilo/templates/serviceaccount.yaml
 create mode 100644 charts/spilo/templates/services.yaml
 create mode 100644 charts/spilo/templates/statefulset.yaml
 create mode 100644 charts/spilo/values.yaml
 create mode 100644 openshift/templates/metabase-postgresql/metabase-dc-spilo.yaml
 create mode 100644 openshift/templates/spilo/s3-secret.yaml
 create mode 100644 openshift/templates/spilo/tfrs-patroni-admin.yaml
 create mode 100644 openshift/templates/spilo/tfrs-patroni-app.yaml

diff --git a/.github/workflows/build-release.yaml b/.github/workflows/build-release.yaml
new file mode 100644
index 00000000..b87084cd
--- /dev/null
+++ b/.github/workflows/build-release.yaml
@@ -0,0 +1,150 @@
+## For each release, the values of the workflow name, branches, PR_NUMBER and RELEASE_NAME need to be adjusted accordingly
+## Also change the .pipeline/lib/config.js version number
+name: CTHUB v0.2.0
+
+on:
+  push:
+    branches: [ release-0.2.0 ]
+  workflow_dispatch:
+  workflow_call:
+
+env:
+  ## The number of the tracking pull request that merges the release branch to main
+  PR_NUMBER: 73
+  RELEASE_NAME: release-0.2.0
+
+jobs:
+
+  ## This is the CI job
+  build:
+
+    name: Build CTHUB on Openshift
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+
+    steps:
+
+      ## it will check out to /home/runner/work/cthub/cthub
+      - name: Check out repository
+        uses: actions/checkout@v3
+
+      # enable this once cthub has updated the python packages
+      #- name: Run django tests
+      #  uses: kuanfandevops/django-test-action@cthub-django-test
+      #  with:
+      #    settings-dir-path: "backend/cthub"
+      #    requirements-file: "backend/requirements.txt"
+      #    managepy-dir: backend
+
+      ## Log in to Openshift with a service account token
+      - name: Log in to Openshift
+        uses: redhat-actions/oc-login@v1.2
+        with:
+          openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }}
+          openshift_token: ${{ secrets.OPENSHIFT_TOKEN }}
+          insecure_skip_tls_verify: true
+          namespace: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools
+
+      ## Run the build on Openshift
+      - name: Run build
+        run: |
+          cd .pipeline
+          npm install
+          npm run build -- --pr=${{ env.PR_NUMBER }} --env=build
+
+  deploy-on-dev:
+
+    name: Deploy CTHUB on Dev Environment
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+    needs: build
+
+    steps:
+
+      - name: Check out repository
+        uses: actions/checkout@v3
+
+      - name: Log in to Openshift
+        uses: redhat-actions/oc-login@v1.2
+        with:
+          openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }}
+          openshift_token: ${{ secrets.OPENSHIFT_TOKEN }}
+          insecure_skip_tls_verify: true
+          namespace: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools
+
+      - name: Run BCDK deployment on CTHUB Dev environment
+        run: |
+          cd .pipeline
+          echo "Deploying CTHUB ${{ env.RELEASE_NAME }} on Dev"
+          npm install
+          npm run deploy -- --pr=${{ env.PR_NUMBER }} --env=dev
+
+  deploy-on-test:
+
+    name: Deploy CTHUB on Test Environment
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+    needs: deploy-on-dev
+
+    steps:
+
+      - name: Check out repository
+        uses: actions/checkout@v3
+
+      - name: Log in to Openshift
+        uses: redhat-actions/oc-login@v1.2
+        with:
+          openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }}
+          openshift_token: ${{ secrets.OPENSHIFT_TOKEN }}
+          insecure_skip_tls_verify: true
+          namespace: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools
+
+      - name: Ask for approval for CTHUB Test deployment
+        uses: trstringer/manual-approval@v1.6.0
+        with:
+          secret: ${{ github.TOKEN }}
+          approvers: AlexZorkin,emi-hi,tim738745,vibhiquartech,kuanfandevops
+          minimum-approvals: 1
+          issue-title: "CTHUB ${{ env.RELEASE_NAME }} Test Deployment"
+
+      - name: Run BCDK deployment on CTHUB Test environment
+        run: |
+          cd .pipeline
+          echo "Deploying CTHUB ${{ env.RELEASE_NAME }} on Test"
+          npm install
+          npm run deploy -- --pr=${{ env.PR_NUMBER }} --env=test
+
+  deploy-on-prod:
+
+    name: Deploy CTHUB on Prod Environment
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+    needs: deploy-on-test
+
+    steps:
+
+      - name: Check out repository
+        uses: actions/checkout@v3
+
+      - name: Log in to Openshift
+        uses: redhat-actions/oc-login@v1.2
+        with:
+          openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }}
+          openshift_token: ${{ secrets.OPENSHIFT_TOKEN }}
+          insecure_skip_tls_verify: true
+          namespace: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools
+
+      - name: Ask for approval for CTHUB Prod deployment
+        uses: trstringer/manual-approval@v1.6.0
+        with:
+          secret: ${{ github.TOKEN }}
+          approvers: AlexZorkin,kuanfandevops
+          minimum-approvals: 2
+          issue-title: "CTHUB ${{ env.RELEASE_NAME }} Prod Deployment"
+
+      - name: Run BCDK deployment on CTHUB Prod environment
+        run: |
+          cd .pipeline
+          echo "Deploying CTHUB ${{ env.RELEASE_NAME }} on Prod"
+          npm install
+          npm run deploy -- --pr=${{ env.PR_NUMBER }} --env=prod
diff --git a/.pipeline/lib/config.js b/.pipeline/lib/config.js
index 21b48e5d..88210cb4 100644
--- a/.pipeline/lib/config.js
+++ b/.pipeline/lib/config.js
@@ -1,7 +1,7 @@
 'use strict';
 const options= require('@bcgov/pipeline-cli').Util.parseArguments()
 const changeId = options.pr //aka pull-request
-const version = '1.0.0'
+const version = '0.2.0'
 const name = 'cthub'
 const ocpName = 'apps.silver.devops'
@@ -35,9 +35,9 @@ const phases = {
     version:`${version}-${changeId}`, tag:`build-${version}-${changeId}`, ocpName: `${ocpName}`},
   dev: {namespace:'30b186-dev', transient:true, name: `${name}`, ssoSuffix:'-dev',
-        ssoName:'dev.oidc.gov.bc.ca', phase: 'dev' , changeId:`${changeId}`, suffix: `-dev-${changeId}`,
-        instance: `${name}-dev-${changeId}` , version:`${version}-${changeId}`, tag:`dev-${version}-${changeId}`,
-        host: `cthub-dev-${changeId}.${ocpName}.gov.bc.ca`, djangoDebug: 'True', logoutHostName: 'logontest7.gov.bc.ca',
+        ssoName:'dev.oidc.gov.bc.ca', phase: 'dev' , changeId:`${changeId}`, suffix: '-dev',
+        instance: `${name}-dev` , version:`${version}`, tag:`dev-${version}`,
+        host: `cthub-dev.${ocpName}.gov.bc.ca`, djangoDebug: 'True', logoutHostName: 'logontest7.gov.bc.ca',
         metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500Mi', metabaseMemoryLimit: '2Gi', metabaseReplicas: 1, frontendCpuRequest: '400m', frontendCpuLimit: '800m',
         frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendHost: `cthub-backend-dev-${changeId}.${ocpName}.gov.bc.ca`, backendReplicas: 1,
diff --git a/charts/cthub-spilo/.helmignore b/charts/cthub-spilo/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/charts/cthub-spilo/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/cthub-spilo/Chart.lock b/charts/cthub-spilo/Chart.lock
new file mode 100644
index 00000000..97c1cd25
--- /dev/null
+++ b/charts/cthub-spilo/Chart.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: spilo
+  repository: file://../spilo
+  version: 0.2.0
+digest: sha256:7511538016e34905d07c80deed4ac95eadf6e208c2f1e7036eebfcef7b296897
+generated: "2023-01-20T11:30:53.758009-08:00"
diff --git a/charts/cthub-spilo/Chart.yaml b/charts/cthub-spilo/Chart.yaml
new file mode 100644
index 00000000..a7329f9a
--- /dev/null
+++ b/charts/cthub-spilo/Chart.yaml
@@ -0,0 +1,31 @@
+apiVersion: v2
+name: cthub-spilo
+description: A Helm chart for setting up spilo for the cthub project on Openshift
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 1.0.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "0.2.0"
+
+#repository: "https://bcgov.github.io/spilo-chart"
+
+dependencies:
+  - name: spilo
+    version: "0.2.0"
+    repository: "file://../spilo"
\ No newline at end of file
diff --git a/charts/cthub-spilo/Readme.md b/charts/cthub-spilo/Readme.md
new file mode 100644
index 00000000..71d31279
--- /dev/null
+++ b/charts/cthub-spilo/Readme.md
@@ -0,0 +1,82 @@
+## Before running Helm
+* Create secret cthub-patroni-admin
+  * Create the secret by using cthub/openshift/templates/spilo/cthub-patroni-admin.yaml; the three passwords are generated randomly
+
+* Create secret cthub-patroni-app
+  * Create the secret by using cthub/openshift-v4/templates/spilo/cthub-patroni-app.yaml; the three password fields must be in sync with the existing secret patroni-dev
+  * It contains: app-db-name, app-db-password, app-db-username, metabaseuser-name, metabaseuser-password
+  * The replication- and superuser- entries are not needed
+  * If this secret already exists, verify the password fields
+
+* Create the Object Storage secret for database continuous backup, cthub-object-storage
+  * Create the secret by using cthub/openshift-v4/templates/object-storage/object-storage-secret.yaml
+  * The secret should already exist; verify it by using CyberDuck
+
+* Create secret cthub-db-backup-s3
+  * It includes AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_ENDPOINT
+  * The values are in sync with secret cthub-object-storage
+
+* Verify values-dev.yaml. Create the bucket on object storage if needed
+
+* Add the new KNPs templates/knp/knp-env-pr-new-cthub-spilo.yaml
+  * oc process -f ./knp-env-pr-new-cthub-spilo.yaml ENVIRONMENT=test | oc apply -f - -n 30b186-dev
+
+## Helm commands
+helm install -n 30b186-dev -f ./values-dev.yaml cthub-spilo .
+helm uninstall -n 30b186-dev cthub-spilo
+
+## Migrate Postgresql 10 on Patroni to 14 on Spilo container
+
+### Bring down the cthub application and route the frontend to maintenance mode
+
+### Run a final backup on the backup container
+
+### Create the cthub database user and database
+* Log in to the cthub-spilo leader pod
+* If the username contains upper case letters, it should be double quoted
+  * create the user for the cthub database; the username should be the same as on v10, otherwise the restore may encounter issues
+    * create user [username] with password '[password]'
+    * The password can be found in secret cthub-patroni-app
+  * create the cthub database
+    * create database cthub owner [username] ENCODING 'UTF8' LC_COLLATE = 'en_US.UTF-8' LC_CTYPE = 'en_US.UTF-8'
+### Reset postgresql logging
+* log in to the cthub-spilo leader pod and run the following in psql to keep only 24 hours of log files, otherwise they take too much space
+  ALTER SYSTEM SET log_filename='postgresql-%H.log';
+  ALTER SYSTEM SET log_connections='off';
+  ALTER SYSTEM SET log_disconnections='off';
+  ALTER SYSTEM SET log_checkpoints='off';
+  select pg_reload_conf();
+### Create the metabase user
+* log in to the cthub-spilo leader pod
+  CREATE USER metabaseuser WITH PASSWORD 'xxxxxx';
+  GRANT CONNECT ON DATABASE cthub TO metabaseuser;
+  GRANT USAGE ON SCHEMA public TO metabaseuser;
+  GRANT SELECT ON ALL TABLES IN SCHEMA public TO metabaseuser;
+  ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO metabaseuser;
+  verify permissions are granted: select * from information_schema.role_table_grants where grantee='metabaseuser';
+
+## Backup the existing v10 database and restore it to the v14 cluster
+* Make sure the application is stopped
+* Log in to the patroni-dev leader pod
+  * make an empty dir /home/postgres/migration and cd into it
+  * backup the cthub database: pg_dump cthub > cthub.sql
+* Restore the cthub database
+  * psql cthub < ./cthub.sql >> ./restore.log 2>&1
+  * verify restore.log when complete
+
+* Point the applications to the v14 cluster; update the environment variables for
+  * backend: DATABASE_SERVICE_NAME, POSTGRESQL_SERVICE_HOST
+  * celery: DATABASE_SERVICE_NAME
+  * scan-handler: DATABASE_SERVICE_NAME
+* Bring down the v10 cluster
+* Bring down the maintenance page
+* Bring up the cthub application
+* Update the patroni backup to only back up minio data
+* Update the metabase connection from CTHUB
+* Update dbServiceName to be cthub-spilo in .pipeline/lib/config.js
+
+## Notes for uninstalling cthub-spilo when needed
+* After the helm uninstall command, remember to remove the following:
+  * The two configmaps: cthub-spilo-config, cthub-spilo-leader
+  * The PVCs storage-volume-cthub-spilo-*
+  * The backup bucket in object storage
diff --git a/charts/cthub-spilo/charts/spilo-0.2.0.tgz b/charts/cthub-spilo/charts/spilo-0.2.0.tgz
new file mode 100644
index 0000000000000000000000000000000000000000..a275b1cc3295832fb8f1b534f2de5249458c3e14
GIT binary patch
literal 381815
[base85-encoded binary payload of spilo-0.2.0.tgz omitted]
zjuz`9bTdo*kMCHRl}#Dy&R=lH2Xm&<2;^igaM@#0Xcj2M7O*E1$O8-GwP}x5oza1} z6rGH0VDZ5w(-KACb0t!C1?dWQXYx`Zi>Ity!vW;#kk@w^&X~Q9APsY)_A3ev%wBA! zK#w#JT^8MV9d#bELrrc2f+Myozade)(c}#n2(E0-U|v`tM|C_G%)TBPD4?H6s5Gyo z94G(G1yu*)`jKQwT<)DSy0vw1ZIb(*w3+i;#7(FmB2AyDHS_5 zfgEja4P;6Y4_X|DX0)=Ynh%p4_;c*8&v;CrMM_%^h}-GPAvT|)3J>>2*013c+)4*3 z7GMp1i%N6J*`xM)qC=H**hCTwDR!@4B8VaNWGW05zAU$!kJ4AzOp`x)QLH5>6cML_ zlnkv9#rd_d8N#iYLr8=U8GW!(--_|#XJ;n?d&wMwBkj5Ls5loAlBd|sIw56(nlVbJ zWfG3fpk7)AcmRV&@kDU-CJ$7vqok0MM_=FP?=3az$xv5olksxoZ*Sqx<^xugTRASp zA#6v#nohmDsDR=o_5B8CGMrZ0G4!A*%&F_Jy?R^HcPEm8r&Zkj>? z)WSPr6qhpRrEJXledQWL;2$a8%7iRV$(3c|HOC;4V%01SAO1MAz?ZR14DfV)x;+; zbks8Y1Ws>W=w@5x8QYNX(Fr#< za36eL2K<60oP2Al+$h8V4=&wQAsIi zFVuOxgkN}yqF#$4IN4Nf^aB)c{Y(r{OZq#Vyi)YgmU_$McmL?A7NA`3=4p*$k&vXS zq93L8mnq1rJ6<4O&+2WULx>_5Ym0~j)sLk;VdBr?RsNE8qPhC#c}ipkP9(Gp^`@WT z#NdiS#!u1??S363Z;c0XqDJ?AIAOG{@h^b>9TGv63LIxoSr@TC65TK9u(?suZow6P zyl&9E%q`FINZNgKjc-(~UGZ!(KtxNP&%XX`i1Wnbgj0U6Ym`bAf<{PufU8go66Eeh zwYI()1FAgeA+DvuNcA;xX>g%UDMfo>^8l00u9VksvyIKU@(Ulb3|`2w19N;|8f-)V z8SsWGZ2ZS{Tjaq#C0yb$^#cXadHn6T!C?WA;NCzv;QjYJi1y_sK(LLKA4f6r(22H6 zf!eb(ujP}^P(El)HYiChkP*)KG9b9)y*Z#?Iq-KW?KRP&yCCOx=ax^sfnOW}8Ub~3 z;2n*Yi$8|cKIe|NNgA91WtTv!xa_()kK{>HjpjVD+P`BQ#|bWF%*jJh7kG^bNn)7* zmB!0wUNKEU=9ZMo0Xc&0y)KI%wlsQ4`wBAsMShCV;!<1cB}JEhaJ62@|&)7)QsJQKpI+IMd)dBOvwdKw`ov_l;s-MUH1y_@y6fAPKluSLaSM27j! z=^xIrq#+cxpFe7do%_Q*;ptk@veG7O31v9|R`?|?ke}2TZ`t;tc{9Ab(&Ufgfu#Sa zYgEWTI>Qo#+t-`VVR|k7j_7E$*iWC*-~r#dI{%2i3q#AObjAfGkYe%>om~%P94TCa zEjz^Cl4`~5+dR|lJxU{F{OR0{DM!KI`2F~6(b!e^psed zQP4{Yv#uO8a2Jmv%6XvvQ>7NH599T1?ty*a;uE(e7@ZaR*Bq?gxIc2MYR6RV% zBRM`2XM7v{;F`~Ft%w^}HSSPT@?y^8gpl)D_nFJ>?Cz$Y3RA_;*Q|e2@g!GtK8=HW z>{_4_=muVMW{4OM9_9F1Z{TEj#s>alV9uwWuA|NqylL*$w!?Mh7Hct{Z={+CT~l$L z%&b~ZW)nWIy85Br3(%j-zUDWxRjb9g!z=F1on=}m3#-^U!OItXt@#N-BMX&a9PbZa zCDSVN!|7e#$F-&iP!uT_d_*sYt)koiAz_Mn^7t%*l{P#o-AQ}WDVjY;#Qf%YN>p3g z>$#fxYQDn)p<~jyCYy)_9hW~p1wL_S*Yl~aJ{z$T_afnzk^!oWR|v5m(9s7cdwuFj zd-u{A)mKm+MR*(Nx5w!sE1t?KMMxLaMxh+2JLIf-o7~n+spKLFse+H37A5{wo+y2T z$bs!^gs_?uQ#+U(R&SJc7fj&kjkOywJ*W|-4*bU8PMFuQ3I$_yvNmVi8ENzsW9cD= zJd#04y9Y0e4z{{B?;;%zcj0sOCyuefl`sk0b#;neCj*u@Mhh|5Gf71{R2C>*R$D!6L@Kgzz?-y)f|ZXO@i_i${;@f!+ktgiEOY6Om{l4`9Ec&^F?H{# zAy&B~kfeCXtJ|~^FB7J&rop&&x)UgSFKVtJ z;ra^&a*;x~c?-=nt7GJk8e1y*rqJVsST(zgPMf+Jd>}+64h`kktFC;}X0somwUoXs@v(nAGuDRwrj?jkREj$S;o#`75 z_KHP(Z#Mkc_OFiGz?Zx-=Xoxj$o?5q@{n#p1yVbEFa6~WF?$&g22R8}z}4m( z+1N(v2l7W5rGqJMLm*eDcVYpx%xzTjUA^el=rAfnVJ-_-?^okJZn;)O_poH-B+7IV zH4XWdBg&HanQaW2OxF885K+%1^KV`bAA>`DZ1?m0wr}L%2fuS8Dhc)u;pUGYlBMNH zlPdEP^CKA_Q~cI&5KSqx9~~QO=zxc;(~v*3UywMD6Ua=9ls3kuUXz8Aq)I=joqlpp zPMTde&1y|p`cj_zh$V3gpm^7lo>pq#zz&Cx-|LORWG3Tyn-`d2#SM zJ1qd0j;wBcE#^tiK;qV1#a{$E`7y2`v0paVB_oL=tHiyF;vQwhmua}u%Dl;C_r+{>PqY}%7-$ieuR zjYZrHHOR*S#o-k4)ss<~DhwNgp1}7pj>I;>YB14w1*?fVxAh}x|38lX_4{^D)1i}3 z{d2sfv;J<=&#^-%DqHRme$eeD(xRwyZWuXm)+%b=3qLA9aHcJdTgCJGweCg3BH5|m z+;p9j9T0{KzztW9+di@Y&ZYQ#Z&oaH7IN;*4ai*A!Q;XXV}py!ke52A@DA zEca)Pnmesf?WD_cA(QmuI>9nZbl z(YUQD_3zq=E>{(qU4ehnELOoD@Zin@DQ32H01{rKxtVahKGma$tp#p= z7y{O0TEP|S_ny_O8`P7AU)v8eXpJZP4lc9c=}sGD{+gu8M2+s{xljkbx?|)gf`aMH zJ>uL{i|}fv-0fgG*In^rV{sCH)R;Dv%*vMqRhkPP$(t_A>nzo}ik*A=d!OZI*?%;f zm<(0r{O+cem>O8>ie4vSaM76)b389i{ z5%Hz7ohvD`OIvZ&h*HzcF&C+f-2>#b!h)EcNXWMp?)_M!cJ>#Z3nI?fC6O0rCwZ4s zvDRNu5-mz9<<1l882+P1nSoiE;#t4)`7}6GD{zbY!xBkD3=u|}GepOTNG@0RB7sblX zdXx?|qJD7ai9Vs$GbPh_`pUV;(?ndp;ych4FBm9}nc;BXP#Dx#fIeZ5DvmxhFpU-g z7buAjl#Oig#t`tbCZt{|qUQ)s)9#;wuQ(Mi*}kbU@R+%77w;~RSv_*q0RiMNc~aH? 
z^@B3U6N_|*5<;e>cKn*Sv#+OvU0Od~Q)leOnC1vEKM?3&!E~!$>1>fm%^py3;-Ydf z@GOinsHuM3&}n(+JJIsyeMV|0>U**RVQbyJ;cTNi-*I8QEeU;~=kT(vH_Iq}na`T6 zH3Wv`=bYxdXdZg!g(N?|VSEf{s=tYkD;YgU>_aZ%>KGA-$;E?J9wFSb($?E0Pn5Lz z;rNC5g5rFU;wpZ^-02-B5h13(ujau=AdFp?Ab4J)i%Yq^r;DATE@+QGm&RhOPABKA zguGE@_2P#-7jJj5bHFyr5iLj#$o+LgpZBp6>LIPqXz&N<<%HvyoOl;#2;R6qdl7Ms zT&WnEG{j70t=U~`4i1_T%_4FBj z1;#i>-XO>9)j!dT?b&u}m9*CvN_}Gqpnny2UEk zfQ`u3t$CCb@P_7H&ixEllgL=h3kDc=F1&*`?C+;T#*wixDPUz*>$oU$X1D=Odt~{jif1c=V9I#xKPy zw~EM2nOUJ&QbCr5ho8|gPrZs?TIex)lDJOV@@2j~iOphSLi7h2(OJ&P#XpC=09^YCqvlK z7k>NioYx$L>spPb>w$lpl|`p!u*08+>YUh(_Q{GK%BCbVeGKeJ)|X+0<%C>?;%*$n zXF^}FR{)itYe6nmLqj3Xf$&KtYoS2kP;+r&O!@;y>Wo%u)T4Bb1%#P(P)Nh|K#{KX znSB2CCZWYQlD&^=*%Lg>tq4)A}E?)(-EEB@b?$K zSQYFm{VWRw(%OEv6;<@mFwtPN^H!*8-@wdfn8DVtCn}EWH(Ze=dY~k$8b#!cmZc|K z^&E5LS(>IYo6J&QxbGcD5zt6ztYnalcL1pZ#{06I(Y0NKcu^cXCS>!o@9~F3HO59_ zU**e!R{Mh0SNh9HWb=fYbsW1QDUyL)$Qp<|8bqjz1D zH$IR|g62t&m1yW;3(d<54%+KwH#@$|<8+`{& z{;d=Dj6+nkCaBG7hA;t45$4;S9yw2S0IdV_*%zLEZJor}@tY^PamY@kd^>|6E>q5Hc;+w;ntdP}geK2es z`tcShyVvhP;IjcJH5p{xrcGRZnbW%^}Dm@yN zjbByvf2@C`aGA_IGCex%12IoCId@o)$W5fM8-7tHz=+OM*#599R+r*RnpaNG+^&D= z=^R>RrJ?Y}{g%9&&YK?{y8n5(uD8ej53D`#19e#8u`V9dv$WJ)k6j6D_j_(YX3UqZfmkhRO`FBRB?l#HAI?tGfD^SCulr^Y zMv=z$JwUFi>F@G_sq7noi98WtfIBfe^TF;QwG!P#Q#0Kzx+Da65A1nD%kHzVvG5Re z5+?*NgBq`IBFcwIeR9|{L$d;WG2CJVoSK2}0;y1=OnA7ieRG%)h_l;6FgpWd65s6! zf@fgXf{-J=K00V19^b_+W)dzxi`|?40^%$vcQD6vOYtqZqPk@KAmy~!DbUdPcf_*O zkOa~-pVWtLb|{0iIprWdVX$9{pvAhqx!at~**1_nLfj=ciyctm^lHb9b3miaK&%z# zaZjt&1uuz^9h_&bDp+ zhC_BDT(r8yU{(UK$9<#C>p7@m3#D1xjCIQss9n#?cShx{r)76h#_(7FCY_F`d>$ej zy%r|8603!N2WNB5>{cN?ocj-EO=0ae`-c5eAv>(<*PVS$J!{JXuue5y@;`)FvwOwO z=-$uhJ-Qr~eL3XJfqIbs>jFHTS#*QmHUQ6Kx}UMt2p4!RqnX5U5GT{rUP-~QCjeR8|i zr^-33Z>XRQJDUuwIB}N-U7hr({+C_mTH7y^ZX<@ut2A5C;ke#j7h07W@LgbS)@!ui zVEUnrAAicFFDFpb|TKwc5zzAGfdAtz~DU^1kmd zx(oX)4|`+$n+PF(aPs_@*>vY2?O}7p40(55)ts}+do_>kMcdsHU4sZ%KHs*QqJuY|_+dAzUh$I}002=yuD?vS@#kPJMC){C_=p<~l_G7?xS_hP=d%#^ z>YPGditNX0s?iuXx`J4c!sxxVhd1hm6SS8nJf)9cD^>V6O{;0agd$`yT`h>j|Nz2+Df zn@vDhRcf19zcD`X?BFT&+kz_$O_@EIkFOso47lDCP|Ooj4WSLeByNz8bCa^24aUh= z7YLeoe&L@mvJNlhVE>*a$q!5qQo+2W{aY&}dA#rtFp#fZLf339Y-_49T00>j^9%@E zVUGhLCU~I5^dQr1neY%LwzIwaI$k7eq|!uYAUQu}4++xCy~W&eTp^ea`7TZIr&-~i zzuz}RWdV*k6DN&+eFr(1y+5x-cq&@OA0*`4rH@op?v{b=N#U_knl^+viL_F76xZgb z#SL$i>6Iy|-v`BNouloMwBR|cN+Y>iCFy{?N8U%j@u&Q^{IVKvnp zlytbjzZMilQJ$SKv9B?346P5%2A*X5OvXb%UAS08MCl=(&B6aTmqNQ`%p$Lr>t_$MbahLpcO50oSk~18PA!NYo$*^gC1cJ4Vr_x0qr0T!^TnM+p<*|&2Z%sVA zHod^*QSXfsG}*_eR7eYiGQ*wqd@c0MW_>y%==xHaZgk}0{@dHLt%fW72QnT=@PlS< zbspN>*G2Pds3N&V&(Hi#bG8>=TZqP;=g@rDT@V5p;f`b=0D zj0>zZ++;{O8)1eHvmLscZbG6wr>E{aXYaOBdhYS6_~3OXwKEm76;hN*@x-EXqJ86J zleH~EA&ZOZs#*A{2J+1DxRPw~06kMcYOrEI69#z2GX5U7mI1bQ8h1GmmtM%0ZM!*# zw#G$IhchURU+AEUHC9RftLIVj@i~*<+E1Rd@EMv7jGm!^8ee&U-D&*-YbQVDQ2Wtm z`Bh9&Mh#4Io~=(TzLnL+aD0ZE?#8}1g@(d!f5LTq`&`j$>53mH1B*%7bLdhN3t^LC z?*jdhdsl1P=%<&leByq$xw!(hnWd~oTlo*$a}-hgx;vrX_PihAf*o&frtS;&Dov61 z_I^BSJpTL1hFGFF@lK>}#`SkQ<6{X6wL7pxP->{Ej(Bra(hI0J6eO!-nku)q`dWdz^_)NHg{YPK`8KuJb;t(1S2 z2(^dv^w*HmY(b>TDkyRc(%+1uhBJmisl=InLnM61m6`eFw8c}KgQ}I@kDxbO7oG{b zpYK1@2Wc7kPzJ`CTYr6xrkB^q01p{Xg0O^U+%R!bmgug9UOf`*Y7#DVt!WfTDRdZ1 zIgnRTo#hRmU4+5&VQha+IS7PNl`Y+?_#!hjnK|;b7W_zQJ9lw*fAK#h$iT7ure|D{ zsOQx+@~_-5sBMjx$13oM<++Pp^GCYL^R7XJv{sQ@7m(@wZLQRRs>wR}Rxg8p4b*pY z)HIqLG;ug9#n4mdV)TA*}fX2KG>AJPu5)JGqv#E`Nx!p}H9q7vBoE6Y|FDxfD9e zlS{TCL)srLOXne-u<~aUVf2#~nt(~zW#_(|UPalMcg2m|J|}(xWM6QXcqXi=WrY52 zhui#Y^sO^X7d=3IoUkkFfJN+mAl0Wesb(2SIn>_`c#nlKM)!T)DVR==UX2iE%6NQW0hgmCDQNbb>2Qf6L7>e}rxD2BIelCCm!!-98<58Lr(`?mBVm-tDAz`x@8N 
zFMd@KHEd~HflK4vDQRYrI}&019G`?Z{ePL6e<)2#9zl*51_7z!RhYO#i}o5;9@^Ywo18`A#~G+Cz$(64uNtYc3(Yl zHa9)&glMib=)uCN!OH5WE*A|Dl5ueuk%7|r_wiqYLV!MJIT8#-isG>-#>@mQ(~tqD zT=Mqg@D-Vo{MK1RRBkSGA%-g1#Fg7aPrgpe6rVeRaJYD`bcrq4I$T_t?c}~;Kz3$M ziulbu~xNd)c zIo#bo@$l2{;96V0s)TjtZll$kTTBB2W;5qr=t#diHa%NC3lBRTyJ)e(43f(o6gTatGwnHbq)S%#s z`nA;F?@TI*SHJ0v0ym{#`Gc9gueUlinkv8Kg$M!$^O47P<07WKKZ=w@*Hw78xtcL` zy5{%K*h5Uu=x?@2ZG8N_O#M%08VI_pMLIr&|EghJVhTGgPw|!0N~pMW1$-AURn=JO zkC;1uz7_!^w}N|O`bPz-kxU?VS+x@ebe?>)cJEX*UiDkO2M+fmOd4vP3OFC6FX^Xz zwI~VJG9&F%9FV4?v|?kfJaOQQAJXp=dagG7@N@3+Y)e^Y|LUnt&`QAe!t7pVe}5{q zs9{fX`v2qUJ)D}}zAsQZYDiE~kQx*Pm8SHXfQZqHfQr&XM?iWD5ITs_OBWI8T|lHt zk=~?AN$7+cT0##Yfe;A1`QG1q?+^IQe9oRV>+G{;?Y$0gS zK-Xj1g~wS`;{a+Or=*&uM;DYnBQ46mOdhew58sGbwn4Dg*XLXOv@r7%Zl!ih*Q6A? zBc`uV?rfR9h*_Do-bq6`{R&AgG34IqcGlWD-BJAjcI5}2A7=A_pg&l<7*D1u7{_~e zCMS*q&d*k?5zh2+<13a;!>9_SNo?67;>?3cIK|y(2P>UcY*~1JlSFch$?kOTc($LO z;-R6`mc^46ZaY!J(4|EUIVIT_KoZ9!$E!%bcHHF%ezi4T;*cGX=|u2_e_q5!E>yNcVhLnebm^ zwXF&E6MjYL=r{M^XmW*D$hIxmSvuwZRGjLdHp9}IHUbgPszXo*zkV|1R%5FMe0cvdxXUuGsc4Xfr8W z2;ou+d-Bd8C(E?)!oaiPaoHCksi%vf)D>&zU^?;~>11<`^l`#P9K6$fDNaHDbaGbi z>ZVW*qE5g{8LGeDQaxKHgDrwl%crK_G7MNkJr<6&PFfQ927ftDTCuaGwi{Z}cicmp zfFX=t+}s|+=Vnrl$Ic*B!|@7MESy^Y5MOSkhkA?xX`#L_TqO zf|dpPZhhoNUo=&mu+hl5D{KUNaMU=};^vQz6q0LRm#=W@STD7p{P?OrtRJ+@6}u># zeLTqh0C1RG>YQ>sQ4vD%I6qlzE39vt-@)F}sQk3&069p-k;nAG7Q_f1j{p71>?1}_ zpX4@Tl(ay3D1A$@_xO((#4|NT;X*|1f&SU5e%f5d-0Z?(UC-36Jnx*fFsd%kZ_>m; zURj8gfU5D*GfeOA3UFU&ZlHAh%BF8~zYmPT3-0;DO82A&%oa#rap9anR?bCr#iecS z|Bb!!pBIJFin_eB4l?T616KGKr!!Oo`emWeMJ#E&2vW=1nb8uK+Dh5Wrq5){(~Si( zgGl|c8_eaZ_qa{CEquS8kHSESp_h21mx!qskR@? z7HRb#Y~xPKa3AyCAp(e1OxI16pt3;l;FYe#Heb6nL1lrEad%*wC!HT8>%66Wn)foW zDmE=A+d$qCxXl?tjC8Fx^tgkT5e7mSkD@KedE759l`o#39U0Mcf$zfWoA@J26`OIu zzcnmxxfz(MlJr!ZsHdAQ5{)Tt{vbb2<;VVrq#fu>^D6i2 z^JVEXo7so3^cd_jA(hsnmc4oI1fi(LHA#2n(-r8MZIb@xFc5BH^?S>6v!tl5`zF3Z zy06g1kG>KYy%;1`(TK@CERL2=7fx#8;g;?d@P57(NOImPr@>U0zTH;)UZ78iJEaW{ zr(Uew1A8T9hfEYoWu$@pbIZohO#QiI&a$V(x~%MzJGCI zJRDt7ZkFTlXja?8xH3F<$lA#3v&KA657y^^+0E$jH!)v(%a+w-r^{Bs=hR+K&R~KT z>VFg<`Ekv^?2OeYBT6_s|3L!lgyETfAa?9SuiE;<9X%GTvypt_UWqFkmj`Dl>%m;FEcnpx0GA;P7_>? 
z%aLLM+XWZeyLieoIpP48ngw1m-g#Ipqp&WY%){cp9iFDUb;fe5W*4Z-4t}b8JUO|+ z3md}hGlfF2kmwcbKANiM#UO~z3Mbl7g-A&Byj$Z z5hle86%f7{g)cv-s%=;SHK@!4y?(oI#oV{t(d;P85quC?bYsr`@NN}QIhx6eF$bJY zE)c72A}I{IFp%053W$iK2>q1LFU@UsZx#J!(AQ_4W~e4qDl12|=rE75c6V=cUJ^k0 z)Lyu}Hsx7c3U6WhL%)wc#&eUW(2@n68D-k2HyYvbms+I+ zQ)gaQ;QLAkiWQtUKm9n2)}NL7S+TWjD6JQhBCTD`Rvjsl;KhOH+!;mIbb-8gL33Lc zM|^P*8i88?E6+21={Kvb#kRbavI791IuxPAm?`}7okx?7l?VSS1GIT!#U+DTuUA>E zM)}gz?$-mKmL3I$QK$qWv{b*#2BSGK5Zw2!yaKqtz{hdP2gDau4BPIic1n}xP z_08Tyz}5E5Q?t2cZr85F3OQ5&d3uaPVPx+5y0qW==+ytW3t$2^X27hl?lS7XZ}vQ~ zb?&#Pt@uITPca4;!~(TP(QW?%bl2YV;lI6E{ZEaB>87-0<^akrf$5d|B#Q&vyid!1)F3P_xp!o~ zM+RUkdXXIO>I^SM1KeQOVrP*3j?T(mG#j`i@ILPwkK5BFX4mA~rGFkhoZK@~GFzI4 z7sE1sSe&%O^IzcQ30JSpY_O3e1l08G!mybXHDy#M=0?G$fRUd#$AVwW7F?3L`dElk z43ozLM}BP*+Y@0zGZAX4BLb{^87~APj z2WES2X(pQ;V)f7NEA2+AUPesiaHkQwo_nS&o?#Kr0o*T`j5MW*8Fxd%%!w<_=k)(HlalA;9&pSv2mFAL@+c59NWOV2ve1*mB0OfzH0NEzC9EmuE$CrgCb0;qXS zbXru!)zkqUo#6=P&oN7+zQ!tJmkK> zeUn!anur z^XPui%Rtw%tCPmYgX3;4WsoPe)MDR%)UVdMPFJw)P73joXsN78TIYtOHD{Tk(Q&=j65=osuQ>+ifwf(&`Jg0FZ z#~u&d%q|UYKbYl3^s-E6E=jN)*pZF@g^NYski*RcT&9+e^!q3BO=rH)C;lvdJrCqp zkW*H>x>6+-4((kz4vLExSrumeM(P{L{ruNWo_P1$Y_q_3FF?iF75-Bt9hF~C8CR<= zTqcaR)S>SLEY0AmwwBE;mtbrilC4aR(PE&VKK=8Ti(r6Xq%HWMJ5iGSm1>j-gU<;n0p5?s1o7%Whg%KYry15aPI#c#-m%wnUl4jpswfjZ?QzaMxjy47FFYyn8vSaDmJ`fh zQ8}0Ut&BBFMJW8fk?K!n4_ErF@#RX_??SOZ7C)w$n7$a_Q%RQf?X_vHe9)(47-?>0 z$F8BVr7dWtjuw_(v7Ogf&~FO%7eW(I>zh)Z4U3->r)l_CM^bfU{_-;8~X6 zW3}l4FX$0TO*dDsxBwQmr4lt0#2o8%B=jqLb3JYg#9Cfg!M*4DFhn^m$aJnYVadI7 z`P%anO5w_8`M-^hc2f&F?bsjm3|>BqQ8v4lk>K62)#ZY|JI0X1Y1?#jw=FkpO$nXl zXU?qjoeo74#%g7AfLbQNOZlJSc!9lukwDXTitubC4%;~t7p?u#fvV^>H%-7qGR(nm z`tGkh_3r(%GcN``?k@wRVclY}(510;#$jOR;29H!OTv&vz~Sd)eUi6_O0(`1#PCdHDHn$`G66KN8=@DaKh2 zh>fni*mCuvQumGi_aEwJS2Pw?j()XnPd*y!OyZz*Fdgi;hX!sbt~nC;ZLgzwgo~MiuKm{mjZTGm4pi)mfL0Pm*n7mnm}y%=Dc}da-_n^%lD7WZq#dV*Z1kv&u&3z= zD!9A+{#L&cEi{p<(}>8L?rZ>xw_;V(7V$FA6>$ENn9L{E8tsk!P} zgnqfg91*E_1Ef@ps8sGS-3Bz|N-P>A81?cl9-k5E2b)tVhZVE_zbJ`v;<2fXV=w&F ze|QO=4Rhe90MoM0o9W!iZIx@y*5^^QoLTc=zokth-p!lEe5Vh3{lr+bcJ(e4uowwM zEny_BnU%wD;H^`bE{rRkyU136MHd&%^9vmT{q2cNr%bYE@5AIBTJnbd|OX5~FH?BG^?1dbeAzpD`x9GbS zf9Nmwmnkd~ClD^9igzo#BqzJ!_zPyuf=&FJd2tQV&@bjuf6!TSoP5`wQEl!bF#B`tk}%(WIm+m%O~SkoMy!J*4n;*CLX`$gh^B9dxm^#?yE z3XmhdM}JmC+dv?7pkoRMSB;L=d69uZ50{!T=%YFIUKK=;04N5_$CCs6N zk1zar+(Kt_3R~?NhA$D^+mUfc|NyQ zq91rqX{eXC$>oLa=KXxasMKY8Nn-M0Q;ppXhc_1U*>@M$1^+d@#IXf8es;~JWQEyM zDVSPxt`wC18+{Zlw{ z0M0$-QK)gr$ZQlMhw*f^d9jX7q+xS1GJG-`V;$+5k+F9T+#Tp51Vf>$#D1I4pyVh7 z?IVZioup6Aa7uh5C}&={(S?U1*65`>Ob}TMw*2k^QZGY)`)&(-tiL~VTGQ*WcGQ|NvXCjHCEf@eI>QD) zkMb@`w?clu@{T%Pa)3NDIsNS{Ct#P=x$uAb`cb#i4Qyk6>wEKXzp{%1vlH3X-j)O9 zMipCUnyflIVj1__*aqWF&4RW|7X=`+3E2;VMy-dHh1}P63U@mX9@XVrxH;_^Nf@d} zzO~+p>N<-yLrE^4;Bg4lq7^lhpT5_*|5saHX=+>$L>ahvp)Lbp5r|6YD3OR0X0De0 zYnAm{2kbL5UmuwRspOY3Ug2Ol7J9=-eQ&`Q647SyVd+oq-ROf0K-a_+3!k`Bp_w1mR66v%nep8eXPfr5X#Im=a?j8=8i zu(%Yt@l3V1VXW;xJ;cdL_^i`k&Mnq8p=!@O1S;wc`NTYRV6?aau(zdruc9P;+v16`lCa2(;6N?t_wXqkExKta{O7$NP>XM)lMHtQ$F_? 
ziDedmo1JI}Q|){`52DT+*tngA2j6(q)qyH2^AYjl6us`{N5L=N)-mB4$I$H~s&8Tn zP6tw;9=|)ySz~5HPtFo?}d``xM2JyodCV|01~bX7((o{ z6#R`_X>`xt>btIt9u!Pkg1z+}TgWIyeUNMPn*aOlppSIKun)}nJx;}Cu0~#`xN@Q%BxpV+$L+?5cI`RJcAV0qs!S`hOYYG zBcOmyCQDXm#Ykpy|KAaS<;t%S*(_PZ*Z`C#Keoirff>J-Q9#Z z6HVB}qQ!^Z-9L@UXV_b|RYEV*N-aIHkRRE(KdWnfn{0Nll&(5S%7PXv9a@sy-4=Ad zi1H2%(kuf^P5`DTW#OhzUO#;^e*eV}QUkKcri%MoEfgO4W0z$-V%2MF3Ut23Q=ulH zWEQV6;j8XCA;kR9L)kv^4ce_}Mp*s^a9oL@>3Tp0td*qnMQ9{|t%<~81^TD8%L;K9 zwwV7!sI8-^0svFj8w_wPgv_k564zNUiC|ugPb^PSv4y@!bw@5>iqUuClN-87JWR;k ziOdaO5@Z+18bEuNna`uU>)ipzeHX=cUW+A=YM5Qavb?>D+*Um#p}raU7cJA6EgTyn zA1lbsy?n(6Rq<(gb?z7)L=oYl`HOXf-z&7-2Nc96Htl7jM{aYSbW6LE30bfuA}n%{ z0ZxfT6)89sg?t$REcS`C6@iZ_(6jP=4p^T+pC_DF;F^@Hy( ze%n5fd2$TK_o#w<*$bYx{Jr(3Xg_j4X@8zX)t@2~B!e$!7fav=;0L`zwS~5F1ohU` zII*qby|BMUq&5!$8f-D2HN!#Z6`SSrn{O-|8qCi1t@p;-pR0ST!C#m&(U(7oiUal_ z5}A{q^rzmm8!%g!FR!J3cpdqpyna%DXn5ScySyy?W~iK>2EE;dRgThfDRN|%OV$_G zX~TqjIicCs*?qsOceG2tCoXbWi6s&e{u)Z)cmiXqmg_Li-V+F2+w8s4$4+AI_ZDOa z7kyEq2$C3Qh_MGSX^^OIa=zf%=AHPYmqvnAVn&y4I)N!9r|7zhXNIH4i+~YEdW|}s zuw)%E%FQ4qhaOZ`BXvY_naeyzIIVtk*(~i%eWxS5M(Ds9E4x#=*yFbPzDw~D$G)P) zz;i-Kp+o?L4@CM7G1qW+%#|MTwTm&w<(7ZS$@W?LfFL*piVE0wbcm60pRX)h7!CAm z8lt)-Ll|X@zYw-+plVC+Q?3cRKDB%s5xtwN4aLByPfygm@={d>`eEQ)oQm(#p=;Eh z%J0!L70OZ>3-+Io)5vs)cbR~GK%zYDj*+km0C+r{-?fU;E63$>e$gH6U9l7!l~-*=JgJzd_&T}c60Tzd?uByAQN+v8r9D6n z+=m~`XO{2n>Z7!G5;aVghO3OnTiO&kvOtl#{~ETL2!kax%l$h05=lfv*`6#L8olV> z)gf-HPA_$j_?33@?1YdWw;`|c8JwOHFMS329p&MWIIvGFVCt)?J$?5O%h%2|c_?mcsJpq*y!2{!QB zg-Puw$Y)O>T_big0T*|=x#mmytE8yT7k>9{?@*%eJ8S)nDSqSfdo8DQjuER=5}mjH zV~z4;K!f+!mxn4gVZh_IV|Gb0#+%_oM)>FO>#YMEH*$o!*x4?b4@>5lTd?U63++Hq zDpAJxk-;pJ2OI{%cK|@Vx6DY7>ZOzL8sah?oWYtm*;7ha0m)s$d>G~%<$D47e1xo)yr z1WyF+EK!uy;c{Y{nF2{ZCtH0Wzt4(k)Vt9`EEPrb2cjZ`fC4pFulccyiv($zJ5kx* za!dK~J-ZBUn}@$~()!BHeVCM;pTl-7eV^sX=&$vkAzzfKi5k*6i@hzo&lk2OTmi8{ z>@77~%4gwyccF(V)%}pCA*18If@i~HVBB^1{UmlIulZ4pdBqd-aE*v-9H?L*Z{?YK zK`2gXuh-ZoH;$H6vz&xfQ>wayZvbUaqfW`%B=kj#(He0zL$e>y(XSFeYc!h-Hx=zW z;zJ_Sk)v5sbmf@dr61`bA3^!wgKKFcmq*$G^C*0P&RAT!aFZGnx zRP^ajF2vWV~kcAth^D_y`g^nT4>tgPwA@D=+}mRiD2T5Rdi%@njM|uDsxGK)=dz zuDR;+7ZBTbMU5i5Q5c*=>4Y{D-0sYb#r1IsK3=E+(!Xezf6@Lxd>$YzjvQb#LAI7u z3wuCm1dzNSA7e#Y^6u6hfBXlomYHZsxV-JMR&0cu2-_CV+YnJzHz-!^} zXSFrqsf{kkNI*7p2DB!w8IM<^J>wt$G&mp_ta^*9`I%Wlz4(i%L`Xz5gmBQenkFz% zm_#uxEQDg1=d$!XL<6?}6@_3vc{hu&WADM5rlPkp_l-V?s#rP5lz&;g|1%GYf%{}A zz*5q#suyM#UR0tKdaXd^L@A%oo|G2xFU}umFvqoCDhSzRV+WHCZC_kNeAv>5kkCxX_S&=b zh?K4Il%6MDZPI@mkf6)_X*Ky-2x`ZO4hiG@%08^QYTn{QR?iLZQ^laq-(4K2_uAw} zHw6FHd0NV~6?Nx_4j`;*{>_5F26DV)c(Vo2M1g!W6h9TDJpC(yG7W#*qWH4H^0i5e z&WjPwulL0%nq5eoF^woNtXVEnbV@J?u=8ydzu>BbeV_~( zE&l?uaD5K#m@%1$?;COlnqQQ5eZ>FXFP=XGB8^Xy5ru9H6YlN#Zm7^=TGuFE+HnN~ zk>Z>c1V^J_{kP3HmGNi^^)@sJ6cR2Z9WZK9mt4Mvc zaX#MHPy10$g6M~As>JJSLYoQYM^hw0>f`JxlzGtz&fYw?Lcon-k6KC8nR)kotgavN zU9vw#g-|l(O1l=)t`BzUgQzVO1d2U#9}RXex+wJkVeg&*$TO% zo|ja&oS)?xHInuIf2epdEABf_TG7`DdN=*`VdLGRrr@ng*V4rvyzt+r&YD&ESplZS zmZ&r{gD1_lI&_Y$wXI=oNrm+u>2HQRKQEtwMj>flQjO;v)X)Y!HG#fnM{%V zRG*hy$}YAI6!P{ECd6Lq~HAOic8k4cFCnF2H>Q;l=5+T=C>0H#gjO3D?IWde1Cq!gN6) z!fk2alV)^m&;S|5YGB71=TluV9_{uhzc;?MAFcXaDG_u0PxC6Xy+0cNPgpr=nlYnq z=D@hbjkNs%bCb_*$CVM(!jd;)t}}K>;9lJBNu2dv-SW*9y3m%<<+ti)q($RBdmKmh zEf<-({Rsk=$|5t+s{4_2c?(SXDFGHtChL-D$YGpPUOF-6v{idW^?puK5~4QZ6Lf$W zokh`Vjfo4{T<2FI?1K83Z6X~RykP5Jt2oawDZ$XapCvxpVpaJRF%+2~8l$=37Pt)=>Qd@sZBJhyzGpe#Z+s z#;rkcKhe6q;vBbf!&cvzx!mI8(uKc5by6<;!^-s#JwFdg-b;Qp#(U!9(R06kiFQ?y zB&4{vqr`}nw;h?^$ zl;GNL9Vg4br0w5&^qtVA+7=)|>r?ze7e@5aZ#}4d^umfIy*(kjW>U5xlOi*LBdXcZUNo*O5N|`O%bNJ6f`?xBn`(?#CYL@J8@jN)<*c 
zHI3;*TP9*9AeY&{0K$T9nyJ=|P5r}4#XdT3jX0z4g|^l1n@%+vCZ(vgPgMn9PwqN; zC=%pX1eOzQh0x2~gG*~;-GYw4s5vG*3k)hoqc|w_hz)P9BVLCL%xY8x^g!FY?h(q- z`wYABw`dc0alBB+r~jueCcm+Dd^4@Q26YA><4i?&qngRA=3`OuD#z8OAzjkQ&)>ng z6J+DrSa}#}8fN!~o3GzH^I*IXhLY~SKQjGP`Iw1{=FdPQASPl|@xo300frGJ*|@87 zx&zp$n<(3>w~|hU7Hg|pB}vH4LJ4}RoWiFpksZbL>Bh4N?}2+oa)B~m8JnLeYn9#{ z&u#^7qx6Z%R@I~TLbi_u_xxce5kW2@l)@zBKdKR92OstFiZbCvyYFo%=4x<6^;Gwo8W-0K<`nYn_y1%zVP~D|3zqpX2PgazJ_BHg?6>{e~ zU;>C~Yv>(j1`q+U5cv5oQPzm{-eoRi=bgP%w zhYx4_p3^~`KOZh?g-WG09A54UPSxe+!hVzg;}s|ld1pO8YSTUe5=YTD#V~ItIiX4p zo=Sc?^T{_Z<(|1$)YqM_G~t0$zbEBUjV3R_J*Djn{Ii#wUq0MY{LKQ451d;uh08SW1@G}g4y)qjVLiCH z!@7FGUH@KH8`+>;?`-}vlJt4?Mz~aY@QyQQJaRlA3lfD9r$XsdU1HGV{xt@CedtL& zs%e2zUiRO@d)&zR=}7vi2Of_{L23EWcag>#(Rj<#Zp)hHz{Qh=p_k2g^*vt5LFz~i zjqJ5$wut;ambp7Vz_I?n&s~}Fd76stu1I(@VfRb)gWs8i);A2feprczw(g%V^Ry{t z1d;lOct_pIT43twgeCorGdtYbp(0W`ctfXa%krFNFXlrjwG8$r=Y!{|>1vjgA4auN zms_^#dm)EaT@`1W6$-EhaXPh{3a}tXtfTiQzH{j*SvOcNP|E$};dix;zScLddEiwS z8#W>gf+(ey^VlDp6hNwyKbKm7vT>X-KltEVZ;e$XaJ1Xm;vnt2t#AJiOG47~f$5k= zpGYZzx*vnf1`u_V7k8)Hvd>TPsJ7$uHr}&C_w%}qtCpAf-FYZtD=Q-4<35koM3|H` z0^R-#EEX)Na-m%_<#dxFX=RZ5KJYUvv+S73@dPfVDaPrz_w^vWWlk z0gLsvX+l5hTkmKQnR<5A))p(ML`z>>0#N_N@c&rN9=x~CeAd&M3Vnh9!=>-iaRcf- zmcCFk^C`!4$*KH`Mz7&m`I|X)U7xz%lmhi2ItqG<<3^8Cf`_pbLXBW>7W7*RmR1o=H{^kZNG|I$b zmiNq62TRv4A#gdefSCF=`i|?B)wwy4Oc4ecXlwS#`b#uF(=}Y!-Y&cHyWZA9a4b zjww3#m7`&g>SE7U*m+{=GVAT-u!fXqi{P7qHNmizFr7$KdfdCp*!xzz6T&wp+Ic73 zD8&NL#49uvXq!fDDnB%Sjz>-3#NWSrF75m7LHs*;wI3a7_eCUIO^tU-Qq(od zGp87MlMQ$$7aKf{Lg_Kb6?5T-XSYsy;$_%CGcOvvt7Toz}L*7 z?;UhE%XY!%`{4R&zM6dOGlAe`uIw8V?;rlquzIa|TFmCLV&-T6WF%nN`^j~s`nvJ= za`*9*tGACN-8`c)MNs<22Q$Xt6*EvZ{JaBv+Ck`+4)IhiYPz%wE6VWtXC*omh{m++ z`c`0O7Y^%Q0;w-|q+;G_j@sp-ywJJ|Wb?5O_^wyVOI^EsuYr6q$o{8Twg|hI_1yP> zDH3ZMX6YNRRI?~Q9pp{kF}ZnlveuR=j7q7r<6HKE-AhX?`wIovh_JQSPc}z+1jEu+ zVYN%V$@%ot^qUz6e4oDTB@cA+^zq)J>kL>y-+#{=`V| zoZYCjL#D6c)-SSR>N`!PTWx3M47Miv%@|e-kROGTN$u~>EcvotpehH zg6lCYdSPGoJJFh-P+SD`aAxA2h_iAQIpqu?Mv5TAZDa1rT9DH{XSG=ChPY!SZFz(Q z*iW#63AqSh`c9JeV!r*)gX`C+@?cHw#-FTK{9EM5hV7_~H^C`&Xl9kou!0?%jWke< zfy_!&c?c7Ai=XzYd~e^&F-75ie#-RA9hhSo*JRTV-)-y}i4 zyp7yohy3Ke0<<(?mCa{YCul;~x@w|=|VIPYwjyIlO}uVYc~n|wW!^u5q?1q)p3u5X3&fqcuB zhDED3Xz0R-gI(_V?EEfbNS{4kKsumb$5g^&{3Wc`6I8z^f+rUC z+~u9QpuBLpt{DF=adf!xC^+L=qoWZvA-Se6p4*BaAe94}5^ z1WSUz=P9YUmZD7640?8SS4W!%1$Rm>e;BkCQ3{d{KEk66M${arp%l$#?>cj((*na< zq3CKc1x3*hR%Xf$M?<>_*Ij_rI#u%|FY$Ws)!h#M1>y%>lyne|6!%hh$@Pu&d2L1T zj)&I{ck;tJ_#j`x4i*$I=q4VLBd}!hVQ#qWr+Ap)0CDymR<9He3CLkHp6!?F9Ya{} z#JtXdj$1jc2}xEZEg9xq|8Ht$O(yMi?UX&%1N3^gljnn2EZ z_SR-HG;tfAxGiF5Zs#}J-E+XcQoX(FbN5$QFc|~mv^*oFqz}KlC7|Gnt!a*ShUI4% z1_phQkWZKe0R?l!CP1o^h-=#+v*8|oEN-ljGC{1ag6g%k=U7S8CV<<-ETwAJ3Wfh> z8|So>Bka4Qw^RyBb$;X$w2>2$(lb^5akkfIN$Ovj(~V4_?Hh@ohWqRkCs!W3C5|$t zzWeOh6pnaD3)!E(tdd@;pPYD6*LT0H`*FV0QN4g$(tM_{Jy1JExqW--kN8<#BTOGe9E)N(#{*vUK5n)1&!w@q{DNrM=s zoL(Q4UyyeK3Z;CBS4bt_e1Sh62@MJ4cxs0Eb@CI)Acz>Z{5uA28Qvabj8D(W9~8W= z8r);6J6#rg-MA(QSr=InKmADZMw)u?5;NGbd3-{8yJJ{zo@lU&VjlNJJybF1{l>6* z$H7H|-~5-E-!Jn(xm>2S@6blfh#f0}eGPMcKJiT^;cr}-4DAPlb3;5bD;9nKEa_qX zJlWCwjm8vtdY1XTQ*qss%hs>~d79Al7~x9+nnQS6(1o{DhfgcUGrEt)AK95yY`|<8 zcT9b1nyy{Hz*&n!??Mu$evh)RTB{OpsdWXaM2nrv{8OME>n0lEtzwhOSa@&5Jn+HM zjZhzZSa$pa)VI)giDbcG)wA%A^8)HfPL9JHiu$syZ&)OL`Sk`*4!O3 z=CIBb(x>(#lmYx_YW%IqN`AZ`P3m%^U#01DYcd@fpaTDLw<>h-gh#Y4pM@=zS$BVL+@d6t|7iW z@AqP2cE2YUrcBg|an*p!)PZBH-FJR^c3G)i6TNuugP(jo`C#gGb>Tuj6&FWGWe>tV zZ|sC7<9aDh|L$d39&d#@eQWBJ<~*_7|3R@E54R_1=ZEMS>I&kc_5@X>8l#4lfVYxT zX8Vtt7U|d>bUEY4RB8uZb8d&|^wD>QGA_1%&Iy*ge-r@K0)eRB1812eFni1_@ zGASkA_<3+cUL}ivkU_RmiC;7Ena0`aX;40nv*=CJQ^~M5Zs;2i7_u1M^GdFV2F_`= 
z=3EvW;YuSWdoyugWyGic>3?sSaV-G&g!0@Y%sVE@%PnE$W==BSv;fbG_>9$dqXtC6 z71vV*mua-eM&B)MRV4+|VY3ok^C9e}(jDfRzJEBeFID+tx#=DjVQL=9rDD@wx>CU% znz%?Q-u($_Oc@OJoQ?K`m(C#9ca7e6@Lbe%!(GobJ2i}mh-H7l16GL;fgXk_s$a~& z`!2VZ>z8YDGzIj? zW?`@nk60B{YGOHpqTADBbMwzJIQKq#t;o#DIJ3Y>lg270p7yxD9*Y=hxqi15jin0C zKF6DHnh;%MS%)=mWy_O!MOIl{K5lF9Wk2p+i29+5)Y@^Y>y;jt=XM?T7tPk|B4hV1{5Skee z-6fl@->2sTx34JGr`}E|WT!2GkF7#5`P$@d6@|2b*KifPv39D}m;%EF-+W_J<($6I z4leQ_ONhQK2J(orm~VJ@#;9(4Wy#uwaU~`j*EHn3VN{*&@JMGdy%%j2=>q&103qjc z$~TxNjhTy;TRSU&sXRV|v#S73QZ9gKz~ld(2N*4kuyNyZHWBW6mXSB;yZ$oRs#R7$ zN=U?Jf+4s+1&%EJFHGW*SQkY09l z>##y;dx=7czNod6Ha>Mvs(4Q~=;Wl!1FIPNAn1twTsh|S{q}R#ca(|k8P{6=%AGmu zx1KR(yNxZynWCl`85`9(ww&;_nUK=AO#tqz9P!qs{SFa|-;}OuCUw}#C~7E~X=Yt- z313#$YI65)hl#iQPpq+w1C1vDnAo$^qgbs#RSslWh(n^lgyo{a@#5v~L z<4Yu^Fr`QK{(1J5UK$Lb8P-fHU!_N19*~-QCyJl19QSnp#gn+_YK}RqC{Tfiz`*zl zS%BcokF^_f*9dlf4{jBx4e~+1F%k@8!_wZMed28dnh!#LFpz>SHQac<;%fmSah zt6gG+ZdgqDgtjZb zSMm}c%gzANTFbEu21PzWsexR(IdA*AFz2p$B~x==@eDPK z0hWmfzG#pyTK?1c3ZH9`3DueNjC^Ul5cYdjH$c4+ljYnqrDw!_4a@O4Z*9)zx-=jH zS(Cf-v+CvwA14wswL97nxT@WCe2Kg*GfQ{iC*K}{3zMllJC(+){^2n#eeJ#Ii*VzZ z=|L-5MDFobfIaPrfojd0(Db{kGSt*&iW4Fs+01U4H*i!)!S115C&CZ?SUl&!m0w!N z5y2(eyk;Mp?TY-!3@wirPg&?@!U^+C&zq{l8~_n))uV-VOgp}?p|_n?y+X6w50Ex1 z*dIHkf}*oH(-Xq;KtWoBEwe4zlfhzXbE5n*TZe<1lxPxYNBp-`5g`8;4H!H+3Qwz#TR?o^UKchf8$MRh%LLwgo)0pf3lzAdh4bm)sUVj zgY;uzrGW@`_QgR|`9|W|#Ds;n54k`HJkr*|>iAB)GNJN;cl~uspJNZo(v0xItGZa> z_Fl84@s$sM&q}XvcR*KWoH=DOs%cI@ms!(}jYF9nQwj66e^dBFaB1uys1y{5v(YV= z|NW#k;ajP(^^iHMkLm!y^yf-^q-Hw0H#{wTq+QpaW17Ln2XX$BrvAw41z%-3FI0_7 z2|vsz>!RA0@m$?#hGPoQPXe-VRUdFLl=bJxTxKz6VvXjVVypQyX}!$KmY~9x_Q|AG z^HyKgzwE0rmBZ{DYUv1zwJTAg0fxgIz;4H zF*LIVgfmnHw9-YLD5~<*Oz^G~VI@B@^dWRhfd!M^z;o1s!22IzHQ;NP8PH|D$I~rd zQ+DAa3a9t=wg>)*WtXp|681TQ!|N3CxLOI}eVcm!2ys2)bZ>I-S=EcH`+t}?3$G^N zzx&gOl!}zZP^3f{CEbVvNkKqzC?MSp+eksAOS%LFq`OB+OLvcEoG;m)9AnjBcJc%s=KyzwZY8dR@HimQnJPl<>C#h%2?F;Ts32UpPXx`Mc zPqA{8T36dW(>Yuh{rxMk|2|~jYO{#A7ZNM-%FnnwR>2ZtuwRW+2lSD z?4Hz%`1e;@!t~|7|57+t!gaI8R<_Zf(`%u?xxy6wf` zjL!kC#!QVfIZ*-UCCq+{+BZ1;)ugO4aCl~g`N-mV?+;(0&;04X!#0Ej$*^~eZM2dj zahn!++s|P=g38v_1mmCX?oPV{pFe#{b&ny#l8Px=^yT8Vokv29V*hq_;@~716*r5( zJu1Kt3J+|u_2Erk;@6}HS$lBY#p?R+7FQo5J718@lIcGw&oQK~fw{SX`6_fwjwa$w z0SR%ge$h4&U|iXRt0HYCvzteEM~tZ%Bvy{T8hRa8BF109ep;6^_ zw5p1EG=<+|YeYzH4YfBiDnt(uDF~m8rb0@y%gt>+{N7^1jFAsP9}QH-fZ}E2b{{e+ z!DZX?i(?6i>tG20w)bRJ1!?f_+_cUCp&)Ss;AImiC0kB#_aUv#Brz&U2BPj;u^P&G zn!wbKq&L^O`>hh^l?$7ERThUN_IG-yqZqKn?OLj`y;mNKQ@@^4m?Blp5VNbg-Imx- zsAVsl)bSdGGFa_DXqzX;&?HDGoJs}VkYuw>lFobeDKmFE+8(9c*A^(>h^)*H%MA<= z;84|&o{(s9q0qqO-EuGC%d-jMtgmbIu&r@sY5bnxt>Uaq} zF5+)2=qUVe#iqt|WIw^}WF_78zY!f2Mciyn@Gl(a*3o2N ztpvQYZ!HweNMN>Di6;K@iuoGZD2)~VUPlOHe!Z@o9P4ND$EtCu=M8}cYuATwfaq?g zUS+{WX+fGhk=(dfbHDBwOo^O%`4#-HX6t(IJiny&-bq)H9f2(zB&`pDk*^vQp>`dX z?y(~PvGPh^p3l1)>YC4v-aamrnQLsEy}y1D5=Er{w%dTnM=3Hbg^8e#j#pQ#TUw>i zuW`71(?lu7R(Bf{FL$x9d$0VBoA_|XV_m1X*T!PZly1%qy+yrskYOR0x}e}(16RTn zOVzHvd|B-tMH2c@$}2aKHzDK?Ufy@B5ZfFXUo(D}Q>I9o|FE5v%+~Il@SdfnrX(ks z>rT(>sHcT(m4<)H5u57O9D{mkA4leCXp{qoG$!ul<>ia|3w3Qot)xgi{O89kjZ+G8 zdyOg>|9Jnk)RS>LPoivcp@QCG_@*C~ho3OkzO0MZybNanzpVZ4W$9LZ6K&)@w9yEK(x*k@M4NQHM`&E#&eA0L^dKyGCLA z605}4Dt8;xG`OO5WNwIAtKt(a#kBo$pEF`V+jzpA!6ep=UI|mxu$(DdG{MnIA?-mr zDxu8aZXsTTxq4%Zb9?fghj?kN^61>c?<<1sx1@ued%EAaDO$ophgYH50>00>q$e)o!c*V<|9pmPUMi)3yXqUWNT;j3yS@8Z;e9gy8!g<)^ zo)4d%juz$RqRQ~G(h$m^?(voZ(R*-&$#2`6LLCgbP=+OOKM8Ks@G9WK27ku%kVPm_ zRCsTTFv!Y1n0ntLvU%S)oFbO_CB{D_)l-S5eW(Qxky2@eW#b-uJv8c_Z%tPUaW6gwt{1XGMt z@MKqqefNs~3<(qMEQSwU#Mt-C#&c%ii9{N)1GW>cTVEGIPNY8qx5Io9(jL=11UfC; 
zpbM+oH}a5VvE;&N;_qx=H}?nf5A5Hld#vdx^>mMoKaQ0K1g{MB=YN7e8x&^JMSVmdvI(r{|G0&d5w$Oi`+jyC z=H91MBy)|4x7so*pDODWHg4#r+p|g|T%A$X2t4a6sVGVSg+412HyqsHROqg%_~0!m zXrFyQ(ngUUyOGQp-1|K29~U_Or(62Lk22bGQYnhThS<7yX>~g*gNnL#4F-V(QXVBl zV#JLD5UO^?yn9Y09zIl(F14z3F6aW5r9V;u+gdEa6vRs?2ily6?Y*{^!o2&8=vklT z;RA=N?noz6GH!n&P_mT&b}Awj#{6DVRa$YE>>2begIhXNAy<5*TdsAv+@z@U!~J1S zP9-TRhxp(rMBykIbMqpZ1xDs}zn&APS&VE|FNOb_CVEdtDAFAXdt5z1pUfbVS7xan zG&nu)LxNI_3^fHL#KS*PE)nFRv2#32TdQQUPwr_wS!~RoE$V+waB8@Y8to?~tel-p z+YWa1$9^u{&ac8`aEx{RlCwM9s!xPXNr;b=?bI4$`Hs107SpaxAOlWLh0@fE@>Gk#R zl~^q?{vM#4cIp&si$&71W67m8T=#)O{u>|+EuQ<8SAT&#+n5> zZAd3UC*xt0wcQo5J8WB{b)jl_v_!(0>Rao zWJ>SjZ8Wvr&%4=fN*Nwezim|z%J$tmsYv(ld16w}Oy$?Ca=SiA7>P7uN%MTZs>B#g z+SDU9K(R7@l(b7_tj4C{YL>|M{QS z{`pvDNSoD^sv=(ki!ESy@L7w-p$4-O;OcLT?0K5nSPM^*TXG0&wAfrV4C(r5l%^N| zMG`E-Sl8hBa8NNj^%+u_X8v(%h+e&9Ch9q_nh+yrtcbpDm~?fhQo(HU={>O-Sa!3p(0T?}NyRe~^F;V@Ak4eiIlj>=$`*Ef;OEjZv(VC)_z@n!((65)$gn(veg+~EW6eIql%mr(g z+H|KO4@wu0kWeL2U>GcGbEkOFM4?@p znYdOvNyMCmS0M8Xjeg8MjCa`bVm@c{7f^KpD-1z56Ue{6q0xcLtljVyk-O43%Wf=+_fS}5 zKw@|r-;V!~vHmeQMJgThrMxTJYx3w z$M^R9wzAX4oiD}ofsS+AAN#uL69mdE{jbAx>gue*qKRxKK0ogG8153v-~Q)bGEe$J zK^5>TL1Px8UGBXi<>mrJ$3ja)*(s(k=i6dT5yeTyQ%jp z0@8TdNP_`IYbJI*@ylVZinL-e87BWdf{qlc$OD!?1ew?!@)Ye&;^OHMfpY{^ zMfT61PHsExy1Dg=xLUdKsq6L$i*I{_`K~%sQ@QfM#(@ap zBa>E9wTx}@^&-d)sInl@^I(_tF6Da2qk!3jPr@H4C#KKc>|w|w{CXJm(dW^2-=2dj zYQTszGGK_ZL%s^&t6b>XYaN)1+-c$u*pKiTD`0!lLVMgNt>epkQB%Zcv)#|T? z%1ymxFpDcKev;@&S_c<>_e%EfdnK)k@Ogq`qwL19nQ|**xjcw&#giGAh>5zMZ{4Yf zIg^XiyEY<^ZIc?UxwA#3Z>VIvcn-Os=xmQEm0s_uYcXWV>Q3>>tryL>Uu((nf&0sF zTb+NLRft7>d0#^XFTn%mAer8dvi96uk0TLcmAv_?$Lb({5%)Ni)5D|V!y}k=T8P$< zET7ZACrO>f`26^YsCRfHJkd(BhauU^2&X3na@e#B#YFX@#?PV6Byu#yVMBZqbg5fyl2K#MA6%_hYv`vBKRl< zru!B!8_+MRQ~BnVRta15q@e5kD%6Q$wIxa?f`r@r9;!&$~NlkQ{v==&}N?J;X1 z+GLmC{U6>kTaRXM^maB%Dih!S{iUM8uB&~+E?Ca_wgqwE-n=%ASBEgp->f--tm(6p zolSTZ?D^X2T8GA^cDN5uEAWWLd0U*lL1g9B`hMAk)IPkM+b0;X^T|}k@osICg{__q z>kjN{N{R*slS5u?nFM}{4jcw8#a0d&1b{jAbBk2surV)Zm-C+9fIedzRX(04#5Bn9 z!>&D9ezj|HRRg)Y1X-Q9KzWTo`%}jiJiWqib049)>5(Ay<@K9+`1kSCZ$ZCuL3!G| zFr(5NnjL2L!7hyuLYth@f%MOgwL)OJ=t&6anmn`vnk%#%)65On>T#a$*^w>0Fx5Gi zeJ_eO{1nJVdLKb%x6{gH79J!QS8wz8I{lZJv&OaZkE*o#+m$U7UOD?@eDT_sQJt3P zfZQU5DBGmGA^ew?qWXT3#iCJ+P>EMWD>F2-!7{@F4( zXuha*tL?n5y3x9r2IVpofEYyJRPCdJBitE#3$F*LGJJ##c3#Akw7jAIyS$!eCp8*p zP+rK*ef^00iuTk_dzZ_(R3;P3H_mr*V@fo->yV#4vyt=j^l+}a2K8y8E{XfzShy0R z?E&VTVYpMsUXEto+7d;rM!W$;YV0_nx%>#u9R2?~m)3tA-g{4UZ9>!mSeUP8pbK)6 z7rPEjj#``l`4`jfN%1#@;9AmWkE>XUQJsiv$(*{?ht)yMWj%G`;Vy-MY)RJVKd+(^ z3_Ck@@8q(~TDTwnVy-wL9oIA3Y#zHX2%AWSy(>7YR&*l;8=_N?VVM&-nt9t#can1R ztwrRn(&{~{SQs0e6Bku0I?Dny3IIM_z2-YVSBIHiD|&rs#c z63O@HhkrWxrNN8wsuA|UOd>a1$a(z)@h}m3MIT2!DUXi~!vA8EDyD8(k9?{5PKT>{ z{VfL$ATtJK(hfLDTar4C0**dcZrc_(zYy>V()8tJc(?gZ``cUh@zOo-m0pQ;`v*I*}`l?uY-Tz5GNdB_&dzm{uv%4tQSW zT5Oq|v$I+N9kTWFWazqt9Dj2m8#GUCsrI)E)pWuIpa*}YtroT8(AQfHw|}<>iku7N z*8x|#XtC6;Pa64oc9-(*HCGF!m;eQ41q5qQThl%(uir1aZ*nI{SlB?Xx>a=~0WVMQ zVeB{x-x=gH=7yt%$>Jfsb05nnRJCXfXI^gJkF0nS*SxtC(Q7zAzGu*JwV0~i>jaJN zvC<*Gda(BNeCM4!jsy|x~0*N27N`}>Q9w+5mJ|p8BeWhk`pq`v(RluQY zG5iyzv0x!FKPG2!S$g8}M_LA$*MHn9V|i?*=5NgHEX-_rUA^pUt}izoE>lk46UihiIw{My32l7mhA)^3 z_m9~DKLV@cY;N*t~Va&V={;G0HY=Gz!=&*9h96t;pVAy;b5)ztuy5U{~4GwBGJsok&hW zFhHY>q7S_km0m&7XXmK(_J!gH^SZApw&K(ha_G!n1DY~M042*7gQWEX*_{;qrTKDS zndS&Uuy{(tRxD z1^#qFzprVcXC&nfMM~E8v;q0A{jz{zyL&tMlwzPeuC=leo2zukG6N?m&w;xmRIHXl z*%z-qEHa<(yu%MpGptLg$-@TKO;H(DzQyqnwrL~ViV~c5m$Gwr6A*GhyfgC!Y%Uu} z;cRrXB|`ylLIha|AdsYhM0NG9!OJJV`Im>T;U_td59Kg#)~EQIhhAqvf@xf!@uw&* zY7A}--1Ju!%Lr3;FuL53vH>2n6i;5Kd_n097gYtmLjpf{pkx-~`nER|f70Jo+_Zeo 
z;<$MP&c2NT>%PfVS;T^n0A#?rvuIpTUaUxivJ&IyN1cOWkiXcDVc&msYF49LfTJDIH9r8CFn@2v=t-F^@!D#d1*!gSS__AcS|Jd$EG0Qk$ zENfAO)hu3`ZCi%UCPCq*%JYD*1U&anKgl^2-p)-=&ots@j5qcv)o+KJ!?`0PZC zc8wvJvi|W~oPFD{xNMzwZFmupo<_Y&>}@`fe=XMhNtdcg!e?hDD(v1Z3G*Eqcqh_+ z;$7u9ZZbl*FMlLUa2Y&e0+`!s9#w>@o91-XNTsX8>*nrlxsT_Fu=d`0jl}t{#TjF_%UC&XghAU z%0)>Gu1S!K{lgH zb5nDVrz*e=j|HqhehaWpWZn#ES=gTZqXxd0l!prEsc*z4VOiF({o=^CcfTE{3Qp!b zZ_f>l*cS}$u%)_SIr#GG$=p&&MODzv;qohH@cC~ewp>gR&oPq_#2!-yzHw-~Y%l;j z%z=-I?zjQ=IxI}rt{V?Dz}e+JxYITxvmxrAi43|%*@e-zXZy4 z`Syv6R&wPjr1yLvN_+=v_}UIQF9A{aSG=^~vsd~84mu57U;ab{uRbNd><^3qJIP!RnosR7Z1g_|!2P&{*aR*=+`^Qns>1(LMS^jt zjkfix3)Hde&A;k1iL94dG4aPRU2H#8T0FH^7BgYc`_a4H!hOPRe>f~sqe9xLdFEpp z^Y>Z70sOE^#MLm1`9bx^y7lZTs}Io9k1ASvFkIQc{!@#YaI5Lvv+)leKd3S%7YVUb zY48~$*RpkClRa11zbt`#{MsdaeweSSGXHQ!U&q-`i?`MY?Z2H3g<@ENlUsP{Y-5yj$$gFJ8I6rb5liI7t z2jK^ToU{)!#v!|?O~%gVir1Y722~Et$A0bCtEcR96{hpt+Mz2F9Y`$!xBii0g&Sn! zkGoLDc1yp-5sNu)8lnLIF6*s9&UjV}+*BIPxc``EL=|L*-}y)I<$rnHIxuwd#veKz zR@CSs{LuW=!Ry$s{pQ%!wd~A2V9DW851|5rkk(F8i>!Y1HIWv&{s%Enrr|31U*^2n zfIXMF>7Y_B!R2nu>SF*OWz2C|f`#f&XbRB2N zLQ+p^j-F*mrpDW%`g>Pa%#ss_mMU=|#k=oK?KU6|*D0>AL6gxIVzOJz<92i`P{%IZ zWIRmlzgI)-cm#Qqc*ep>WzgDLoNxU_w*PL1bFEqgj%8J%eS_M0o?tx>H&grsR$Bx% zoL~Ewds%-Q!Xa&|`&^Vx@R$fQubtM4>7hv;db&z_I9~;D3A$dEXhok(7{9Ton82*R zC(@@l@}{U3hCfCH;yr|9vV$Q$!yOV(qcX% zmOL!4TVis$Twy{+5l{*YCwF=V=RF3h8Lcsmo}xQXR80MK$UivM=JyQ{odvrSr+aCj7XbL~R}rRb_=sL-7xx*|fJED^+uwKd>@IF$htv}7 zlasygzU&Puv~IZFuz-V_=O*V_ZU-ho)Sy)icW+SVSVN@E`Tko@<2xFYmQRt%s$q%H zZB78QJhq#bCiGQz{;YY~R^A22 z_@04uW_j`-Q~^|LgymD7yoSgMj7c~AYSeX&H5tM`M6|-PhKg{uS^F&u`n*G&?XzSr zkd7&d9dJibXleItiT=kempMG@vR2EXPW&`{Aq7LhD(2mS@@F<_-6Z`*T_=B*8XQ?p zCK?{mb`lAjo764mjy#x)RIEVhk>>SN7zclhoF5$y*I|Ta)}ULe|+` zYsRZ178kWz5yR>u2eOf{kJsevH!2{i*GuJWxHs!-+uRcN?FeJQeaui86;Sgo-D{Cl zcp-uAnh+D3+pT`vh;$8HUKaP?@4Q~WyLBc~|BMx>wx{dBG}FPU9J-$C-F4g6YSG}r zUZ#SL=w`CkFu$A$l6A;2t_p7Aucw3bDKxpNjkq4LQ0a-M)r7BafEERG=Ln0={ii~s zwOHmuL2DuRtq~k_k&f`J_2&u`BWBD>?W!-^m>7P%Jo5WXfjS)CXR+!wUB}He`l((p zc)nbU;(S>iC5Qd&rOEm-J&N-sNw;GU2lvA=UMr)uF;r9M&GDO&Z9*E%AD#o#F1o^e zD{{#F>ok)8B=Q4VB`;za4vD^~9?&PV{{ZauOJVyswqBlpec6-{dv z^L!h9THBFLgaC{9?Da!~`A^92_h*`B7)_d-Do3<>`|m@>R-3&S^ky3kLj=yNXOtrS z)Y}Xxeq)`W_kd-T-piFa6|g|o?G4%>KT2g%Ln7XgrdebD`KIPRfy~6;K&HAMHwy#u zjk7hIUCpGGRF`igS|?HU#d6N+Su9l!o^M#ySQ`nb-R=`P_!BudOYzSe9#a#!RTg-;Z&$bzMP#55r(q>Ulim=F56H>rdB> z3nuzi!uzyv=%R`62}Rj8OdEg3wnT>pVPL=T4&T z70vwA_8H+Y7w_lmvq0)a3^>^sNC%6J=e)#9dKkRPH20u19bgZ13e2AMb4b)P-Wgn0 zj<;MVO27uMh>opYuU)P^3`~E_>snh-x#}f@^NE#XKV*~E(a(2$)(tjA5p6QGZ7Alk z(DjM=ocFzC;3=*SQb#la9F{x%OdqYJu)VS>xr<#F$lBw-S2^(A!NdKeWoy9i1=>}H zuHhFr{>3aw(c7`{iYBZF=2G zajL}vHeOpoQ$d}7L}71);}#B%S5_pk=>4y$9%XGow|hZrM51NgL?60EcsRa#j$=uu zxM|Cbs$9qfzha2W(`-ncVoXqZX-V$c1i7M!ZK(uF|Euh@;8A1wJs_ZS%!jVXv92z# zIYni_r_6&jDWA>FW#8lVLEmC&GQ@kfE^~82TP(~p>x>jM&IobiKYY@ar8FwnjC2+$>vSs}F z!7|dxvVIc=f+DX9#0x}}W*D%97L&rlBI+1)Ta+_douM7h^+iZ2$we#40=JBH*_mhM zV&{PTR@%Z9+o|8c8?f65W3{`?k12ifI5YA~3)=vSr_aMIxUJtb(#$hw>BRxX3_mc5 z7Pz_YtxZ!5OaCDm4_y1VG*&efCW1=4A zEbEt5%>C#L`rgKX`WjeKg~x$#On_1#?0uO?tSjN9B!XU2Sa$5a<#)>=>#o-!`ZcR| zJ0UfmM8o{PW^%u+xY7#^=ZBk8NxJz5oU`6_eeTQUR8wRb7{zYvIUVsfGvsGv6Uf|K z^sc_|%td5=e)epzP9p?BvlU|!8&_EEL0t?As_!cVDh3k5Z(h(YjS(rwjdl!pA;L}l zmH6QZV#8G_9>|AD#@CzXf%WFAu+7)v1moDI;nT^&75=IUZ20tn>@9(RuTHKtUrEEO za>2E2N5_M)(b0xL$p#)7$!*)(NVqwOn-WhL^EV5`M4UE4P zeZzbmtKScl6u?-=)Tdj=yrW@ydD*|pv+hIN6&ivZ+yfU_MgiY->ciaLkoM%={4I{`)i@(XmN}dk?F5!~=^} z15t-dZ7NI8bqFkfMA!oV9ajcnN^5cI1c~sUj0a2foB)M~8nFGOPIC!#L*_3n?s6|x zW1O|yH(C5|CS#<+O=*y3I3~I``Wi==UY^(?uE|5E_543k=17#H8Y&-Sjs^u&*U5Ei z@^s1GVzm2*gaw6L-QVg1iVR8aW`Z%foSkE7yE(E}1-FBV 
z?qlgHxnmh(A0WaJkWIQgJAd>!A)fD4Uh)q|Kkw~wy0zK%b;uXsmO$s|dbDc0)yCQ4 z%OvD9MwoT_c2%Jvkd^s3ul@Ma%-wivNJ)!#*pEboYHxR7&FjNXua}{kqz2WnRy)EX12Ukr`~A>&N=;xiVW?nPO^i`jpCVhRK>tbK`%5BMegbat zrLCQp{lE|Jai`6}?b#Ty0=$0qgciVxaxTVdC^m+gBr zw&}<3`Co7o5zRF?&Ns@}R2-(|;a`?F`afNcp5Qos9VQwhI?a&GCZj;~sof)DG4@Qq zc|2gQ&cu}Dw_89Bi@eDDdC{9~4qBk$?Btn9J(&L)2ydr>eF{2$#!1*Pl#%)j_&ND$ zD{e*fH`yaPerIyGLqHh@b+5>GR>4a>Yd^fzV(_9e1Wxb8lCrNw_Ip2CJr1mkq?KcDQw1Aq;wV7c=J#GT0nn7}&wN9~b(3uvsu!FD;=M z_#WF^@GmPepx3f_cee6Fyh^tO%wA%wN4$VQH~57v+EEJT3?i`mvUtB^){TFTYSouc z7XIl~w*FadDp>i`44TxRo7M1fx%JA`n6FDRb$oZa8A-sp!^q14McF4 z)mAn@l<0$UK5}biEAQpTlC=8lhVIvIn7dmVKin{QdGIcEWh{1An^cUcu%ibzu`NWoU2@c{vn=IF1!Tl%-qJcXi*-=<0sFwtQCG6%W8g4le#M0( zNlEr|DSnMgA{f6^=zDxZo{vLe>-O&oAA%#AjWD_f5taDuD{ubfIL&f|;XLR2P#+o6 zgQe9(kf7f~NmFD~UE20ogEY*3A*s;>`^AVB=R;@U8 zOg{N>Z4gRp95JVGFhzZQsbRYG>5(mh(^GcZfbP9gXDb0MHwM#Vt-QWi^Mlr~^$50a zgl-&0%5gTI#R#&^gUF_&R=}b3w-AZ(SZSEcn5D=dG%KwssPzL1_Ho28omyUFr)HouH5$HEK4g7tW z3zX;SL9URQ^orWke@**iamze1Ju9;SUYEdSkl&ZN(F(4*hEJ_C>5<2qUT z&O{@?nJIUdy@B`#6F2*JKkdowQMy8Tl&YMI*srfBn#e?suhWCyw6ebu@QdQNOEsuB z-B!Y863K1RTGHhl8etl+P-U`lDfOm+QK{zuZOIAx=lAg>@X$$uTV)+6Va0%6ZEOV` z&1#&RX2gy;-o^IQ$MS);THHr|1MDo7(jshj{slLK8QEy)4S&#cX}Q_(=sAWPHIAlp(vN za%vSAgfxM%*M&|?cAx&+aT^qcsh{4s^^1EG4f7YeV0~7nH5ZXro2gxz-fur`MyDln zsdZ3jTk5rsU%&b#JwD^2!etPyC-d0ms}FCj{{nj1lx`$q8h*XCK?jzBGMU?tRu{l4 z%?sB2xZfFx)(IKqThJf4q))1vI;4GayfK+Hwxwy3+r{-RIfDHkRF1i_D6pq?3HAee ztDaFay|DaB;bYa#msRJ1`h!?)izBX4b)aW=63t!S|Up&r&G1m=&&OmA~*FkTAT><=DYD- z=s?HegjSZG;8pkWN^#2^3~AqV(Lxhc(etM>C3OW{7M*7Ci;NVawW-0vec0?Jv}5Y07^ye@XHV1Qst$iHgzUt2 zVYU5R;)CYI;3EEGaq8g3XpY-Ql~`SGAF{KJAz#`6_7x3Z>vANt9TL3z_yRzo>C1aj z4K&#YCr4(=xaK|Zqi9Qrdm&Wb<+xUE+2(AZttxy@^QSho*LO@Z5Q~@41~isSmai3* zfL(qoI}>e4d|F=hizKQv?tIkHVj<4YX%635*)Wul_GsRH7#V+NsA(+tgSq3E=97awMrav|1!Z!NGMCpjPXH&xBC$-gQuH1fP+x>=Z;$^CS}Ik}PUCrkh{v3s1|voX8Y-J;}QFu%iT1_$z$N#bsKSw2Q%;PW~ZgUQJxNx|Yj1~atE_^X7y;AkfT zGh|xPC4_f+n!*`5Onj`C@m=Uf;(yicPN9j@8AaYWX7UK!0&m5-+eMQ70y-DtZ@S4D z^X?m=c3E*!#oBJaIe}Ih=o}Ttq&)av3~n>AOH&TO6PnYbn$lHE58Hzi)qjh1x^NQZ zuP)O~4&50snu%VZ-=Fd2I*2O%xKn%o)1HLEcI_w0z^x;6Yylb7yqo|F1naHO zoPrxKU3O)|yTCJ=;yQWIfH&6dbiv@Rt61CbJ)<^qW4Z$}j{ZSC^DaVjipJ&{2>&{l zuguIZ8rDT`+)6g70EvurAH&@Juk^j1clq}71gs>-Z=V)Ptq{vk$>qHOw|C8>zf4^z zMS4IK)*W7ks5BN$t#%qJHe?a(m6B_BRDSrZkx^a>+OEjzK?CB1#(u8+={bz`DcMRt%BxrP_d*QN$)YyYbEAG-@Bwvq8pr&vTq6gBe%us>x;u?8l>?6g>;onStMR={oskbyeYe`ysbT?Mr@wBZbBQ1tc(TX zfeO^cVwe3~?JS(LX*_b6CpE~&%NJE01$t*O z37>y)GrSTgiN3FR$YV9SN>>?k#+#=8C1sc`R>SG;WVPD;5MTYHQN>~0GH?;+o%3R# zS&BF!J)f`N^s7&o^%G7bc|sGa8EE#`VT$r1VRUv_x+g-HDEW#af#;vTAf6}4b`LdP z5I%tVrH$fC^TLxQtu+w}#))|Ae>V%|v>)5(ZjdH`Bi!>u+y)Pk$1&Pq08M^5F#y^z zRJ+lj<6Du-d9^_)7!0hgyUi#scADzZ8S7q6Q#ZEH2eQC1R$vMQppZu zCti;b`fU9-N#e_m22qslC9v}Q0}1CJ97SMm>{*F^N4R6)k5vN>i?r~g<%j2b zs;e`@&RV=hvvj)7JfG%Bias;`do{mcBE#NrCh1gKf0e13Lj6h?));ebErxd<{&d_#H6Ui>WMs7TvS zZ_GQd3TC&R9Ux~h@29~Wr;~a8g*D#GnG=XOQbR0l(a_j}hRGwZhkV|gpab9m?dX-l zGI=%Qz2HUy8+}1Yvkri7%fj4)iq_sp#}pngtl>*yYnXu0*R~j&>d8X4M3Co9+|$`8 zuG)K2EX~8L zTsHUU3~$2N^%8rS$ctYw z>X4xIkyqzafZ-PHRYUi{%)=Dl9Fc;>z4NPrH1t@DcFRg}Ip!zTQg>QnY+WE+M`mkl zGwfO%L^B3)KiLB4i`$CmMI7mKdk0+oblqG})ml>C`MIIb4AGtnlaZ#AZ2_wVrO@VN z>&xS&i}$3PJ!y6F_tZ^>TXSEo?+uX`tnalLN}NxsEj7=P!semlDiGsgf!(Xnd!L0% zEuPuO7Bk!#O*6gb%KQ*R-K<4zvQz>GZp`P4&-@GB%L%-#ZV2`1WN6rsU^z^T7U=Dl z{;P7o4+}O=S59v&PdB#M>mdzdHp?dZS!nj|^`-FI@3|Ban-qQDzw@Nu*hr1!{X{?N zHdPf;nM^51Cl4YXs`{SfkQXx$(*&XQ1+gA+iG#E`s3>_ew~KtU?R5yLS#gSnBgf=ur0o; zsg>R|@rVc!^)QfhZc&SN5DzV_+fUfZhPr9WXa^qgmd!R&L|vc9X4(atLTIn!7u&u) 
zAgHI~j~#sFkI$+t%@aAuC$4|xp=Zc0yHQb0q2uS63M{y*&oO(}lpVlwJ2Y8u<}m_IDaS*sK$Vjue!2{5k-btk`_ddho9R4s!0Ow@pMKg|L)`LjWu`u( ziI=q3tXW!$f!S=szl)u4EvFLmQ#;8uhlA3P?Q$x$7WU%$370Pi0JHr_7ce(uWV(N) zm~Del@R%5SgHnH!80mQsDX0)tH7MBpj2JoGwYO9N+20bVM?H|;51xsBsjP7`DDJnB z^j)MK$C1$)wd^*Jjja*V)W$6>DPC8ZKLw_VXsS;1Lp zyhC;`7lcnN^+dJM-nntFHpJf`oAxv6QNOiu7Hl?(UPY@;oxT;aS?>X3s4wIDT2mE~);?%rg0~&s?}h99jBND8buB`1 zVq$@-+QvJ$RyRR|i8w{P`%*vqCt~ZDa?;vkiH_DqPPq39s^eh_w~J6rewsa49w}A$ zuuEh34ed&CRjNKMwd^Itku#VRA0FX#9HyTsUXmrQDzO3*M0?61c{bk4%)0Xu)2gv3 zE3~%^WT8>S-YIDjmkS}arR(?S6l{9*4+bga=H*;2qe-_)CqKQ*np%8H_-&iZQDyuu zI<+_Af==gEZ{B&22j7g{mD^m^7#odLxnbXdaB9|jw zr_fVJFwccvt=KU0m!khi#9Mbo;r(B{G}0>F803JYbc%q|h?GbPNOwpNjWh^IGc-!) zpu|v8gLKajGlUF13_}fd*XR4Yciks&{@81u*V+4=v)-)+dzr&&VqVWF<)AF~*)-qk z-RTr0nDAEUu9WUimEeqVNNkCmRT(vP)J1#G?IAa|FR?Fmt+1I@>;ag0ed>i_ zOnva`KA5rhQ|Pp_ww-E|Zs%)nNz1czG04&(V;h!sxujd}Dvdk$n@|@-w_0I~ zMk+AuIC$N~>ctu06OQ5#l>nB_)GDLUyO=Q=NK04+}!^XS&%wR%) zvWIP-d=Fns++6HfXoFE~O>1Dwb|wNaS;xPB|30^07|{qW-GshPT|s_b#oBL`sUB&t zdA**Mv9Bm2)Ns^=rTfPDwiad`AGUiCb|FXGIxx>{Z;!Y*0|QS)uaN7x2mSdzI5GKt zP!Q6vd#NX+xDcZfaJWtQh1xz-gY^>e{SI64(FvDM{l9n9J|MvK9`S%L@v~il|8L-! z`I?ygWgm?@B&MfW6&i-g0J%yA9J*L_mh`z?Qd%BT)t8@PdQ~0OO`Z;C_LLX3nO~1a z$zP#cCS+ZH?do{Iu-YuZo&BK@>2(#F9q7>9GvV{l^JNFA`)yShNJ8y#1P*#Tt8{C) zPk7;2;zB8d8F|J^y{l}-X?dpbT}$G@np?J-3y$2HAJSF_3V0bx#6FF}97NraHJI?&c2 zX?be-={Wjor1~V!W6=Rxoe_l7>bZK=E}G?bb{Q0WdjY(^sMH9Xi>jZdIV^X8X)I3s z5RK8974;WZQltV7(Ml+PBJCo&jLcuz;;B1)JIjH#x<9iTxahgR<$2aD{x41M2HJDC z-y<1X-f5?&TSDJ)d4;V~S`GMD^T(%&BGhI@y#Dp@^t_^cY3fkZFb5pQ?a^c{TVg8s zR#@2qLT77#+j`*fFPrK<*tV|N?PqV8A+v#wr_|y#gVOc&Le-|^9&^R@;z zd``km(;{>L zSvC(iGC%7oV3g;vNy<1CMctfm-k*53cdpjSM4#<1(tvIc9O8hNaX==;`>RPQ8SjxT zLBtco3G`dNpmzfr)K*telh`j^o9uNotybos%Rjtef6qCOfB{zjT786{-WH)SX(Dek zq2r!MRE5e+Tlm3NhMqeHSuA}#ta#q!_6iKRa(woi0)~9n319gqIlzA88CNOJcDrwN zkBc=i(>>0e4Edb#(js7!b$w~;P3p@1w$S3ggklo6Duap4n7#e$ zpg0?;I={O$TXfzk`M`S*htIxy*?C%cEWdINZg&$a9rNs`WlgVa@bF4BZ;c2SJo_mI zjYP8-W`CE*J$wUKJa0;ELqHL%a_@M~rtuuBAQIAsB=wYe-K$6nrHS2(Sl0~@K32;L z_b!9Iz;T;RrWZib(~zcCktQ?h`Jx6oXQcDntj?#2v6O|*!>&@Hiky?rz#Q6jt!{b^>sfv>%ewF_w@htE1 z@!WWF=t?Y%ynjk_SZN!1l^TXE8-H;{DHgmVw=Po<_%C;S7-JO4r)XMi$7WrH%U-!G z%U{3w=f)NV$0uF!#0z~PvUPpQo2FP?qZbG@jJ<$dbPE;JTb~%Ybxh4ZqP)E~ipT4QnOJYWdI&BdBjEQPG=0q-w>>kOweq_{1} zMYl&;gjHO8^oeNU3FWOwyA>KUrO<9r=`pdu8YUr&>|W>W zJ}7fYE;{|9#9SA=%%~2+-3#3F=4&|I9xRsm0Iw3K!9qKX5TP>M`D;0Anq(FU4f^M) z0%=j0Lx-7_v;4=^}?#(6b@I&bf}{XtUl2zSxB zeyhIitN!vwo21E<+=rg9&@liZs}a|9Ky{xB_P{|Y_AiQhqGVx z=z0H06-n~6kVoK{fzDK3*WJ0tz5jdapx26h=26<4DxC9pv$!N($nBvA=jv(E1=~Dy z>+1^Wv|vY~sI;MO#{I5)N8y-xK71aPMabb~LKRlWpitXu^PV?>!YhRVkY@$d;+`A{W~O zN;2q^QZ4T=*;2uMtkFGh;wRX?hk5L%z z19BzmFg)RxPdNLyspb%HSR4m1hBFXW-++32hm~8eC)0_Wb`lWQ`#8|V$;UX$s*doK@uedtCn;7-| z`1*~`W=zr_k*e={Bu<9DIX9P~63bIQfC+uhABGg~%SaW`5p|Y(h^C@O$8$!dIRU|$ zo?Yre#iEW}~=0WdGkRfRm^tNCMmy;yGn_vdnLf^86P2gH(pgDsRA1pok^$XV!Di&*knn5KzmfIO6PE#<2} z!c1ZJ^KHN%xvuR4m_Q`G4t(pSiert4Bk;%v-;o$@@05QD48Dnu&xP^e5pvJ86KNk^4`I6mqZcNkWCvHy@ zOp}i-{UG#FW4{OS(58{Gp8yl9uKdX#9@d&CQk~1=EQ*(-_<50WMRtaVn7yj&MviUDauc*VgjhsXXC&{yH41Lg zmp%Xf)^0wrJ6#xua%9c757uD!dv8wlM~T_F%qN60%=y)tame(nq^!P*srhs>JW4~( zXXXIq)alJ(uD?)UoD~A-K~FoN8{-se>nKa960GBR}uL4Nloo^3s0{W zT1o6V?P2It6vrG#RmdQ~a(avN7NLqN7}gNTfuH;kXi5{hY9W@a5(Gl2&z^PpS^$Gw zAE6xVlikebvl{evnc1tu-5cdbE|^Vf!2m+uFpHMA)|8FVTE(v(j#65eAb(j@9XS2A zgJt$oNV#Zh%psius0&*D-jsO1b$aRJL=!lo3e%NaX1+%dPJ~2j03)i%5wTL>*f_kR z19yPK$LAh$>&)h2YbaX?dQdpbi`4{WNYgZoWh_GXfu;!^0#EFHBU1Xw7(shYFXU&+ zusxXiwu~buIT>U=O?x7N6asW97Z}1~=9G;qDn_V=58sZo^UzW8uK%G~*ieJ_Q6EBd zT=6Vy!6&Z^Vol^rp6E6KN*h`;Xr4ypy=lL5jb=PqKjQpk`TFyV(Q>}eG?`}KtiIJP 
zs>NANaw^_9n=5#?T%{oy&-R0q>b}fQScP5L?Dv_lP|MtC4)nbYcV7huF11#WKwrIY z+xO%kBql^LeN|xSza-`-`IIu-_s|)FGbgG+n)a)eM&IA_NOrANVUdBU173H;_&;egPy8Y`h1g<#i*)~Y;R zXaPN45>;ADe>+dRFKwEudrm%eu2eiL`_9Uf${G$M1j4?ifn5(`iRO9Kz+i<`cYwKm zWr)Y|rAap1l*re>x$vTCIefV=N?Gvw?ohr9ys(=Ld}8bAu;bEiGJ%3ff zjAJP=%DsMClE04rpJV{b)QND8HJn4Lug9OM*9u$8*rt-cg8C}&OPRU9*x+%?)BC5v z4H!{$XUrkg{QMYp>zpA&@#vCQD&u*F(RiR=?u9$Y(?oV)l}!L4zjGZ~jFD8cU9!oQM8 zv?^hh4EV$_WT^A3{ArCLDP%62WEv=n(mn4`a9}mc}>gVQ4vQ@@8&#k1mNWdl% zIK_# zbo%wbyc<@%Apema+e}idDaXfv8AS|&H<=X2)%A@X^m>wzgWt;Ju3$A<9mB9G{~h*8 zKX1nIadA$By`PM^=m)vad)cbSmd-f}*hb`E5Y@n9&&3$>z{;KNd&9@WYKhJuXD`xthRW_BbVpL*l1g_a&Y6d)bR%TehmLWB-(6>0=P&X{8EPg z%BE?jC)$~6drT#aa7B>+;Ixz>=AzP@Ir$30>*6BGvu3EnTcdql?uOL(J!L@Bz~54Z ziJ+;UB3Fu3D`v-xyoWzEm;cCgmFBTkbgq81EAAG>ikciz>5xrt5AiM~RS=$u@Yg1x z@hoA4oY|5vY~wq)d6$87o{Hf?hgEqBNyfEHd9EdV3r_&W^lV<2ZFMWI#9?w0JOxED z#3VxA=x@&o*18RbsX+)@MYpzDThCez@(&_e_(arhi7C=GW%ooX{tm69)sXWCFcnye zi5sP2hAn<8-uUx=qBzBTQ)HAvnaz$yF>$-YHzea9e2noTm($b?kK$`9qEEs{Nbxr_ z`~jnX{tS}m#n-E(TOvkZLyKKzesYA>MV8v8{mam{)_#gk^l;M(E@Z#SyW85-&Jiql z)1K$tAN@k{5+9=skR%X(P(PAdFfyQ3x`!Ah8gZ7+ohwVI^U#q-$5ld}Q3Tl#yiS2> zkxqA&tJ;l{e9ZXi((!P0kp_dpgMX3a$bFMmsP?E+X^6eEkby|PhBMT#l2p>VwzMa_ zV1P7Fxu>>a^k<<*(71m^#bjArfWAqwx>5#B+;)v|s z8~A2}g%;^eo%P520AMNUuhG@|3}sf!h}QVTL29LNt`QU6&?_$RD=92JM8U>-#+p~8 zRaL9w3(=nFPf4;iPrQvqVkA%DkTjSKBs(N7>sT@}AtGb?10Pwk>U;lG)lWzo5qF0n z8=d8l@7IVuLi!x)>#jUxIm!ZMHu&1Nsw(Ej4+w5yw6>DfKheo`!0HQqmxFm+7VF)x z;yk`O%RAY#8b#;7zkboRelH%Q3OF)1+no4!I>}0{bTEY^*AQe4`#_sOW&Y0vtxyZm zG7y$nxNHwGqZhD}fqm4iS9F$YZo}8~Dw{KqXbFMxt+tU7*|!Ao7WRLLut)VKDkjaA zkr7Bnwe}A0iCDLWKh6*7eN>iyBx^nM#Pq=f4*B~22#U}09eC)W#_2|HAui+_A|Qbn z$sVki-I~U2Gqp*tC_svU}YuXjGX##eDj^Lbz6+<6a(8(F8PaEzBjyj zT)$LlfV~0_*pwBhjhXS^7vL}VHxx;3aQ3?35!*FZVWz_Z3MQpDlfW@uSKd(GIQe5L<)vhHH_w}@^A*)jpNJC zFwPJi2edsVL{w=JsI?+5-sI8i$?6}Mx6-q!h$+mG8V^xf7M$|8&A7)fQ@5VPb9BId za3Ui(L*-?$3Ps&tc}?ey5B(Pw>qm)F5NCWvA(wTRE}bgl-QLtVw(BRPtpl_&KWWhR z4`<_`XAfoeX!;@wNovkrRUQiR4fRMm6LuW!l5lC@H>)rq@DK62hooe=LbF3|Obh4q z2!c{+(>hgH-LbWwofCsIBEr3Qf%2xeK6>gf%8`t)z9`b$$SQQnY5vl>3e`N;Tfz#K4klwSisQwXDefX9IzvMwt;XDE9DMt1oj44}sY$Q)$CgCdj&yFc zla_m$#))2=-Av2j+vA`#9|E2)J%5*raAefF;mn^^vis3(hni;fZq0Xk)iCZ_M7b<` z#iuu{vlr2EkSLTtb<&2T`mBjL3sLKD>2PcmTJD<%h>63 z#nsCJ+ zWlg~|y|QJZRQ9yZdZ*m~Yu@2Uq?&K7di5|~1b%`|{m(-+Kjs?cSaF-^Nb;`DKE%NE z+w9cXd50UZ&-jpD4t9J%V45n)>)6o%c4ag!GXnT9H{yT-4l) z6f^k6d&R3k3U9{N4Knu|d)DvI=nphAcys7Pes;=Tr9~;HCt(wnbwV?UQ!Lh<2;=yf z=8QwtCt%vgBed)QU6`I8JI+d0#xr*B*CHcX_!;}nu3V^~&ib?hz@CSiKqtc{#J<_# zrJ(E+P`zD@)jE+zfmBgA7-1efI=1y*?&O~rrN0e#me89rxbNatY%N0c0aE~Ss(v#? zu3jOM&(S91@=&LSkLmt}a+h`Yb z8KGy1#+OUNE*8AAvb*=$f|NPX-^0ONXS>6&HWzry9)T1b?V6U-sFxZMI5&!SA5JP? 
zybM_G-?esur4JHZ;0kD->Dh0|VQD%pJwb}rph&t@>42MYf%aAI2r3A&O60qElK`=yr>!$yBE+|r*?P)qP<$)s*#x62T@2iHeKX~}t8DO?&c&joo5q>*M zcx9)mPVn4cv*;ifII|dD8zF{PJM!&)nGZ&6b0|vlsN@xYw>n1^(Ocp_>CINPaJZSS zSjrNE`Mt#xOK?}`UFN=0v+i=wvcZcqB=Y$_96c|mmeeUwO=1TmYpRF9Y_Veqfo&OPmsc2Mm*5%@!eWP zpp%L){=DT#(c8aLj?cPPITgba>-T_ny7}7?BV%pg9KOIm6dX7;4o`UCKP3xR!{l4v z+@rj?ufQn;{Bjjo%`Wb(nyQuZ66A z?*6Of0bnQQ$-uTiiiITzR&g zQca>FHT%=#XGBVDR8^nOqA95;QQmYUT<}UuJO-4gqDq2TG;=sj!J8*%Zl@qStTCA;)p`z^ zGLPwg!Z~?T)&h={NtwSUFb+C-ePpKr=HTVx46=DQWMIW~mDV{B?usYUTRd7q7}W+& zYr*@zdG?C^w^{r@N|^wa>JAbXW#R5@>$H-n0&io=icSWunN$>4)nEMJ+GxRPjCM|bh9I9c621tYE0)&?wQP$< z){bPx!`ZbZ4(1IwxNYZzc#%Xm$o@;38&s9^7)Q45PfAtGvkR3QVL=o^28Wr7DT4?c zYZ80|+}-Q5B2;@=16l{U>;UZdxW7b&I>q~zQ+*7pv#ecZaZ1JGj+?~PH)aZRJHADA zF0=|K((&<}=C+F=Qzyfm>#mPDim9kC(CIB?-|!;!R7_+!P!w{yz+H_+JP>xg+xx(* z69tkGi&;#NVAU|Uytx0lR23?XDw-Vlz9G+mx%6NRn$RE!Lqf&b?(>kW|zC|#1uh?in0+r?C~pS zWEKXoN(ja-fQ|P96rR;jONNF^)ywiT|9Jty6CjL!{nPgP(}+7Wm)&XB8r$o z9{-SZX=P@}_w}d2fzT%YMDCE5Q6e~xM>F@pQc9Q?XCara_{3r>#^y`Meu$JqCs1=- zY9v~QJ!aR^&Lmh6jN&}t;k zCywU+Q~(gSe|xuw+{?C*WZRfOtUD{}>!N>U9=EOBt!e<>rhtVb^+P6d-m!KOOR!09 zSK3j&=^f{1J&*}b;LSl&@--n$MLtQ6KVW8wtCcT{&2Eyd%_HY3Sf${gT-!s$lp(BJ zv%E$$w9q!ajggK7eQ&nBJ`lViY_rZ%{bUE@$=P^tDEWZsFKqggX<+7?hgC0zOI|2z zgj*LkkM%h@ks`h#+CEUU8i3{j+mkW)?4Mo zoStO*#84i?Eyu?#ycp4kjy-pg>ql|;OPdyvHI%gh%^kc}z^e)h70#uskP&t&H&L<* z4Vrq5tr0whP@adhmtI{whUh(U`&J`hdM92#gIpLD%Fez2M-QsPMOhNMM<`p>6&5qF z8NgFCO-A^*n|{UTlC2}+TfLKb_iL+%wiQXb(`tmJZ|N0l(|&*EgmLwv#UOoei)(Lj z`tUPQe0-KrS0g*Va1eAES$chWO4-zxzH;&jZ+<;s{>9mDoMH(D_M1>6^LeaBsH*PP z8{uW%z|E$1rJK!E!K`*zl?O`3qGHibMT%(9QBBg5c}|-I?H2j$E0@PF)C)Q8^`+kt zQ!kQ+P0E)`-UfyYRIoda&3^thH>+e9<8atd({Xf#9~5pC^3yggQfqe(AX$99_@RS%vsVE#_8y$2b=N zI`Si?Y=7&IF7{CslipGIV+XAr{+FFk?4l$ zdg=n*tRasUcl~yA#tIx;Lq#`zw|G0d*d*l^mCfNu(=#FrIJO@1ePnstKs7{be}#8! 
z%I9G~0Sf%dnvCtKk{gNBRa+d0iq9c7!)!0^W`7w)>=5caE!Dc==-vcjV@_u374qYg zMo29}HuaKd*}r_R^3aAMt2SY^oR{zHr%cT=ehi+gh;tVZ+IkoZACf+_{{s!A|640- zXXK=(kZ&61V!I5X`MjT2=V##)>t9>j>c+aqj53hn z$oW>~dTHZG@Fnn)!ovO|g=X`{SE=op&-f|IbRHSuJ<|BpL|E+nHYLtizK_bvSm;|r z@>a2KKz6nZ<_~Yt^gg_VBTRY>?P;)`5#(6KIU+1Z7Us}AZ&h&iT<|#veNfwt6F-d7 z>ri~kDz-tVDtt-&8|oFpbtACTTXlnY$8*c8qX!;pZDj=Vn%eYrus92kkc=@v{K2JrZM$zp_}96!ffdDHWxR~ z9J?7Ax73f*N%cQA^FAu;$$5{+$n$68-cqb1O_=e3IEv64foDQ)Z>pbfIof>}{Q1xbi4dW*CiSU}(?J{UVOi zUD=MTj-OEsy>UTD1H6Tv4K&w5*c?TxcU*__MXf%~sUylVyLKCY!w7a85c-Nva{E z$z+D-DkD{Cv$N~+Yr!B1tOi0TRLqf6E~F#lmiw2&&3-E`_Ifb(Uas2Im73Zv=c=pawt-JAqho=XC_rt!}nH06JZS-qq z`_jK`u5?x$YIe$^e=J4$Kk?Akec1Xl}L&?Zg>!8DHj=%2QN3BRys~)C9O1;pI%jm%ORgP*nbn zUauP4d{5ZzI_tlKp2;5IEeM#Gv{%MGvp;!QXh2F-!6t%l$USZ=+It82X?PU95}MmbvmInYo8XL4^2y-pXaf z`)d?U$3uXvzlF2Ker^it6fRl$J`d7cn86z06zu1G3P+2sGiiiwk7PqLeb#}Y>r=M4 z)|MN{!Mk56?iZn1iv2*`Q!%43hh{Bcg~wcKy~f0hvfU9I%VvS-_*5gBOp@jT2+!r4k(plu<9LK%uS+lR`rgL3><{Nwi-RO$|Syt|C z0uqam2EV7gskYwkHw@%=W$vF;>6P90t)~h0{}=Hs8N7dYf(JyXFDb4}r8MOpR_7b= zzu}LHKAk)zn37&%8c*W1fOWMIWCdk9skv#nRvUe@Zy}`-Z=(=}y>lX+kN$LZS*E~4 z`%;i)b>F0TC<;JM$ET@>EX|}7BB@v@k9eb{uo#h<7^%Ak$nKgWE1EhUD-Q$lDh*Au zY9`?;-s~uD8!pI5TuTZ_y`5AD7f$Q=O(T31oTtE<5s;;mkCS(D)PeHZ&k>I51$id> zd=Uc88A%=O3@mw{aFe=W|3p4j?^@8eUrfxYIA*bdLQ<$-Jv`1j^Zm{KQ4cD-RHQ_``_PO zafb;R@r#t2(b2t<`SSkYoOT4=U#Ov9O&R)|@vc!^Ya~+OE9-*j)?$$jx(b@WvPLtO zP5UWFaVyxqrl-#Td!Ko0uG#%Vu>63eF|$bARqVx#GS!J;<@HglOP>sc}>#y0YJk-02(QMe~^Pf``%RY@T5Er+A z-;V8&vMO)UW$b3^ay#h3ig1mHwKiJPn zi9lfY?qpSc=U#gCH&A70FDJC}Xrf`$V`VNpn5#d#f*0qf&%W^roAR2QgG0>B!B62) zpG<2LtP?q9tbD|Nr+!~`s2i`6IHf6wq4?K6D*fm4Bu!>-fAlQEE-UF<)X5DIrA)>n zAp6!Z_9foD1Ntj~4H2q%MH_yM?fMchgxAdHL{+bvO9ux1BUZ-nFYmjG~^EdB2^ptOHnSB0p-9$RwG9w4RNS3T|9g`bbiK{`h zrdQW;@)b9E=Bqu;%ZHlL$pHC3*^S>U$+sHWi1jST(ke#A%mZV|pVMi{khar#d42Ed z`yu;4LrLPNeuBS3XEl6MQb=UZL{s2bB%t8q7$V$iu*Pr|b_l`siOUmvB5ODfYcVmx zrW?c(*xC?_IIIzgypyL1NLu8!e7@Wu6yM{*6VFN!`@88b{Y67@*oBea^g!EQbgF}2 z*|5%8s`sA8m&R52`X0HJpOFG=%M4P|#HHKabu|yfIM;wMMQwvdg~Y>cu!iKBzWU(p zVVnO%AENuE@Xng=T5~~+oQc%VYSc|^Qej#1Uts<9w}^UfB^PRZAlncfA9+%{JUzGj zXd8gtK>k$8YUVhN=M&rCqOVVPMP)~Q7e_Z%kE&+X3|s?cL{FxpaDJg@SVj-qo$Y;& z1+?q%vmzskt8Ux$MSf_Kv(F`yI-%aDFXPs_StY-2^$YgX%A%-%#^OKcno%C$doh%V2??8ZnfC z{k%CrC%Py;(VM`u;?j7n-wM}wSQJM)sGT!I(IC)XB99rTG^)oewjt))pj4o^3*ssm zZx41OTcBaGm9a4P&`7t-sxRvWU6M^$;7#;KOYSeN3=Nqs*;-YYbP~TvWp55ayA*k5 zh~Sk}W^NX^MDpz^>xAvVl|P@A)cY{D3;5fJCY6&K)J1yzV{SVg^Owxq(#H~8tbEIr zbTLJ`Ia>b8YPYHVoKa7!4Ug2xC0_=eCAhZbeB!1P!}}bp39QUa%{=s*x66v$*y2yf z3JSCI?2a@_b02r%PcYi~yYp^iPr!nYJC(q!h79xY!^Xqb z6n0=D&1Wc4FDprBa`>}G9$|oTAZd(o&c7Bgaq<*kt4-~7KW8iT~TzR%iljP1DZSBvgVUO!AKWXWL<+s%i zeqX_64^Z7cwn~Bppc!K>Oqb-)XkPDVWill;-3i}$jcsJzFZ}dw(6hM4=U>Y&D+jKR zL?%5LIz|pvqbp^v-vizu;p{*ZxDPtIS6xCw@YzrI~xV68cix5KP2NO4XG=jH?2Z+8@bj&M0SN zFO0zT_Hi{0KwV_Y=>0Y-ggT=%!);T_ZBcytYDWw z>Wx}wzt86)s2n8a|1}jOd78@>tC^Wty`8y&%UJBjACsaEV~9=OwHK2i9`O*#pwJd! 
diff --git a/react/src/uploads/components/UploadPage.js b/react/src/uploads/components/UploadPage.js index dd2dc980..8ea4c15d 100644 --- a/react/src/uploads/components/UploadPage.js +++ b/react/src/uploads/components/UploadPage.js @@ -21,6 +21,7 @@ const UploadPage = (props) => { uploadFiles, replaceData, handleRadioChange, + downloadSpreadsheet, } = props; const selectionList = datasetList.map((obj, index) => ( @@ -41,6 +42,7 @@ const UploadPage = (props) => { > {selectionList} +
diff --git a/react/src/uploads/routes.js b/react/src/uploads/routes.js index f9d22f1e..6d3299f5 100644 --- a/react/src/uploads/routes.js +++ b/react/src/uploads/routes.js @@ -4,6 +4,7 @@ const UPLOAD = { MINIO_URL: '/api/minio/put', UPLOAD: `${API_BASE_PATH}/import_data`, LIST: `${API_BASE_PATH}/datasets_list`, // backend route for retrieving list of datasets (eg ldv_rebates) + DOWNLOAD_SPREADSHEET: `${API_BASE_PATH}/download_dataset` }; export default UPLOAD; From d4aed6d2ca5f3c41b96ca76254b6c8f3acb5b01c Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Wed, 28 Feb 2024 09:45:43 -0800 Subject: [PATCH 041/152] update minioBucketName --- .pipeline/lib/config.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pipeline/lib/config.js b/.pipeline/lib/config.js index 3c56fbe9..742c9aa6 100644 --- a/.pipeline/lib/config.js +++ b/.pipeline/lib/config.js @@ -42,7 +42,7 @@ const phases = { metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500Mi', metabaseMemoryLimit: '2Gi', metabaseReplicas: 1, frontendCpuRequest: '400m', frontendCpuLimit: '800m', frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendHost: `cthub-backend-dev-${changeId}.${ocpName}.gov.bc.ca`, backendReplicas: 1, - minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3Gi', minioBucketName: 'cthubdv', + minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3Gi', minioBucketName: 'zevadv', schemaspyCpuRequest: '50m', schemaspyCpuLimit: '200m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160, rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '700m', rabbitmqMemoryRequest: '500M', rabbitmqMemoryLimit: '1G', rabbitmqPvcSize: '1G', rabbitmqReplica: 1, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard', patroniCpuRequest: '200m', patroniCpuLimit: '400m', patroniMemoryRequest: '250Mi', patroniMemoryLimit: '500Mi', patroniPvcSize: '2G', patroniReplica: 2, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`}, @@ -54,7 +54,7 @@ const phases = { metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500Mi', metabaseMemoryLimit: '2Gi', metabaseReplicas: 1, frontendCpuRequest: '400m', frontendCpuLimit: '800m', frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, frontendMinReplicas: 1, frontendMaxReplicas: 3, backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendReplicas: 1, backendMinReplicas: 1, backendMaxReplicas: 3, backendHost: `cthub-backend-test.${ocpName}.gov.bc.ca`, - minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3G', minioBucketName: 'cthubts', + minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3G', minioBucketName: 'zevats', schemaspyCpuRequest: '20m', schemaspyCpuLimit: '200m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160, rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '700m', rabbitmqMemoryRequest: '500M', rabbitmqMemoryLimit: '700M', rabbitmqPvcSize: '1G', rabbitmqReplica: 2, 
rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard', patroniCpuRequest: '200m', patroniCpuLimit: '400m', patroniMemoryRequest: '250Mi', patroniMemoryLimit: '500Mi', patroniPvcSize: '5G', patroniReplica: 2, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`}, @@ -66,7 +66,7 @@ const phases = { host: `cthub-prod.${ocpName}.gov.bc.ca`, djangoDebug: 'False', logoutHostName: 'logon7.gov.bc.ca', dbHost: 'patroni-master-prod', frontendCpuRequest: '400m', frontendCpuLimit: '800m', frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, frontendMinReplicas: 1, frontendMaxReplicas: 3, backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendReplicas: 1, backendMinReplicas: 1, backendMaxReplicas: 3, backendHost: `cthub-backend-prod.${ocpName}.gov.bc.ca`, - minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3G', minioBucketName: 'cthubpr', + minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3G', minioBucketName: 'zevapr', schemaspyCpuRequest: '50m', schemaspyCpuLimit: '400m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160, rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '700m', rabbitmqMemoryRequest: '500M', rabbitmqMemoryLimit: '1G', rabbitmqPvcSize: '5G', rabbitmqReplica: 2, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard', patroniCpuRequest: '200m', patroniCpuLimit: '400m', patroniMemoryRequest: '250Mi', patroniMemoryLimit: '500Mi', patroniPvcSize: '8G', patroniReplica: 3, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`} From fa79229d545c42f2480068babd3818d50e801a53 Mon Sep 17 00:00:00 2001 From: tim738745 <98717409+tim738745@users.noreply.github.com> Date: Wed, 28 Feb 2024 10:55:53 -0800 Subject: [PATCH 042/152] fix: 169 - get uploader working on test (#184) --- django/api/services/minio.py | 13 ++++++++++--- django/api/settings.py | 1 + 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/django/api/services/minio.py b/django/api/services/minio.py index 33687f61..48c75d7f 100644 --- a/django/api/services/minio.py +++ b/django/api/services/minio.py @@ -11,10 +11,17 @@ ) +def get_refined_object_name(object_name): + prefix = settings.MINIO_PREFIX + if prefix: + return prefix + '/' + object_name + return object_name + + def minio_get_object(object_name): return MINIO.presigned_get_object( bucket_name=settings.MINIO_BUCKET_NAME, - object_name=object_name, + object_name=get_refined_object_name(object_name), expires=timedelta(seconds=3600) ) @@ -22,7 +29,7 @@ def minio_get_object(object_name): def minio_put_object(object_name): return MINIO.presigned_put_object( bucket_name=settings.MINIO_BUCKET_NAME, - object_name=object_name, + object_name=get_refined_object_name(object_name), expires=timedelta(seconds=7200) ) @@ -30,5 +37,5 @@ def minio_put_object(object_name): def minio_remove_object(object_name): return MINIO.remove_object( bucket_name=settings.MINIO_BUCKET_NAME, - object_name=object_name + object_name=get_refined_object_name(object_name), ) diff --git a/django/api/settings.py b/django/api/settings.py index 352956df..17f84e17 100644 --- a/django/api/settings.py +++ b/django/api/settings.py @@ -159,6 +159,7 @@ MINIO_USE_SSL = bool( os.getenv('MINIO_USE_SSL', 'False').lower() in ['true', 1] ) +MINIO_PREFIX = os.getenv('MINIO_PREFIX') 
DECODER_ACCESS_KEY = os.getenv('DECODER_ACCESS_KEY') From 657eae4a4b9e6a3ddba38f0f40e955c91ed4f7cb Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 29 Feb 2024 13:15:03 -0800 Subject: [PATCH 043/152] update backend host name --- .pipeline/lib/config.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pipeline/lib/config.js b/.pipeline/lib/config.js index 742c9aa6..b03f1886 100644 --- a/.pipeline/lib/config.js +++ b/.pipeline/lib/config.js @@ -41,7 +41,7 @@ const phases = { host: `cthub-dev.${ocpName}.gov.bc.ca`, djangoDebug: 'True', logoutHostName: 'logontest7.gov.bc.ca', dbHost: 'cthub-crunchy-dev-pgbouncer', metabaseCpuRequest: '200m', metabaseCpuLimit: '300m', metabaseMemoryRequest: '500Mi', metabaseMemoryLimit: '2Gi', metabaseReplicas: 1, frontendCpuRequest: '400m', frontendCpuLimit: '800m', frontendMemoryRequest: '600Mi', frontendMemoryLimit: '1200Mi', frontendReplicas: 1, - backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendHost: `cthub-backend-dev-${changeId}.${ocpName}.gov.bc.ca`, backendReplicas: 1, + backendCpuRequest: '50m', backendCpuLimit: '100m', backendMemoryRequest: '520Mi', backendMemoryLimit: '1Gi', backendHealthCheckDelay: 30, backendHost: `cthub-backend-dev.${ocpName}.gov.bc.ca`, backendReplicas: 1, minioCpuRequest: '30m', minioCpuLimit: '100m', minioMemoryRequest: '150Mi', minioMemoryLimit: '300Mi', minioPvcSize: '3Gi', minioBucketName: 'zevadv', schemaspyCpuRequest: '50m', schemaspyCpuLimit: '200m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160, rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '700m', rabbitmqMemoryRequest: '500M', rabbitmqMemoryLimit: '1G', rabbitmqPvcSize: '1G', rabbitmqReplica: 1, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard', From 31b03b016913b469f32f5ab67121204a57e29ce5 Mon Sep 17 00:00:00 2001 From: tim738745 <98717409+tim738745@users.noreply.github.com> Date: Thu, 29 Feb 2024 16:12:18 -0800 Subject: [PATCH 044/152] fix: 150 - add xlsxwriter (#189) --- django/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/django/requirements.txt b/django/requirements.txt index 4aa55ff2..b66bee95 100644 --- a/django/requirements.txt +++ b/django/requirements.txt @@ -14,3 +14,4 @@ python-dotenv==0.19.0 pandas==1.3.4 openpyxl==3.0.9 minio==7.1.1 +xlsxwriter==3.2.0 From ff7136d7926cf935cfaa5d63d110bf3b9866acd4 Mon Sep 17 00:00:00 2001 From: Emily <44536222+emi-hi@users.noreply.github.com> Date: Mon, 4 Mar 2024 12:28:02 -0800 Subject: [PATCH 045/152] Feat Cthub 168 PARTIAL admin page (#193) * feat: adds backend work for retrieving current user and user list * feat: adds (UNSTYLED) frontend work for listing users and permissions * chore: removes idir serializermethodField * feat: adds styling to user admin --- django/api/serializers/permission.py | 16 ++++ django/api/serializers/user.py | 36 +++++++++ django/api/urls.py | 4 + django/api/viewsets/user.py | 56 +++++++++++++ react/src/app/styles/App.scss | 41 ++++++++++ react/src/app/styles/Users.scss | 25 ++++++ react/src/app/styles/index.scss | 26 +----- react/src/uploads/UploadContainer.js | 11 +++ react/src/uploads/components/UploadPage.js | 4 +- react/src/users/UsersContainer.js | 38 +++++++++ react/src/users/components/UsersPage.js | 93 ++++++++++++++++++++++ react/src/users/routes.js | 9 +++ 12 files changed, 333 insertions(+), 26 deletions(-) create mode 100644 django/api/serializers/permission.py create mode 100644 
django/api/serializers/user.py create mode 100644 django/api/viewsets/user.py create mode 100644 react/src/app/styles/Users.scss create mode 100644 react/src/users/UsersContainer.js create mode 100644 react/src/users/components/UsersPage.js create mode 100644 react/src/users/routes.js diff --git a/django/api/serializers/permission.py b/django/api/serializers/permission.py new file mode 100644 index 00000000..73efc23e --- /dev/null +++ b/django/api/serializers/permission.py @@ -0,0 +1,16 @@ +from rest_framework.serializers import ModelSerializer, SerializerMethodField + +from api.models.permission import Permission +from api.models.user_permission import UserPermission + +class PermissionSerializer(ModelSerializer): + description = SerializerMethodField() + def get_description(self, obj): + permission = Permission.objects.filter(id=obj.permission_id).first() + if permission: + return permission.description + class Meta: + model = Permission + fields = ( + 'id', 'description', + ) diff --git a/django/api/serializers/user.py b/django/api/serializers/user.py new file mode 100644 index 00000000..fc2b490f --- /dev/null +++ b/django/api/serializers/user.py @@ -0,0 +1,36 @@ +""" +Further reading: +https://www.django-rest-framework.org/api-guide/serializers/ +""" +from rest_framework.serializers import ModelSerializer, SerializerMethodField + +from api.models.user import User +from api.models.user_permission import UserPermission +from api.serializers.permission import PermissionSerializer + +class UserSerializer(ModelSerializer): + """ + Default Serializer for User + """ + user_permissions = SerializerMethodField() + + def get_user_permissions(self, obj): + user_permission = UserPermission.objects.filter(user_id=obj.id) + permissions = PermissionSerializer(user_permission, read_only=True, many=True) + return permissions.data + + class Meta: + model = User + fields = ('idir', 'user_permissions') + + +class UserSaveSerializer(ModelSerializer): + def update(self, instance, validated_data): + request = self.context.get('request') + permissions = validated_data.pop('permissions') + print(request) + print(permissions) + #check if user exists, if not add them + + #update user_permissions + diff --git a/django/api/urls.py b/django/api/urls.py index 2ee60438..1d2c48db 100644 --- a/django/api/urls.py +++ b/django/api/urls.py @@ -20,6 +20,7 @@ from api.viewsets.icbc_data import IcbcViewset from api.viewsets.minio import MinioViewSet from api.viewsets.upload import UploadViewset +from api.viewsets.user import UserViewSet ROUTER = routers.SimpleRouter(trailing_slash=False) @@ -34,6 +35,9 @@ ROUTER.register( r'minio', MinioViewSet, basename='minio' ) +ROUTER.register( + r'users', UserViewSet +) urlpatterns = [ path('admin/', admin.site.urls), diff --git a/django/api/viewsets/user.py b/django/api/viewsets/user.py new file mode 100644 index 00000000..13d0629c --- /dev/null +++ b/django/api/viewsets/user.py @@ -0,0 +1,56 @@ +from rest_framework.decorators import action +from rest_framework.permissions import AllowAny +from rest_framework.response import Response +from rest_framework.viewsets import GenericViewSet +from api.models.user import User +from api.serializers.user import UserSerializer, UserSaveSerializer + +class UserViewSet(GenericViewSet): + """ + This viewset automatically provides `list`, `create`, `retrieve`, + and `update` actions. 
+ """ + permission_classes = (AllowAny,) + http_method_names = ['get', 'post', 'put', 'patch'] + queryset = User.objects.all() + + serializer_classes = { + 'default': UserSerializer, + 'update': UserSaveSerializer, + 'create': UserSaveSerializer, + } + + + def get_serializer_class(self): + if self.action in list(self.serializer_classes.keys()): + return self.serializer_classes[self.action] + + return self.serializer_classes['default'] + + + @action(detail=False) + def current(self, request): + """ + Get the current user + """ + user = User.objects.filter(idir=request.user).first() + serializer = self.get_serializer(user) + return Response(serializer.data) + + def list(self, request): + request = self.request + ##check if user is admin before producing list of all users + users = User.objects.all() + current_user = users.filter(idir=request.user).first() + if current_user: + current_user_serializer = UserSerializer(current_user) + current_user_permissions = current_user_serializer.data['user_permissions'] + is_admin = False + if current_user_permissions: + for i in current_user_permissions: + for v in i.values(): + if v == 'admin': + is_admin = True + if is_admin == True: + serializer = UserSerializer(users, many=True) + return Response(serializer.data) \ No newline at end of file diff --git a/react/src/app/styles/App.scss b/react/src/app/styles/App.scss index 66b0427c..b05e911f 100644 --- a/react/src/app/styles/App.scss +++ b/react/src/app/styles/App.scss @@ -2,6 +2,17 @@ Base styling for "App" ie General Container */ +$bg-alt-black: #1E1F21; +$bg-primary: #244074; +$bg-black: #000; +$bg-white: #FFF; +$default-text-color: #000; +$default-table-border-color: rgba(249, 249, 249, 0.2); +$default-table-color: #F9F9F9; +$highlight: #0078FD; +$default-link-blue: #568DBA; +$default-background-grey: #F2F2F2; +$md: 991px; .App { .App-header { @@ -38,3 +49,33 @@ ie General Container padding: 0 1rem; } } + + +body { + background-color: $bg-white; + color: $default-text-color; + font-family: 'Roboto', 'Open Sans', sans-serif; + font-weight: 400; + height: 100%; + margin: 0; +} + +h2, h3, h4 { + font-family: 'Roboto', 'Open Sans', sans-serif; + color: #003366; + font-weight: 500; +} + +h2 { + font-size: 20px; +} + +h3 { + display: inline; + font-size: 17px; +} + +h4 { + font-size: 14px; +} + diff --git a/react/src/app/styles/Users.scss b/react/src/app/styles/Users.scss new file mode 100644 index 00000000..564814c6 --- /dev/null +++ b/react/src/app/styles/Users.scss @@ -0,0 +1,25 @@ +.add-user-box { + height: 4rem; + background-color: $default-background-grey; + margin-bottom: 1rem; + width: 40%; +} + +.permissions { + background-color: $default-background-grey; + align-content: space-around; + width: 10%; + .checkbox { + width: 40%; + } +} + +.user-input { + margin-left: 10px; + margin-right: 10px; + background-color: white; +} + +.button-dark-blue { + background-color: #003366 !important; +} \ No newline at end of file diff --git a/react/src/app/styles/index.scss b/react/src/app/styles/index.scss index e9655906..1a7bf42a 100644 --- a/react/src/app/styles/index.scss +++ b/react/src/app/styles/index.scss @@ -1,30 +1,6 @@ -/* -Base Style -Everything else will generally be controlled via bootstrap -*/ -$bg-alt-black: #1E1F21; -$bg-primary: #244074; -$bg-black: #000; -$bg-white: #FFF; -$default-text-color: #000; -$default-table-border-color: rgba(249, 249, 249, 0.2); -$default-table-color: #F9F9F9; -$highlight: #0078FD; -$default-link-blue: #568DBA; -$default-background-grey: #F2F2F2; -$md: 991px; - 
@import 'App.scss'; @import 'Login.scss'; @import 'ReactTable.scss'; @import 'FileUpload.scss'; @import 'Roboto.scss'; - -body { - background-color: $bg-white; - color: $default-text-color; - font-family: 'Roboto', 'Open Sans', sans-serif; - font-weight: 700; - height: 100%; - margin: 0; -} \ No newline at end of file +@import 'Users.scss'; diff --git a/react/src/uploads/UploadContainer.js b/react/src/uploads/UploadContainer.js index c312d410..8bcbf52d 100644 --- a/react/src/uploads/UploadContainer.js +++ b/react/src/uploads/UploadContainer.js @@ -4,8 +4,10 @@ import CircularProgress from '@mui/material/CircularProgress'; import Alert from '@mui/material/Alert'; import React, { useState, useEffect } from 'react'; import ROUTES_UPLOAD from './routes'; +import ROUTES_USERS from '../users/routes'; import UploadPage from './components/UploadPage'; import AlertDialog from '../app/components/AlertDialog'; +import UsersContainer from '../users/UsersContainer'; const UploadContainer = () => { const [uploadFiles, setUploadFiles] = useState([]); // array of objects for files to be uploaded @@ -18,6 +20,7 @@ const UploadContainer = () => { const [alertSeverity, setAlertSeverity] = useState(''); // existing data with what is being uploaded const [open, setOpen] = useState(false); + const [adminUser, setAdminUser] = useState(false); const dialogue = 'Selecting replace will delete all previously uploaded records for this dataset'; const leftButtonText = 'Cancel'; const rightButtonText = 'Replace existing data'; @@ -34,6 +37,12 @@ const UploadContainer = () => { axios.get(ROUTES_UPLOAD.LIST).then((response) => { setDatasetList(response.data); setLoading(false); + axios.get(ROUTES_USERS.CURRENT).then((currentUserResp) => { + const permissions = currentUserResp.data.user_permissions.map((each) => each.description); + if (permissions.includes('admin')) { + setAdminUser(true); + } + }); }); }; @@ -137,6 +146,8 @@ const UploadContainer = () => { handleRadioChange={handleRadioChange} downloadSpreadsheet={downloadSpreadsheet} /> + {adminUser + && }
); diff --git a/react/src/uploads/components/UploadPage.js b/react/src/uploads/components/UploadPage.js index 8ea4c15d..81a51c86 100644 --- a/react/src/uploads/components/UploadPage.js +++ b/react/src/uploads/components/UploadPage.js @@ -33,7 +33,9 @@ const UploadPage = (props) => {
- Dataset to Upload     +

+ Select Program     +

{ handleRadioChange(event); }} /> + { handleRadioChange(event); }} /> + + + {user.idir} + + + + + + ); + }; + return ( + + +
+

Admin

+
+ + + + +

+ IDIR Username +

+
+ + + + + + +
+
+
+ + + +

Upload

+
+ +

Admin

+
+
+ {users.map((user) => ( + userRow(user) + ))} + + + +
+
+
+ ); +}; +UsersPage.propTypes = { + users: PropTypes.arrayOf(PropTypes.shape()).isRequired, +}; +export default UsersPage; diff --git a/react/src/users/routes.js b/react/src/users/routes.js new file mode 100644 index 00000000..945af831 --- /dev/null +++ b/react/src/users/routes.js @@ -0,0 +1,9 @@ +const API_BASE_PATH = '/api/users'; + +const USERS = { + LIST: API_BASE_PATH, + CURRENT: `${API_BASE_PATH}/current`, + +}; + +export default USERS; From 6cdb4d471ae8303e83215ac0a9b405e641c24f95 Mon Sep 17 00:00:00 2001 From: Emily <44536222+emi-hi@users.noreply.github.com> Date: Tue, 5 Mar 2024 11:42:12 -0800 Subject: [PATCH 046/152] CTHUB - Frontend styling for upload and user permissions page (#196) * feat: frontend changes for upload and user admin * chore: removes react Fragment and replaces with grid container, adds grey background to file upload grid * chore: adds responsiveness to permissions --- react/src/app/styles/App.scss | 18 +++++++++ react/src/app/styles/FileUpload.scss | 20 ++++++++-- react/src/app/styles/Users.scss | 12 ++++-- react/src/uploads/UploadContainer.js | 41 ++++++++++++-------- react/src/uploads/components/FileDrop.js | 5 +-- react/src/uploads/components/FileDropArea.js | 23 +++++------ react/src/uploads/components/UploadPage.js | 22 +++++------ react/src/users/components/UsersPage.js | 28 +++++++------ 8 files changed, 102 insertions(+), 67 deletions(-) diff --git a/react/src/app/styles/App.scss b/react/src/app/styles/App.scss index b05e911f..89b60e10 100644 --- a/react/src/app/styles/App.scss +++ b/react/src/app/styles/App.scss @@ -13,8 +13,10 @@ $highlight: #0078FD; $default-link-blue: #568DBA; $default-background-grey: #F2F2F2; $md: 991px; +$button-background-blue: #003366; .App { + background-color: $default-background-grey; .App-header { -ms-flex-align: center; -ms-flex-direction: row; @@ -67,6 +69,8 @@ h2, h3, h4 { } h2 { + margin-top: 0; + margin-bottom: 0; font-size: 20px; } @@ -79,3 +83,17 @@ h4 { font-size: 14px; } +.button-dark-blue { + background-color: $button-background-blue !important; + color: $bg-white; + + &:disabled { + background-color: white !important; + + } +} + +.text-button { + color: #1a5a96 !important; + text-decoration: underline !important; +} \ No newline at end of file diff --git a/react/src/app/styles/FileUpload.scss b/react/src/app/styles/FileUpload.scss index fe7a8d30..e9226119 100644 --- a/react/src/app/styles/FileUpload.scss +++ b/react/src/app/styles/FileUpload.scss @@ -6,8 +6,10 @@ padding: 0.5rem; } #dataset-select { - padding-bottom: 2rem; - padding-top: 2rem; + margin-bottom: 1rem; + margin-top: 1rem; + padding: 1rem; + background-color: $default-background-grey; } #trash-button { @@ -18,13 +20,23 @@ } .upload-row { - height: 15px; + background-color: $default-background-grey; + padding: .5rem; + margin: .5rem; } .file-upload { - border: 2px dashed $default-link-blue; + border: 1px dashed $button-background-blue; background: $bg-white; padding: 1rem; text-align: center; flex-direction: column; } + +.upload-bar { + background-color: $default-background-grey; + color: $bg-white; + padding-top: 1rem; + padding-bottom: 1rem; + margin-top: 2rem; +} diff --git a/react/src/app/styles/Users.scss b/react/src/app/styles/Users.scss index 564814c6..20bd7092 100644 --- a/react/src/app/styles/Users.scss +++ b/react/src/app/styles/Users.scss @@ -1,8 +1,6 @@ .add-user-box { - height: 4rem; background-color: $default-background-grey; margin-bottom: 1rem; - width: 40%; } .permissions { @@ -12,6 +10,13 @@ .checkbox { width: 40%; } + @media 
(max-width:991px) { + width: 20%; +} + @media (max-width:599px) { + width: 40%; +} + } .user-input { @@ -22,4 +27,5 @@ .button-dark-blue { background-color: #003366 !important; -} \ No newline at end of file +} + diff --git a/react/src/uploads/UploadContainer.js b/react/src/uploads/UploadContainer.js index 8bcbf52d..c550f864 100644 --- a/react/src/uploads/UploadContainer.js +++ b/react/src/uploads/UploadContainer.js @@ -1,8 +1,9 @@ import { withRouter } from 'react-router-dom'; import axios from 'axios'; -import CircularProgress from '@mui/material/CircularProgress'; -import Alert from '@mui/material/Alert'; import React, { useState, useEffect } from 'react'; +import { + Paper, Alert, CircularProgress, Stack, +} from '@mui/material'; import ROUTES_UPLOAD from './routes'; import ROUTES_USERS from '../users/routes'; import UploadPage from './components/UploadPage'; @@ -134,20 +135,28 @@ const UploadContainer = () => { title="Replace existing data?" /> )} - - {adminUser - && } + + + + + {adminUser + && ( + + + + )} +
); diff --git a/react/src/uploads/components/FileDrop.js b/react/src/uploads/components/FileDrop.js index c79fb412..528f2d5b 100644 --- a/react/src/uploads/components/FileDrop.js +++ b/react/src/uploads/components/FileDrop.js @@ -1,7 +1,6 @@ import PropTypes from 'prop-types'; import React, { useCallback, useState } from 'react'; -import Box from '@mui/material/Box'; -import Button from '@mui/material/Button'; +import { Box, Button } from '@mui/material'; import UploadIcon from '@mui/icons-material/Upload'; import { useDropzone } from 'react-dropzone'; @@ -25,7 +24,7 @@ const FileDrop = (props) => { {' '}
- + {dropMessage && (
{dropMessage}
diff --git a/react/src/uploads/components/FileDropArea.js b/react/src/uploads/components/FileDropArea.js index 1883e6f0..f47c7be7 100644 --- a/react/src/uploads/components/FileDropArea.js +++ b/react/src/uploads/components/FileDropArea.js @@ -1,9 +1,7 @@ import React from 'react'; import PropTypes from 'prop-types'; -import Box from '@mui/material/Box'; -import Button from '@mui/material/Button'; -import Grid from '@mui/material/Grid'; -import DeleteIcon from '@mui/icons-material/Delete'; +import { Box, Button, Grid } from '@mui/material'; +import ClearIcon from '@mui/icons-material/Clear'; import FileDrop from './FileDrop'; import getFileSize from '../../app/utilities/getFileSize'; @@ -22,15 +20,14 @@ const FileDropArea = (props) => { function FormRow(file) { const { name, size } = file; return ( - - + + {name} {getFileSize(size)} - - + ); } return ( -
+
- + @@ -59,10 +56,10 @@ const FileDropArea = (props) => { - Filename +

Filename

- Size +

Size

{uploadFiles.map((file) => ( diff --git a/react/src/uploads/components/UploadPage.js b/react/src/uploads/components/UploadPage.js index 81a51c86..b8bba88e 100644 --- a/react/src/uploads/components/UploadPage.js +++ b/react/src/uploads/components/UploadPage.js @@ -1,14 +1,9 @@ import PropTypes from 'prop-types'; import React from 'react'; -import Box from '@mui/material/Box'; -import Button from '@mui/material/Button'; -import MenuItem from '@mui/material/MenuItem'; -import Select from '@mui/material/Select'; +import { + Box, Button, MenuItem, Select, Radio, RadioGroup, FormControlLabel, FormControl, +} from '@mui/material'; import UploadIcon from '@mui/icons-material/Upload'; -import Radio from '@mui/material/Radio'; -import RadioGroup from '@mui/material/RadioGroup'; -import FormControlLabel from '@mui/material/FormControlLabel'; -import FormControl from '@mui/material/FormControl'; import FileDropArea from './FileDropArea'; const UploadPage = (props) => { @@ -31,6 +26,7 @@ const UploadPage = (props) => { return ( <> +

Upload Program Data

@@ -39,12 +35,12 @@ const UploadPage = (props) => { - +

@@ -76,13 +72,13 @@ const UploadPage = (props) => { uploadFiles={uploadFiles} />
- +
@@ -84,7 +82,7 @@ const UsersPage = (props) => {
- + ); }; UsersPage.propTypes = { From bfebc7092b8b39f8a82431230894505e5ba592f4 Mon Sep 17 00:00:00 2001 From: Emily <44536222+emi-hi@users.noreply.github.com> Date: Tue, 5 Mar 2024 13:39:28 -0800 Subject: [PATCH 047/152] fix: aligns names vertically in row (#202) --- react/src/users/components/UsersPage.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/react/src/users/components/UsersPage.js b/react/src/users/components/UsersPage.js index c7568c3e..68a00ace 100644 --- a/react/src/users/components/UsersPage.js +++ b/react/src/users/components/UsersPage.js @@ -24,7 +24,7 @@ const UsersPage = (props) => { console.log(userPerms[permissionType]); }; return ( - + { handleRadioChange(event); }} /> { handleRadioChange(event); }} /> From 31add50935dd002795dc79419d006c75036498c8 Mon Sep 17 00:00:00 2001 From: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com> Date: Tue, 5 Mar 2024 15:40:41 -0800 Subject: [PATCH 048/152] add new ci pipeline (#206) --- .github/workflows/dev-ci.yaml | 112 +++++++++++++++++++++++++++++++ .github/workflows/test-ci.yaml | 117 +++++++++++++++++++++++++++++++++ 2 files changed, 229 insertions(+) create mode 100644 .github/workflows/dev-ci.yaml create mode 100644 .github/workflows/test-ci.yaml diff --git a/.github/workflows/dev-ci.yaml b/.github/workflows/dev-ci.yaml new file mode 100644 index 00000000..4155485b --- /dev/null +++ b/.github/workflows/dev-ci.yaml @@ -0,0 +1,112 @@ +## For each release, the value of workflow name, branches and VERSION need to be adjusted accordingly + +name: CTHUB 0.2.0 Dev CI + +on: + push: + branches: [ release-0.2.0 ] + paths: + - frontend/** + - backend/** + workflow_dispatch: + +env: + VERSION: 0.2.0 + GIT_URL: https://github.com/bcgov/cthub.git + TOOLS_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools + DEV_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev + + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + + set-pre-release: + name: Calculate pre-release number + runs-on: ubuntu-latest + + outputs: + output1: ${{ steps.set-pre-release.outputs.PRE_RELEASE }} + + steps: + - id: set-pre-release + run: echo "PRE_RELEASE=$(date +'%Y%m%d%H%M%S')" >> $GITHUB_OUTPUT + + build: + + name: Build CTHUB + runs-on: ubuntu-latest + needs: set-pre-release + timeout-minutes: 60 + + env: + PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} + + steps: + + - name: Check out repository + uses: actions/checkout@v4.1.1 + + - name: Log in to Openshift + uses: redhat-actions/oc-login@v1.2 + with: + openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} + openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} + insecure_skip_tls_verify: true + namespace: ${{ env.TOOLS_NAMESPACE }} + + - name: Build CTHUB Backend + run: | + cd openshift/templates + oc process -f ./backend-bc.yaml NAME=cthub SUFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} + sleep 5s + oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 + oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} + + - name: Build CTHUB Frontend + run: | + cd openshift/templates + oc process -f ./frontend-bc.yaml NAME=cthub SUFIX=-${{ env.VERSION }}-${{ 
env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} + sleep 5s + oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-frontend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 + oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} + + deploy: + + name: Deploy CTHUB on Dev + runs-on: ubuntu-latest + timeout-minutes: 60 + needs: [set-pre-release, build] + + env: + PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} + + steps: + + - name: Checkout Manifest repository + uses: actions/checkout@v4.1.1 + with: + repository: bcgov-c/tenant-gitops-30b186 + ref: main + ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }} + + - name: Update frontend tag + uses: mikefarah/yq@v4.40.5 + with: + cmd: yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml + + - name: Update backend tag + uses: mikefarah/yq@v4.40.5 + with: + cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml + + - name: GitHub Commit & Push + run: | + git config --global user.email "actions@github.com" + git config --global user.name "GitHub Actions" + git add cthub/values-dev.yaml + git commit -m "update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }}" + git push + \ No newline at end of file diff --git a/.github/workflows/test-ci.yaml b/.github/workflows/test-ci.yaml new file mode 100644 index 00000000..d25c3016 --- /dev/null +++ b/.github/workflows/test-ci.yaml @@ -0,0 +1,117 @@ +## For each release, the value of workflow name, branches and VERSION need to be adjusted accordingly + +name: CTHUB 0.2.0 Test CI + +on: + workflow_dispatch: + +env: + VERSION: 0.2.0 + GIT_URL: https://github.com/bcgov/cthub.git + TOOLS_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools + TEST_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-test + + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + + set-pre-release: + name: Calculate pre-release number + runs-on: ubuntu-latest + + outputs: + output1: ${{ steps.set-pre-release.outputs.PRE_RELEASE }} + + steps: + - id: set-pre-release + run: echo "PRE_RELEASE=$(date +'%Y%m%d%H%M%S')" >> $GITHUB_OUTPUT + + build: + + name: Build CTHUB + runs-on: ubuntu-latest + needs: set-pre-release + timeout-minutes: 60 + + env: + PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} + + steps: + + - name: Check out repository + uses: actions/checkout@v4.1.1 + + - name: Log in to Openshift + uses: redhat-actions/oc-login@v1.2 + with: + openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} + openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} + insecure_skip_tls_verify: true + namespace: ${{ env.TOOLS_NAMESPACE }} + + - name: Build CTHUB Backend + run: | + cd openshift/templates + oc process -f ./backend-bc.yaml NAME=cthub SUFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} + sleep 5s + oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 + + - name: Build CTHUB Frontend + run: | + cd openshift/templates + oc process -f 
./frontend-bc.yaml NAME=cthub SUFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} + sleep 5s + oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-frontend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 + + deploy: + + name: Deploy CTHUB on Test + runs-on: ubuntu-latest + timeout-minutes: 60 + needs: [set-pre-release, build] + + env: + PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} + + steps: + - name: Ask for approval for CTHUB release-${{ env.VERSION }} Test deployment + uses: trstringer/manual-approval@v1.6.0 + with: + secret: ${{ github.TOKEN }} + approvers: emi-hi,kuanfandevops,tim738745,JulianForeman + minimum-approvals: 1 + issue-title: "CTHUB release-${{ env.VERSION }} Test Deployment" + + - name: Tag CTHUB images to Test + run: | + oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-$PRE_RELEASE ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-$PRE_RELEASE + oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-$PRE_RELEASE ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-$PRE_RELEASE + + - name: Checkout Manifest repository + uses: actions/checkout@v4.1.1 + with: + repository: bcgov-c/tenant-gitops-30b186 + ref: main + ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }} + + - name: Update frontend tag + uses: mikefarah/yq@v4.40.5 + with: + cmd: yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml + + - name: Update backend tag + uses: mikefarah/yq@v4.40.5 + with: + cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml + + - name: GitHub Commit & Push + run: | + git config --global user.email "actions@github.com" + git config --global user.name "GitHub Actions" + git add cthub/values-test.yaml + git commit -m "update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }}" + git push + \ No newline at end of file From 9c74d8908dafc6be7fa8270b56cc17ecc66b2d39 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Tue, 5 Mar 2024 15:59:10 -0800 Subject: [PATCH 049/152] update folder location --- .github/workflows/dev-ci.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/dev-ci.yaml b/.github/workflows/dev-ci.yaml index 4155485b..6a1aaf32 100644 --- a/.github/workflows/dev-ci.yaml +++ b/.github/workflows/dev-ci.yaml @@ -59,7 +59,7 @@ jobs: - name: Build CTHUB Backend run: | - cd openshift/templates + cd openshift/templates/backend oc process -f ./backend-bc.yaml NAME=cthub SUFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} sleep 5s oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 @@ -67,7 +67,7 @@ jobs: - name: Build CTHUB Frontend run: | - cd openshift/templates + cd openshift/templates/frontend oc process -f ./frontend-bc.yaml NAME=cthub SUFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} sleep 5s oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s 
build/cthub-frontend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 From f52674ba27380e140a104e3c0672523eefdc3641 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Tue, 5 Mar 2024 16:03:06 -0800 Subject: [PATCH 050/152] update SUFFIX --- .github/workflows/dev-ci.yaml | 4 ++-- .github/workflows/test-ci.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/dev-ci.yaml b/.github/workflows/dev-ci.yaml index 6a1aaf32..a62484c5 100644 --- a/.github/workflows/dev-ci.yaml +++ b/.github/workflows/dev-ci.yaml @@ -60,7 +60,7 @@ jobs: - name: Build CTHUB Backend run: | cd openshift/templates/backend - oc process -f ./backend-bc.yaml NAME=cthub SUFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} + oc process -f ./backend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} sleep 5s oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} @@ -68,7 +68,7 @@ jobs: - name: Build CTHUB Frontend run: | cd openshift/templates/frontend - oc process -f ./frontend-bc.yaml NAME=cthub SUFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} + oc process -f ./frontend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} sleep 5s oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-frontend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} diff --git a/.github/workflows/test-ci.yaml b/.github/workflows/test-ci.yaml index d25c3016..51532e78 100644 --- a/.github/workflows/test-ci.yaml +++ b/.github/workflows/test-ci.yaml @@ -55,14 +55,14 @@ jobs: - name: Build CTHUB Backend run: | cd openshift/templates - oc process -f ./backend-bc.yaml NAME=cthub SUFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} + oc process -f ./backend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} sleep 5s oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 - name: Build CTHUB Frontend run: | cd openshift/templates - oc process -f ./frontend-bc.yaml NAME=cthub SUFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION 
}}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} + oc process -f ./frontend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} sleep 5s oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-frontend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 From 4027640c6fb33bbbdf60c0ab5b5cb1dc0ccec5ed Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Tue, 5 Mar 2024 17:09:42 -0800 Subject: [PATCH 051/152] update trigger for frontend build --- openshift/templates/frontend/frontend-bc.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/openshift/templates/frontend/frontend-bc.yaml b/openshift/templates/frontend/frontend-bc.yaml index d505627c..abdc2aef 100644 --- a/openshift/templates/frontend/frontend-bc.yaml +++ b/openshift/templates/frontend/frontend-bc.yaml @@ -88,6 +88,9 @@ objects: key: password type: Source successfulBuildsHistoryLimit: 5 - triggers: [] + triggers: + - imageChange: {} + type: ImageChange + - type: ConfigChange status: lastVersion: 0 \ No newline at end of file From 3a66850b8aa7257626dad818163658a9b8d2aca4 Mon Sep 17 00:00:00 2001 From: tim738745 <98717409+tim738745@users.noreply.github.com> Date: Tue, 12 Mar 2024 09:28:20 -0700 Subject: [PATCH 052/152] feat: 201 - Logout button + cleanup (#214) --- .gitignore | 3 +- react/src/App.js | 22 ++--- react/src/Login.js | 16 +++- react/src/app/components/KeycloakProvider.js | 29 +++++++ react/src/app/components/Loading.js | 12 +++ react/src/app/components/Logout.js | 20 +++++ react/src/app/utilities/useAxios.js | 30 +++++++ react/src/app/utilities/useKeycloak.js | 9 ++ react/src/contexts.js | 3 + react/src/icbc_data/IcbcDataContainer.js | 3 +- react/src/index.js | 32 ++++--- react/src/keycloak.js | 87 -------------------- react/src/uploads/UploadContainer.js | 19 ++--- react/src/users/UsersContainer.js | 3 +- react/webpack.config.js | 1 + 15 files changed, 157 insertions(+), 132 deletions(-) create mode 100644 react/src/app/components/KeycloakProvider.js create mode 100644 react/src/app/components/Loading.js create mode 100644 react/src/app/components/Logout.js create mode 100644 react/src/app/utilities/useAxios.js create mode 100644 react/src/app/utilities/useKeycloak.js create mode 100644 react/src/contexts.js delete mode 100644 react/src/keycloak.js diff --git a/.gitignore b/.gitignore index 379b86d5..59e952f2 100644 --- a/.gitignore +++ b/.gitignore @@ -21,4 +21,5 @@ TEST-*xml venv/ coverage/ minio/minio_files/cthub/ -decoder.env \ No newline at end of file +decoder.env +docker-compose-local-dev.yml \ No newline at end of file diff --git a/react/src/App.js b/react/src/App.js index 5066453d..d7b50cec 100644 --- a/react/src/App.js +++ b/react/src/App.js @@ -1,8 +1,6 @@ import 'regenerator-runtime/runtime'; -import axios from 'axios'; import React from 'react'; import { - Redirect, BrowserRouter as Router, Route, Switch, @@ -12,18 +10,18 @@ import settings from './app/settings'; import IcbcDataRouter from './icbc_data/router'; import UploadRouter from './uploads/router'; import DashboardContainer from './dashboard/DashboardContainer'; +import useKeycloak from './app/utilities/useKeycloak' +import Login from './Login'; -const { API_BASE } = settings; - -axios.defaults.baseURL = API_BASE; +const { 
ENABLE_KEYCLOAK } = settings; const App = () => { - const { sessionStorage } = window; - const redirect = sessionStorage.getItem('redirect'); - if (redirect && redirect !== '') { - sessionStorage.removeItem('redirect'); + const keycloak = useKeycloak() + + if (ENABLE_KEYCLOAK && !keycloak.authenticated) { + const redirectUri = window.location.href + return <Login redirectUri={redirectUri} /> } - return (
@@ -35,10 +33,6 @@ const App = () => {
- {redirect && redirect !== '' && ( - <Redirect to={redirect} /> - )} - {IcbcDataRouter()} {UploadRouter()} diff --git a/react/src/Login.js b/react/src/Login.js index 4eada0c8..fa4cfae3 100644 --- a/react/src/Login.js +++ b/react/src/Login.js @@ -1,8 +1,16 @@ import React from 'react'; -import CustomPropTypes from './app/utilities/props'; +import PropTypes from 'prop-types'; +import useKeycloak from './app/utilities/useKeycloak'; const Login = (props) => { - const { keycloak } = props; + const { redirectUri } = props; + const loginOptions = { + idpHint: 'idir' + } + if (redirectUri) { + loginOptions.redirectUri = redirectUri + } + const keycloak = useKeycloak() return (
@@ -13,7 +21,7 @@ const Login = (props) => {
- @@ -25,7 +33,7 @@ const Login = (props) => { }; Login.propTypes = { - keycloak: CustomPropTypes.keycloak.isRequired, + redirectUri: PropTypes.string }; export default Login; diff --git a/react/src/app/components/KeycloakProvider.js b/react/src/app/components/KeycloakProvider.js new file mode 100644 index 00000000..6c9aba2b --- /dev/null +++ b/react/src/app/components/KeycloakProvider.js @@ -0,0 +1,29 @@ +import React, { useState, useEffect } from 'react' +import { KeycloakContext } from '../../contexts' +import settings from '../settings' + +const KeycloakProvider = ({authClient, initOptions, LoadingComponent, children}) => { + const keycloakEnabled = settings.ENABLE_KEYCLOAK + const [loading, setLoading] = useState(keycloakEnabled ? true : false) + const [keycloak, setKeycloak] = useState({}) + + useEffect(() => { + if (keycloakEnabled) { + authClient.init(initOptions).then(() => { + setKeycloak(authClient) + setLoading(false) + }) + } + }, [keycloakEnabled, authClient, initOptions]) + + if (loading) { + return + } + return ( + + {children} + + ) +} + +export default KeycloakProvider \ No newline at end of file diff --git a/react/src/app/components/Loading.js b/react/src/app/components/Loading.js new file mode 100644 index 00000000..02b9b5a3 --- /dev/null +++ b/react/src/app/components/Loading.js @@ -0,0 +1,12 @@ +import React from 'react' +import { CircularProgress } from '@mui/material' + +const Loading = () => { + return ( +
+ +
+ ) +} + +export default Loading \ No newline at end of file diff --git a/react/src/app/components/Logout.js b/react/src/app/components/Logout.js new file mode 100644 index 00000000..66104646 --- /dev/null +++ b/react/src/app/components/Logout.js @@ -0,0 +1,20 @@ +import React from 'react'; +import useKeycloak from '../utilities/useKeycloak' + +const Logout = () => { + const keycloak = useKeycloak(); + if (keycloak.authenticated) { + return ( + + ) + } + return null +} + +export default Logout diff --git a/react/src/app/utilities/useAxios.js b/react/src/app/utilities/useAxios.js new file mode 100644 index 00000000..307a1275 --- /dev/null +++ b/react/src/app/utilities/useAxios.js @@ -0,0 +1,30 @@ +import axios from 'axios' +import settings from '../settings'; +import useKeycloak from './useKeycloak' + +const useAxios = (useDefault = false, opts = {}) => { + if (useDefault) { + return axios.create(opts) + } + const keycloak = useKeycloak() + const instance = axios.create({ + baseURL: settings.API_BASE, + ...opts, + }) + instance.interceptors.request.use(async (config) => { + if (keycloak.authenticated) { + try { + await keycloak.updateToken(30) + config.headers = { + 'Authorization': `Bearer ${keycloak.token}`, + } + } catch(error) { + // do something here? + } + } + return config + }) + return instance +} + +export default useAxios \ No newline at end of file diff --git a/react/src/app/utilities/useKeycloak.js b/react/src/app/utilities/useKeycloak.js new file mode 100644 index 00000000..f56bdf77 --- /dev/null +++ b/react/src/app/utilities/useKeycloak.js @@ -0,0 +1,9 @@ +import { useContext } from 'react' +import { KeycloakContext } from '../../contexts' + +const useKeycloak = () => { + const keycloak = useContext(KeycloakContext) + return keycloak +} + +export default useKeycloak \ No newline at end of file diff --git a/react/src/contexts.js b/react/src/contexts.js new file mode 100644 index 00000000..2b7751c7 --- /dev/null +++ b/react/src/contexts.js @@ -0,0 +1,3 @@ +import { createContext } from 'react' + +export const KeycloakContext = createContext({}) \ No newline at end of file diff --git a/react/src/icbc_data/IcbcDataContainer.js b/react/src/icbc_data/IcbcDataContainer.js index 1899d11e..a7e26afc 100644 --- a/react/src/icbc_data/IcbcDataContainer.js +++ b/react/src/icbc_data/IcbcDataContainer.js @@ -1,10 +1,10 @@ -import axios from 'axios'; import React, { useCallback, useRef, useState } from 'react'; import { withRouter } from 'react-router-dom'; import { getFilters, getOrderBy } from '../app/utilities/reactTable'; import IcbcDataTable from './components/IcbcDataTable'; import ROUTES from './routes'; +import useAxios from '../app/utilities/useAxios'; const IcbcDataContainer = () => { const [data, setData] = useState([]); @@ -12,6 +12,7 @@ const IcbcDataContainer = () => { const [pageCount, setPageCount] = useState(-1); const [totalRowsCount, setTotalRowsCount] = useState(0); const fetchIdRef = useRef(0); + const axios = useAxios() const onFetchData = useCallback((state) => { setLoading(true); diff --git a/react/src/index.js b/react/src/index.js index 08a8daa3..504adedd 100644 --- a/react/src/index.js +++ b/react/src/index.js @@ -1,18 +1,26 @@ import React from 'react'; -import ReactDOM from 'react-dom'; +import ReactDOM from 'react-dom' +import Keycloak from 'keycloak-js'; -import Keycloak from './keycloak'; -import settings from './app/settings'; +import KeycloakProvider from './app/components/KeycloakProvider'; +import App from './App'; +import Loading from 
'./app/components/Loading'; import './app/styles/index.scss'; -import App from './App'; - -if (settings.ENABLE_KEYCLOAK) { - ReactDOM.render( - , - document.getElementById('root'), - ); -} else { - ReactDOM.render(, document.getElementById('root')); +const keycloak = new Keycloak() +const keycloakInitOptions = { + onLoad: 'check-sso', + pkceMethod: 'S256' } + +ReactDOM.render( + + + , + document.getElementById('root') +) diff --git a/react/src/keycloak.js b/react/src/keycloak.js deleted file mode 100644 index c54a201f..00000000 --- a/react/src/keycloak.js +++ /dev/null @@ -1,87 +0,0 @@ -import axios from 'axios'; -import React, { useEffect, useState } from 'react'; -import Keycloak from 'keycloak-js'; - -import Login from './Login'; -import App from './App'; - -const keycloakContainer = () => { - const { location, sessionStorage } = window; - const { pathname, search } = location; - const [authenticated, setAuthenticated] = useState(false); - const [initializedKeycloak, setInitializedKeycloak] = useState(false); - let globalTimeout; - - const keycloak = Keycloak(); - const initOptions = { - idpHint: 'idir', - onLoad: 'check-sso', - pkceMethod: 'S256', - redirectUri: `${window.location.origin}/`, - }; - - if ((pathname && pathname !== '/') || search) { - sessionStorage.setItem('redirect', pathname + search); - } - - const refreshToken = (time = 60) => { - /* - the time (in seconds) is used to check whether how long our current token has left, - if it's less than that, then we can refresh the token. otherwise, just keep reusing - the current token - */ - initializedKeycloak.updateToken(time).then((refreshed) => { - if (refreshed) { - const { token: newToken } = initializedKeycloak; - - axios.defaults.headers.common.Authorization = `Bearer ${newToken}`; - - clearTimeout(globalTimeout); - setDelayedRefreshToken(); - } - }).catch(() => { - initializedKeycloak.logout(); - }); - }; - - const setDelayedRefreshToken = () => { - globalTimeout = setTimeout(() => { - refreshToken(); - }, 4 * 60 * 1000); // 4 minutes x 60 seconds x 1000 milliseconds - }; - - useEffect(() => { - const initKeycloak = async () => { - keycloak.init(initOptions).then((auth) => { - setAuthenticated(auth); - setInitializedKeycloak(keycloak); - }); - }; - initKeycloak(); - }, []); - - if (!keycloak || !initializedKeycloak) { - return
<div>Loading...</div>
; - } - - if (!authenticated) { - return ; - } - - const { token } = initializedKeycloak; - axios.defaults.headers.common.Authorization = `Bearer ${token}`; - - axios.interceptors.request.use((config) => { - if (initializedKeycloak.isTokenExpired(150)) { // if the token is expiring by 2 mins, 30 secs - refreshToken(300); // refresh the token now - } - - return config; - }, (error) => (Promise.reject(error))); - - setDelayedRefreshToken(); - - return ; -}; - -export default keycloakContainer; diff --git a/react/src/uploads/UploadContainer.js b/react/src/uploads/UploadContainer.js index c550f864..0f1a292a 100644 --- a/react/src/uploads/UploadContainer.js +++ b/react/src/uploads/UploadContainer.js @@ -1,14 +1,15 @@ import { withRouter } from 'react-router-dom'; -import axios from 'axios'; import React, { useState, useEffect } from 'react'; import { - Paper, Alert, CircularProgress, Stack, + Paper, Alert, Stack, } from '@mui/material'; import ROUTES_UPLOAD from './routes'; import ROUTES_USERS from '../users/routes'; import UploadPage from './components/UploadPage'; import AlertDialog from '../app/components/AlertDialog'; import UsersContainer from '../users/UsersContainer'; +import Loading from '../app/components/Loading'; +import useAxios from '../app/utilities/useAxios'; const UploadContainer = () => { const [uploadFiles, setUploadFiles] = useState([]); // array of objects for files to be uploaded @@ -32,6 +33,8 @@ const UploadContainer = () => { } setReplaceData(choice); }; + const axios = useAxios() + const axiosDefault = useAxios(true) const refreshList = () => { setLoading(true); @@ -57,11 +60,7 @@ const UploadContainer = () => { const doUpload = () => uploadFiles.forEach((file) => { axios.get(ROUTES_UPLOAD.MINIO_URL).then((response) => { const { url: uploadUrl, minio_object_name: filename } = response.data; - axios.put(uploadUrl, file, { - headers: { - Authorization: null, - }, - }).then(() => { + axiosDefault.put(uploadUrl, file).then(() => { let replace = false; if (replaceData === true) { replace = true; @@ -112,11 +111,7 @@ const UploadContainer = () => { }, []); if (loading) { - return ( -
- -
- ); + return } return ( diff --git a/react/src/users/UsersContainer.js b/react/src/users/UsersContainer.js index 318f9338..d12f334e 100644 --- a/react/src/users/UsersContainer.js +++ b/react/src/users/UsersContainer.js @@ -1,14 +1,15 @@ import { withRouter } from 'react-router-dom'; -import axios from 'axios'; import CircularProgress from '@mui/material/CircularProgress'; import React, { useState, useEffect } from 'react'; import ROUTES_USERS from './routes'; import UsersPage from './components/UsersPage'; +import useAxios from '../app/utilities/useAxios'; const UsersContainer = () => { const [loading, setLoading] = useState(false); const [users, setUsers] = useState([]); const [userUpdates, setUserUpdates] = useState([]); + const axios = useAxios() const refreshDetails = () => { setLoading(true); diff --git a/react/webpack.config.js b/react/webpack.config.js index 6b1a5b08..eed6da6c 100644 --- a/react/webpack.config.js +++ b/react/webpack.config.js @@ -8,6 +8,7 @@ module.exports = { devServer: { historyApiFallback: true, hot: isDevelopment, + inline: isDevelopment }, devtool: 'source-map', resolve: { From 33d68037a7ffa3b82990b4384998c3561b9cea7e Mon Sep 17 00:00:00 2001 From: Emily <44536222+emi-hi@users.noreply.github.com> Date: Wed, 13 Mar 2024 14:45:07 -0700 Subject: [PATCH 053/152] feat: CTHUB 198 - save/update users (#218) * chore: -backend: updates users moved into new service -frontend: adds immer for setting state, uses original users array rather than creating new one * chore: moves transaction.atomic to service --- django/api/decorators/permission.py | 17 +++++- django/api/serializers/user.py | 10 +++- django/api/services/permissions.py | 13 +++++ django/api/services/user.py | 23 ++++++++ django/api/viewsets/user.py | 47 +++++++++------ react/package.json | 1 + react/src/uploads/UploadContainer.js | 7 ++- react/src/users/UsersContainer.js | 77 +++++++++++++++++++++---- react/src/users/components/UsersPage.js | 46 ++++++++------- react/src/users/routes.js | 3 +- 10 files changed, 185 insertions(+), 59 deletions(-) create mode 100644 django/api/services/permissions.py create mode 100644 django/api/services/user.py diff --git a/django/api/decorators/permission.py b/django/api/decorators/permission.py index f94256a0..a681f4e9 100644 --- a/django/api/decorators/permission.py +++ b/django/api/decorators/permission.py @@ -1,8 +1,7 @@ from rest_framework.response import Response from rest_framework import status -from api.models.user import User -from api.models.user_permission import UserPermission -from api.models.permission import Permission +from api.services.permissions import create_permission_list + def check_upload_permission(): def wrapper(func): def wrapped(request, *args, **kwargs): @@ -20,3 +19,15 @@ def wrapped(request, *args, **kwargs): return func(request, *args, **kwargs) return wrapped return wrapper + +def check_admin_permission(): + def wrapper(func): + def wrapped(request, *args, **kwargs): + permissions = create_permission_list(request.user) + if 'admin' not in permissions: + return Response( + "You do not have permission to make changes to other users' permissions.", status=status.HTTP_403_FORBIDDEN + ) + return func(request, *args, **kwargs) + return wrapped + return wrapper diff --git a/django/api/serializers/user.py b/django/api/serializers/user.py index fc2b490f..f4ffc6a6 100644 --- a/django/api/serializers/user.py +++ b/django/api/serializers/user.py @@ -17,7 +17,15 @@ class UserSerializer(ModelSerializer): def get_user_permissions(self, obj): user_permission 
= UserPermission.objects.filter(user_id=obj.id) permissions = PermissionSerializer(user_permission, read_only=True, many=True) - return permissions.data + admin = False + uploader = False + for i in permissions.data: + if i['description'] == 'admin': + admin = True + if i['description'] == 'uploader': + uploader = True + + return {'admin': admin, 'uploader': uploader} class Meta: model = User diff --git a/django/api/services/permissions.py b/django/api/services/permissions.py new file mode 100644 index 00000000..1c0d583a --- /dev/null +++ b/django/api/services/permissions.py @@ -0,0 +1,13 @@ +from api.models.user import User +from api.models.user_permission import UserPermission +from api.models.permission import Permission + +def create_permission_list(user): + user = User.objects.filter(idir=user).first() + user_permission = UserPermission.objects.filter(user_id=user.id) + permissions = [] + if user_permission: + for each in user_permission: + permission = Permission.objects.get(id=each.permission_id) + permissions.append(permission.description) + return permissions \ No newline at end of file diff --git a/django/api/services/user.py b/django/api/services/user.py new file mode 100644 index 00000000..520934be --- /dev/null +++ b/django/api/services/user.py @@ -0,0 +1,23 @@ +from django.db import transaction +from api.models.user_permission import UserPermission +from api.models.permission import Permission +from api.models.user import User + +@transaction.atomic +def update_permissions(self, request): + msg = [] + permissions = Permission.objects.all() + UserPermission.objects.all().delete() + for each in request.data: + for k, v in each.items(): + if k == 'idir': + user = User.objects.get(idir=v) + if k == 'user_permissions': + for permission_description, value in v.items(): + if value == True or (user.idir == request.user and permission_description == 'admin'): + ## if they are updating permissions then they are already admin user, they cannot remove their own admin + permission = permissions.get(description=permission_description) + try: + UserPermission.objects.create(user_id=user.id, permission_id=permission.id) + except Exception as error: + msg.append("{} permission could not be added to {}".format(permission_description, user.idir)) \ No newline at end of file diff --git a/django/api/viewsets/user.py b/django/api/viewsets/user.py index 13d0629c..7ee41019 100644 --- a/django/api/viewsets/user.py +++ b/django/api/viewsets/user.py @@ -1,9 +1,13 @@ +from django.utils.decorators import method_decorator +from rest_framework import status from rest_framework.decorators import action from rest_framework.permissions import AllowAny from rest_framework.response import Response from rest_framework.viewsets import GenericViewSet from api.models.user import User -from api.serializers.user import UserSerializer, UserSaveSerializer +from api.serializers.user import UserSerializer +from api.decorators.permission import check_admin_permission +from api.services.user import update_permissions class UserViewSet(GenericViewSet): """ @@ -16,8 +20,6 @@ class UserViewSet(GenericViewSet): serializer_classes = { 'default': UserSerializer, - 'update': UserSaveSerializer, - 'create': UserSaveSerializer, } @@ -28,6 +30,25 @@ def get_serializer_class(self): return self.serializer_classes['default'] + @action(detail=False, methods=['post']) + @method_decorator(check_admin_permission()) + def new(self, request): + user_to_insert = request.data['idir'].upper() + try: + 
User.objects.get_or_create(idir=user_to_insert) + return Response(user_to_insert, status=status.HTTP_200_OK) + except Exception as e: + return Response({"response": str(e)}, status=status.HTTP_400_BAD_REQUEST) + + @action(detail=False, methods=['put']) + @method_decorator(check_admin_permission()) + def update_permissions(self, request): + error_msg = update_permissions(self, request) + if error_msg: + return Response(error_msg, status=status.HTTP_400_BAD_REQUEST) + else: + return Response('User permissions were updated!', status=status.HTTP_201_CREATED) + @action(detail=False) def current(self, request): """ @@ -37,20 +58,8 @@ def current(self, request): serializer = self.get_serializer(user) return Response(serializer.data) + @method_decorator(check_admin_permission()) def list(self, request): - request = self.request - ##check if user is admin before producing list of all users - users = User.objects.all() - current_user = users.filter(idir=request.user).first() - if current_user: - current_user_serializer = UserSerializer(current_user) - current_user_permissions = current_user_serializer.data['user_permissions'] - is_admin = False - if current_user_permissions: - for i in current_user_permissions: - for v in i.values(): - if v == 'admin': - is_admin = True - if is_admin == True: - serializer = UserSerializer(users, many=True) - return Response(serializer.data) \ No newline at end of file + users = User.objects.all().order_by('idir') + serializer = UserSerializer(users, many=True) + return Response(serializer.data) \ No newline at end of file diff --git a/react/package.json b/react/package.json index 9ec00bf3..32b09dc4 100644 --- a/react/package.json +++ b/react/package.json @@ -11,6 +11,7 @@ "axios": "^0.24.0", "buffer": "^6.0.3", "crypto-browserify": "^3.12.0", + "immer": "^10.0.4", "jsonwebtoken": "^8.5.1", "keycloak-js": "^15.0.2", "process": "^0.11.10", diff --git a/react/src/uploads/UploadContainer.js b/react/src/uploads/UploadContainer.js index 0f1a292a..d07f0681 100644 --- a/react/src/uploads/UploadContainer.js +++ b/react/src/uploads/UploadContainer.js @@ -19,6 +19,7 @@ const UploadContainer = () => { const [replaceData, setReplaceData] = useState('false'); // if true, we will replace all const [alertContent, setAlertContent] = useState(); const [alert, setAlert] = useState(false); + const [currentUser, setCurrentUser] = useState(''); const [alertSeverity, setAlertSeverity] = useState(''); // existing data with what is being uploaded const [open, setOpen] = useState(false); @@ -42,9 +43,9 @@ const UploadContainer = () => { setDatasetList(response.data); setLoading(false); axios.get(ROUTES_USERS.CURRENT).then((currentUserResp) => { - const permissions = currentUserResp.data.user_permissions.map((each) => each.description); - if (permissions.includes('admin')) { + if (currentUserResp.data.user_permissions.admin === true) { setAdminUser(true); + setCurrentUser(currentUserResp.data.idir); } }); }); @@ -148,7 +149,7 @@ const UploadContainer = () => { {adminUser && ( - + )} diff --git a/react/src/users/UsersContainer.js b/react/src/users/UsersContainer.js index d12f334e..ceada6ed 100644 --- a/react/src/users/UsersContainer.js +++ b/react/src/users/UsersContainer.js @@ -1,25 +1,69 @@ import { withRouter } from 'react-router-dom'; -import CircularProgress from '@mui/material/CircularProgress'; -import React, { useState, useEffect } from 'react'; +import PropTypes from 'prop-types'; +import { CircularProgress, Alert } from '@mui/material'; +import React, { useState, useEffect, 
useCallback } from 'react'; +import { produce } from 'immer'; import ROUTES_USERS from './routes'; import UsersPage from './components/UsersPage'; import useAxios from '../app/utilities/useAxios'; -const UsersContainer = () => { +const UsersContainer = (props) => { + const { currentUser } = props; const [loading, setLoading] = useState(false); const [users, setUsers] = useState([]); - const [userUpdates, setUserUpdates] = useState([]); - const axios = useAxios() + const [newUser, setNewUser] = useState(''); + const [permissionMessage, setPermissionMessage] = useState(''); + const [messageSeverity, setMessageSeverity] = useState(''); + const axios = useAxios(); - const refreshDetails = () => { + const handleCheckboxChange = useCallback((event) => { + const idir = event.target.name; + const permissionType = event.target.id; + const { checked } = event.target; + setUsers( + produce((draft) => { + const user = draft.find((user) => user.idir === idir); + user.user_permissions[permissionType] = checked; + }), + ); + }, []); + + const handleAddNewUser = () => { + axios.post(ROUTES_USERS.CREATE, { idir: newUser }) + .then((response) => { + const userAdded = response.data; + setMessageSeverity('success'); + setPermissionMessage(`${userAdded} was added to the user list`); + const userObject = { idir: userAdded, user_permissions: { admin: false, uploader: false } }; + setUsers( + produce((draft) => { + draft.push(userObject); + }), + ); + }) + .catch((error) => { + setMessageSeverity('error'); + setPermissionMessage('new user could not be added, sorry!'); + }); + }; + + const handleSubmitPermissionUpdates = () => { + axios.put(ROUTES_USERS.UPDATE, users) + .then((response) => { + setMessageSeverity('success'); + setPermissionMessage(response.data); + }) + .catch((error) => { + setMessageSeverity('error'); + setPermissionMessage(error.data); + }); + }; + + useEffect(() => { setLoading(true); axios.get(ROUTES_USERS.LIST).then((listResponse) => { setUsers(listResponse.data); }); - }; - - useEffect(() => { - refreshDetails(); setLoading(false); }, []); @@ -32,8 +76,19 @@ const UsersContainer = () => { } return (
- + {permissionMessage && <Alert severity={messageSeverity}>{permissionMessage}</Alert>}
); }; +UsersContainer.propTypes = { + currentUser: PropTypes.string.isRequired, +}; export default withRouter(UsersContainer); diff --git a/react/src/users/components/UsersPage.js b/react/src/users/components/UsersPage.js index 68a00ace..e6076840 100644 --- a/react/src/users/components/UsersPage.js +++ b/react/src/users/components/UsersPage.js @@ -1,33 +1,32 @@ import React from 'react'; import PropTypes from 'prop-types'; import { - Box, Button, Grid, TextField, Checkbox, + Box, Button, Grid, TextField, Checkbox, Tooltip, } from '@mui/material'; import ClearIcon from '@mui/icons-material/Clear'; import SaveIcon from '@mui/icons-material/Save'; const UsersPage = (props) => { - const { users, userUpdates, setUserUpdates } = props; - + const { + currentUser, + users, + handleAddNewUser, + setNewUser, + handleCheckboxChange, + handleSubmitPermissionUpdates, + } = props; const userRow = (user) => { - const userPerms = { admin: false, uploader: false }; - user.user_permissions.forEach((permission) => { - userPerms[permission.description] = true; - }); - - const handleRadioChange = (event) => { - const { checked } = event.target; - const permissionType = event.target.id; - console.log(permissionType); - console.log(userPerms); - userPerms[permissionType] = checked; - console.log(userPerms[permissionType]); - }; + const disableAdmin = currentUser === user.idir; return ( + - { handleRadioChange(event); }} /> - { handleRadioChange(event); }} /> + { handleCheckboxChange(event); }} /> + + + { handleCheckboxChange(event); }} /> + + {user.idir} @@ -53,10 +52,10 @@ const UsersPage = (props) => { - + { setNewUser(event.target.value); }} /> - @@ -76,7 +75,7 @@ const UsersPage = (props) => { userRow(user) ))} - @@ -87,5 +86,10 @@ const UsersPage = (props) => { }; UsersPage.propTypes = { users: PropTypes.arrayOf(PropTypes.shape()).isRequired, + handleAddNewUser: PropTypes.func.isRequired, + setNewUser: PropTypes.func.isRequired, + handleCheckboxChange: PropTypes.func.isRequired, + handleSubmitPermissionUpdates: PropTypes.func.isRequired, + currentUser: PropTypes.string.isRequired, }; export default UsersPage; diff --git a/react/src/users/routes.js b/react/src/users/routes.js index 945af831..ef19fa16 100644 --- a/react/src/users/routes.js +++ b/react/src/users/routes.js @@ -3,7 +3,8 @@ const API_BASE_PATH = '/api/users'; const USERS = { LIST: API_BASE_PATH, CURRENT: `${API_BASE_PATH}/current`, - + CREATE: `${API_BASE_PATH}/new`, + UPDATE: `${API_BASE_PATH}/update_permissions`, }; export default USERS; From 90a27cafdeb2d0afe4986d675bbb18a2607f2242 Mon Sep 17 00:00:00 2001 From: Emily <44536222+emi-hi@users.noreply.github.com> Date: Wed, 13 Mar 2024 14:47:00 -0700 Subject: [PATCH 054/152] feat CTHUB 212 : clear upload message (#219) * feat: removes upload message when new dataset is selected or file is dropped * chore: sets alert to false instead of empty string --- react/src/uploads/UploadContainer.js | 1 + react/src/uploads/components/FileDrop.js | 3 +++ react/src/uploads/components/FileDropArea.js | 3 +++ react/src/uploads/components/UploadPage.js | 6 +++++- 4 files changed, 12 insertions(+), 1 deletion(-) diff --git a/react/src/uploads/UploadContainer.js b/react/src/uploads/UploadContainer.js index d07f0681..ea935acb 100644 --- a/react/src/uploads/UploadContainer.js +++ b/react/src/uploads/UploadContainer.js @@ -144,6 +144,7 @@ const UploadContainer = () => { replaceData={replaceData} handleRadioChange={handleRadioChange} downloadSpreadsheet={downloadSpreadsheet} + setAlert={setAlert} /> {adminUser diff 
--git a/react/src/uploads/components/FileDrop.js b/react/src/uploads/components/FileDrop.js index 528f2d5b..cdf0ba55 100644 --- a/react/src/uploads/components/FileDrop.js +++ b/react/src/uploads/components/FileDrop.js @@ -7,9 +7,11 @@ import { useDropzone } from 'react-dropzone'; const FileDrop = (props) => { const { setFiles, + setAlert, } = props; const [dropMessage, setDropMessage] = useState(''); const onDrop = useCallback((files) => { + setAlert(false) setDropMessage(''); setFiles(files); }, []); @@ -46,6 +48,7 @@ FileDrop.propTypes = { setFiles: PropTypes.func, maxFiles: PropTypes.number, allowedFileTypes: PropTypes.string, + setAlert: PropTypes.func.isRequired, }; export default FileDrop; diff --git a/react/src/uploads/components/FileDropArea.js b/react/src/uploads/components/FileDropArea.js index f47c7be7..62e2cc7a 100644 --- a/react/src/uploads/components/FileDropArea.js +++ b/react/src/uploads/components/FileDropArea.js @@ -9,6 +9,7 @@ const FileDropArea = (props) => { const { setUploadFiles, uploadFiles, + setAlert, } = props; const removeFile = (removedFile) => { @@ -48,6 +49,7 @@ const FileDropArea = (props) => {
@@ -75,5 +77,6 @@ const FileDropArea = (props) => { FileDropArea.propTypes = { setUploadFiles: PropTypes.func.isRequired, uploadFiles: PropTypes.arrayOf(PropTypes.shape()).isRequired, + setAlert: PropTypes.func.isRequired, }; export default FileDropArea; diff --git a/react/src/uploads/components/UploadPage.js b/react/src/uploads/components/UploadPage.js index b8bba88e..e3bc9919 100644 --- a/react/src/uploads/components/UploadPage.js +++ b/react/src/uploads/components/UploadPage.js @@ -17,6 +17,7 @@ const UploadPage = (props) => { replaceData, handleRadioChange, downloadSpreadsheet, + setAlert, } = props; const selectionList = datasetList.map((obj, index) => ( @@ -36,7 +37,7 @@ const UploadPage = (props) => { @@ -68,6 +69,7 @@ const UploadPage = (props) => {
@@ -104,5 +106,7 @@ UploadPage.propTypes = { PropTypes.bool, ]).isRequired, handleRadioChange: PropTypes.func.isRequired, + downloadSpreadsheet: PropTypes.func.isRequired, + setAlert: PropTypes.func.isRequired, }; export default UploadPage; From de6b1dd3ba7b54ffc36fc0b0ecc7a887497b134f Mon Sep 17 00:00:00 2001 From: tim738745 <98717409+tim738745@users.noreply.github.com> Date: Wed, 13 Mar 2024 14:52:05 -0700 Subject: [PATCH 055/152] feat: 197 - header and footer components (#216) * feat: 197 - header and footer components * remove header and footer from login page --- django/api/keycloak_authentication.py | 14 ---- react/src/App.js | 32 ++++---- react/src/app/components/Footer.js | 42 ++++++++++ react/src/app/components/Header.js | 25 ++++++ react/src/app/components/Layout.js | 15 ++++ react/src/app/components/Logout.js | 19 +++-- react/src/app/styles/App.scss | 12 ++- react/src/app/styles/Footer.scss | 66 ++++++++++++++++ react/src/app/styles/Header.scss | 108 ++++++++++++++++++++++++++ react/src/app/styles/index.scss | 3 + react/src/app/styles/variables.scss | 20 +++++ 11 files changed, 316 insertions(+), 40 deletions(-) create mode 100644 react/src/app/components/Footer.js create mode 100644 react/src/app/components/Header.js create mode 100644 react/src/app/components/Layout.js create mode 100644 react/src/app/styles/Footer.scss create mode 100644 react/src/app/styles/Header.scss create mode 100644 react/src/app/styles/variables.scss diff --git a/django/api/keycloak_authentication.py b/django/api/keycloak_authentication.py index 0edbd69d..d44f7a03 100644 --- a/django/api/keycloak_authentication.py +++ b/django/api/keycloak_authentication.py @@ -49,20 +49,6 @@ def authenticate(self, request): options=options ) - # Get the user from the keycloak server based on the token - print( - "identity_provider: {identity_provider}, idir_username: {idir_username}, email: {email}, preferred_username: {preferred_username}".format( - identity_provider=token_info.get( - "identity_provider", "no identity_provider" - ), - idir_username=token_info.get("idir_username", "no idir_username"), - email=token_info.get("email", "no email"), - preferred_username=token_info.get( - "preferred_username", "no preferred_username" - ), - ) - ) - user_info = keycloak_openid.userinfo(token) if user_info.get('user_id') != token_info.get('user_id'): raise exceptions.AuthenticationFailed( diff --git a/react/src/App.js b/react/src/App.js index d7b50cec..6ca2838f 100644 --- a/react/src/App.js +++ b/react/src/App.js @@ -12,6 +12,7 @@ import UploadRouter from './uploads/router'; import DashboardContainer from './dashboard/DashboardContainer'; import useKeycloak from './app/utilities/useKeycloak' import Login from './Login'; +import Layout from './app/components/Layout' const { ENABLE_KEYCLOAK } = settings; @@ -23,26 +24,21 @@ const App = () => { return } return ( -
-
-
- -
- + +
+
+ + + {IcbcDataRouter()} + {UploadRouter()} + + + + +
-
-
- - - {IcbcDataRouter()} - {UploadRouter()} - - - - -
-
+ ); }; diff --git a/react/src/app/components/Footer.js b/react/src/app/components/Footer.js new file mode 100644 index 00000000..677de896 --- /dev/null +++ b/react/src/app/components/Footer.js @@ -0,0 +1,42 @@ +import React from 'react' + +const Footer = () => { + return ( +
+ ); + }; + + export default Footer; \ No newline at end of file diff --git a/react/src/app/components/Header.js b/react/src/app/components/Header.js new file mode 100644 index 00000000..d4fb7cbd --- /dev/null +++ b/react/src/app/components/Header.js @@ -0,0 +1,25 @@ +import React from 'react'; +import logo from '../styles/images/BCID_H_rgb_rev.png'; +import Logout from './Logout' + +const Header = () => { + return ( + + ); + }; + +export default Header; \ No newline at end of file diff --git a/react/src/app/components/Layout.js b/react/src/app/components/Layout.js new file mode 100644 index 00000000..b8c1aad8 --- /dev/null +++ b/react/src/app/components/Layout.js @@ -0,0 +1,15 @@ +import React from 'react' +import Header from './Header' +import Footer from './Footer' + +const Layout = ({ children }) => { + return ( +
+    <div className="layout">
+      <Header />
+      <div className="page-content">{children}</div>
+      <Footer />
+    </div>
+ ) +} + +export default Layout \ No newline at end of file diff --git a/react/src/app/components/Logout.js b/react/src/app/components/Logout.js index 66104646..31a4031f 100644 --- a/react/src/app/components/Logout.js +++ b/react/src/app/components/Logout.js @@ -4,14 +4,19 @@ import useKeycloak from '../utilities/useKeycloak' const Logout = () => { const keycloak = useKeycloak(); if (keycloak.authenticated) { + const kcToken = keycloak.tokenParsed; return ( - +
+ {'Logged in as: ' + kcToken.idir_username + ' |'} + +
) } return null diff --git a/react/src/app/styles/App.scss b/react/src/app/styles/App.scss index 89b60e10..7081c66a 100644 --- a/react/src/app/styles/App.scss +++ b/react/src/app/styles/App.scss @@ -96,4 +96,14 @@ h4 { .text-button { color: #1a5a96 !important; text-decoration: underline !important; -} \ No newline at end of file +} + +.layout { + display: flex; + flex-direction: column; + height: 100vh; +} + +.page-content { + flex-grow: 1; +} diff --git a/react/src/app/styles/Footer.scss b/react/src/app/styles/Footer.scss new file mode 100644 index 00000000..80595d1a --- /dev/null +++ b/react/src/app/styles/Footer.scss @@ -0,0 +1,66 @@ +footer { + position: relative; + width: 100%; + bottom: 0; + background-color: #036; + border-top: 2px solid #fcba19; + color: #fff; + font-family: ‘BCSans’, ‘Noto Sans’, Verdana, Arial, sans-serif; + .container { + display: flex; + justify-content: center; + flex-direction: column; + text-align: center; + height: 46px; + } + ul { + display: flex; + flex-direction: row; + flex-wrap: wrap; + margin: 0; + color: #fff; + list-style: none; + align-items: center; + height: 100%; + li a { + font-size: 0.813em; + font-weight: normal; /* 400 */ + color: #fff; + border-right: 1px solid #4b5e7e; + padding-left: 5px; + padding-right: 5px; + text-decoration: none; + } + a:hover { + color: #fff; + text-decoration: underline; + } + a:focus { + outline: 4px solid #3b99fc; + outline-offset: 1px; + } + } + } + @media (max-width: 600px) { + .footer { + .container { + ul li a { + font-size: 0.8rem; + line-height: 0.9rem; + } + } + } + } + @media (max-width: 485px) { + .footer { + height: 3.5rem; + .container { + justify-content: space-evenly; + } + } + } + @media (max-width: 282px) { + .footer { + height: 6rem; + } + } \ No newline at end of file diff --git a/react/src/app/styles/Header.scss b/react/src/app/styles/Header.scss new file mode 100644 index 00000000..d287abc6 --- /dev/null +++ b/react/src/app/styles/Header.scss @@ -0,0 +1,108 @@ +.cthub-banner { + background-color: $banner-blue; + display: flex; + border-bottom: 3px solid $border-orange; + flex-direction: row; + justify-content: space-between; + align-items: center; + padding-left: 6rem; + padding-right: 6rem; + } + + .cthub-banner .left, + .cthub-banner .right { + display: flex; + flex-direction: row; + align-items: center; + font-size: 1.25rem; + a { + color: $white; + text-decoration: none; + font-weight: bold; + } + img { + height: 4.5rem; + width: auto; + } + .logout { + color: $white; + font-size: 1rem; + .logoutButton { + cursor: pointer; + border: none; + background: none; + font: inherit; + color: inherit; + } + } + } + + .page-header { + background-color: $banner-blue; + width: 100%; + //height: 191px; + + .title { + padding-left: 6rem; + h1 { + color: $white; + } + } + } + + @media (max-width: 992px) { + .cthub-banner { + font-weight: normal; + flex-direction: column; + img { + margin-left: 1rem; + height: 3rem; + } + .logout { + font-size: 0.75rem; + } + } + .page-header { + width: 100%; + position: relative; + margin: 0; + height: 180px; + .title { + margin: 5px 0 0 1rem; + padding-left: 0; + h1 { + font-size: 2rem; + } + } + } + } + @media (max-width: 688px) { + .page-header { + .title { + h1 { + font-size: 1.75rem; + } + } + } + } + @media (max-width: 380px) { + .page-header { + a { + font-size: 10pt; + } + .title { + h1 { + font-size: 1.5rem; + } + } + } + } + @media (max-width: 330px) { + .page-header { + .title { + h1 { + font-size: 1.2rem; + } + } + } + } \ No newline at end of file diff --git 
a/react/src/app/styles/index.scss b/react/src/app/styles/index.scss index 1a7bf42a..5845cf87 100644 --- a/react/src/app/styles/index.scss +++ b/react/src/app/styles/index.scss @@ -1,6 +1,9 @@ +@import 'variables.scss'; @import 'App.scss'; @import 'Login.scss'; @import 'ReactTable.scss'; @import 'FileUpload.scss'; @import 'Roboto.scss'; @import 'Users.scss'; +@import 'Header.scss'; +@import 'Footer.scss'; diff --git a/react/src/app/styles/variables.scss b/react/src/app/styles/variables.scss new file mode 100644 index 00000000..3d386ad6 --- /dev/null +++ b/react/src/app/styles/variables.scss @@ -0,0 +1,20 @@ +// Variables for colors // +$text-grey-paragraph: rgb(96, 96, 96); + +$border-orange: #fcbc19; + +$white: #ffffff; +$black: #000000; + +$background-grey-light: #edebeb; +$grey-dark: #8c847c; + +$button-blue: #003155; +$banner-blue: #003366; + +$default-link-blue: #1a5a96; +$default-background-grey: #f2f2f2; + +$background-light-blue: #e7f4f7; + +$table-border: rgba(49, 49, 50, 0.33); \ No newline at end of file From 3ea275e8227d7fe1f8ca2778cc68b2dbf4aad2d8 Mon Sep 17 00:00:00 2001 From: tim738745 <98717409+tim738745@users.noreply.github.com> Date: Thu, 14 Mar 2024 09:48:50 -0700 Subject: [PATCH 056/152] feat: 210 - page refinements + cleanup (#223) --- README.md | 10 +++++ django/api/decorators/permission.py | 3 ++ django/api/serializers/user.py | 47 +++++++++++--------- django/api/services/generic.py | 8 ++++ django/api/services/permissions.py | 22 ++++++++- django/api/services/user.py | 34 +++++++------- django/api/viewsets/user.py | 29 ++++++------ react/src/app/styles/App.scss | 5 +++ react/src/app/styles/FileUpload.scss | 6 +++ react/src/uploads/UploadContainer.js | 5 ++- react/src/uploads/components/FileDrop.js | 6 ++- react/src/uploads/components/FileDropArea.js | 3 ++ react/src/uploads/components/UploadPage.js | 10 ++++- react/src/users/UsersContainer.js | 2 +- react/src/users/components/UsersPage.js | 2 +- react/src/users/routes.js | 2 +- 16 files changed, 130 insertions(+), 64 deletions(-) create mode 100644 django/api/services/generic.py diff --git a/README.md b/README.md index 9cfa11f6..948af573 100644 --- a/README.md +++ b/README.md @@ -32,6 +32,16 @@ The Clean Transportation Data Hub provides an evidence base for the Clean Transp - This is where you can make changes to your package.json - You can technically make changes to your packages without going into your container, but you'll need npm installed into your system +# Rebasing Guide +- To rebase your branch onto the latest release branch: + - ```git fetch upstream``` + - ```git checkout your_branch``` + - ```git rebase --onto A B``` + - Where `upstream` is the remote containing the release branch, and `A` is the hash of the latest commit to the release branch, and `B` is the hash of the commit in `your_branch` such that every commit after `B` ought to be rebased onto the release branch. + - If you run into conflicts while rebasing, you can resolve them in your IDE, and `git add` the resolved changes before finishing the rebase (committing). + - The rebased commits will have different hashes than the old ones, so if you previously pushed `your_branch` to a remote you will have to `git push --force` in order not to end up with additional commits in your remote branch. + - On Github, you can modify the base branch of a PR if you're rebasing from a branch based on a previous release branch to the latest release branch. + # License The code is a fork from Richard's personal project. 
Please do not clone, copy or replicate this project unless you're authorized to do so. diff --git a/django/api/decorators/permission.py b/django/api/decorators/permission.py index a681f4e9..beb030bc 100644 --- a/django/api/decorators/permission.py +++ b/django/api/decorators/permission.py @@ -1,6 +1,9 @@ from rest_framework.response import Response from rest_framework import status from api.services.permissions import create_permission_list +from api.models.permission import Permission +from api.models.user import User +from api.models.user_permission import UserPermission def check_upload_permission(): def wrapper(func): diff --git a/django/api/serializers/user.py b/django/api/serializers/user.py index f4ffc6a6..93946f77 100644 --- a/django/api/serializers/user.py +++ b/django/api/serializers/user.py @@ -2,11 +2,11 @@ Further reading: https://www.django-rest-framework.org/api-guide/serializers/ """ -from rest_framework.serializers import ModelSerializer, SerializerMethodField +from rest_framework.serializers import ModelSerializer, SerializerMethodField, ValidationError from api.models.user import User from api.models.user_permission import UserPermission -from api.serializers.permission import PermissionSerializer +from api.services.permissions import get_permissions_representation class UserSerializer(ModelSerializer): """ @@ -15,30 +15,33 @@ class UserSerializer(ModelSerializer): user_permissions = SerializerMethodField() def get_user_permissions(self, obj): - user_permission = UserPermission.objects.filter(user_id=obj.id) - permissions = PermissionSerializer(user_permission, read_only=True, many=True) - admin = False - uploader = False - for i in permissions.data: - if i['description'] == 'admin': - admin = True - if i['description'] == 'uploader': - uploader = True - - return {'admin': admin, 'uploader': uploader} + user_permission = UserPermission.objects.select_related("permission").filter(user_id=obj.id) + permissions = [] + for each in user_permission: + permissions.append(each.permission) + return get_permissions_representation(permissions) + + def validate_idir(self, value): + if isinstance(value, str) and value.strip(): + return value.strip().upper() + raise ValidationError("IDIR error!") + + def create(self, validated_data): + return User.objects.create(**validated_data) class Meta: model = User fields = ('idir', 'user_permissions') +# requires permissions_map object +class UserListSerializer(ModelSerializer): + user_permissions = SerializerMethodField() -class UserSaveSerializer(ModelSerializer): - def update(self, instance, validated_data): - request = self.context.get('request') - permissions = validated_data.pop('permissions') - print(request) - print(permissions) - #check if user exists, if not add them + def get_user_permissions(self, obj): + permissions_map = self.context.get("permissions_map") + permissions = permissions_map.get(obj) + return get_permissions_representation(permissions) - #update user_permissions - + class Meta: + model = User + fields = ('idir', 'user_permissions') diff --git a/django/api/services/generic.py b/django/api/services/generic.py new file mode 100644 index 00000000..86f46562 --- /dev/null +++ b/django/api/services/generic.py @@ -0,0 +1,8 @@ +# gets a map of specified model values to model instances +def get_objects_map(qs, key_field): + result = {} + for object in qs: + key = getattr(object, key_field, None) + if key: + result[key] = object + return result diff --git a/django/api/services/permissions.py b/django/api/services/permissions.py 
index 1c0d583a..fa597983 100644 --- a/django/api/services/permissions.py +++ b/django/api/services/permissions.py @@ -10,4 +10,24 @@ def create_permission_list(user): for each in user_permission: permission = Permission.objects.get(id=each.permission_id) permissions.append(permission.description) - return permissions \ No newline at end of file + return permissions + + +def get_permissions_map(users): + result = {} + user_permissions = UserPermission.objects.select_related("user", "permission").filter(user__in=users) + for each in user_permissions: + user = each.user + permission = each.permission + if not user in result: + result[user] = [] + result[user].append(permission) + return result + + +def get_permissions_representation(permissions): + result = {} + if permissions is not None: + for permission in permissions: + result[permission.description] = True + return result \ No newline at end of file diff --git a/django/api/services/user.py b/django/api/services/user.py index 520934be..f1c62f37 100644 --- a/django/api/services/user.py +++ b/django/api/services/user.py @@ -2,22 +2,24 @@ from api.models.user_permission import UserPermission from api.models.permission import Permission from api.models.user import User +from api.services.generic import get_objects_map + +# this deletes all records in user_permission, and adds the new ones @transaction.atomic -def update_permissions(self, request): - msg = [] - permissions = Permission.objects.all() +def update_permissions(user_permissions): + permissions_map = get_objects_map(Permission.objects.all(), "description") + users_map = get_objects_map(User.objects.all(), "idir") UserPermission.objects.all().delete() - for each in request.data: - for k, v in each.items(): - if k == 'idir': - user = User.objects.get(idir=v) - if k == 'user_permissions': - for permission_description, value in v.items(): - if value == True or (user.idir == request.user and permission_description == 'admin'): - ## if they are updating permissions then they are already admin user, they cannot remove their own admin - permission = permissions.get(description=permission_description) - try: - UserPermission.objects.create(user_id=user.id, permission_id=permission.id) - except Exception as error: - msg.append("{} permission could not be added to {}".format(permission_description, user.idir)) \ No newline at end of file + user_permissions_to_add = [] + for each in user_permissions: + idir = each["idir"] + permissions = each["user_permissions"] + user = users_map.get(idir) + permission_objects = [] + for description, value in permissions.items(): + if value == True: + permission_objects.append(permissions_map.get(description)) + for permission_object in permission_objects: + user_permissions_to_add.append(UserPermission(user=user, permission=permission_object)) + UserPermission.objects.bulk_create(user_permissions_to_add) diff --git a/django/api/viewsets/user.py b/django/api/viewsets/user.py index 7ee41019..cfc47995 100644 --- a/django/api/viewsets/user.py +++ b/django/api/viewsets/user.py @@ -4,12 +4,14 @@ from rest_framework.permissions import AllowAny from rest_framework.response import Response from rest_framework.viewsets import GenericViewSet +from rest_framework.mixins import CreateModelMixin from api.models.user import User -from api.serializers.user import UserSerializer +from api.serializers.user import UserSerializer, UserListSerializer from api.decorators.permission import check_admin_permission from api.services.user import update_permissions +from 
api.services.permissions import get_permissions_map -class UserViewSet(GenericViewSet): +class UserViewSet(GenericViewSet, CreateModelMixin): """ This viewset automatically provides `list`, `create`, `retrieve`, and `update` actions. @@ -30,24 +32,19 @@ def get_serializer_class(self): return self.serializer_classes['default'] - @action(detail=False, methods=['post']) @method_decorator(check_admin_permission()) - def new(self, request): - user_to_insert = request.data['idir'].upper() - try: - User.objects.get_or_create(idir=user_to_insert) - return Response(user_to_insert, status=status.HTTP_200_OK) - except Exception as e: - return Response({"response": str(e)}, status=status.HTTP_400_BAD_REQUEST) + def create(self, request): + return super().create(request) @action(detail=False, methods=['put']) @method_decorator(check_admin_permission()) def update_permissions(self, request): - error_msg = update_permissions(self, request) - if error_msg: - return Response(error_msg, status=status.HTTP_400_BAD_REQUEST) - else: - return Response('User permissions were updated!', status=status.HTTP_201_CREATED) + user_permissions = request.data + try: + update_permissions(user_permissions) + except Exception as e: + return Response(str(e), status=status.HTTP_400_BAD_REQUEST) + return Response('User permissions were updated!', status=status.HTTP_201_CREATED) @action(detail=False) def current(self, request): @@ -61,5 +58,5 @@ def current(self, request): @method_decorator(check_admin_permission()) def list(self, request): users = User.objects.all().order_by('idir') - serializer = UserSerializer(users, many=True) + serializer = UserListSerializer(users, many=True, context={"permissions_map": get_permissions_map(users)}) return Response(serializer.data) \ No newline at end of file diff --git a/react/src/app/styles/App.scss b/react/src/app/styles/App.scss index 7081c66a..925937f8 100644 --- a/react/src/app/styles/App.scss +++ b/react/src/app/styles/App.scss @@ -96,6 +96,11 @@ h4 { .text-button { color: #1a5a96 !important; text-decoration: underline !important; + text-transform: none !important; +} + +.button-lowercase { + text-transform: none !important; } .layout { diff --git a/react/src/app/styles/FileUpload.scss b/react/src/app/styles/FileUpload.scss index e9226119..29c37000 100644 --- a/react/src/app/styles/FileUpload.scss +++ b/react/src/app/styles/FileUpload.scss @@ -40,3 +40,9 @@ padding-bottom: 1rem; margin-top: 2rem; } + +.file-upload { + &.disabled { + background-color: $default-background-grey; + } +} diff --git a/react/src/uploads/UploadContainer.js b/react/src/uploads/UploadContainer.js index ea935acb..4acac51a 100644 --- a/react/src/uploads/UploadContainer.js +++ b/react/src/uploads/UploadContainer.js @@ -115,11 +115,11 @@ const UploadContainer = () => { return } + const alertElement = alert && alertContent && alertSeverity ? {alertContent} : null + return (
- {alert && alertContent && alertSeverity - && {alertContent}} {open && ( { { const { + disabled, setFiles, setAlert, } = props; @@ -16,10 +17,11 @@ const FileDrop = (props) => { setFiles(files); }, []); const { getRootProps, getInputProps } = useDropzone({ onDrop }); + const uploadBoxClassNames = disabled ? "file-upload disabled" : "file-upload" return (
- -
+ +

Drag and Drop files here or diff --git a/react/src/uploads/components/FileDropArea.js b/react/src/uploads/components/FileDropArea.js index 62e2cc7a..680a088f 100644 --- a/react/src/uploads/components/FileDropArea.js +++ b/react/src/uploads/components/FileDropArea.js @@ -7,6 +7,7 @@ import getFileSize from '../../app/utilities/getFileSize'; const FileDropArea = (props) => { const { + disabled, setUploadFiles, uploadFiles, setAlert, @@ -49,6 +50,7 @@ const FileDropArea = (props) => {
@@ -75,6 +77,7 @@ const FileDropArea = (props) => { ); }; FileDropArea.propTypes = { + disabled: PropTypes.bool.isRequired, setUploadFiles: PropTypes.func.isRequired, uploadFiles: PropTypes.arrayOf(PropTypes.shape()).isRequired, setAlert: PropTypes.func.isRequired, diff --git a/react/src/uploads/components/UploadPage.js b/react/src/uploads/components/UploadPage.js index e3bc9919..21b2fccd 100644 --- a/react/src/uploads/components/UploadPage.js +++ b/react/src/uploads/components/UploadPage.js @@ -8,6 +8,7 @@ import FileDropArea from './FileDropArea'; const UploadPage = (props) => { const { + alertElement, datasetList, datasetSelected, doUpload, @@ -28,6 +29,7 @@ const UploadPage = (props) => { <>

Upload Program Data

+ {alertElement}

@@ -41,7 +43,7 @@ const UploadPage = (props) => { > {selectionList} - +

@@ -53,6 +55,7 @@ const UploadPage = (props) => { onChange={handleRadioChange} > @@ -60,6 +63,7 @@ const UploadPage = (props) => { label="Replace existing data" /> } label="Add to existing data" @@ -69,6 +73,7 @@ const UploadPage = (props) => {
{ + + + + + @@ -84,6 +90,11 @@ const UsersPage = (props) => { ); }; +UsersPage.defaultProps = { + newUser: '', + setMessage: '', +}; + UsersPage.propTypes = { users: PropTypes.arrayOf(PropTypes.shape()).isRequired, handleAddNewUser: PropTypes.func.isRequired, @@ -91,5 +102,7 @@ UsersPage.propTypes = { handleCheckboxChange: PropTypes.func.isRequired, handleSubmitPermissionUpdates: PropTypes.func.isRequired, currentUser: PropTypes.string.isRequired, + newUser: PropTypes.string, + setMessage: PropTypes.string, }; export default UsersPage; From 58d5470047cad21208d5a2fb85dcbce1481ba6e7 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 14 Mar 2024 11:04:18 -0700 Subject: [PATCH 058/152] update frontend build --- .github/workflows/dev-ci.yaml | 78 +++++++-------- .../frontend/frontend-bc-docker.yaml | 95 +++++++++++++++++++ react/Dockerfile-Openshift | 22 +++++ react/nginx.conf | 64 +++++++++++++ 4 files changed, 220 insertions(+), 39 deletions(-) create mode 100644 openshift/templates/frontend/frontend-bc-docker.yaml create mode 100644 react/Dockerfile-Openshift create mode 100644 react/nginx.conf diff --git a/.github/workflows/dev-ci.yaml b/.github/workflows/dev-ci.yaml index a62484c5..8c76132c 100644 --- a/.github/workflows/dev-ci.yaml +++ b/.github/workflows/dev-ci.yaml @@ -57,56 +57,56 @@ jobs: insecure_skip_tls_verify: true namespace: ${{ env.TOOLS_NAMESPACE }} - - name: Build CTHUB Backend - run: | - cd openshift/templates/backend - oc process -f ./backend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} - sleep 5s - oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 - oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} + # - name: Build CTHUB Backend + # run: | + # cd openshift/templates/backend + # oc process -f ./backend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} + # sleep 5s + # oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 + # oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} - name: Build CTHUB Frontend run: | cd openshift/templates/frontend - oc process -f ./frontend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} + oc process -f ./frontend-bc-docker.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} sleep 5s oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-frontend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-frontend:${{ env.VERSION 
}}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} - deploy: + # deploy: - name: Deploy CTHUB on Dev - runs-on: ubuntu-latest - timeout-minutes: 60 - needs: [set-pre-release, build] + # name: Deploy CTHUB on Dev + # runs-on: ubuntu-latest + # timeout-minutes: 60 + # needs: [set-pre-release, build] - env: - PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} + # env: + # PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} - steps: + # steps: - - name: Checkout Manifest repository - uses: actions/checkout@v4.1.1 - with: - repository: bcgov-c/tenant-gitops-30b186 - ref: main - ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }} + # - name: Checkout Manifest repository + # uses: actions/checkout@v4.1.1 + # with: + # repository: bcgov-c/tenant-gitops-30b186 + # ref: main + # ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }} - - name: Update frontend tag - uses: mikefarah/yq@v4.40.5 - with: - cmd: yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml - - - name: Update backend tag - uses: mikefarah/yq@v4.40.5 - with: - cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml - - - name: GitHub Commit & Push - run: | - git config --global user.email "actions@github.com" - git config --global user.name "GitHub Actions" - git add cthub/values-dev.yaml - git commit -m "update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }}" - git push + # - name: Update frontend tag + # uses: mikefarah/yq@v4.40.5 + # with: + # cmd: yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml + + # - name: Update backend tag + # uses: mikefarah/yq@v4.40.5 + # with: + # cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml + + # - name: GitHub Commit & Push + # run: | + # git config --global user.email "actions@github.com" + # git config --global user.name "GitHub Actions" + # git add cthub/values-dev.yaml + # git commit -m "update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }}" + # git push \ No newline at end of file diff --git a/openshift/templates/frontend/frontend-bc-docker.yaml b/openshift/templates/frontend/frontend-bc-docker.yaml new file mode 100644 index 00000000..7d99def2 --- /dev/null +++ b/openshift/templates/frontend/frontend-bc-docker.yaml @@ -0,0 +1,95 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + creationTimestamp: null + name: frontend +parameters: + - name: NAME + displayName: + description: the module name entered when run yo bcdk:pipeline, which is zeva + required: true + - name: SUFFIX + displayName: + description: sample is -pr-0 + required: true + - name: VERSION + displayName: + description: image tag name for output + required: true + - name: GIT_URL + displayName: + description: cthub repo + required: true + - name: GIT_REF + displayName: + description: cthub branch name of the pr + required: true +objects: +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the client / front end image + labels: + shared: "true" + creationTimestamp: null + name: ${NAME}-frontend + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: "" +- apiVersion: build.openshift.io/v1 + kind: BuildConfig + metadata: + creationTimestamp: null + name: ${NAME}-frontend${SUFFIX} + spec: + failedBuildsHistoryLimit: 5 + nodeSelector: null + output: + to: + kind: 
ImageStreamTag
+        name: ${NAME}-frontend:${VERSION}
+    postCommit: {}
+    resources:
+      limits:
+        cpu: 2000m
+        memory: 4Gi
+      requests:
+        cpu: 500m
+        memory: 2Gi
+    runPolicy: Serial
+    source:
+      git:
+        ref: ${GIT_REF}
+        uri: ${GIT_URL}
+      type: Git
+      contextDir: react
+    strategy:
+      sourceStrategy:
+        from:
+          kind: ImageStreamTag
+          name: nodejs-20:1-34
+          namespace: 30b186-tools
+        forcePull: true
+        noCache: true
+        env:
+          - name: ARTIFACTORY_USER
+            valueFrom:
+              secretKeyRef:
+                name: artifacts-default-idxprm
+                key: username
+          - name: ARTIFACTORY_PASSWORD
+            valueFrom:
+              secretKeyRef:
+                name: artifacts-default-idxprm
+                key: password
+      type: Source
+    successfulBuildsHistoryLimit: 5
+    triggers:
+      - imageChange: {}
+        type: ImageChange
+      - type: ConfigChange
+  status:
+    lastVersion: 0
\ No newline at end of file

diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift
new file mode 100644
index 00000000..2c087f33
--- /dev/null
+++ b/react/Dockerfile-Openshift
@@ -0,0 +1,22 @@
+# Stage 1: Use yarn to build the app
+# FROM artifacts.developer.gov.bc.ca/docker-remote/node:20 as builder
+# WORKDIR /usr/src/app
+# COPY ./ ./
+# RUN npm install -g npm@9.1.1 \
+#     && npm install --omit=dev \
+#     && npm install -D webpack webpack-cli
+# RUN yes | npm run dist
+FROM artifacts.developer.gov.bc.ca/docker-remote/node:20 as builder
+ENV NODE_ENV=production
+WORKDIR /usr/src/app
+COPY ./ ./
+RUN npm install --omit=dev && \
+    npm run build
+
+# Stage 2: Copy the JS React SPA into the Nginx HTML directory
+FROM artifacts.developer.gov.bc.ca/docker-remote/bitnami/nginx:1.24.0
+COPY ./nginx.conf /opt/bitnami/nginx/conf/
+COPY --from=builder /usr/src/app/public/build /app
+EXPOSE 8080
+CMD ["nginx", "-g", "daemon off;"]
+

diff --git a/react/nginx.conf b/react/nginx.conf
new file mode 100644
index 00000000..7895946f
--- /dev/null
+++ b/react/nginx.conf
@@ -0,0 +1,64 @@
+# Based on https://www.nginx.com/resources/wiki/start/topics/examples/full/#nginx-conf
+# user www www; ## Default: nobody
+
+worker_processes auto;
+error_log "/opt/bitnami/nginx/logs/error.log";
+pid "/opt/bitnami/nginx/tmp/nginx.pid";
+
+events {
+  worker_connections 1024;
+}
+
+http {
+  include mime.types;
+  default_type application/octet-stream;
+  log_format main '$remote_addr - $remote_user [$time_local] '
+    '"$request" $status $body_bytes_sent "$http_referer" '
+    '"$http_user_agent" "$http_x_forwarded_for"';
+  access_log "/opt/bitnami/nginx/logs/access.log" main;
+  add_header X-Frame-Options SAMEORIGIN;
+
+  client_body_temp_path "/opt/bitnami/nginx/tmp/client_body" 1 2;
+  proxy_temp_path "/opt/bitnami/nginx/tmp/proxy" 1 2;
+  fastcgi_temp_path "/opt/bitnami/nginx/tmp/fastcgi" 1 2;
+  scgi_temp_path "/opt/bitnami/nginx/tmp/scgi" 1 2;
+  uwsgi_temp_path "/opt/bitnami/nginx/tmp/uwsgi" 1 2;
+
+  sendfile on;
+  tcp_nopush on;
+  tcp_nodelay off;
+  gzip on;
+  gzip_http_version 1.0;
+  gzip_comp_level 2;
+  gzip_proxied any;
+  gzip_types text/plain text/css application/javascript text/xml application/xml+rss;
+  keepalive_timeout 65;
+  ssl_protocols TLSv1.2 TLSv1.3;
+  ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
+  client_max_body_size 80M;
+  server_tokens off;
+
+  absolute_redirect off;
+  port_in_redirect off;
+
+  include "/opt/bitnami/nginx/conf/server_blocks/*.conf";
+
+  # HTTP Server
+  server {
+    # Port to listen on, can also be set in 
IP:PORT format
+    listen 8080;
+
+    include "/opt/bitnami/nginx/conf/bitnami/*.conf";
+
+    location / {
+      try_files $uri /index.html;
+    }
+
+    location /status {
+      stub_status on;
+      access_log off;
+      allow 127.0.0.1;
+      deny all;
+    }
+  }
+}
\ No newline at end of file

From 825c20b6d848d5d8c918f2e8939501f9a32367c6 Mon Sep 17 00:00:00 2001
From: tim738745 <98717409+tim738745@users.noreply.github.com>
Date: Thu, 14 Mar 2024 11:17:47 -0700
Subject: [PATCH 059/152] feat: 210 - do not display "download spreadsheet"
 when no dataset selected (#227)

---
 react/src/uploads/components/UploadPage.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/react/src/uploads/components/UploadPage.js b/react/src/uploads/components/UploadPage.js
index 21b2fccd..bf5240d8 100644
--- a/react/src/uploads/components/UploadPage.js
+++ b/react/src/uploads/components/UploadPage.js
@@ -43,7 +43,7 @@ const UploadPage = (props) => {
         >
           {selectionList}
-          
+          {datasetSelected && }
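
The gist of PATCH 059 above: the "download spreadsheet" control is now gated on a selected dataset. Below is a minimal sketch of that conditional-render pattern; only `datasetSelected` comes from the diff itself, while the component and handler names are illustrative stand-ins for JSX markup elided from the hunk.

    import React from "react";

    // Render the download control only when a dataset has been chosen.
    // `datasetSelected` matches the prop in the hunk above; `onDownload` and
    // the plain <button> are assumptions, not the actual UploadPage markup.
    const DownloadSection = ({ datasetSelected, onDownload }) => (
      <div>
        {datasetSelected && (
          <button type="button" onClick={onDownload}>
            Download spreadsheet
          </button>
        )}
      </div>
    );

    export default DownloadSection;

Because the container initializes `datasetSelected` to an empty string, the falsy check keeps the button hidden until a dataset is picked.
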
From 353f044ee2490a36fb8932528632edd026ece44a Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Thu, 14 Mar 2024 11:32:22 -0700
Subject: [PATCH 060/152] list the build folders

---
 react/Dockerfile-Openshift | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift
index 2c087f33..17d5e18d 100644
--- a/react/Dockerfile-Openshift
+++ b/react/Dockerfile-Openshift
@@ -11,7 +11,10 @@ ENV NODE_ENV=production
 WORKDIR /usr/src/app
 COPY ./ ./
 RUN npm install --omit=dev && \
-    npm run build
+    npm run build && \
+    ls -l && \
+    tree
+
 
 # Stage 2: Copy the JS React SPA into the Nginx HTML directory
 FROM artifacts.developer.gov.bc.ca/docker-remote/bitnami/nginx:1.24.0

From 0a0a870824fbedcc0ad38f6f74580b9e6f670a0f Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Thu, 14 Mar 2024 11:37:23 -0700
Subject: [PATCH 061/152] update to docker strategy

---
 .../templates/frontend/frontend-bc-docker.yaml | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/openshift/templates/frontend/frontend-bc-docker.yaml b/openshift/templates/frontend/frontend-bc-docker.yaml
index 7d99def2..e0b40b9c 100644
--- a/openshift/templates/frontend/frontend-bc-docker.yaml
+++ b/openshift/templates/frontend/frontend-bc-docker.yaml
@@ -67,13 +67,8 @@ objects:
         type: Git
       contextDir: react
     strategy:
-      sourceStrategy:
-        from:
-          kind: ImageStreamTag
-          name: nodejs-20:1-34
-          namespace: 30b186-tools
-        forcePull: true
-        noCache: true
+      dockerStrategy:
+        dockerfilePath: ./Dockerfile-openshift
         env:
           - name: ARTIFACTORY_USER
             valueFrom:
               secretKeyRef:
                 name: artifacts-default-idxprm
                 key: username
@@ -84,8 +79,8 @@ objects:
             valueFrom:
               secretKeyRef:
                 name: artifacts-default-idxprm
-                key: password
-      type: Source
+                key: password
+      type: Docker
     successfulBuildsHistoryLimit: 5
     triggers:
       - imageChange: {}

From daf9116d77131a5388023011458a08bbe2a9fd53 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Thu, 14 Mar 2024 11:42:10 -0700
Subject: [PATCH 062/152] update Dockerfile-Openshift file name

---
 openshift/templates/frontend/frontend-bc-docker.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/openshift/templates/frontend/frontend-bc-docker.yaml b/openshift/templates/frontend/frontend-bc-docker.yaml
index e0b40b9c..8b93b0dc 100644
--- a/openshift/templates/frontend/frontend-bc-docker.yaml
+++ b/openshift/templates/frontend/frontend-bc-docker.yaml
@@ -68,7 +68,7 @@ objects:
       contextDir: react
     strategy:
       dockerStrategy:
-        dockerfilePath: ./Dockerfile-openshift
+        dockerfilePath: ./Dockerfile-Openshift
         env:
           - name: ARTIFACTORY_USER

From 027e565bca653a94427c69a4461553fe7231717e Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Thu, 14 Mar 2024 11:46:17 -0700
Subject: [PATCH 063/152] update docker build

---
 react/Dockerfile-Openshift | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift
index 17d5e18d..e4f3e958 100644
--- a/react/Dockerfile-Openshift
+++ b/react/Dockerfile-Openshift
@@ -11,7 +11,9 @@ ENV NODE_ENV=production
 WORKDIR /usr/src/app
 COPY ./ ./
 RUN npm install --omit=dev && \
-    npm run build && \
+    npm install -D webpack webpack-cli && \
+    yes | npm run build
+RUN pwd && \
     ls -l && \
     tree

From dffb50e5d22c2634957b32b2825f0207ce4ee20e Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Thu, 14 Mar 2024 11:59:26 -0700
Subject: [PATCH 064/152] upgrade npm

---
 react/Dockerfile-Openshift | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift
index e4f3e958..5930ffbc 100644
--- 
a/react/Dockerfile-Openshift
+++ b/react/Dockerfile-Openshift
@@ -10,9 +10,10 @@ FROM artifacts.developer.gov.bc.ca/docker-remote/node:20 as builder
 ENV NODE_ENV=production
 WORKDIR /usr/src/app
 COPY ./ ./
-RUN npm install --omit=dev && \
-    npm install -D webpack webpack-cli && \
-    yes | npm run build
+RUN npm install -g npm@latest
+RUN npm install --omit=dev
+RUN npm install -D webpack-cli
+RUN yes | npm run build
 RUN pwd && \
     ls -l && \
     tree

From 5c84544e7f00da8c96bb0ad96b3bb8e0b8abe520 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Thu, 14 Mar 2024 12:03:42 -0700
Subject: [PATCH 065/152] add yes to install webpack-cli

---
 react/Dockerfile-Openshift | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift
index 5930ffbc..acbd6569 100644
--- a/react/Dockerfile-Openshift
+++ b/react/Dockerfile-Openshift
@@ -12,7 +12,7 @@ WORKDIR /usr/src/app
 COPY ./ ./
 RUN npm install -g npm@latest
 RUN npm install --omit=dev
-RUN npm install -D webpack-cli
+RUN yes | npm install -D webpack-cli
 RUN yes | npm run build
 RUN pwd && \
     ls -l && \

From 96f18faf0307c190935c935baa221112b434f4d1 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Thu, 14 Mar 2024 13:29:04 -0700
Subject: [PATCH 066/152] install webpack-cli globally

---
 react/Dockerfile-Openshift | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift
index acbd6569..c47889d6 100644
--- a/react/Dockerfile-Openshift
+++ b/react/Dockerfile-Openshift
@@ -12,7 +12,7 @@ WORKDIR /usr/src/app
 COPY ./ ./
 RUN npm install -g npm@latest
 RUN npm install --omit=dev
-RUN yes | npm install -D webpack-cli
+RUN yes | npm install -g webpack-cli
 RUN yes | npm run build
 RUN pwd && \
     ls -l && \

From dadcc2f0657262bd6cdf317543e41ec01620a4a2 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Thu, 14 Mar 2024 13:36:51 -0700
Subject: [PATCH 067/152] move webpack to dev dependencies

---
 react/Dockerfile-Openshift | 2 +-
 react/package.json         | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift
index c47889d6..cd67bf7d 100644
--- a/react/Dockerfile-Openshift
+++ b/react/Dockerfile-Openshift
@@ -12,7 +12,7 @@ WORKDIR /usr/src/app
 COPY ./ ./
 RUN npm install -g npm@latest
 RUN npm install --omit=dev
-RUN yes | npm install -g webpack-cli
+RUN yes | npm install -g webpack webpack-cli
 RUN yes | npm run build
 RUN pwd && \
     ls -l && \

diff --git a/react/package.json b/react/package.json
index 32b09dc4..e0a1661c 100644
--- a/react/package.json
+++ b/react/package.json
@@ -23,9 +23,7 @@
     "react-table": "^7.7.0",
     "regenerator-runtime": "^0.13.9",
     "stream-browserify": "^3.0.0",
-    "util": "^0.12.4",
-    "webpack": "^5.60.0",
-    "webpack-dev-server": "^3.11.2"
+    "util": "^0.12.4"
   },
   "devDependencies": {
     "@babel/core": "^7.15.8",
@@ -49,7 +47,9 @@
     "source-map-loader": "^2.0.2",
     "style-loader": "^2.0.0",
     "url-loader": "^4.1.1",
-    "webpack-cli": "^4.9.1"
+    "webpack-cli": "^4.9.1",
+    "webpack": "^5.60.0",
+    "webpack-dev-server": "^3.11.2"
   },
   "scripts": {
     "start": "node start",

From 2ec07d88b223b12788c3123bbcc1cd337f22b43e Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Thu, 14 Mar 2024 13:47:25 -0700
Subject: [PATCH 068/152] try to find modules

---
 react/Dockerfile-Openshift | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift
index cd67bf7d..b9051bb4 100644
--- a/react/Dockerfile-Openshift
+++ b/react/Dockerfile-Openshift
@@ -13,6 +13,10 @@ COPY ./ ./
 RUN 
npm install -g npm@latest RUN npm install --omit=dev RUN yes | npm install -g webpack webpack-cli +RUN pwd && \ + ls -l && \ + ls -l node_modules && \ + ls -l /usr/local/lib/node_modules RUN yes | npm run build RUN pwd && \ ls -l && \ From a1e9a3bd324ba99e86dd188a9a28586a3d0c9b02 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 14 Mar 2024 13:54:44 -0700 Subject: [PATCH 069/152] install webpack as dev dependency --- react/Dockerfile-Openshift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift index b9051bb4..cce2c09c 100644 --- a/react/Dockerfile-Openshift +++ b/react/Dockerfile-Openshift @@ -12,7 +12,7 @@ WORKDIR /usr/src/app COPY ./ ./ RUN npm install -g npm@latest RUN npm install --omit=dev -RUN yes | npm install -g webpack webpack-cli +RUN yes | npm install -D webpack webpack-cli RUN pwd && \ ls -l && \ ls -l node_modules && \ From b3a9428007fbd1c9a01156b18bc2189af921b612 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 14 Mar 2024 14:00:07 -0700 Subject: [PATCH 070/152] update NODE_PATH --- react/Dockerfile-Openshift | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift index cce2c09c..7216f682 100644 --- a/react/Dockerfile-Openshift +++ b/react/Dockerfile-Openshift @@ -12,7 +12,9 @@ WORKDIR /usr/src/app COPY ./ ./ RUN npm install -g npm@latest RUN npm install --omit=dev -RUN yes | npm install -D webpack webpack-cli +RUN yes | npm install -g webpack webpack-cli +RUN echo $NODE_PATH +RUN export NODE_PATH="/usr/local/lib/node_modules:$NODE_PATH" RUN pwd && \ ls -l && \ ls -l node_modules && \ From 710d004addffabf0685665ab08c1c187ec26876e Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 14 Mar 2024 14:05:52 -0700 Subject: [PATCH 071/152] set ENV NODE_PATH --- react/Dockerfile-Openshift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift index 7216f682..1aa71843 100644 --- a/react/Dockerfile-Openshift +++ b/react/Dockerfile-Openshift @@ -8,13 +8,13 @@ # RUN yes | npm run dist FROM artifacts.developer.gov.bc.ca/docker-remote/node:20 as builder ENV NODE_ENV=production +ENV NODE_PATH=/usr/local/lib/node_modules:/usr/src/app/node_modules WORKDIR /usr/src/app COPY ./ ./ RUN npm install -g npm@latest RUN npm install --omit=dev RUN yes | npm install -g webpack webpack-cli RUN echo $NODE_PATH -RUN export NODE_PATH="/usr/local/lib/node_modules:$NODE_PATH" RUN pwd && \ ls -l && \ ls -l node_modules && \ From eb71f73ace404b3092fd39c6e91299bbb6500c7d Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 14 Mar 2024 14:13:27 -0700 Subject: [PATCH 072/152] install copy-webpack-plugin --- react/Dockerfile-Openshift | 2 +- react/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift index 1aa71843..f4aba0c9 100644 --- a/react/Dockerfile-Openshift +++ b/react/Dockerfile-Openshift @@ -13,7 +13,7 @@ WORKDIR /usr/src/app COPY ./ ./ RUN npm install -g npm@latest RUN npm install --omit=dev -RUN yes | npm install -g webpack webpack-cli +RUN yes | npm install -g webpack webpack-cli copy-webpack-plugin RUN echo $NODE_PATH RUN pwd && \ ls -l && \ diff --git a/react/package.json b/react/package.json index e0a1661c..44edfd37 100644 --- a/react/package.json +++ b/react/package.json @@ -49,7 +49,7 @@ "url-loader": "^4.1.1", "webpack-cli": "^4.9.1", "webpack": "^5.60.0", - "webpack-dev-server": "^3.11.2" + 
"webpack-dev-server": "^3.11.2" }, "scripts": { "start": "node start", From aa37e7cd7ea7d5b2b8a82e50d9c3034bd80f1d1d Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 14 Mar 2024 14:21:15 -0700 Subject: [PATCH 073/152] install all modules --- react/Dockerfile-Openshift | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift index f4aba0c9..4ad4d28d 100644 --- a/react/Dockerfile-Openshift +++ b/react/Dockerfile-Openshift @@ -8,22 +8,20 @@ # RUN yes | npm run dist FROM artifacts.developer.gov.bc.ca/docker-remote/node:20 as builder ENV NODE_ENV=production -ENV NODE_PATH=/usr/local/lib/node_modules:/usr/src/app/node_modules +# ENV NODE_PATH=/usr/local/lib/node_modules:/usr/src/app/node_modules WORKDIR /usr/src/app COPY ./ ./ RUN npm install -g npm@latest -RUN npm install --omit=dev -RUN yes | npm install -g webpack webpack-cli copy-webpack-plugin +RUN npm install +# RUN yes | npm install -g webpack webpack-cli copy-webpack-plugin RUN echo $NODE_PATH RUN pwd && \ ls -l && \ - ls -l node_modules && \ - ls -l /usr/local/lib/node_modules + ls -l node_modules + # ls -l /usr/local/lib/node_modules RUN yes | npm run build RUN pwd && \ - ls -l && \ - tree - + ls -l # Stage 2: Copy the JS React SPA into the Nginx HTML directory FROM artifacts.developer.gov.bc.ca/docker-remote/bitnami/nginx:1.24.0 From 99fa3aaa2b96d1aac09447a4df06391dbdcdfc3e Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 14 Mar 2024 14:28:55 -0700 Subject: [PATCH 074/152] install all dev dependencies with -g --- react/Dockerfile-Openshift | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift index 4ad4d28d..0d7785cc 100644 --- a/react/Dockerfile-Openshift +++ b/react/Dockerfile-Openshift @@ -8,17 +8,17 @@ # RUN yes | npm run dist FROM artifacts.developer.gov.bc.ca/docker-remote/node:20 as builder ENV NODE_ENV=production -# ENV NODE_PATH=/usr/local/lib/node_modules:/usr/src/app/node_modules +ENV NODE_PATH=/usr/local/lib/node_modules:/usr/src/app/node_modules WORKDIR /usr/src/app COPY ./ ./ RUN npm install -g npm@latest RUN npm install -# RUN yes | npm install -g webpack webpack-cli copy-webpack-plugin +RUN yes | npm install -g --only=dev RUN echo $NODE_PATH RUN pwd && \ ls -l && \ - ls -l node_modules - # ls -l /usr/local/lib/node_modules + ls -l node_modules && \ + ls -l /usr/local/lib/node_modules RUN yes | npm run build RUN pwd && \ ls -l From 5c00681a8db1328f2be9cb64aac878c5bee933f4 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 14 Mar 2024 14:34:58 -0700 Subject: [PATCH 075/152] move NODE_ENV right before run build --- react/Dockerfile-Openshift | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift index 0d7785cc..cf7fb9b3 100644 --- a/react/Dockerfile-Openshift +++ b/react/Dockerfile-Openshift @@ -7,18 +7,18 @@ # && npm install -D webpack webpack-cli # RUN yes | npm run dist FROM artifacts.developer.gov.bc.ca/docker-remote/node:20 as builder -ENV NODE_ENV=production -ENV NODE_PATH=/usr/local/lib/node_modules:/usr/src/app/node_modules WORKDIR /usr/src/app COPY ./ ./ RUN npm install -g npm@latest RUN npm install -RUN yes | npm install -g --only=dev +# RUN yes | npm install -g --only=dev RUN echo $NODE_PATH RUN pwd && \ ls -l && \ ls -l node_modules && \ ls -l /usr/local/lib/node_modules +ENV NODE_ENV=production +ENV NODE_PATH=/usr/local/lib/node_modules:/usr/src/app/node_modules RUN yes | npm run 
build RUN pwd && \ ls -l From 3bd731f695b4ea70416a22081aea29d5865ae7a9 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 14 Mar 2024 14:46:13 -0700 Subject: [PATCH 076/152] copy the right folder --- react/Dockerfile-Openshift | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift index cf7fb9b3..4cd9406c 100644 --- a/react/Dockerfile-Openshift +++ b/react/Dockerfile-Openshift @@ -11,22 +11,16 @@ WORKDIR /usr/src/app COPY ./ ./ RUN npm install -g npm@latest RUN npm install -# RUN yes | npm install -g --only=dev -RUN echo $NODE_PATH RUN pwd && \ ls -l && \ - ls -l node_modules && \ - ls -l /usr/local/lib/node_modules -ENV NODE_ENV=production -ENV NODE_PATH=/usr/local/lib/node_modules:/usr/src/app/node_modules -RUN yes | npm run build -RUN pwd && \ - ls -l + ls -l node_modules +RUN NODE_ENV=production | npm run build # Stage 2: Copy the JS React SPA into the Nginx HTML directory FROM artifacts.developer.gov.bc.ca/docker-remote/bitnami/nginx:1.24.0 COPY ./nginx.conf /opt/bitnami/nginx/conf/ -COPY --from=builder /usr/src/app/public/build /app +COPY --from=builder /usr/src/app/public /app +RUN ls -l /app EXPOSE 8080 CMD ["nginx", "-g", "daemon off;"] From c8a6b6a1b48a3cc2d0a40717e38790927a5cf843 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 14 Mar 2024 14:51:18 -0700 Subject: [PATCH 077/152] remove tekton --- .tekton/README.md | 7 -- .tekton/build-cthub-pr.yaml | 27 ------- .tekton/build-cthub.yaml | 74 ------------------- .tekton/cat-branch-readme.yaml | 76 ------------------- .tekton/cthub-trigger-tb.yaml | 12 --- .tekton/cthub-trigger-tt.yaml | 41 ----------- .tekton/cthub-trigger.yaml | 12 --- .tekton/deploy-cthub.yaml | 59 --------------- .tekton/deployment/frontend-dc.yaml | 106 --------------------------- .tekton/persistent_volume_claim.yaml | 10 --- .tekton/shared-data.yaml | 10 --- .tekton/tasks/apply-manifests.yaml | 25 ------- .tekton/tasks/fetch-repository.yaml | 37 ---------- 13 files changed, 496 deletions(-) delete mode 100644 .tekton/README.md delete mode 100644 .tekton/build-cthub-pr.yaml delete mode 100644 .tekton/build-cthub.yaml delete mode 100644 .tekton/cat-branch-readme.yaml delete mode 100644 .tekton/cthub-trigger-tb.yaml delete mode 100644 .tekton/cthub-trigger-tt.yaml delete mode 100644 .tekton/cthub-trigger.yaml delete mode 100644 .tekton/deploy-cthub.yaml delete mode 100644 .tekton/deployment/frontend-dc.yaml delete mode 100644 .tekton/persistent_volume_claim.yaml delete mode 100644 .tekton/shared-data.yaml delete mode 100644 .tekton/tasks/apply-manifests.yaml delete mode 100644 .tekton/tasks/fetch-repository.yaml diff --git a/.tekton/README.md b/.tekton/README.md deleted file mode 100644 index feb211f2..00000000 --- a/.tekton/README.md +++ /dev/null @@ -1,7 +0,0 @@ -tkn pipeline start build-cthub \ --w name=shared-data,volumeClaimTemplateFile=./shared-data.yaml \ --p repo-url=https://github.com/bcgov/cthub.git \ --p branch=tekton-0.1.0 \ --p frontend-image=image-registry.openshift-image-registry.svc:5000/30b186-tools/cthub-frontend:frontendtekton \ --p backend-image=image-registry.openshift-image-registry.svc:5000/30b186-tools/cthub-backend:backendtekton \ ---use-param-defaults \ No newline at end of file diff --git a/.tekton/build-cthub-pr.yaml b/.tekton/build-cthub-pr.yaml deleted file mode 100644 index 1711048d..00000000 --- a/.tekton/build-cthub-pr.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: tekton.dev/v1beta1 -kind: PipelineRun -metadata: - name: build-cthub-pr -spec: - 
pipelineRef: - name: build-cthub - params: - - name: repo-url - value: 'https://github.com/bcgov/cthub.git' - - name: branch - value: tekton3-0.1.0 - - name: frontend-image - value: >- - image-registry.openshift-image-registry.svc:5000/30b186-tools/cthub-frontend:frontendtekton - - name: backend-image - value: >- - image-registry.openshift-image-registry.svc:5000/30b186-tools/cthub-backend:backendtekton - workspaces: - - name: shared-data - volumeClaimTemplate: - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 500Mi \ No newline at end of file diff --git a/.tekton/build-cthub.yaml b/.tekton/build-cthub.yaml deleted file mode 100644 index b63a7ef7..00000000 --- a/.tekton/build-cthub.yaml +++ /dev/null @@ -1,74 +0,0 @@ -apiVersion: tekton.dev/v1beta1 -kind: Pipeline -metadata: - name: build-cthub - namespace: 30b186-tools -spec: - params: - - description: the git repo url - name: repo-url - type: string - - description: the git branch name - name: branch - type: string - - description: where the frontend image push to - name: frontend-image - type: string - - description: where the backend image push to - name: backend-image - type: string - tasks: - - name: git-clone - params: - - name: url - value: $(params.repo-url) - - name: revision - value: $(params.branch) - taskRef: - kind: ClusterTask - name: git-clone - workspaces: - - name: output - workspace: shared-data - - name: show-dir - runAfter: - - git-clone - taskRef: - kind: Task - name: show-dir - workspaces: - - name: source - workspace: shared-data - # - name: build-backend - # params: - # - name: PATH_CONTEXT - # value: /workspace/source/django - # - name: IMAGE - # value: $(params.backend-image) - # runAfter: - # - show-dir - # taskRef: - # kind: ClusterTask - # name: s2i-python - # workspaces: - # - name: source - # workspace: shared-data - # - name: build-frontend - # params: - # - name: PATH_CONTEXT - # value: /workspace/source/react - # - name: IMAGE - # value: $(params.frontend-image) - # runAfter: - # - show-dir - # taskRef: - # kind: ClusterTask - # name: s2i-nodejs - # workspaces: - # - name: source - # workspace: shared-data - workspaces: - - description: | - This workspace will receive the cloned git repo and be passed - to the next Task for the repo's README.md file to be read. - name: shared-data diff --git a/.tekton/cat-branch-readme.yaml b/.tekton/cat-branch-readme.yaml deleted file mode 100644 index acd9b9ba..00000000 --- a/.tekton/cat-branch-readme.yaml +++ /dev/null @@ -1,76 +0,0 @@ -apiVersion: tekton.dev/v1beta1 -kind: Pipeline -metadata: - name: cat-branch-readme -spec: - description: | - cat-branch-readme takes a git repository and a branch name and - prints the README.md file from that branch. This is an example - Pipeline demonstrating the following: - - Using the git-clone catalog Task to clone a branch - - Passing a cloned repo to subsequent Tasks using a Workspace. - - Ordering Tasks in a Pipeline using "runAfter" so that - git-clone completes before we try to read from the Workspace. - - Using a volumeClaimTemplate Volume as a Workspace. - - Avoiding hard-coded paths by using a Workspace's path - variable instead. - params: - - name: repo-url - type: string - description: The git repository URL to clone from. - - name: branch-name - type: string - description: The git branch to clone. - workspaces: - - name: shared-data - description: | - This workspace will receive the cloned git repo and be passed - to the next Task for the repo's README.md file to be read. 
- tasks: - - name: fetch-repo - taskRef: - kind: ClusterTask - name: git-clone - workspaces: - - name: output - workspace: shared-data - params: - - name: url - value: $(params.repo-url) - - name: revision - value: $(params.branch-name) - - name: cat-readme - runAfter: ["fetch-repo"] # Wait until the clone is done before reading the readme. - workspaces: - - name: source - workspace: shared-data - taskSpec: - workspaces: - - name: source - steps: - - image: zshusers/zsh:4.3.15 - script: | - #!/usr/bin/env zsh - cat $(workspaces.source.path)/README.md ---- -apiVersion: tekton.dev/v1beta1 -kind: PipelineRun -metadata: - name: git-clone-checking-out-a-branch -spec: - pipelineRef: - name: cat-branch-readme - workspaces: - - name: shared-data - volumeClaimTemplate: - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - params: - - name: repo-url - value: https://github.com/bcgov/cthub.git - - name: branch-name - value: main \ No newline at end of file diff --git a/.tekton/cthub-trigger-tb.yaml b/.tekton/cthub-trigger-tb.yaml deleted file mode 100644 index 914d1a8a..00000000 --- a/.tekton/cthub-trigger-tb.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: triggers.tekton.dev/v1beta1 -kind: TriggerBinding -metadata: - name: cthub-trigger-tb -spec: - params: - - name: repo-url - value: $(body.repository.url) - - name: branch - value: $(body.head_commit.id) - - name: repo-name - value: $(body.repository.name) diff --git a/.tekton/cthub-trigger-tt.yaml b/.tekton/cthub-trigger-tt.yaml deleted file mode 100644 index 1ef7e5e9..00000000 --- a/.tekton/cthub-trigger-tt.yaml +++ /dev/null @@ -1,41 +0,0 @@ -apiVersion: triggers.tekton.dev/v1alpha1 -kind: TriggerTemplate -metadata: - name: cthub-trigger-tt -spec: - params: - - description: the git repo url - name: repo-url - - description: the git branch name - name: branch - - description: the git repo url - name: repo-name - resourcetemplates: - - apiVersion: tekton.dev/v1beta1 - kind: PipelineRun - metadata: - generateName: build-cthub-$(tt.params.repo-name)- - spec: - serviceAccountName: pipeline - pipelineRef: - name: build-cthub - params: - - name: repo-url - value: $(tt.params.repo-url) - - name: branch - value: $(tt.params.branch) - - name: frontend-image - value: >- - image-registry.openshift-image-registry.svc:5000/30b186-tools/cthub-frontend:frontendtekton - - name: backend-image - value: >- - image-registry.openshift-image-registry.svc:5000/30b186-tools/cthub-backend:backendtekton - workspaces: - - name: shared-data - volumeClaimTemplate: - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 500Mi \ No newline at end of file diff --git a/.tekton/cthub-trigger.yaml b/.tekton/cthub-trigger.yaml deleted file mode 100644 index 914d1a8a..00000000 --- a/.tekton/cthub-trigger.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: triggers.tekton.dev/v1beta1 -kind: TriggerBinding -metadata: - name: cthub-trigger-tb -spec: - params: - - name: repo-url - value: $(body.repository.url) - - name: branch - value: $(body.head_commit.id) - - name: repo-name - value: $(body.repository.name) diff --git a/.tekton/deploy-cthub.yaml b/.tekton/deploy-cthub.yaml deleted file mode 100644 index bdc7e13d..00000000 --- a/.tekton/deploy-cthub.yaml +++ /dev/null @@ -1,59 +0,0 @@ -apiVersion: tekton.dev/v1beta1 -kind: Pipeline -metadata: - name: deploy-cthub - namespace: 30b186-tools -spec: - params: - - description: the git repo url, https://github.com/bcgov/cthub.git - name: repo-url - type: string - default: 
https://github.com/bcgov/cthub.git - - description: the git branch name - name: branch - type: string - default: tekton-0.1.0 - - description: where the frontend image push to - name: frontend-image - type: string - default: 'image-registry.openshift-image-registry.svc:5000/30b186-tools/cthub-frontend:frontendtekton' - - description: where the backend image push to - name: backend-image - type: string - default: 'image-registry.openshift-image-registry.svc:5000/30b186-tools/cthub-backend:backendtekton' - - description: The namespace to be deployed on - name: namespace - type: string - default: 30b186-dev - tasks: - - name: git-clone - params: - - name: url - value: $(params.repo-url) - - name: revision - value: $(params.branch) - taskRef: - kind: ClusterTask - name: git-clone - workspaces: - - name: output - workspace: shared-data - - name: apply-manifests - runAfter: - - git-clone - params: - - name: manifest_dir - value: .tekton/deployment/frontend-dc.yaml - - name: namespace - value: $(params.namespace) - taskRef: - kind: Task - name: apply-manifests - workspaces: - - name: source - workspace: shared-data - workspaces: - - description: | - This workspace will receive the cloned git repo and be passed - to the next Task for the repo's README.md file to be read. - name: shared-data diff --git a/.tekton/deployment/frontend-dc.yaml b/.tekton/deployment/frontend-dc.yaml deleted file mode 100644 index 517b3919..00000000 --- a/.tekton/deployment/frontend-dc.yaml +++ /dev/null @@ -1,106 +0,0 @@ -apiVersion: apps.openshift.io/v1 -kind: DeploymentConfig -metadata: - name: cthub-frontend-tekton - annotations: - description: Defines how to deploy the frontend application - creationTimestamp: null -parameters: - - name: FRONTEND_IMAGE_TAG - required: true -spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - name: cthub-frontend-tekton - strategy: - activeDeadlineSeconds: 21600 - recreateParams: - timeoutSeconds: 600 - resources: {} - type: Recreate - template: - metadata: - creationTimestamp: null - labels: - name: cthub-frontend-tekton - spec: - containers: - - name: frontend - env: - - name: API_BASE - value: "https://cthub-backend-tekton.apps.silver.devops.gov.bc.ca" - - name: ENABLE_KEYCLOAK - value: "true" - - name: KEYCLOAK_CLIENT_ID - valueFrom: - secretKeyRef: - name: cthub-keycloak - key: KEYCLOAK_CLIENT_ID - - name: KEYCLOAK_REALM - valueFrom: - secretKeyRef: - name: cthub-keycloak - key: KEYCLOAK_REALM - - name: KEYCLOAK_URL - valueFrom: - secretKeyRef: - name: cthub-keycloak - key: KEYCLOAK_URL - image: - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 10 - initialDelaySeconds: 50 - periodSeconds: 10 - successThreshold: 1 - tcpSocket: - port: 3000 - timeoutSeconds: 3 - ports: - - containerPort: 3000 - protocol: TCP - - containerPort: 5002 - protocol: TCP - readinessProbe: - failureThreshold: 10 - initialDelaySeconds: 40 - periodSeconds: 10 - successThreshold: 1 - tcpSocket: - port: 3000 - timeoutSeconds: 3 - resources: - requests: - cpu: 100m - memory: 300Mi - limits: - cpu: 300m - memory: 400Mi - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - terminationGracePeriodSeconds: 30 - test: false - triggers: - - imageChangeParams: - automatic: true - containerNames: - - frontend - from: - kind: ImageStreamTag - namspace: 30b186-tools - name: cthub-frontend:${FRONTEND_IMAGE_TAG} - lastTriggeredImage: - type: ImageChange - - type: 
ConfigChange -status: - availableReplicas: 0 - latestVersion: 0 - observedGeneration: 0 - replicas: 0 - unavailableReplicas: 0 - updatedReplicas: 0 diff --git a/.tekton/persistent_volume_claim.yaml b/.tekton/persistent_volume_claim.yaml deleted file mode 100644 index 88de4df9..00000000 --- a/.tekton/persistent_volume_claim.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: source-pvc -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 500Mi \ No newline at end of file diff --git a/.tekton/shared-data.yaml b/.tekton/shared-data.yaml deleted file mode 100644 index e464c99f..00000000 --- a/.tekton/shared-data.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: shared-data -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi \ No newline at end of file diff --git a/.tekton/tasks/apply-manifests.yaml b/.tekton/tasks/apply-manifests.yaml deleted file mode 100644 index 20390ba8..00000000 --- a/.tekton/tasks/apply-manifests.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: tekton.dev/v1beta1 -kind: Task -metadata: - name: apply-manifests -spec: - workspaces: - - name: source - params: - - name: manifest_dir - description: The directory in source that contains yaml manifests - type: string - default: "k8s" - - name: namespace - description: The namespace - type: string - steps: - - name: apply - image: image-registry.openshift-image-registry.svc:5000/openshift/cli:latest - workingDir: /workspace/source - command: ["/bin/bash", "-c"] - args: - - |- - echo Applying manifests in $(inputs.params.manifest_dir) directory - oc apply -f $(inputs.params.manifest_dir) -n $(inputs.params.namespace) - echo ----------------------------------- \ No newline at end of file diff --git a/.tekton/tasks/fetch-repository.yaml b/.tekton/tasks/fetch-repository.yaml deleted file mode 100644 index 54669257..00000000 --- a/.tekton/tasks/fetch-repository.yaml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: tekton.dev/v1beta1 -kind: Task -metadata: - creationTimestamp: '2021-10-09T23:45:57Z' - name: fetch_repository - namespace: 30b186-tools -spec: - params: - - description: url of the git repo for the code of deployment - name: git-url - type: string - - default: master - description: revision to be used from repo of the code for deployment - name: git-revision - type: string - - description: image to be build from the code - name: IMAGE - type: string - tasks: - - name: fetch-repository - params: - - name: url - value: $(params.git-url) - - name: subdirectory - value: '' - - name: deleteExisting - value: 'true' - - name: revision - value: $(params.git-revision) - - name: sslVerify - value: 'false' - taskRef: - kind: ClusterTask - name: git-clone - workspaces: - - name: output - workspace: shared-workspace \ No newline at end of file From 2d2465cbb876a30d2315ce1f33cb74b01483672a Mon Sep 17 00:00:00 2001 From: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com> Date: Fri, 15 Mar 2024 13:10:54 -0700 Subject: [PATCH 078/152] Backend build 0.2.0 (#251) --- .github/workflows/dev-ci.yaml | 14 +- django/.s2i/bin/assemble | 137 -------------------- django/.s2i/environment | 1 - openshift/templates/backend/backend-bc.yaml | 16 +-- react/.s2i/bin/assemble | 128 ------------------ 5 files changed, 8 insertions(+), 288 deletions(-) delete mode 100755 django/.s2i/bin/assemble delete mode 100644 django/.s2i/environment delete mode 100644 react/.s2i/bin/assemble diff --git 
a/.github/workflows/dev-ci.yaml b/.github/workflows/dev-ci.yaml index 8c76132c..fceba5ba 100644 --- a/.github/workflows/dev-ci.yaml +++ b/.github/workflows/dev-ci.yaml @@ -57,13 +57,13 @@ jobs: insecure_skip_tls_verify: true namespace: ${{ env.TOOLS_NAMESPACE }} - # - name: Build CTHUB Backend - # run: | - # cd openshift/templates/backend - # oc process -f ./backend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} - # sleep 5s - # oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 - # oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} + - name: Build CTHUB Backend + run: | + cd openshift/templates/backend + oc process -f ./backend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} + sleep 5s + oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 + oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} - name: Build CTHUB Frontend run: | diff --git a/django/.s2i/bin/assemble b/django/.s2i/bin/assemble deleted file mode 100755 index f1354418..00000000 --- a/django/.s2i/bin/assemble +++ /dev/null @@ -1,137 +0,0 @@ -#!/bin/bash - -function is_django_installed() { - python -c "import django" &>/dev/null -} - -function should_collectstatic() { - is_django_installed && [[ -z "$DISABLE_COLLECTSTATIC" ]] -} - -function virtualenv_bin() { - # New versions of Python (>3.6) should use venv module - # from stdlib instead of virtualenv package - python3.9 -m venv $1 -} - -# Install pipenv or micropipenv to the separate virtualenv to isolate it -# from system Python packages and packages in the main -# virtualenv. Executable is simlinked into ~/.local/bin -# to be accessible. This approach is inspired by pipsi -# (pip script installer). -function install_tool() { - echo "---> Installing $1 packaging tool ..." - VENV_DIR=$HOME/.local/venvs/$1 - virtualenv_bin "$VENV_DIR" - # First, try to install the tool without --isolated which means that if you - # have your own PyPI mirror, it will take it from there. If this try fails, try it - # again with --isolated which ignores external pip settings (env vars, config file) - # and installs the tool from PyPI (needs internet connetion). - # $1$2 combines package name with [extras] or version specifier if is defined as $2``` - if ! $VENV_DIR/bin/pip install -U $1$2; then - echo "WARNING: Installation of $1 failed, trying again from official PyPI with pip --isolated install" - $VENV_DIR/bin/pip install --isolated -U $1$2 # Combines package name with [extras] or version specifier if is defined as $2``` - fi - mkdir -p $HOME/.local/bin - ln -s $VENV_DIR/bin/$1 $HOME/.local/bin/$1 -} - -set -e - -# First of all, check that we don't have disallowed combination of ENVs -if [[ ! -z "$ENABLE_PIPENV" && ! 
-z "$ENABLE_MICROPIPENV" ]]; then - echo "ERROR: Pipenv and micropipenv cannot be enabled at the same time!" - # podman/buildah does not relay this exit code but it will be fixed hopefuly - # https://github.com/containers/buildah/issues/2305 - exit 3 -fi - -shopt -s dotglob -echo "---> Installing application source ..." -mv /tmp/src/* "$HOME" - -# set permissions for any installed artifacts -fix-permissions /opt/app-root -P - -# We have to first upgrade pip to at least 19.3 because: -# * pip < 9 does not support different packages' versions for Python 2/3 -# * pip < 19.3 does not support manylinux2014 wheels. Only manylinux2014 wheels -# support platforms like ppc64le, aarch64 or armv7 -echo "---> Upgrading pip to version 19.3.1 ..." -if ! pip install -U "pip==19.3.1"; then - echo "WARNING: Installation of 'pip==19.3.1' failed, trying again from official PyPI with pip --isolated install" - pip install --isolated -U "pip==19.3.1" -fi - -if [[ ! -z "$UPGRADE_PIP_TO_LATEST" ]]; then - echo "---> Upgrading pip to latest version ..." - if ! pip install -U pip setuptools wheel; then - echo "WARNING: Installation of the latest pip,setuptools and wheel failed, trying again from official PyPI with pip --isolated install" - pip install --isolated -U pip setuptools wheel - fi -fi - -if [[ ! -z "$ENABLE_PIPENV" ]]; then - if [[ ! -z "$PIN_PIPENV_VERSION" ]]; then - # Add == as a prefix to pipenv version, if defined - PIN_PIPENV_VERSION="==$PIN_PIPENV_VERSION" - fi - install_tool "pipenv" "$PIN_PIPENV_VERSION" - echo "---> Installing dependencies via pipenv ..." - if [[ -f Pipfile ]]; then - pipenv install --deploy - elif [[ -f requirements.txt ]]; then - pipenv install -r requirements.txt - fi - # pipenv check -elif [[ ! -z "$ENABLE_MICROPIPENV" ]]; then - install_tool "micropipenv" "[toml]" - echo "---> Installing dependencies via micropipenv ..." - # micropipenv detects Pipfile.lock and requirements.txt in this order - micropipenv install --deploy -elif [[ -f requirements.txt ]]; then - if [[ -z "${ARTIFACTORY_USER}" ]]; then - echo "---> Installing dependencies from external repo ..." - pip install -r requirements.txt - else - echo "---> Installing dependencies from artifactory ..." - pip install -i https://$ARTIFACTORY_USER:$ARTIFACTORY_PASSWORD@artifacts.developer.gov.bc.ca/artifactory/api/pypi/pypi-remote/simple -r requirements.txt - fi -fi - -if [[ -f setup.py && -z "$DISABLE_SETUP_PY_PROCESSING" ]]; then - echo "---> Installing application ..." - pip install . -fi - -if should_collectstatic; then - ( - echo "---> Collecting Django static files ..." - - APP_HOME=$(readlink -f "${APP_HOME:-.}") - # Change the working directory to APP_HOME - PYTHONPATH="$(pwd)${PYTHONPATH:+:$PYTHONPATH}" - cd "$APP_HOME" - - # Look for 'manage.py' in the current directory - manage_file=./manage.py - - if [[ ! -f "$manage_file" ]]; then - echo "WARNING: seems that you're using Django, but we could not find a 'manage.py' file." - echo "'manage.py collectstatic' ignored." - exit - fi - - if ! python $manage_file collectstatic --dry-run --noinput &> /dev/null; then - echo "WARNING: could not run 'manage.py collectstatic'. To debug, run:" - echo " $ python $manage_file collectstatic --noinput" - echo "Ignore this warning if you're not serving static files with Django." 
- exit - fi - - python $manage_file collectstatic --noinput - ) -fi - -# set permissions for any installed artifacts -fix-permissions /opt/app-root -P \ No newline at end of file diff --git a/django/.s2i/environment b/django/.s2i/environment deleted file mode 100644 index 18f3b046..00000000 --- a/django/.s2i/environment +++ /dev/null @@ -1 +0,0 @@ -DISABLE_MIGRATE=1 diff --git a/openshift/templates/backend/backend-bc.yaml b/openshift/templates/backend/backend-bc.yaml index b2f2d07d..5299ee8b 100644 --- a/openshift/templates/backend/backend-bc.yaml +++ b/openshift/templates/backend/backend-bc.yaml @@ -25,20 +25,6 @@ parameters: description: cthub branch name of the pr required: true objects: - - apiVersion: image.openshift.io/v1 - kind: ImageStream - metadata: - annotations: - description: Keeps track of changes in the backend image - creationTimestamp: null - name: ${NAME}-python - labels: - shared: "true" - spec: - lookupPolicy: - local: false - status: - dockerImageRepository: "" - apiVersion: image.openshift.io/v1 kind: ImageStream metadata: @@ -100,7 +86,7 @@ objects: value: 'true' from: kind: ImageStreamTag - name: python-39:1-18.1634036280 + name: python-311:1-52 forcePull: true noCache: true type: Source diff --git a/react/.s2i/bin/assemble b/react/.s2i/bin/assemble deleted file mode 100644 index 87c16bb1..00000000 --- a/react/.s2i/bin/assemble +++ /dev/null @@ -1,128 +0,0 @@ -#!/bin/bash - -# Prevent running assemble in builders different than official STI image. -# The official nodejs:8-onbuild already run npm install and use different -# application folder. -[ -d "/usr/src/app" ] && exit 0 - -set -e - -# FIXME: Linking of global modules is disabled for now as it causes npm failures -# under RHEL7 -# Global modules good to have -# npmgl=$(grep "^\s*[^#\s]" ../etc/npm_global_module_list | sort -u) -# Available global modules; only match top-level npm packages -#global_modules=$(npm ls -g 2> /dev/null | perl -ne 'print "$1\n" if /^\S+\s(\S+)\@[\d\.-]+/' | sort -u) -# List all modules in common -#module_list=$(/usr/bin/comm -12 <(echo "${global_modules}") | tr '\n' ' ') -# Link the modules -#npm link $module_list - -safeLogging () { - if [[ $1 =~ http[s]?://.*@.*$ ]]; then - echo $1 | sed 's/^.*@/redacted@/' - else - echo $1 - fi -} - -shopt -s dotglob -if [ -d /tmp/artifacts ] && [ "$(ls /tmp/artifacts/ 2>/dev/null)" ]; then - echo "---> Restoring previous build artifacts ..." - mv -T --verbose /tmp/artifacts/node_modules "${HOME}/node_modules" -fi - -echo "---> Installing application source ..." -mv /tmp/src/* ./ - -# Fix source directory permissions -fix-permissions ./ - -if [ ! -z $HTTP_PROXY ]; then - echo "---> Setting npm http proxy to" $(safeLogging $HTTP_PROXY) - npm config set proxy $HTTP_PROXY -fi - -if [ ! -z $http_proxy ]; then - echo "---> Setting npm http proxy to" $(safeLogging $http_proxy) - npm config set proxy $http_proxy -fi - -if [ ! -z $HTTPS_PROXY ]; then - echo "---> Setting npm https proxy to" $(safeLogging $HTTPS_PROXY) - npm config set https-proxy $HTTPS_PROXY -fi - -if [ ! -z $https_proxy ]; then - echo "---> Setting npm https proxy to" $(safeLogging $https_proxy) - npm config set https-proxy $https_proxy -fi - -# Change the npm registry mirror if provided -if [ -n "$NPM_MIRROR" ]; then - npm config set registry $NPM_MIRROR -fi - -# Set the DEV_MODE to false by default. -if [ -z "$DEV_MODE" ]; then - export DEV_MODE=false -fi - -# If NODE_ENV is not set by the user, then NODE_ENV is determined by whether -# the container is run in development mode. 
-if [ -z "$NODE_ENV" ]; then - if [ "$DEV_MODE" == true ]; then - export NODE_ENV=development - else - export NODE_ENV=production - fi -fi - -if [ "$NODE_ENV" != "production" ]; then - - echo "---> Building your Node application from source" - npm install - -else - - echo "---> Have to set DEV_MODE and NODE_ENV to empty otherwise the deployment can not be started" - echo "---> It'll have error like can not resolve source-map-loader..." - export DEV_MODE="" - export NODE_ENV="" - - if [[ -z "${ARTIFACTORY_USER}" ]]; then - echo "---> Installing all dependencies from external repo" - else - echo "---> Installing all dependencies from Artifactory" - npm config set registry https://artifacts.developer.gov.bc.ca/artifactory/api/npm/npm-remote/ - curl -u $ARTIFACTORY_USER:$ARTIFACTORY_PASSWORD https://artifacts.developer.gov.bc.ca/artifactory/api/npm/auth >> ~/.npmrc - fi - - NODE_ENV=development npm install - - #do not fail when there is no build script - echo "---> Building in production mode" - npm run build --if-present - - echo "---> Pruning the development dependencies" - npm prune - - # Clear the npm's cache and tmp directories only if they are not a docker volumes - NPM_CACHE=$(npm config get cache) - if ! mountpoint $NPM_CACHE; then - echo "---> Cleaning the npm cache $NPM_CACHE" - #As of npm@5 even the 'npm cache clean --force' does not fully remove the cache directory - # instead of $NPM_CACHE* use $NPM_CACHE/*. - # We do not want to delete .npmrc file. - rm -rf "${NPM_CACHE:?}/" - fi - NPM_TMP=$(npm config get tmp) - if ! mountpoint $NPM_TMP; then - echo "---> Cleaning the $NPM_TMP/npm-*" - rm -rf $NPM_TMP/npm-* - fi - -fi - -# Fix source directory permissions -fix-permissions ./ From 7f76c063224fc51732f2eafabb9b794eb80e0b63 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Fri, 15 Mar 2024 14:32:37 -0700 Subject: [PATCH 079/152] disable django migration --- django/.s2i/environment | 1 + 1 file changed, 1 insertion(+) create mode 100644 django/.s2i/environment diff --git a/django/.s2i/environment b/django/.s2i/environment new file mode 100644 index 00000000..00417809 --- /dev/null +++ b/django/.s2i/environment @@ -0,0 +1 @@ +DISABLE_MIGRATE=1 \ No newline at end of file From 22ea4f4f947accd3bc103d905e9b7aaa100681c0 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Fri, 15 Mar 2024 14:36:00 -0700 Subject: [PATCH 080/152] open the CD --- .github/workflows/dev-ci.yaml | 62 +++++++++++++++++------------------ 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/.github/workflows/dev-ci.yaml b/.github/workflows/dev-ci.yaml index fceba5ba..bcc52797 100644 --- a/.github/workflows/dev-ci.yaml +++ b/.github/workflows/dev-ci.yaml @@ -73,40 +73,40 @@ jobs: oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-frontend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} - # deploy: + deploy: - # name: Deploy CTHUB on Dev - # runs-on: ubuntu-latest - # timeout-minutes: 60 - # needs: [set-pre-release, build] + name: Deploy CTHUB on Dev + runs-on: ubuntu-latest + timeout-minutes: 60 + needs: [set-pre-release, build] - # env: - # PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} + env: + PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} - # steps: + steps: - # - name: Checkout Manifest repository - # uses: actions/checkout@v4.1.1 - # with: - # repository: bcgov-c/tenant-gitops-30b186 
- # ref: main - # ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }} + - name: Checkout Manifest repository + uses: actions/checkout@v4.1.1 + with: + repository: bcgov-c/tenant-gitops-30b186 + ref: main + ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }} - # - name: Update frontend tag - # uses: mikefarah/yq@v4.40.5 - # with: - # cmd: yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml - - # - name: Update backend tag - # uses: mikefarah/yq@v4.40.5 - # with: - # cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml - - # - name: GitHub Commit & Push - # run: | - # git config --global user.email "actions@github.com" - # git config --global user.name "GitHub Actions" - # git add cthub/values-dev.yaml - # git commit -m "update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }}" - # git push + - name: Update frontend tag + uses: mikefarah/yq@v4.40.5 + with: + cmd: yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml + + - name: Update backend tag + uses: mikefarah/yq@v4.40.5 + with: + cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml + + - name: GitHub Commit & Push + run: | + git config --global user.email "actions@github.com" + git config --global user.name "GitHub Actions" + git add cthub/values-dev.yaml + git commit -m "update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }}" + git push \ No newline at end of file From 442a169ace98227f257277620ccc8d9efb91c0ff Mon Sep 17 00:00:00 2001 From: Emily <44536222+emi-hi@users.noreply.github.com> Date: Mon, 18 Mar 2024 10:08:27 -0700 Subject: [PATCH 081/152] feat: CTHUB 248 -Delete user (#253) * feat: turns x icon into buttton, adds empty handler function * feat: adds functionality for deleting users, reuses and slightly adjusts alert dialogue for confirming, adds checks on frontend and backend for ensuring user does not delete their own self from user table * fix: adds missed backend check * chore: makes minor changes to handle delelete user and delete viewset * refactor AlertDialog plus a few small changes --------- Co-authored-by: tim738745 --- django/api/viewsets/user.py | 17 +++-- react/src/app/components/AlertDialog.js | 47 ++++++++----- react/src/uploads/UploadContainer.js | 61 ++++++++-------- react/src/uploads/components/UploadPage.js | 10 ++- react/src/users/UsersContainer.js | 82 +++++++++++++++++----- react/src/users/components/UsersPage.js | 34 ++++++--- react/src/users/routes.js | 1 + 7 files changed, 164 insertions(+), 88 deletions(-) diff --git a/django/api/viewsets/user.py b/django/api/viewsets/user.py index cfc47995..326ff90e 100644 --- a/django/api/viewsets/user.py +++ b/django/api/viewsets/user.py @@ -4,21 +4,22 @@ from rest_framework.permissions import AllowAny from rest_framework.response import Response from rest_framework.viewsets import GenericViewSet -from rest_framework.mixins import CreateModelMixin +from rest_framework.mixins import CreateModelMixin, DestroyModelMixin from api.models.user import User from api.serializers.user import UserSerializer, UserListSerializer from api.decorators.permission import check_admin_permission from api.services.user import update_permissions from api.services.permissions import get_permissions_map -class UserViewSet(GenericViewSet, CreateModelMixin): +class UserViewSet(GenericViewSet, CreateModelMixin, DestroyModelMixin): """ This viewset automatically provides `list`, `create`, `retrieve`, 
and `update` actions. """ permission_classes = (AllowAny,) - http_method_names = ['get', 'post', 'put', 'patch'] + http_method_names = ['get', 'post', 'put', 'patch', 'delete'] queryset = User.objects.all() + lookup_field = 'idir' serializer_classes = { 'default': UserSerializer, @@ -28,10 +29,16 @@ class UserViewSet(GenericViewSet, CreateModelMixin): def get_serializer_class(self): if self.action in list(self.serializer_classes.keys()): return self.serializer_classes[self.action] - return self.serializer_classes['default'] + @method_decorator(check_admin_permission()) + def destroy(self, request, idir=None): + if request.user == idir: + return Response('you cannot delete your own idir', status=status.HTTP_400_BAD_REQUEST ) + return super().destroy(self, request) + + @method_decorator(check_admin_permission()) def create(self, request): return super().create(request) @@ -44,7 +51,7 @@ def update_permissions(self, request): update_permissions(user_permissions) except Exception as e: return Response(str(e), status=status.HTTP_400_BAD_REQUEST) - return Response('User permissions were updated!', status=status.HTTP_201_CREATED) + return Response('User permissions were updated!', status=status.HTTP_200_OK) @action(detail=False) def current(self, request): diff --git a/react/src/app/components/AlertDialog.js b/react/src/app/components/AlertDialog.js index 63ac413e..c0b052c4 100644 --- a/react/src/app/components/AlertDialog.js +++ b/react/src/app/components/AlertDialog.js @@ -7,20 +7,27 @@ import DialogContent from '@mui/material/DialogContent'; import DialogContentText from '@mui/material/DialogContentText'; import DialogTitle from '@mui/material/DialogTitle'; -export default function AlertDialog(props) { +const AlertDialog = (props) => { const { - open, setOpen, rightButtonText, dialogue, leftButtonText, setReplaceData, title + open, + dialogue, + title, + cancelText, + handleCancel, + confirmText, + handleConfirm } = props; - const handleClose = (trueFalse) => { - setReplaceData(trueFalse); - setOpen(false); - }; + if (!open) { + return null + } return (
{ + handleCancel() + }} aria-labelledby="alert-dialog-title" aria-describedby="alert-dialog-description" > @@ -34,36 +41,38 @@ export default function AlertDialog(props) {
); } + AlertDialog.defaultProps = { - rightButtonText: '', dialogue: '', - leftButtonText: '', - setReplaceData: '', + title: '', }; AlertDialog.propTypes = { open: PropTypes.bool.isRequired, - setOpen: PropTypes.func.isRequired, - rightButtonText: PropTypes.string, + title: PropTypes.string, dialogue: PropTypes.string, - leftButtonText: PropTypes.string, - setReplaceData: PropTypes.func, + cancelText: PropTypes.string.isRequired, + handleCancel: PropTypes.func.isRequired, + confirmText: PropTypes.string.isRequired, + handleConfirm: PropTypes.func.isRequired }; + +export default AlertDialog diff --git a/react/src/uploads/UploadContainer.js b/react/src/uploads/UploadContainer.js index 4acac51a..32588bf0 100644 --- a/react/src/uploads/UploadContainer.js +++ b/react/src/uploads/UploadContainer.js @@ -16,26 +16,15 @@ const UploadContainer = () => { const [datasetList, setDatasetList] = useState([{}]); // holds the array of names of datasets const [loading, setLoading] = useState(false); const [datasetSelected, setDatasetSelected] = useState(''); // string identifying which dataset is being uploaded - const [replaceData, setReplaceData] = useState('false'); // if true, we will replace all + const [replaceData, setReplaceData] = useState(false); // if true, we will replace all const [alertContent, setAlertContent] = useState(); const [alert, setAlert] = useState(false); const [currentUser, setCurrentUser] = useState(''); const [alertSeverity, setAlertSeverity] = useState(''); - // existing data with what is being uploaded - const [open, setOpen] = useState(false); + const [openDialog, setOpenDialog] = useState(false); const [adminUser, setAdminUser] = useState(false); - const dialogue = 'Selecting replace will delete all previously uploaded records for this dataset'; - const leftButtonText = 'Cancel'; - const rightButtonText = 'Replace existing data'; - const handleRadioChange = (event) => { - const choice = event.target.value; - if (choice === 'true') { - setOpen(true); - } - setReplaceData(choice); - }; - const axios = useAxios() - const axiosDefault = useAxios(true) + const axios = useAxios(); + const axiosDefault = useAxios(true); const refreshList = () => { setLoading(true); @@ -88,9 +77,9 @@ const UploadContainer = () => { const downloadSpreadsheet = () => { axios.get(ROUTES_UPLOAD.DOWNLOAD_SPREADSHEET, { params: { - datasetSelected: datasetSelected + datasetSelected, }, - responseType: 'blob' + responseType: 'blob', }).then((response) => { const url = window.URL.createObjectURL(new Blob([response.data])); const link = document.createElement('a'); @@ -107,12 +96,30 @@ const UploadContainer = () => { }); }; + const handleRadioChange = (event) => { + const choice = event.target.value; + if (choice === 'replace') { + setOpenDialog(true); + } else { + setReplaceData(false); + } + }; + + const handleReplaceDataConfirm = () => { + setReplaceData(true) + setOpenDialog(false) + } + + const handleReplaceDataCancel = () => { + setOpenDialog(false) + } + useEffect(() => { refreshList(true); }, []); if (loading) { - return + return ; } const alertElement = alert && alertContent && alertSeverity ? {alertContent} : null @@ -120,21 +127,19 @@ const UploadContainer = () => { return (
- {open && ( - )} { -)} + value={"replace"} + control={} label="Replace existing data" /> } label="Add to existing data" /> diff --git a/react/src/users/UsersContainer.js b/react/src/users/UsersContainer.js index a20231b4..b9614b3c 100644 --- a/react/src/users/UsersContainer.js +++ b/react/src/users/UsersContainer.js @@ -1,21 +1,46 @@ import { withRouter } from 'react-router-dom'; import PropTypes from 'prop-types'; -import { CircularProgress, Alert } from '@mui/material'; +import { Alert } from '@mui/material'; import React, { useState, useEffect, useCallback } from 'react'; import { produce } from 'immer'; import ROUTES_USERS from './routes'; import UsersPage from './components/UsersPage'; import useAxios from '../app/utilities/useAxios'; +import AlertDialog from '../app/components/AlertDialog'; +import Loading from '../app/components/Loading'; const UsersContainer = (props) => { - const { currentUser } = props; + const { + currentUser, + } = props; + const [loading, setLoading] = useState(false); const [users, setUsers] = useState([]); const [newUser, setNewUser] = useState(''); const [message, setMessage] = useState(''); const [messageSeverity, setMessageSeverity] = useState(''); + const [userToDelete, setUserToDelete] = useState(''); + const [openDialog, setOpenDialog] = useState(false); const axios = useAxios(); + const handleAddNewUser = () => { + axios.post(ROUTES_USERS.CREATE, { idir: newUser }) + .then((response) => { + const userAdded = response.data.idir; + setMessageSeverity('success'); + setMessage(`${userAdded} was added to the user list`); + const userObject = { idir: userAdded, user_permissions: { admin: false, uploader: false } }; + setUsers( + produce((draft) => { + draft.push(userObject); + }), + ); + }) + .catch((error) => { + setMessageSeverity('error'); + setMessage('new user could not be added, sorry!'); + }); + }; const handleCheckboxChange = useCallback((event) => { setMessage(''); const idir = event.target.name; @@ -23,32 +48,45 @@ const UsersContainer = (props) => { const { checked } = event.target; setUsers( produce((draft) => { - const user = draft.find((user) => user.idir === idir); - user.user_permissions[permissionType] = checked; + const userToChange = draft.find((user) => user.idir === idir); + userToChange.user_permissions[permissionType] = checked; }), ); }, []); - const handleAddNewUser = () => { - axios.post(ROUTES_USERS.CREATE, { idir: newUser }) + const handleDeleteUserClick = (idir) => { + setUserToDelete(idir); + setOpenDialog(true); + } + + const handleDeleteUser = () => { + axios.delete(ROUTES_USERS.DETAILS.replace(/:id/g, userToDelete)) .then((response) => { - const userAdded = response.data.idir; setMessageSeverity('success'); - setMessage(`${userAdded} was added to the user list`); - const userObject = { idir: userAdded, user_permissions: { admin: false, uploader: false } }; + setMessage(`${userToDelete} was deleted from the user table`); setUsers( produce((draft) => { - draft.push(userObject); + const indexOfUserToRemove = draft.findIndex((user) => user.idir === userToDelete); + draft.splice(indexOfUserToRemove, 1); }), ); }) .catch((error) => { setMessageSeverity('error'); - setMessage('new user could not be added, sorry!'); + setMessage('something went wrong when deleting the user, sorry!'); + }) + .finally(() => { + setUserToDelete(''); + setOpenDialog(false) }); - }; + } + + const handleDeleteUserCancel = () => { + setUserToDelete(''); + setOpenDialog(false); + } - const handleSubmitPermissionUpdates = () => { + const 
handleSubmitUserUpdates = () => { axios.put(ROUTES_USERS.UPDATE, users) .then((response) => { setMessageSeverity('success'); @@ -69,24 +107,30 @@ const UsersContainer = (props) => { }, []); if (loading) { - return ( -
- -
- ); + return } return (
{message && {message}} +
); diff --git a/react/src/users/components/UsersPage.js b/react/src/users/components/UsersPage.js index 1d4f4c70..f35f7e3d 100644 --- a/react/src/users/components/UsersPage.js +++ b/react/src/users/components/UsersPage.js @@ -1,7 +1,7 @@ import React from 'react'; import PropTypes from 'prop-types'; import { - Box, Button, Grid, TextField, Checkbox, Tooltip, + Box, Button, Grid, TextField, Checkbox, Tooltip, IconButton, } from '@mui/material'; import ClearIcon from '@mui/icons-material/Clear'; import SaveIcon from '@mui/icons-material/Save'; @@ -13,20 +13,21 @@ const UsersPage = (props) => { handleAddNewUser, setNewUser, handleCheckboxChange, - handleSubmitPermissionUpdates, + handleSubmitUserUpdates, newUser, setMessage, + handleXClick, } = props; + const userRow = (user) => { const disableAdmin = currentUser === user.idir; return ( - - { handleCheckboxChange(event); }} /> + { handleCheckboxChange(event); }} /> - { handleCheckboxChange(event); }} /> + { handleCheckboxChange(event); }} /> @@ -34,11 +35,22 @@ const UsersPage = (props) => { {user.idir} - + + + { handleXClick(user.idir); }} + > + + + + ); }; + return ( <> @@ -57,7 +69,7 @@ const UsersPage = (props) => { { setNewUser(event.target.value); setMessage(''); }} /> - + @@ -92,7 +104,6 @@ const UsersPage = (props) => { }; UsersPage.defaultProps = { newUser: '', - setMessage: '', }; UsersPage.propTypes = { @@ -100,9 +111,10 @@ UsersPage.propTypes = { handleAddNewUser: PropTypes.func.isRequired, setNewUser: PropTypes.func.isRequired, handleCheckboxChange: PropTypes.func.isRequired, - handleSubmitPermissionUpdates: PropTypes.func.isRequired, + handleSubmitUserUpdates: PropTypes.func.isRequired, currentUser: PropTypes.string.isRequired, newUser: PropTypes.string, - setMessage: PropTypes.string, + setMessage: PropTypes.func.isRequired, + handleXClick: PropTypes.func.isRequired, }; export default UsersPage; diff --git a/react/src/users/routes.js b/react/src/users/routes.js index a084ec34..6cb6c026 100644 --- a/react/src/users/routes.js +++ b/react/src/users/routes.js @@ -5,6 +5,7 @@ const USERS = { CURRENT: `${API_BASE_PATH}/current`, CREATE: API_BASE_PATH, UPDATE: `${API_BASE_PATH}/update_permissions`, + DETAILS: `${API_BASE_PATH}/:id`, }; export default USERS; From 1fc06f799d37e170d89f915d6fe5c83c8ada554d Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Tue, 19 Mar 2024 11:48:14 -0700 Subject: [PATCH 082/152] update backend bind port 8080 --- django/gunicorn.cfg.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/django/gunicorn.cfg.py b/django/gunicorn.cfg.py index 1e50f170..f6b80dc1 100644 --- a/django/gunicorn.cfg.py +++ b/django/gunicorn.cfg.py @@ -43,7 +43,11 @@ # A positive integer. Generally set in the 1-5 seconds range. 
# -workers = 8 -timeout = 600 -graceful_timeout = 600 -keepalive = 5 +# workers = 8 +# timeout = 600 +# graceful_timeout = 600 +# keepalive = 5 + +bind = "0.0.0.0:8080" +workers = 2 +timeout = 600 \ No newline at end of file From 0d6c3689dc680b5b95f27b7e74c35b2eac711653 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Tue, 19 Mar 2024 15:06:14 -0700 Subject: [PATCH 083/152] update to use node 16 --- react/Dockerfile-Openshift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift index 4cd9406c..49539dcf 100644 --- a/react/Dockerfile-Openshift +++ b/react/Dockerfile-Openshift @@ -6,7 +6,7 @@ # && npm install --omit=dev \ # && npm install -D webpack webpack-cli # RUN yes | npm run dist -FROM artifacts.developer.gov.bc.ca/docker-remote/node:20 as builder +FROM artifacts.developer.gov.bc.ca/docker-remote/node:16.13 as builder WORKDIR /usr/src/app COPY ./ ./ RUN npm install -g npm@latest From b1b3fc2b2b6abb5bd55b22ae34f353963cee7a9b Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Tue, 19 Mar 2024 15:30:07 -0700 Subject: [PATCH 084/152] not to install npm latest --- react/Dockerfile-Openshift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/react/Dockerfile-Openshift b/react/Dockerfile-Openshift index 49539dcf..04476d87 100644 --- a/react/Dockerfile-Openshift +++ b/react/Dockerfile-Openshift @@ -9,7 +9,7 @@ FROM artifacts.developer.gov.bc.ca/docker-remote/node:16.13 as builder WORKDIR /usr/src/app COPY ./ ./ -RUN npm install -g npm@latest +# RUN npm install -g npm@latest doesn't work for node 16 RUN npm install RUN pwd && \ ls -l && \ From 900b0d78d623b89b00c98f96b7846f27116280ef Mon Sep 17 00:00:00 2001 From: tim738745 <98717409+tim738745@users.noreply.github.com> Date: Fri, 22 Mar 2024 11:28:49 -0700 Subject: [PATCH 085/152] rebuild frontend using create-react-app (#259) --- .gitignore | 1 - docker-compose.yml | 14 +-- frontend/.dockerignore | 1 + frontend/.env | 5 + frontend/.gitignore | 23 ++++ {react => frontend}/Dockerfile | 4 +- {react => frontend}/Dockerfile-Openshift | 2 +- frontend/README.md | 70 +++++++++++ {react => frontend}/nginx.conf | 0 {react => frontend}/package.json | 56 +++++---- frontend/public/favicon.ico | Bin 0 -> 3870 bytes frontend/public/index.html | 47 ++++++++ frontend/public/logo192.png | Bin 0 -> 5347 bytes frontend/public/logo512.png | Bin 0 -> 9664 bytes frontend/public/manifest.json | 25 ++++ frontend/public/robots.txt | 3 + .../src/app/components/AlertDialog.js | 0 .../src/app/components}/App.js | 17 ++- .../src/app/components/Footer.js | 0 .../src/app/components/Header.js | 0 .../src/app/components/KeycloakProvider.js | 9 +- .../src/app/components/Layout.js | 0 .../src/app/components/Loading.js | 0 .../src/app/components}/Login.js | 2 +- .../src/app/components/Logout.js | 0 .../src/app/components/ReactTable.js | 0 .../app/components/ReactTablePagination.js | 0 {react => frontend}/src/app/static/logo.png | Bin {react => frontend}/src/app/styles/App.scss | 0 .../src/app/styles/FileUpload.scss | 0 .../src/app/styles/Footer.scss | 0 .../src/app/styles/Header.scss | 0 {react => frontend}/src/app/styles/Login.scss | 0 .../src/app/styles/ReactTable.scss | 0 .../src/app/styles/Roboto.scss | 0 {react => frontend}/src/app/styles/Users.scss | 0 .../src/app/styles/fonts/LICENSE.txt | 0 .../src/app/styles/fonts/Roboto-Bold.ttf | Bin .../app/styles/fonts/Roboto-BoldItalic.ttf | Bin .../src/app/styles/fonts/Roboto-Italic.ttf | Bin .../src/app/styles/fonts/Roboto-Regular.ttf | Bin 
.../src/app/styles/images/BCID_H_rgb_pos.png | Bin .../src/app/styles/images/BCID_H_rgb_rev.png | Bin .../src/app/styles/images/BCID_V_rgb_pos.png | Bin .../src/app/styles/images/tof.jpg | Bin {react => frontend}/src/app/styles/index.scss | 0 .../src/app/styles/variables.scss | 0 .../src/app/utilities/getFileSize.js | 0 .../src/app/utilities/props.js | 0 .../src/app/utilities/reactTable.js | 0 .../src/app/utilities/useAxios.js | 6 +- .../src/app/utilities/useKeycloak.js | 0 frontend/src/config.js | 19 +++ {react => frontend}/src/contexts.js | 0 .../src/dashboard/DashboardContainer.js | 0 {react => frontend}/src/dashboard/router.js | 0 .../src/icbc_data/IcbcDataContainer.js | 0 .../src/icbc_data/components/IcbcDataTable.js | 0 {react => frontend}/src/icbc_data/router.js | 0 {react => frontend}/src/icbc_data/routes.js | 0 {react => frontend}/src/index.js | 9 +- .../src/uploads/UploadContainer.js | 0 .../src/uploads/components/FileDrop.js | 0 .../src/uploads/components/FileDropArea.js | 0 .../src/uploads/components/UploadPage.js | 0 {react => frontend}/src/uploads/router.js | 0 {react => frontend}/src/uploads/routes.js | 0 .../src/users/UsersContainer.js | 0 .../src/users/components/UsersPage.js | 0 {react => frontend}/src/users/routes.js | 0 react/.eslintrc.js | 30 ----- react/README.md | 1 - react/public/favicon.ico | Bin 22486 -> 0 bytes react/public/index.html | 14 --- react/src/app/settings.js | 11 -- react/src/keycloak-source.json | 8 -- react/start.js | 32 ----- react/webpack.config.js | 114 ------------------ 78 files changed, 254 insertions(+), 269 deletions(-) create mode 100644 frontend/.dockerignore create mode 100644 frontend/.env create mode 100644 frontend/.gitignore rename {react => frontend}/Dockerfile (69%) rename {react => frontend}/Dockerfile-Openshift (95%) create mode 100644 frontend/README.md rename {react => frontend}/nginx.conf (100%) rename {react => frontend}/package.json (50%) create mode 100644 frontend/public/favicon.ico create mode 100644 frontend/public/index.html create mode 100644 frontend/public/logo192.png create mode 100644 frontend/public/logo512.png create mode 100644 frontend/public/manifest.json create mode 100644 frontend/public/robots.txt rename {react => frontend}/src/app/components/AlertDialog.js (100%) rename {react/src => frontend/src/app/components}/App.js (63%) rename {react => frontend}/src/app/components/Footer.js (100%) rename {react => frontend}/src/app/components/Header.js (100%) rename {react => frontend}/src/app/components/KeycloakProvider.js (72%) rename {react => frontend}/src/app/components/Layout.js (100%) rename {react => frontend}/src/app/components/Loading.js (100%) rename {react/src => frontend/src/app/components}/Login.js (94%) rename {react => frontend}/src/app/components/Logout.js (100%) rename {react => frontend}/src/app/components/ReactTable.js (100%) rename {react => frontend}/src/app/components/ReactTablePagination.js (100%) rename {react => frontend}/src/app/static/logo.png (100%) rename {react => frontend}/src/app/styles/App.scss (100%) rename {react => frontend}/src/app/styles/FileUpload.scss (100%) rename {react => frontend}/src/app/styles/Footer.scss (100%) rename {react => frontend}/src/app/styles/Header.scss (100%) rename {react => frontend}/src/app/styles/Login.scss (100%) rename {react => frontend}/src/app/styles/ReactTable.scss (100%) rename {react => frontend}/src/app/styles/Roboto.scss (100%) rename {react => frontend}/src/app/styles/Users.scss (100%) rename {react => frontend}/src/app/styles/fonts/LICENSE.txt 
(100%) rename {react => frontend}/src/app/styles/fonts/Roboto-Bold.ttf (100%) rename {react => frontend}/src/app/styles/fonts/Roboto-BoldItalic.ttf (100%) rename {react => frontend}/src/app/styles/fonts/Roboto-Italic.ttf (100%) rename {react => frontend}/src/app/styles/fonts/Roboto-Regular.ttf (100%) rename {react => frontend}/src/app/styles/images/BCID_H_rgb_pos.png (100%) rename {react => frontend}/src/app/styles/images/BCID_H_rgb_rev.png (100%) rename {react => frontend}/src/app/styles/images/BCID_V_rgb_pos.png (100%) rename {react => frontend}/src/app/styles/images/tof.jpg (100%) rename {react => frontend}/src/app/styles/index.scss (100%) rename {react => frontend}/src/app/styles/variables.scss (100%) rename {react => frontend}/src/app/utilities/getFileSize.js (100%) rename {react => frontend}/src/app/utilities/props.js (100%) rename {react => frontend}/src/app/utilities/reactTable.js (100%) rename {react => frontend}/src/app/utilities/useAxios.js (90%) rename {react => frontend}/src/app/utilities/useKeycloak.js (100%) create mode 100644 frontend/src/config.js rename {react => frontend}/src/contexts.js (100%) rename {react => frontend}/src/dashboard/DashboardContainer.js (100%) rename {react => frontend}/src/dashboard/router.js (100%) rename {react => frontend}/src/icbc_data/IcbcDataContainer.js (100%) rename {react => frontend}/src/icbc_data/components/IcbcDataTable.js (100%) rename {react => frontend}/src/icbc_data/router.js (100%) rename {react => frontend}/src/icbc_data/routes.js (100%) rename {react => frontend}/src/index.js (69%) rename {react => frontend}/src/uploads/UploadContainer.js (100%) rename {react => frontend}/src/uploads/components/FileDrop.js (100%) rename {react => frontend}/src/uploads/components/FileDropArea.js (100%) rename {react => frontend}/src/uploads/components/UploadPage.js (100%) rename {react => frontend}/src/uploads/router.js (100%) rename {react => frontend}/src/uploads/routes.js (100%) rename {react => frontend}/src/users/UsersContainer.js (100%) rename {react => frontend}/src/users/components/UsersPage.js (100%) rename {react => frontend}/src/users/routes.js (100%) delete mode 100644 react/.eslintrc.js delete mode 100644 react/README.md delete mode 100644 react/public/favicon.ico delete mode 100644 react/public/index.html delete mode 100644 react/src/app/settings.js delete mode 100644 react/src/keycloak-source.json delete mode 100644 react/start.js delete mode 100644 react/webpack.config.js diff --git a/.gitignore b/.gitignore index 59e952f2..c54ffabf 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,6 @@ __pycache__/ .coverage .DS_Store -.env .idea/ .minio.sys/ .pyc diff --git a/docker-compose.yml b/docker-compose.yml index 0a67251d..19371282 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -58,17 +58,11 @@ services: db: condition: service_healthy web: - build: ./react - command: > - sh -c "npm install --legacy-peer-deps && - npm start" - env_file: - - keycloak.env - environment: - - API_BASE=http://localhost:8000 - - ENABLE_KEYCLOAK=true + build: ./frontend + command: npm run start volumes: - - ./react:/web + - ./frontend:/web + - /web/node_modules ports: - 3000:3000 diff --git a/frontend/.dockerignore b/frontend/.dockerignore new file mode 100644 index 00000000..b512c09d --- /dev/null +++ b/frontend/.dockerignore @@ -0,0 +1 @@ +node_modules \ No newline at end of file diff --git a/frontend/.env b/frontend/.env new file mode 100644 index 00000000..f70aacba --- /dev/null +++ b/frontend/.env @@ -0,0 +1,5 @@ +REACT_APP_ENABLE_KEYCLOAK=true 
+REACT_APP_API_BASE=http://localhost:8000 +REACT_APP_KEYCLOAK_CLIENT_ID=cthub-on-gold-cluster-3974 +REACT_APP_KEYCLOAK_REALM=standard +REACT_APP_KEYCLOAK_URL=https://dev.loginproxy.gov.bc.ca/auth/ \ No newline at end of file diff --git a/frontend/.gitignore b/frontend/.gitignore new file mode 100644 index 00000000..4d29575d --- /dev/null +++ b/frontend/.gitignore @@ -0,0 +1,23 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. + +# dependencies +/node_modules +/.pnp +.pnp.js + +# testing +/coverage + +# production +/build + +# misc +.DS_Store +.env.local +.env.development.local +.env.test.local +.env.production.local + +npm-debug.log* +yarn-debug.log* +yarn-error.log* diff --git a/react/Dockerfile b/frontend/Dockerfile similarity index 69% rename from react/Dockerfile rename to frontend/Dockerfile index 9873de1f..707160f3 100644 --- a/react/Dockerfile +++ b/frontend/Dockerfile @@ -1,8 +1,10 @@ FROM node:16.13 WORKDIR /web -COPY . /web/ +COPY package*.json /web/ RUN npm install --legacy-peer-deps +COPY . . + EXPOSE 3000 diff --git a/react/Dockerfile-Openshift b/frontend/Dockerfile-Openshift similarity index 95% rename from react/Dockerfile-Openshift rename to frontend/Dockerfile-Openshift index 04476d87..ed6deb62 100644 --- a/react/Dockerfile-Openshift +++ b/frontend/Dockerfile-Openshift @@ -14,7 +14,7 @@ RUN npm install RUN pwd && \ ls -l && \ ls -l node_modules -RUN NODE_ENV=production | npm run build +RUN yes | npm run build # Stage 2: Copy the JS React SPA into the Nginx HTML directory FROM artifacts.developer.gov.bc.ca/docker-remote/bitnami/nginx:1.24.0 diff --git a/frontend/README.md b/frontend/README.md new file mode 100644 index 00000000..58beeacc --- /dev/null +++ b/frontend/README.md @@ -0,0 +1,70 @@ +# Getting Started with Create React App + +This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app). + +## Available Scripts + +In the project directory, you can run: + +### `npm start` + +Runs the app in the development mode.\ +Open [http://localhost:3000](http://localhost:3000) to view it in your browser. + +The page will reload when you make changes.\ +You may also see any lint errors in the console. + +### `npm test` + +Launches the test runner in the interactive watch mode.\ +See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information. + +### `npm run build` + +Builds the app for production to the `build` folder.\ +It correctly bundles React in production mode and optimizes the build for the best performance. + +The build is minified and the filenames include the hashes.\ +Your app is ready to be deployed! + +See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information. + +### `npm run eject` + +**Note: this is a one-way operation. Once you `eject`, you can't go back!** + +If you aren't satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project. + +Instead, it will copy all the configuration files and the transitive dependencies (webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you're on your own. + +You don't have to ever use `eject`. 
The curated feature set is suitable for small and middle deployments, and you shouldn't feel obligated to use this feature. However we understand that this tool wouldn't be useful if you couldn't customize it when you are ready for it. + +## Learn More + +You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started). + +To learn React, check out the [React documentation](https://reactjs.org/). + +### Code Splitting + +This section has moved here: [https://facebook.github.io/create-react-app/docs/code-splitting](https://facebook.github.io/create-react-app/docs/code-splitting) + +### Analyzing the Bundle Size + +This section has moved here: [https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size](https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size) + +### Making a Progressive Web App + +This section has moved here: [https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app](https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app) + +### Advanced Configuration + +This section has moved here: [https://facebook.github.io/create-react-app/docs/advanced-configuration](https://facebook.github.io/create-react-app/docs/advanced-configuration) + +### Deployment + +This section has moved here: [https://facebook.github.io/create-react-app/docs/deployment](https://facebook.github.io/create-react-app/docs/deployment) + +### `npm run build` fails to minify + +This section has moved here: [https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify](https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify) diff --git a/react/nginx.conf b/frontend/nginx.conf similarity index 100% rename from react/nginx.conf rename to frontend/nginx.conf diff --git a/react/package.json b/frontend/package.json similarity index 50% rename from react/package.json rename to frontend/package.json index 44edfd37..c444fd99 100644 --- a/react/package.json +++ b/frontend/package.json @@ -1,5 +1,5 @@ { - "name": "app", + "name": "frontend", "version": "0.2.0", "private": true, "dependencies": { @@ -16,44 +16,50 @@ "keycloak-js": "^15.0.2", "process": "^0.11.10", "prop-types": "^15.7.2", - "react": "^17.0.1", - "react-dom": "^17.0.1", + "@testing-library/jest-dom": "^5.17.0", + "@testing-library/react": "^13.4.0", + "@testing-library/user-event": "^13.5.0", + "react": "^18.2.0", + "react-dom": "^18.2.0", "react-dropzone": "^10.2.2", "react-router-dom": "^5.3.0", + "react-scripts": "5.0.1", "react-table": "^7.7.0", "regenerator-runtime": "^0.13.9", + "web-vitals": "^2.1.4", "stream-browserify": "^3.0.0", "util": "^0.12.4" }, "devDependencies": { - "@babel/core": "^7.15.8", - "@babel/preset-env": "^7.15.8", - "@babel/preset-react": "^7.14.5", - "@typescript-eslint/eslint-plugin": "^4.33.0", - "@typescript-eslint/parser": "^4.33.0", - "babel-loader": "^8.2.3", - "copy-webpack-plugin": "^9.0.1", "css-loader": "^5.2.7", - "eslint": "^7.32.0", - "eslint-config-airbnb": "^18.2.1", - "eslint-plugin-import": "^2.25.2", - "eslint-plugin-jsx-a11y": "^6.4.1", - "eslint-plugin-react": "^7.26.1", - "eslint-plugin-react-hooks": "^4.2.0", - "html-webpack-plugin": "^5.5.0", - "jest": "^27.3.1", "sass": "^1.43.4", "sass-loader": "^11.1.1", "source-map-loader": "^2.0.2", "style-loader": "^2.0.0", - "url-loader": "^4.1.1", - "webpack-cli": "^4.9.1", - "webpack": "^5.60.0", - "webpack-dev-server": "^3.11.2" + "url-loader": "^4.1.1" }, 
"scripts": { - "start": "node start", - "build": "webpack", - "test": "jest --coverage" + "start": "react-scripts start", + "build": "react-scripts build", + "test": "react-scripts test", + "eject": "react-scripts eject" + }, + "eslintConfig": { + "extends": [ + "react-app", + "react-app/jest" + ] + }, + "browserslist": { + "production": [ + ">0.2%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 1 chrome version", + "last 1 firefox version", + "last 1 safari version" + ] } } diff --git a/frontend/public/favicon.ico b/frontend/public/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..a11777cc471a4344702741ab1c8a588998b1311a GIT binary patch literal 3870 zcma);c{J4h9>;%nil|2-o+rCuEF-(I%-F}ijC~o(k~HKAkr0)!FCj~d>`RtpD?8b; zXOC1OD!V*IsqUwzbMF1)-gEDD=A573Z-&G7^LoAC9|WO7Xc0Cx1g^Zu0u_SjAPB3vGa^W|sj)80f#V0@M_CAZTIO(t--xg= z!sii`1giyH7EKL_+Wi0ab<)&E_0KD!3Rp2^HNB*K2@PHCs4PWSA32*-^7d{9nH2_E zmC{C*N*)(vEF1_aMamw2A{ZH5aIDqiabnFdJ|y0%aS|64E$`s2ccV~3lR!u<){eS` z#^Mx6o(iP1Ix%4dv`t@!&Za-K@mTm#vadc{0aWDV*_%EiGK7qMC_(`exc>-$Gb9~W!w_^{*pYRm~G zBN{nA;cm^w$VWg1O^^<6vY`1XCD|s_zv*g*5&V#wv&s#h$xlUilPe4U@I&UXZbL z0)%9Uj&@yd03n;!7do+bfixH^FeZ-Ema}s;DQX2gY+7g0s(9;`8GyvPY1*vxiF&|w z>!vA~GA<~JUqH}d;DfBSi^IT*#lrzXl$fNpq0_T1tA+`A$1?(gLb?e#0>UELvljtQ zK+*74m0jn&)5yk8mLBv;=@}c{t0ztT<v;Avck$S6D`Z)^c0(jiwKhQsn|LDRY&w(Fmi91I7H6S;b0XM{e zXp0~(T@k_r-!jkLwd1_Vre^v$G4|kh4}=Gi?$AaJ)3I+^m|Zyj#*?Kp@w(lQdJZf4 z#|IJW5z+S^e9@(6hW6N~{pj8|NO*>1)E=%?nNUAkmv~OY&ZV;m-%?pQ_11)hAr0oAwILrlsGawpxx4D43J&K=n+p3WLnlDsQ$b(9+4 z?mO^hmV^F8MV{4Lx>(Q=aHhQ1){0d*(e&s%G=i5rq3;t{JC zmgbn5Nkl)t@fPH$v;af26lyhH!k+#}_&aBK4baYPbZy$5aFx4}ka&qxl z$=Rh$W;U)>-=S-0=?7FH9dUAd2(q#4TCAHky!$^~;Dz^j|8_wuKc*YzfdAht@Q&ror?91Dm!N03=4=O!a)I*0q~p0g$Fm$pmr$ zb;wD;STDIi$@M%y1>p&_>%?UP($15gou_ue1u0!4(%81;qcIW8NyxFEvXpiJ|H4wz z*mFT(qVx1FKufG11hByuX%lPk4t#WZ{>8ka2efjY`~;AL6vWyQKpJun2nRiZYDij$ zP>4jQXPaP$UC$yIVgGa)jDV;F0l^n(V=HMRB5)20V7&r$jmk{UUIe zVjKroK}JAbD>B`2cwNQ&GDLx8{pg`7hbA~grk|W6LgiZ`8y`{Iq0i>t!3p2}MS6S+ zO_ruKyAElt)rdS>CtF7j{&6rP-#c=7evGMt7B6`7HG|-(WL`bDUAjyn+k$mx$CH;q2Dz4x;cPP$hW=`pFfLO)!jaCL@V2+F)So3}vg|%O*^T1j>C2lx zsURO-zIJC$^$g2byVbRIo^w>UxK}74^TqUiRR#7s_X$e)$6iYG1(PcW7un-va-S&u zHk9-6Zn&>T==A)lM^D~bk{&rFzCi35>UR!ZjQkdSiNX*-;l4z9j*7|q`TBl~Au`5& z+c)*8?#-tgUR$Zd%Q3bs96w6k7q@#tUn`5rj+r@_sAVVLqco|6O{ILX&U-&-cbVa3 zY?ngHR@%l{;`ri%H*0EhBWrGjv!LE4db?HEWb5mu*t@{kv|XwK8?npOshmzf=vZA@ zVSN9sL~!sn?r(AK)Q7Jk2(|M67Uy3I{eRy z_l&Y@A>;vjkWN5I2xvFFTLX0i+`{qz7C_@bo`ZUzDugfq4+>a3?1v%)O+YTd6@Ul7 zAfLfm=nhZ`)P~&v90$&UcF+yXm9sq!qCx3^9gzIcO|Y(js^Fj)Rvq>nQAHI92ap=P z10A4@prk+AGWCb`2)dQYFuR$|H6iDE8p}9a?#nV2}LBCoCf(Xi2@szia7#gY>b|l!-U`c}@ zLdhvQjc!BdLJvYvzzzngnw51yRYCqh4}$oRCy-z|v3Hc*d|?^Wj=l~18*E~*cR_kU z{XsxM1i{V*4GujHQ3DBpl2w4FgFR48Nma@HPgnyKoIEY-MqmMeY=I<%oG~l!f<+FN z1ZY^;10j4M4#HYXP zw5eJpA_y(>uLQ~OucgxDLuf}fVs272FaMxhn4xnDGIyLXnw>Xsd^J8XhcWIwIoQ9} z%FoSJTAGW(SRGwJwb=@pY7r$uQRK3Zd~XbxU)ts!4XsJrCycrWSI?e!IqwqIR8+Jh zlRjZ`UO1I!BtJR_2~7AbkbSm%XQqxEPkz6BTGWx8e}nQ=w7bZ|eVP4?*Tb!$(R)iC z9)&%bS*u(lXqzitAN)Oo=&Ytn>%Hzjc<5liuPi>zC_nw;Z0AE3Y$Jao_Q90R-gl~5 z_xAb2J%eArrC1CN4G$}-zVvCqF1;H;abAu6G*+PDHSYFx@Tdbfox*uEd3}BUyYY-l zTfEsOqsi#f9^FoLO;ChK<554qkri&Av~SIM*{fEYRE?vH7pTAOmu2pz3X?Wn*!ROX ztd54huAk&mFBemMooL33RV-*1f0Q3_(7hl$<#*|WF9P!;r;4_+X~k~uKEqdzZ$5Al zV63XN@)j$FN#cCD;ek1R#l zv%pGrhB~KWgoCj%GT?%{@@o(AJGt*PG#l3i>lhmb_twKH^EYvacVY-6bsCl5*^~L0 zonm@lk2UvvTKr2RS%}T>^~EYqdL1q4nD%0n&Xqr^cK^`J5W;lRRB^R-O8b&HENO||mo0xaD+S=I8RTlIfVgqN@SXDr2&-)we--K7w= zJVU8?Z+7k9dy;s;^gDkQa`0nz6N{T?(A&Iz)2!DEecLyRa&FI!id#5Z7B*O2=PsR0 
zEvc|8{NS^)!d)MDX(97Xw}m&kEO@5jqRaDZ!+%`wYOI<23q|&js`&o4xvjP7D_xv@ z5hEwpsp{HezI9!~6O{~)lLR@oF7?J7i>1|5a~UuoN=q&6N}EJPV_GD`&M*v8Y`^2j zKII*d_@Fi$+i*YEW+Hbzn{iQk~yP z>7N{S4)r*!NwQ`(qcN#8SRQsNK6>{)X12nbF`*7#ecO7I)Q$uZsV+xS4E7aUn+U(K baj7?x%VD!5Cxk2YbYLNVeiXvvpMCWYo=by@ literal 0 HcmV?d00001 diff --git a/frontend/public/index.html b/frontend/public/index.html new file mode 100644 index 00000000..bc0e7679 --- /dev/null +++ b/frontend/public/index.html @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + <% if (process.env.NODE_ENV === 'production') { %> + + <% } %> + CTHUB + + + +
+ + + diff --git a/frontend/public/logo192.png b/frontend/public/logo192.png new file mode 100644 index 0000000000000000000000000000000000000000..fc44b0a3796c0e0a64c3d858ca038bd4570465d9 GIT binary patch literal 5347 zcmZWtbyO6NvR-oO24RV%BvuJ&=?+<7=`LvyB&A_#M7mSDYw1v6DJkiYl9XjT!%$dLEBTQ8R9|wd3008in6lFF3GV-6mLi?MoP_y~}QUnaDCHI#t z7w^m$@6DI)|C8_jrT?q=f8D?0AM?L)Z}xAo^e^W>t$*Y0KlT5=@bBjT9kxb%-KNdk zeOS1tKO#ChhG7%{ApNBzE2ZVNcxbrin#E1TiAw#BlUhXllzhN$qWez5l;h+t^q#Eav8PhR2|T}y5kkflaK`ba-eoE+Z2q@o6P$)=&` z+(8}+-McnNO>e#$Rr{32ngsZIAX>GH??tqgwUuUz6kjns|LjsB37zUEWd|(&O!)DY zQLrq%Y>)Y8G`yYbYCx&aVHi@-vZ3|ebG!f$sTQqMgi0hWRJ^Wc+Ibv!udh_r%2|U) zPi|E^PK?UE!>_4`f`1k4hqqj_$+d!EB_#IYt;f9)fBOumGNyglU(ofY`yHq4Y?B%- zp&G!MRY<~ajTgIHErMe(Z8JG*;D-PJhd@RX@QatggM7+G(Lz8eZ;73)72Hfx5KDOE zkT(m}i2;@X2AT5fW?qVp?@WgN$aT+f_6eo?IsLh;jscNRp|8H}Z9p_UBO^SJXpZew zEK8fz|0Th%(Wr|KZBGTM4yxkA5CFdAj8=QSrT$fKW#tweUFqr0TZ9D~a5lF{)%-tTGMK^2tz(y2v$i%V8XAxIywrZCp=)83p(zIk6@S5AWl|Oa2hF`~~^W zI;KeOSkw1O#TiQ8;U7OPXjZM|KrnN}9arP)m0v$c|L)lF`j_rpG(zW1Qjv$=^|p*f z>)Na{D&>n`jOWMwB^TM}slgTEcjxTlUby89j1)|6ydRfWERn3|7Zd2&e7?!K&5G$x z`5U3uFtn4~SZq|LjFVrz$3iln-+ucY4q$BC{CSm7Xe5c1J<=%Oagztj{ifpaZk_bQ z9Sb-LaQMKp-qJA*bP6DzgE3`}*i1o3GKmo2pn@dj0;He}F=BgINo};6gQF8!n0ULZ zL>kC0nPSFzlcB7p41doao2F7%6IUTi_+!L`MM4o*#Y#0v~WiO8uSeAUNp=vA2KaR&=jNR2iVwG>7t%sG2x_~yXzY)7K& zk3p+O0AFZ1eu^T3s};B%6TpJ6h-Y%B^*zT&SN7C=N;g|#dGIVMSOru3iv^SvO>h4M=t-N1GSLLDqVTcgurco6)3&XpU!FP6Hlrmj}f$ zp95;b)>M~`kxuZF3r~a!rMf4|&1=uMG$;h^g=Kl;H&Np-(pFT9FF@++MMEx3RBsK?AU0fPk-#mdR)Wdkj)`>ZMl#^<80kM87VvsI3r_c@_vX=fdQ`_9-d(xiI z4K;1y1TiPj_RPh*SpDI7U~^QQ?%0&!$Sh#?x_@;ag)P}ZkAik{_WPB4rHyW#%>|Gs zdbhyt=qQPA7`?h2_8T;-E6HI#im9K>au*(j4;kzwMSLgo6u*}-K`$_Gzgu&XE)udQ zmQ72^eZd|vzI)~!20JV-v-T|<4@7ruqrj|o4=JJPlybwMg;M$Ud7>h6g()CT@wXm` zbq=A(t;RJ^{Xxi*Ff~!|3!-l_PS{AyNAU~t{h;(N(PXMEf^R(B+ZVX3 z8y0;0A8hJYp@g+c*`>eTA|3Tgv9U8#BDTO9@a@gVMDxr(fVaEqL1tl?md{v^j8aUv zm&%PX4^|rX|?E4^CkplWWNv*OKM>DxPa z!RJ)U^0-WJMi)Ksc!^ixOtw^egoAZZ2Cg;X7(5xZG7yL_;UJ#yp*ZD-;I^Z9qkP`} zwCTs0*%rIVF1sgLervtnUo&brwz?6?PXRuOCS*JI-WL6GKy7-~yi0giTEMmDs_-UX zo=+nFrW_EfTg>oY72_4Z0*uG>MnXP=c0VpT&*|rvv1iStW;*^={rP1y?Hv+6R6bxFMkxpWkJ>m7Ba{>zc_q zEefC3jsXdyS5??Mz7IET$Kft|EMNJIv7Ny8ZOcKnzf`K5Cd)&`-fTY#W&jnV0l2vt z?Gqhic}l}mCv1yUEy$%DP}4AN;36$=7aNI^*AzV(eYGeJ(Px-j<^gSDp5dBAv2#?; zcMXv#aj>%;MiG^q^$0MSg-(uTl!xm49dH!{X0){Ew7ThWV~Gtj7h%ZD zVN-R-^7Cf0VH!8O)uUHPL2mO2tmE*cecwQv_5CzWeh)ykX8r5Hi`ehYo)d{Jnh&3p z9ndXT$OW51#H5cFKa76c<%nNkP~FU93b5h-|Cb}ScHs@4Q#|}byWg;KDMJ#|l zE=MKD*F@HDBcX@~QJH%56eh~jfPO-uKm}~t7VkHxHT;)4sd+?Wc4* z>CyR*{w@4(gnYRdFq=^(#-ytb^5ESD?x<0Skhb%Pt?npNW1m+Nv`tr9+qN<3H1f<% zZvNEqyK5FgPsQ`QIu9P0x_}wJR~^CotL|n zk?dn;tLRw9jJTur4uWoX6iMm914f0AJfB@C74a;_qRrAP4E7l890P&{v<}>_&GLrW z)klculcg`?zJO~4;BBAa=POU%aN|pmZJn2{hA!d!*lwO%YSIzv8bTJ}=nhC^n}g(ld^rn#kq9Z3)z`k9lvV>y#!F4e{5c$tnr9M{V)0m(Z< z#88vX6-AW7T2UUwW`g<;8I$Jb!R%z@rCcGT)-2k7&x9kZZT66}Ztid~6t0jKb&9mm zpa}LCb`bz`{MzpZR#E*QuBiZXI#<`5qxx=&LMr-UUf~@dRk}YI2hbMsAMWOmDzYtm zjof16D=mc`^B$+_bCG$$@R0t;e?~UkF?7<(vkb70*EQB1rfUWXh$j)R2)+dNAH5%R zEBs^?N;UMdy}V};59Gu#0$q53$}|+q7CIGg_w_WlvE}AdqoS<7DY1LWS9?TrfmcvT zaypmplwn=P4;a8-%l^e?f`OpGb}%(_mFsL&GywhyN(-VROj`4~V~9bGv%UhcA|YW% zs{;nh@aDX11y^HOFXB$a7#Sr3cEtNd4eLm@Y#fc&j)TGvbbMwze zXtekX_wJqxe4NhuW$r}cNy|L{V=t#$%SuWEW)YZTH|!iT79k#?632OFse{+BT_gau zJwQcbH{b}dzKO?^dV&3nTILYlGw{27UJ72ZN){BILd_HV_s$WfI2DC<9LIHFmtyw? 
zQ;?MuK7g%Ym+4e^W#5}WDLpko%jPOC=aN)3!=8)s#Rnercak&b3ESRX3z{xfKBF8L z5%CGkFmGO@x?_mPGlpEej!3!AMddChabyf~nJNZxx!D&{@xEb!TDyvqSj%Y5@A{}9 zRzoBn0?x}=krh{ok3Nn%e)#~uh;6jpezhA)ySb^b#E>73e*frBFu6IZ^D7Ii&rsiU z%jzygxT-n*joJpY4o&8UXr2s%j^Q{?e-voloX`4DQyEK+DmrZh8A$)iWL#NO9+Y@!sO2f@rI!@jN@>HOA< z?q2l{^%mY*PNx2FoX+A7X3N}(RV$B`g&N=e0uvAvEN1W^{*W?zT1i#fxuw10%~))J zjx#gxoVlXREWZf4hRkgdHx5V_S*;p-y%JtGgQ4}lnA~MBz-AFdxUxU1RIT$`sal|X zPB6sEVRjGbXIP0U+?rT|y5+ev&OMX*5C$n2SBPZr`jqzrmpVrNciR0e*Wm?fK6DY& zl(XQZ60yWXV-|Ps!A{EF;=_z(YAF=T(-MkJXUoX zI{UMQDAV2}Ya?EisdEW;@pE6dt;j0fg5oT2dxCi{wqWJ<)|SR6fxX~5CzblPGr8cb zUBVJ2CQd~3L?7yfTpLNbt)He1D>*KXI^GK%<`bq^cUq$Q@uJifG>p3LU(!H=C)aEL zenk7pVg}0{dKU}&l)Y2Y2eFMdS(JS0}oZUuVaf2+K*YFNGHB`^YGcIpnBlMhO7d4@vV zv(@N}(k#REdul8~fP+^F@ky*wt@~&|(&&meNO>rKDEnB{ykAZ}k>e@lad7to>Ao$B zz<1(L=#J*u4_LB=8w+*{KFK^u00NAmeNN7pr+Pf+N*Zl^dO{LM-hMHyP6N!~`24jd zXYP|Ze;dRXKdF2iJG$U{k=S86l@pytLx}$JFFs8e)*Vi?aVBtGJ3JZUj!~c{(rw5>vuRF$`^p!P8w1B=O!skwkO5yd4_XuG^QVF z`-r5K7(IPSiKQ2|U9+`@Js!g6sfJwAHVd|s?|mnC*q zp|B|z)(8+mxXyxQ{8Pg3F4|tdpgZZSoU4P&9I8)nHo1@)9_9u&NcT^FI)6|hsAZFk zZ+arl&@*>RXBf-OZxhZerOr&dN5LW9@gV=oGFbK*J+m#R-|e6(Loz(;g@T^*oO)0R zN`N=X46b{7yk5FZGr#5&n1!-@j@g02g|X>MOpF3#IjZ_4wg{dX+G9eqS+Es9@6nC7 zD9$NuVJI}6ZlwtUm5cCAiYv0(Yi{%eH+}t)!E^>^KxB5^L~a`4%1~5q6h>d;paC9c zTj0wTCKrhWf+F#5>EgX`sl%POl?oyCq0(w0xoL?L%)|Q7d|Hl92rUYAU#lc**I&^6p=4lNQPa0 znQ|A~i0ip@`B=FW-Q;zh?-wF;Wl5!+q3GXDu-x&}$gUO)NoO7^$BeEIrd~1Dh{Tr` z8s<(Bn@gZ(mkIGnmYh_ehXnq78QL$pNDi)|QcT*|GtS%nz1uKE+E{7jdEBp%h0}%r zD2|KmYGiPa4;md-t_m5YDz#c*oV_FqXd85d@eub?9N61QuYcb3CnVWpM(D-^|CmkL z(F}L&N7qhL2PCq)fRh}XO@U`Yn<?TNGR4L(mF7#4u29{i~@k;pLsgl({YW5`Mo+p=zZn3L*4{JU;++dG9 X@eDJUQo;Ye2mwlRs?y0|+_a0zY+Zo%Dkae}+MySoIppb75o?vUW_?)>@g{U2`ERQIXV zeY$JrWnMZ$QC<=ii4X|@0H8`si75jB(ElJb00HAB%>SlLR{!zO|C9P3zxw_U8?1d8uRZ=({Ga4shyN}3 zAK}WA(ds|``G4jA)9}Bt2Hy0+f3rV1E6b|@?hpGA=PI&r8)ah|)I2s(P5Ic*Ndhn^ z*T&j@gbCTv7+8rpYbR^Ty}1AY)YH;p!m948r#%7x^Z@_-w{pDl|1S4`EM3n_PaXvK z1JF)E3qy$qTj5Xs{jU9k=y%SQ0>8E$;x?p9ayU0bZZeo{5Z@&FKX>}s!0+^>C^D#z z>xsCPvxD3Z=dP}TTOSJhNTPyVt14VCQ9MQFN`rn!c&_p?&4<5_PGm4a;WS&1(!qKE z_H$;dDdiPQ!F_gsN`2>`X}$I=B;={R8%L~`>RyKcS$72ai$!2>d(YkciA^J0@X%G4 z4cu!%Ps~2JuJ8ex`&;Fa0NQOq_nDZ&X;^A=oc1&f#3P1(!5il>6?uK4QpEG8z0Rhu zvBJ+A9RV?z%v?!$=(vcH?*;vRs*+PPbOQ3cdPr5=tOcLqmfx@#hOqX0iN)wTTO21jH<>jpmwRIAGw7`a|sl?9y9zRBh>(_%| zF?h|P7}~RKj?HR+q|4U`CjRmV-$mLW>MScKnNXiv{vD3&2@*u)-6P@h0A`eeZ7}71 zK(w%@R<4lLt`O7fs1E)$5iGb~fPfJ?WxhY7c3Q>T-w#wT&zW522pH-B%r5v#5y^CF zcC30Se|`D2mY$hAlIULL%-PNXgbbpRHgn<&X3N9W!@BUk@9g*P5mz-YnZBb*-$zMM z7Qq}ic0mR8n{^L|=+diODdV}Q!gwr?y+2m=3HWwMq4z)DqYVg0J~^}-%7rMR@S1;9 z7GFj6K}i32X;3*$SmzB&HW{PJ55kT+EI#SsZf}bD7nW^Haf}_gXciYKX{QBxIPSx2Ma? 
zHQqgzZq!_{&zg{yxqv3xq8YV+`S}F6A>Gtl39_m;K4dA{pP$BW0oIXJ>jEQ!2V3A2 zdpoTxG&V=(?^q?ZTj2ZUpDUdMb)T?E$}CI>r@}PFPWD9@*%V6;4Ag>D#h>!s)=$0R zRXvdkZ%|c}ubej`jl?cS$onl9Tw52rBKT)kgyw~Xy%z62Lr%V6Y=f?2)J|bZJ5(Wx zmji`O;_B+*X@qe-#~`HFP<{8$w@z4@&`q^Q-Zk8JG3>WalhnW1cvnoVw>*R@c&|o8 zZ%w!{Z+MHeZ*OE4v*otkZqz11*s!#s^Gq>+o`8Z5 z^i-qzJLJh9!W-;SmFkR8HEZJWiXk$40i6)7 zZpr=k2lp}SasbM*Nbn3j$sn0;rUI;%EDbi7T1ZI4qL6PNNM2Y%6{LMIKW+FY_yF3) zSKQ2QSujzNMSL2r&bYs`|i2Dnn z=>}c0>a}>|uT!IiMOA~pVT~R@bGlm}Edf}Kq0?*Af6#mW9f9!}RjW7om0c9Qlp;yK z)=XQs(|6GCadQbWIhYF=rf{Y)sj%^Id-ARO0=O^Ad;Ph+ z0?$eE1xhH?{T$QI>0JP75`r)U_$#%K1^BQ8z#uciKf(C701&RyLQWBUp*Q7eyn76} z6JHpC9}R$J#(R0cDCkXoFSp;j6{x{b&0yE@P7{;pCEpKjS(+1RQy38`=&Yxo%F=3y zCPeefABp34U-s?WmU#JJw23dcC{sPPFc2#J$ZgEN%zod}J~8dLm*fx9f6SpO zn^Ww3bt9-r0XaT2a@Wpw;C23XM}7_14#%QpubrIw5aZtP+CqIFmsG4`Cm6rfxl9n5 z7=r2C-+lM2AB9X0T_`?EW&Byv&K?HS4QLoylJ|OAF z`8atBNTzJ&AQ!>sOo$?^0xj~D(;kS$`9zbEGd>f6r`NC3X`tX)sWgWUUOQ7w=$TO&*j;=u%25ay-%>3@81tGe^_z*C7pb9y*Ed^H3t$BIKH2o+olp#$q;)_ zfpjCb_^VFg5fU~K)nf*d*r@BCC>UZ!0&b?AGk_jTPXaSnCuW110wjHPPe^9R^;jo3 zwvzTl)C`Zl5}O2}3lec=hZ*$JnkW#7enKKc)(pM${_$9Hc=Sr_A9Biwe*Y=T?~1CK z6eZ9uPICjy-sMGbZl$yQmpB&`ouS8v{58__t0$JP%i3R&%QR3ianbZqDs<2#5FdN@n5bCn^ZtH992~5k(eA|8|@G9u`wdn7bnpg|@{m z^d6Y`*$Zf2Xr&|g%sai#5}Syvv(>Jnx&EM7-|Jr7!M~zdAyjt*xl;OLhvW-a%H1m0 z*x5*nb=R5u><7lyVpNAR?q@1U59 zO+)QWwL8t zyip?u_nI+K$uh{y)~}qj?(w0&=SE^8`_WMM zTybjG=999h38Yes7}-4*LJ7H)UE8{mE(6;8voE+TYY%33A>S6`G_95^5QHNTo_;Ao ztIQIZ_}49%{8|=O;isBZ?=7kfdF8_@azfoTd+hEJKWE!)$)N%HIe2cplaK`ry#=pV z0q{9w-`i0h@!R8K3GC{ivt{70IWG`EP|(1g7i_Q<>aEAT{5(yD z=!O?kq61VegV+st@XCw475j6vS)_z@efuqQgHQR1T4;|-#OLZNQJPV4k$AX1Uk8Lm z{N*b*ia=I+MB}kWpupJ~>!C@xEN#Wa7V+7{m4j8c?)ChV=D?o~sjT?0C_AQ7B-vxqX30s0I_`2$in86#`mAsT-w?j{&AL@B3$;P z31G4(lV|b}uSDCIrjk+M1R!X7s4Aabn<)zpgT}#gE|mIvV38^ODy@<&yflpCwS#fRf9ZX3lPV_?8@C5)A;T zqmouFLFk;qIs4rA=hh=GL~sCFsXHsqO6_y~*AFt939UYVBSx1s(=Kb&5;j7cSowdE;7()CC2|-i9Zz+_BIw8#ll~-tyH?F3{%`QCsYa*b#s*9iCc`1P1oC26?`g<9))EJ3%xz+O!B3 zZ7$j~To)C@PquR>a1+Dh>-a%IvH_Y7^ys|4o?E%3`I&ADXfC8++hAdZfzIT#%C+Jz z1lU~K_vAm0m8Qk}K$F>|>RPK%<1SI0(G+8q~H zAsjezyP+u!Se4q3GW)`h`NPSRlMoBjCzNPesWJwVTY!o@G8=(6I%4XHGaSiS3MEBK zhgGFv6Jc>L$4jVE!I?TQuwvz_%CyO!bLh94nqK11C2W$*aa2ueGopG8DnBICVUORP zgytv#)49fVXDaR$SukloYC3u7#5H)}1K21=?DKj^U)8G;MS)&Op)g^zR2($<>C*zW z;X7`hLxiIO#J`ANdyAOJle4V%ppa*(+0i3w;8i*BA_;u8gOO6)MY`ueq7stBMJTB; z-a0R>hT*}>z|Gg}@^zDL1MrH+2hsR8 zHc}*9IvuQC^Ju)^#Y{fOr(96rQNPNhxc;mH@W*m206>Lo<*SaaH?~8zg&f&%YiOEG zGiz?*CP>Bci}!WiS=zj#K5I}>DtpregpP_tfZtPa(N<%vo^#WCQ5BTv0vr%Z{)0q+ z)RbfHktUm|lg&U3YM%lMUM(fu}i#kjX9h>GYctkx9Mt_8{@s%!K_EI zScgwy6%_fR?CGJQtmgNAj^h9B#zmaMDWgH55pGuY1Gv7D z;8Psm(vEPiwn#MgJYu4Ty9D|h!?Rj0ddE|&L3S{IP%H4^N!m`60ZwZw^;eg4sk6K{ ziA^`Sbl_4~f&Oo%n;8Ye(tiAdlZKI!Z=|j$5hS|D$bDJ}p{gh$KN&JZYLUjv4h{NY zBJ>X9z!xfDGY z+oh_Z&_e#Q(-}>ssZfm=j$D&4W4FNy&-kAO1~#3Im;F)Nwe{(*75(p=P^VI?X0GFakfh+X-px4a%Uw@fSbmp9hM1_~R>?Z8+ ziy|e9>8V*`OP}4x5JjdWp}7eX;lVxp5qS}0YZek;SNmm7tEeSF*-dI)6U-A%m6YvCgM(}_=k#a6o^%-K4{`B1+}O4x zztDT%hVb;v#?j`lTvlFQ3aV#zkX=7;YFLS$uIzb0E3lozs5`Xy zi~vF+%{z9uLjKvKPhP%x5f~7-Gj+%5N`%^=yk*Qn{`> z;xj&ROY6g`iy2a@{O)V(jk&8#hHACVDXey5a+KDod_Z&}kHM}xt7}Md@pil{2x7E~ zL$k^d2@Ec2XskjrN+IILw;#7((abu;OJii&v3?60x>d_Ma(onIPtcVnX@ELF0aL?T zSmWiL3(dOFkt!x=1O!_0n(cAzZW+3nHJ{2S>tgSK?~cFha^y(l@-Mr2W$%MN{#af8J;V*>hdq!gx=d0h$T7l}>91Wh07)9CTX zh2_ZdQCyFOQ)l(}gft0UZG`Sh2`x-w`5vC2UD}lZs*5 zG76$akzn}Xi))L3oGJ75#pcN=cX3!=57$Ha=hQ2^lwdyU#a}4JJOz6ddR%zae%#4& za)bFj)z=YQela(F#Y|Q#dp}PJghITwXouVaMq$BM?K%cXn9^Y@g43$=O)F&ZlOUom zJiad#dea;-eywBA@e&D6Pdso1?2^(pXiN91?jvcaUyYoKUmvl5G9e$W!okWe*@a<^ z8cQQ6cNSf+UPDx%?_G4aIiybZHHagF{;IcD(dPO!#=u 
zWfqLcPc^+7Uu#l(Bpxft{*4lv#*u7X9AOzDO z1D9?^jIo}?%iz(_dwLa{ex#T}76ZfN_Z-hwpus9y+4xaUu9cX}&P{XrZVWE{1^0yw zO;YhLEW!pJcbCt3L8~a7>jsaN{V3>tz6_7`&pi%GxZ=V3?3K^U+*ryLSb)8^IblJ0 zSRLNDvIxt)S}g30?s_3NX>F?NKIGrG_zB9@Z>uSW3k2es_H2kU;Rnn%j5qP)!XHKE zPB2mHP~tLCg4K_vH$xv`HbRsJwbZMUV(t=ez;Ec(vyHH)FbfLg`c61I$W_uBB>i^r z&{_P;369-&>23R%qNIULe=1~T$(DA`ev*EWZ6j(B$(te}x1WvmIll21zvygkS%vwG zzkR6Z#RKA2!z!C%M!O>!=Gr0(J0FP=-MN=5t-Ir)of50y10W}j`GtRCsXBakrKtG& zazmITDJMA0C51&BnLY)SY9r)NVTMs);1<=oosS9g31l{4ztjD3#+2H7u_|66b|_*O z;Qk6nalpqdHOjx|K&vUS_6ITgGll;TdaN*ta=M_YtyC)I9Tmr~VaPrH2qb6sd~=AcIxV+%z{E&0@y=DPArw zdV7z(G1hBx7hd{>(cr43^WF%4Y@PXZ?wPpj{OQ#tvc$pABJbvPGvdR`cAtHn)cSEV zrpu}1tJwQ3y!mSmH*uz*x0o|CS<^w%&KJzsj~DU0cLQUxk5B!hWE>aBkjJle8z~;s z-!A=($+}Jq_BTK5^B!`R>!MulZN)F=iXXeUd0w5lUsE5VP*H*oCy(;?S$p*TVvTxwAeWFB$jHyb0593)$zqalVlDX=GcCN1gU0 zlgU)I$LcXZ8Oyc2TZYTPu@-;7<4YYB-``Qa;IDcvydIA$%kHhJKV^m*-zxcvU4viy&Kr5GVM{IT>WRywKQ9;>SEiQD*NqplK-KK4YR`p0@JW)n_{TU3bt0 zim%;(m1=#v2}zTps=?fU5w^(*y)xT%1vtQH&}50ZF!9YxW=&7*W($2kgKyz1mUgfs zfV<*XVVIFnohW=|j+@Kfo!#liQR^x>2yQdrG;2o8WZR+XzU_nG=Ed2rK?ntA;K5B{ z>M8+*A4!Jm^Bg}aW?R?6;@QG@uQ8&oJ{hFixcfEnJ4QH?A4>P=q29oDGW;L;= z9-a0;g%c`C+Ai!UmK$NC*4#;Jp<1=TioL=t^YM)<<%u#hnnfSS`nq63QKGO1L8RzX z@MFDqs1z ztYmxDl@LU)5acvHk)~Z`RW7=aJ_nGD!mOSYD>5Odjn@TK#LY{jf?+piB5AM-CAoT_ z?S-*q7}wyLJzK>N%eMPuFgN)Q_otKP;aqy=D5f!7<=n(lNkYRXVpkB{TAYLYg{|(jtRqYmg$xH zjmq?B(RE4 zQx^~Pt}gxC2~l=K$$-sYy_r$CO(d=+b3H1MB*y_5g6WLaWTXn+TKQ|hNY^>Mp6k*$ zwkovomhu776vQATqT4blf~g;TY(MWCrf^^yfWJvSAB$p5l;jm@o#=!lqw+Lqfq>X= z$6~kxfm7`3q4zUEB;u4qa#BdJxO!;xGm)wwuisj{0y2x{R(IGMrsIzDY9LW>m!Y`= z04sx3IjnYvL<4JqxQ8f7qYd0s2Ig%`ytYPEMKI)s(LD}D@EY>x`VFtqvnADNBdeao zC96X+MxnwKmjpg{U&gP3HE}1=s!lv&D{6(g_lzyF3A`7Jn*&d_kL<;dAFx!UZ>hB8 z5A*%LsAn;VLp>3${0>M?PSQ)9s3}|h2e?TG4_F{}{Cs>#3Q*t$(CUc}M)I}8cPF6% z=+h(Kh^8)}gj(0}#e7O^FQ6`~fd1#8#!}LMuo3A0bN`o}PYsm!Y}sdOz$+Tegc=qT z8x`PH$7lvnhJp{kHWb22l;@7B7|4yL4UOOVM0MP_>P%S1Lnid)+k9{+3D+JFa#Pyf zhVc#&df87APl4W9X)F3pGS>@etfl=_E5tBcVoOfrD4hmVeTY-cj((pkn%n@EgN{0f zwb_^Rk0I#iZuHK!l*lN`ceJn(sI{$Fq6nN& zE<-=0_2WN}m+*ivmIOxB@#~Q-cZ>l136w{#TIJe478`KE7@=a{>SzPHsKLzYAyBQO zAtuuF$-JSDy_S@6GW0MOE~R)b;+0f%_NMrW(+V#c_d&U8Z9+ec4=HmOHw?gdjF(Lu zzra83M_BoO-1b3;9`%&DHfuUY)6YDV21P$C!Rc?mv&{lx#f8oc6?0?x zK08{WP65?#>(vPfA-c=MCY|%*1_<3D4NX zeVTi-JGl2uP_2@0F{G({pxQOXt_d{g_CV6b?jNpfUG9;8yle-^4KHRvZs-_2siata zt+d_T@U$&t*xaD22(fH(W1r$Mo?3dc%Tncm=C6{V9y{v&VT#^1L04vDrLM9qBoZ4@ z6DBN#m57hX7$C(=#$Y5$bJmwA$T8jKD8+6A!-IJwA{WOfs%s}yxUw^?MRZjF$n_KN z6`_bGXcmE#5e4Ym)aQJ)xg3Pg0@k`iGuHe?f(5LtuzSq=nS^5z>vqU0EuZ&75V%Z{ zYyhRLN^)$c6Ds{f7*FBpE;n5iglx5PkHfWrj3`x^j^t z7ntuV`g!9Xg#^3!x)l*}IW=(Tz3>Y5l4uGaB&lz{GDjm2D5S$CExLT`I1#n^lBH7Y zDgpMag@`iETKAI=p<5E#LTkwzVR@=yY|uBVI1HG|8h+d;G-qfuj}-ZR6fN>EfCCW z9~wRQoAPEa#aO?3h?x{YvV*d+NtPkf&4V0k4|L=uj!U{L+oLa(z#&iuhJr3-PjO3R z5s?=nn_5^*^Rawr>>Nr@K(jwkB#JK-=+HqwfdO<+P5byeim)wvqGlP-P|~Nse8=XF zz`?RYB|D6SwS}C+YQv+;}k6$-%D(@+t14BL@vM z2q%q?f6D-A5s$_WY3{^G0F131bbh|g!}#BKw=HQ7mx;Dzg4Z*bTLQSfo{ed{4}NZW zfrRm^Ca$rlE{Ue~uYv>R9{3smwATcdM_6+yWIO z*ZRH~uXE@#p$XTbCt5j7j2=86e{9>HIB6xDzV+vAo&B?KUiMP|ttOElepnl%|DPqL b{|{}U^kRn2wo}j7|0ATu<;8xA7zX}7|B6mN literal 0 HcmV?d00001 diff --git a/frontend/public/manifest.json b/frontend/public/manifest.json new file mode 100644 index 00000000..080d6c77 --- /dev/null +++ b/frontend/public/manifest.json @@ -0,0 +1,25 @@ +{ + "short_name": "React App", + "name": "Create React App Sample", + "icons": [ + { + "src": "favicon.ico", + "sizes": "64x64 32x32 24x24 16x16", + "type": "image/x-icon" + }, + { + "src": "logo192.png", + "type": "image/png", + "sizes": "192x192" + }, + { + "src": 
"logo512.png", + "type": "image/png", + "sizes": "512x512" + } + ], + "start_url": ".", + "display": "standalone", + "theme_color": "#000000", + "background_color": "#ffffff" +} diff --git a/frontend/public/robots.txt b/frontend/public/robots.txt new file mode 100644 index 00000000..e9e57dc4 --- /dev/null +++ b/frontend/public/robots.txt @@ -0,0 +1,3 @@ +# https://www.robotstxt.org/robotstxt.html +User-agent: * +Disallow: diff --git a/react/src/app/components/AlertDialog.js b/frontend/src/app/components/AlertDialog.js similarity index 100% rename from react/src/app/components/AlertDialog.js rename to frontend/src/app/components/AlertDialog.js diff --git a/react/src/App.js b/frontend/src/app/components/App.js similarity index 63% rename from react/src/App.js rename to frontend/src/app/components/App.js index 6ca2838f..3153bd5d 100644 --- a/react/src/App.js +++ b/frontend/src/app/components/App.js @@ -1,4 +1,3 @@ -import 'regenerator-runtime/runtime'; import React from 'react'; import { BrowserRouter as Router, @@ -6,15 +5,13 @@ import { Switch, } from 'react-router-dom'; -import settings from './app/settings'; -import IcbcDataRouter from './icbc_data/router'; -import UploadRouter from './uploads/router'; -import DashboardContainer from './dashboard/DashboardContainer'; -import useKeycloak from './app/utilities/useKeycloak' -import Login from './Login'; -import Layout from './app/components/Layout' - -const { ENABLE_KEYCLOAK } = settings; +import IcbcDataRouter from '../../icbc_data/router'; +import UploadRouter from '../../uploads/router'; +import DashboardContainer from '../../dashboard/DashboardContainer'; +import useKeycloak from '../utilities/useKeycloak' +import Login from './Login' +import Layout from './Layout' +import { ENABLE_KEYCLOAK } from '../../config'; const App = () => { const keycloak = useKeycloak() diff --git a/react/src/app/components/Footer.js b/frontend/src/app/components/Footer.js similarity index 100% rename from react/src/app/components/Footer.js rename to frontend/src/app/components/Footer.js diff --git a/react/src/app/components/Header.js b/frontend/src/app/components/Header.js similarity index 100% rename from react/src/app/components/Header.js rename to frontend/src/app/components/Header.js diff --git a/react/src/app/components/KeycloakProvider.js b/frontend/src/app/components/KeycloakProvider.js similarity index 72% rename from react/src/app/components/KeycloakProvider.js rename to frontend/src/app/components/KeycloakProvider.js index 6c9aba2b..269213e6 100644 --- a/react/src/app/components/KeycloakProvider.js +++ b/frontend/src/app/components/KeycloakProvider.js @@ -1,20 +1,19 @@ import React, { useState, useEffect } from 'react' import { KeycloakContext } from '../../contexts' -import settings from '../settings' +import { ENABLE_KEYCLOAK } from '../../config' const KeycloakProvider = ({authClient, initOptions, LoadingComponent, children}) => { - const keycloakEnabled = settings.ENABLE_KEYCLOAK - const [loading, setLoading] = useState(keycloakEnabled ? true : false) + const [loading, setLoading] = useState(ENABLE_KEYCLOAK ? 
true : false) const [keycloak, setKeycloak] = useState({}) useEffect(() => { - if (keycloakEnabled) { + if (ENABLE_KEYCLOAK) { authClient.init(initOptions).then(() => { setKeycloak(authClient) setLoading(false) }) } - }, [keycloakEnabled, authClient, initOptions]) + }, [authClient, initOptions]) if (loading) { return diff --git a/react/src/app/components/Layout.js b/frontend/src/app/components/Layout.js similarity index 100% rename from react/src/app/components/Layout.js rename to frontend/src/app/components/Layout.js diff --git a/react/src/app/components/Loading.js b/frontend/src/app/components/Loading.js similarity index 100% rename from react/src/app/components/Loading.js rename to frontend/src/app/components/Loading.js diff --git a/react/src/Login.js b/frontend/src/app/components/Login.js similarity index 94% rename from react/src/Login.js rename to frontend/src/app/components/Login.js index fa4cfae3..3e7f6f3f 100644 --- a/react/src/Login.js +++ b/frontend/src/app/components/Login.js @@ -1,6 +1,6 @@ import React from 'react'; import PropTypes from 'prop-types'; -import useKeycloak from './app/utilities/useKeycloak'; +import useKeycloak from '../../app/utilities/useKeycloak'; const Login = (props) => { const { redirectUri } = props; diff --git a/react/src/app/components/Logout.js b/frontend/src/app/components/Logout.js similarity index 100% rename from react/src/app/components/Logout.js rename to frontend/src/app/components/Logout.js diff --git a/react/src/app/components/ReactTable.js b/frontend/src/app/components/ReactTable.js similarity index 100% rename from react/src/app/components/ReactTable.js rename to frontend/src/app/components/ReactTable.js diff --git a/react/src/app/components/ReactTablePagination.js b/frontend/src/app/components/ReactTablePagination.js similarity index 100% rename from react/src/app/components/ReactTablePagination.js rename to frontend/src/app/components/ReactTablePagination.js diff --git a/react/src/app/static/logo.png b/frontend/src/app/static/logo.png similarity index 100% rename from react/src/app/static/logo.png rename to frontend/src/app/static/logo.png diff --git a/react/src/app/styles/App.scss b/frontend/src/app/styles/App.scss similarity index 100% rename from react/src/app/styles/App.scss rename to frontend/src/app/styles/App.scss diff --git a/react/src/app/styles/FileUpload.scss b/frontend/src/app/styles/FileUpload.scss similarity index 100% rename from react/src/app/styles/FileUpload.scss rename to frontend/src/app/styles/FileUpload.scss diff --git a/react/src/app/styles/Footer.scss b/frontend/src/app/styles/Footer.scss similarity index 100% rename from react/src/app/styles/Footer.scss rename to frontend/src/app/styles/Footer.scss diff --git a/react/src/app/styles/Header.scss b/frontend/src/app/styles/Header.scss similarity index 100% rename from react/src/app/styles/Header.scss rename to frontend/src/app/styles/Header.scss diff --git a/react/src/app/styles/Login.scss b/frontend/src/app/styles/Login.scss similarity index 100% rename from react/src/app/styles/Login.scss rename to frontend/src/app/styles/Login.scss diff --git a/react/src/app/styles/ReactTable.scss b/frontend/src/app/styles/ReactTable.scss similarity index 100% rename from react/src/app/styles/ReactTable.scss rename to frontend/src/app/styles/ReactTable.scss diff --git a/react/src/app/styles/Roboto.scss b/frontend/src/app/styles/Roboto.scss similarity index 100% rename from react/src/app/styles/Roboto.scss rename to frontend/src/app/styles/Roboto.scss diff --git 
a/react/src/app/styles/Users.scss b/frontend/src/app/styles/Users.scss similarity index 100% rename from react/src/app/styles/Users.scss rename to frontend/src/app/styles/Users.scss diff --git a/react/src/app/styles/fonts/LICENSE.txt b/frontend/src/app/styles/fonts/LICENSE.txt similarity index 100% rename from react/src/app/styles/fonts/LICENSE.txt rename to frontend/src/app/styles/fonts/LICENSE.txt diff --git a/react/src/app/styles/fonts/Roboto-Bold.ttf b/frontend/src/app/styles/fonts/Roboto-Bold.ttf similarity index 100% rename from react/src/app/styles/fonts/Roboto-Bold.ttf rename to frontend/src/app/styles/fonts/Roboto-Bold.ttf diff --git a/react/src/app/styles/fonts/Roboto-BoldItalic.ttf b/frontend/src/app/styles/fonts/Roboto-BoldItalic.ttf similarity index 100% rename from react/src/app/styles/fonts/Roboto-BoldItalic.ttf rename to frontend/src/app/styles/fonts/Roboto-BoldItalic.ttf diff --git a/react/src/app/styles/fonts/Roboto-Italic.ttf b/frontend/src/app/styles/fonts/Roboto-Italic.ttf similarity index 100% rename from react/src/app/styles/fonts/Roboto-Italic.ttf rename to frontend/src/app/styles/fonts/Roboto-Italic.ttf diff --git a/react/src/app/styles/fonts/Roboto-Regular.ttf b/frontend/src/app/styles/fonts/Roboto-Regular.ttf similarity index 100% rename from react/src/app/styles/fonts/Roboto-Regular.ttf rename to frontend/src/app/styles/fonts/Roboto-Regular.ttf diff --git a/react/src/app/styles/images/BCID_H_rgb_pos.png b/frontend/src/app/styles/images/BCID_H_rgb_pos.png similarity index 100% rename from react/src/app/styles/images/BCID_H_rgb_pos.png rename to frontend/src/app/styles/images/BCID_H_rgb_pos.png diff --git a/react/src/app/styles/images/BCID_H_rgb_rev.png b/frontend/src/app/styles/images/BCID_H_rgb_rev.png similarity index 100% rename from react/src/app/styles/images/BCID_H_rgb_rev.png rename to frontend/src/app/styles/images/BCID_H_rgb_rev.png diff --git a/react/src/app/styles/images/BCID_V_rgb_pos.png b/frontend/src/app/styles/images/BCID_V_rgb_pos.png similarity index 100% rename from react/src/app/styles/images/BCID_V_rgb_pos.png rename to frontend/src/app/styles/images/BCID_V_rgb_pos.png diff --git a/react/src/app/styles/images/tof.jpg b/frontend/src/app/styles/images/tof.jpg similarity index 100% rename from react/src/app/styles/images/tof.jpg rename to frontend/src/app/styles/images/tof.jpg diff --git a/react/src/app/styles/index.scss b/frontend/src/app/styles/index.scss similarity index 100% rename from react/src/app/styles/index.scss rename to frontend/src/app/styles/index.scss diff --git a/react/src/app/styles/variables.scss b/frontend/src/app/styles/variables.scss similarity index 100% rename from react/src/app/styles/variables.scss rename to frontend/src/app/styles/variables.scss diff --git a/react/src/app/utilities/getFileSize.js b/frontend/src/app/utilities/getFileSize.js similarity index 100% rename from react/src/app/utilities/getFileSize.js rename to frontend/src/app/utilities/getFileSize.js diff --git a/react/src/app/utilities/props.js b/frontend/src/app/utilities/props.js similarity index 100% rename from react/src/app/utilities/props.js rename to frontend/src/app/utilities/props.js diff --git a/react/src/app/utilities/reactTable.js b/frontend/src/app/utilities/reactTable.js similarity index 100% rename from react/src/app/utilities/reactTable.js rename to frontend/src/app/utilities/reactTable.js diff --git a/react/src/app/utilities/useAxios.js b/frontend/src/app/utilities/useAxios.js similarity index 90% rename from 
react/src/app/utilities/useAxios.js rename to frontend/src/app/utilities/useAxios.js index 307a1275..1b932994 100644 --- a/react/src/app/utilities/useAxios.js +++ b/frontend/src/app/utilities/useAxios.js @@ -1,14 +1,14 @@ import axios from 'axios' -import settings from '../settings'; import useKeycloak from './useKeycloak' +import { API_BASE } from '../../config'; const useAxios = (useDefault = false, opts = {}) => { + const keycloak = useKeycloak() if (useDefault) { return axios.create(opts) } - const keycloak = useKeycloak() const instance = axios.create({ - baseURL: settings.API_BASE, + baseURL: API_BASE, ...opts, }) instance.interceptors.request.use(async (config) => { diff --git a/react/src/app/utilities/useKeycloak.js b/frontend/src/app/utilities/useKeycloak.js similarity index 100% rename from react/src/app/utilities/useKeycloak.js rename to frontend/src/app/utilities/useKeycloak.js diff --git a/frontend/src/config.js b/frontend/src/config.js new file mode 100644 index 00000000..5985d014 --- /dev/null +++ b/frontend/src/config.js @@ -0,0 +1,19 @@ +export const ENABLE_KEYCLOAK = window.cthub_config + ? window.cthub_config.REACT_APP_ENABLE_KEYCLOAK + : process.env.REACT_APP_ENABLE_KEYCLOAK; + +export const API_BASE = window.cthub_config + ? window.cthub_config.REACT_APP_API_BASE + : process.env.REACT_APP_API_BASE; + +export const KEYCLOAK_CLIENT_ID = window.cthub_config + ? window.cthub_config.REACT_APP_KEYCLOAK_CLIENT_ID + : process.env.REACT_APP_KEYCLOAK_CLIENT_ID; + +export const KEYCLOAK_REALM = window.cthub_config + ? window.cthub_config.REACT_APP_KEYCLOAK_REALM + : process.env.REACT_APP_KEYCLOAK_REALM; + +export const KEYCLOAK_URL = window.cthub_config + ? window.cthub_config.REACT_APP_KEYCLOAK_URL + : process.env.REACT_APP_KEYCLOAK_URL; diff --git a/react/src/contexts.js b/frontend/src/contexts.js similarity index 100% rename from react/src/contexts.js rename to frontend/src/contexts.js diff --git a/react/src/dashboard/DashboardContainer.js b/frontend/src/dashboard/DashboardContainer.js similarity index 100% rename from react/src/dashboard/DashboardContainer.js rename to frontend/src/dashboard/DashboardContainer.js diff --git a/react/src/dashboard/router.js b/frontend/src/dashboard/router.js similarity index 100% rename from react/src/dashboard/router.js rename to frontend/src/dashboard/router.js diff --git a/react/src/icbc_data/IcbcDataContainer.js b/frontend/src/icbc_data/IcbcDataContainer.js similarity index 100% rename from react/src/icbc_data/IcbcDataContainer.js rename to frontend/src/icbc_data/IcbcDataContainer.js diff --git a/react/src/icbc_data/components/IcbcDataTable.js b/frontend/src/icbc_data/components/IcbcDataTable.js similarity index 100% rename from react/src/icbc_data/components/IcbcDataTable.js rename to frontend/src/icbc_data/components/IcbcDataTable.js diff --git a/react/src/icbc_data/router.js b/frontend/src/icbc_data/router.js similarity index 100% rename from react/src/icbc_data/router.js rename to frontend/src/icbc_data/router.js diff --git a/react/src/icbc_data/routes.js b/frontend/src/icbc_data/routes.js similarity index 100% rename from react/src/icbc_data/routes.js rename to frontend/src/icbc_data/routes.js diff --git a/react/src/index.js b/frontend/src/index.js similarity index 69% rename from react/src/index.js rename to frontend/src/index.js index 504adedd..667ea39e 100644 --- a/react/src/index.js +++ b/frontend/src/index.js @@ -3,12 +3,17 @@ import ReactDOM from 'react-dom' import Keycloak from 'keycloak-js'; import KeycloakProvider from 
'./app/components/KeycloakProvider'; -import App from './App'; +import App from './app/components/App'; import Loading from './app/components/Loading'; +import { KEYCLOAK_CLIENT_ID, KEYCLOAK_REALM, KEYCLOAK_URL } from './config'; import './app/styles/index.scss'; -const keycloak = new Keycloak() +const keycloak = new Keycloak({ + clientId: KEYCLOAK_CLIENT_ID, + realm: KEYCLOAK_REALM, + url: KEYCLOAK_URL +}) const keycloakInitOptions = { onLoad: 'check-sso', pkceMethod: 'S256' diff --git a/react/src/uploads/UploadContainer.js b/frontend/src/uploads/UploadContainer.js similarity index 100% rename from react/src/uploads/UploadContainer.js rename to frontend/src/uploads/UploadContainer.js diff --git a/react/src/uploads/components/FileDrop.js b/frontend/src/uploads/components/FileDrop.js similarity index 100% rename from react/src/uploads/components/FileDrop.js rename to frontend/src/uploads/components/FileDrop.js diff --git a/react/src/uploads/components/FileDropArea.js b/frontend/src/uploads/components/FileDropArea.js similarity index 100% rename from react/src/uploads/components/FileDropArea.js rename to frontend/src/uploads/components/FileDropArea.js diff --git a/react/src/uploads/components/UploadPage.js b/frontend/src/uploads/components/UploadPage.js similarity index 100% rename from react/src/uploads/components/UploadPage.js rename to frontend/src/uploads/components/UploadPage.js diff --git a/react/src/uploads/router.js b/frontend/src/uploads/router.js similarity index 100% rename from react/src/uploads/router.js rename to frontend/src/uploads/router.js diff --git a/react/src/uploads/routes.js b/frontend/src/uploads/routes.js similarity index 100% rename from react/src/uploads/routes.js rename to frontend/src/uploads/routes.js diff --git a/react/src/users/UsersContainer.js b/frontend/src/users/UsersContainer.js similarity index 100% rename from react/src/users/UsersContainer.js rename to frontend/src/users/UsersContainer.js diff --git a/react/src/users/components/UsersPage.js b/frontend/src/users/components/UsersPage.js similarity index 100% rename from react/src/users/components/UsersPage.js rename to frontend/src/users/components/UsersPage.js diff --git a/react/src/users/routes.js b/frontend/src/users/routes.js similarity index 100% rename from react/src/users/routes.js rename to frontend/src/users/routes.js diff --git a/react/.eslintrc.js b/react/.eslintrc.js deleted file mode 100644 index 9343ebd6..00000000 --- a/react/.eslintrc.js +++ /dev/null @@ -1,30 +0,0 @@ -module.exports = { - env: { - browser: true, - es2021: true, - }, - extends: [ - 'plugin:react/recommended', - 'airbnb', - ], - parserOptions: { - ecmaFeatures: { - jsx: true, - }, - ecmaVersion: 12, - sourceType: 'module', - }, - plugins: [ - 'react', - ], - rules: { - 'no-use-before-define': 'off', - 'linebreak-style': 0, - 'react/jsx-filename-extension': [1, { extensions: ['.js', '.jsx'] }], - 'react/jsx-props-no-spreading': [0, { - custom: 'ignore', - html: 'ignore', - explicitSpread: 'ignore', - }], - }, -}; diff --git a/react/README.md b/react/README.md deleted file mode 100644 index 9301f3b6..00000000 --- a/react/README.md +++ /dev/null @@ -1 +0,0 @@ -# Frontend \ No newline at end of file diff --git a/react/public/favicon.ico b/react/public/favicon.ico deleted file mode 100644 index 1841396d21ce974cdec16f7414c88683cd36af16..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 22486 
[22486 bytes of base85-encoded binary patch data omitted]

diff --git a/react/public/index.html b/react/public/index.html
deleted file mode 100644
index 1fc8b0df..00000000
--- a/react/public/index.html
+++ /dev/null
@@ -1,14 +0,0 @@
- - - - App - - - - - - -
- - - \ No newline at end of file diff --git a/react/src/app/settings.js b/react/src/app/settings.js deleted file mode 100644 index 0b1fd4cc..00000000 --- a/react/src/app/settings.js +++ /dev/null @@ -1,11 +0,0 @@ -/* global - __API_BASE__, - __ENABLE_KEYCLOAK__, -*/ - -const settings = { - API_BASE: __API_BASE__, - ENABLE_KEYCLOAK: __ENABLE_KEYCLOAK__, -}; - -export default settings; diff --git a/react/src/keycloak-source.json b/react/src/keycloak-source.json deleted file mode 100644 index 2659d6d3..00000000 --- a/react/src/keycloak-source.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "confidential-port": 0, - "auth-server-url": "https://dev.oidc.gov.bc.ca/auth/", - "realm": "onestopauth", - "ssl-required": "external", - "public-client": true, - "resource": "clean-transportation-data-hub-595" -} \ No newline at end of file diff --git a/react/start.js b/react/start.js deleted file mode 100644 index 6e828a8d..00000000 --- a/react/start.js +++ /dev/null @@ -1,32 +0,0 @@ -const Webpack = require('webpack'); -const WebpackDevServer = require('webpack-dev-server'); -const path = require('path'); - -const webpackConfig = require('./webpack.config'); - -const serverOptions = { - contentBase: path.join(__dirname, 'public'), - publicPath: '/', - index: '/index.html', - disableHostCheck: true, - historyApiFallback: { - verbose: true, - index: '/index.html', - rewrites: [{ - from: /\/api/, - to: '/api', - }], - }, - compress: true, - hot: true, - watchOptions: { - ignored: ['node_modules'], - poll: 1500, - }, -}; - -WebpackDevServer.addDevServerEntrypoints(webpackConfig, serverOptions); -const compiler = Webpack(webpackConfig); -const devServer = new WebpackDevServer(compiler, serverOptions); - -devServer.listen(3000, '0.0.0.0', () => {}); diff --git a/react/webpack.config.js b/react/webpack.config.js deleted file mode 100644 index eed6da6c..00000000 --- a/react/webpack.config.js +++ /dev/null @@ -1,114 +0,0 @@ -const Webpack = require('webpack'); -const path = require('path'); -const CopyWebpackPlugin = require('copy-webpack-plugin'); - -const isDevelopment = 'API_BASE' in process.env && process.env.API_BASE === 'http://localhost:8000'; - -module.exports = { - devServer: { - historyApiFallback: true, - hot: isDevelopment, - inline: isDevelopment - }, - devtool: 'source-map', - resolve: { - alias: { - process: 'process/browser', - }, - fallback: { - crypto: require.resolve('crypto-browserify'), - stream: require.resolve('stream-browserify'), - util: require.resolve('util/'), - }, - }, - entry: path.resolve(__dirname, 'src', 'index.js'), - mode: isDevelopment ? 
'development' : 'production', - module: { - rules: [{ - enforce: 'pre', - test: /\.js$/, - use: [{ - loader: 'source-map-loader', - options: { - filterSourceMappingUrl: (url, resourcePath) => { - if (resourcePath.includes('/node_modules/')) { - return false; - } - - return true; - }, - }, - }], - }, { - exclude: /node_modules/, - include: path.resolve(__dirname, 'src'), - test: /\.(jsx|js)$/, - use: [{ - loader: 'babel-loader', - options: { - presets: [ - ['@babel/preset-env', { - targets: 'defaults', - }], - '@babel/preset-react', - ], - }, - }], - }, { - test: /\.css$/i, - use: ['style-loader', 'css-loader'], - }, { - test: /\.s[ac]ss$/i, - use: ['style-loader', 'css-loader', 'sass-loader'], - }, { - test: /\.(otf|eot|svg|png|ttf|woff|jpe?g|woff2)$/i, - use: [{ - loader: 'url-loader', - }], - }], - }, - output: { - crossOriginLoading: 'anonymous', - filename: 'static/bundle.js', - path: path.resolve(__dirname, 'public'), - publicPath: '/', - }, - plugins: [ - new Webpack.ProvidePlugin({ - Buffer: ['buffer', 'Buffer'], - process: 'process/browser', - }), - new CopyWebpackPlugin({ - patterns: [{ - from: './src/keycloak-source.json', - to: './keycloak.json', - force: true, - transform(content) { - const keycloak = JSON.parse(content.toString()); - - if ('KEYCLOAK_CLIENT_ID' in process.env) { - keycloak.resource = process.env.KEYCLOAK_CLIENT_ID; - } - - if ('KEYCLOAK_REALM' in process.env) { - keycloak.realm = process.env.KEYCLOAK_REALM; - } - - if ('KEYCLOAK_URL' in process.env) { - keycloak['auth-server-url'] = process.env.KEYCLOAK_URL; - - if (process.env.KEYCLOAK_URL === 'http://keycloak:8080/auth/') { - keycloak['ssl-required'] = 'none'; - } - } - - return JSON.stringify(keycloak, null, 2); - }, - }], - }), - new Webpack.DefinePlugin({ - __API_BASE__: 'API_BASE' in process.env ? JSON.stringify(process.env.API_BASE) : JSON.stringify('/'), - __ENABLE_KEYCLOAK__: 'ENABLE_KEYCLOAK' in process.env ? 
process.env.ENABLE_KEYCLOAK === 'true' : false, - }), - ], -}; From 0ff41502144f46edea40332185d0409ad62853e4 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Fri, 22 Mar 2024 11:52:12 -0700 Subject: [PATCH 086/152] add --force for npm or it will fail --- frontend/Dockerfile-Openshift | 2 +- openshift/templates/frontend/frontend-bc-docker.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/frontend/Dockerfile-Openshift b/frontend/Dockerfile-Openshift index ed6deb62..f95636ce 100644 --- a/frontend/Dockerfile-Openshift +++ b/frontend/Dockerfile-Openshift @@ -10,7 +10,7 @@ FROM artifacts.developer.gov.bc.ca/docker-remote/node:16.13 as builder WORKDIR /usr/src/app COPY ./ ./ # RUN npm install -g npm@latest doesn't work for node 16 -RUN npm install +RUN npm install --force RUN pwd && \ ls -l && \ ls -l node_modules diff --git a/openshift/templates/frontend/frontend-bc-docker.yaml b/openshift/templates/frontend/frontend-bc-docker.yaml index 8b93b0dc..d20a9b33 100644 --- a/openshift/templates/frontend/frontend-bc-docker.yaml +++ b/openshift/templates/frontend/frontend-bc-docker.yaml @@ -65,7 +65,7 @@ objects: ref: ${GIT_REF} uri: ${GIT_URL} type: Git - contextDir: react + contextDir: frontend strategy: dockerStrategy: dockerfilePath: ./Dockerfile-Openshift From 50f1ffd7bf55bbb4981f145e0183a21a50581ef5 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Fri, 22 Mar 2024 12:16:57 -0700 Subject: [PATCH 087/152] use node 20 --- frontend/Dockerfile-Openshift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/Dockerfile-Openshift b/frontend/Dockerfile-Openshift index f95636ce..c58d43a9 100644 --- a/frontend/Dockerfile-Openshift +++ b/frontend/Dockerfile-Openshift @@ -6,7 +6,7 @@ # && npm install --omit=dev \ # && npm install -D webpack webpack-cli # RUN yes | npm run dist -FROM artifacts.developer.gov.bc.ca/docker-remote/node:16.13 as builder +FROM artifacts.developer.gov.bc.ca/docker-remote/node:20 as builder WORKDIR /usr/src/app COPY ./ ./ # RUN npm install -g npm@latest doesn't work for node 16 From b2a6368b270afaadd8d62687fe9fc8ff33acb9d2 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Fri, 22 Mar 2024 13:50:14 -0700 Subject: [PATCH 088/152] update oc-login --- .github/workflows/build-release.yaml | 12 ++++++------ .github/workflows/dev-ci.yaml | 2 +- .github/workflows/test-ci.yaml | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build-release.yaml b/.github/workflows/build-release.yaml index 130ff17d..1d73d2dd 100644 --- a/.github/workflows/build-release.yaml +++ b/.github/workflows/build-release.yaml @@ -3,8 +3,8 @@ name: CTHUB v0.2.0 on: - push: - branches: [ release-0.2.0 ] + # push: + # branches: [ release-0.2.0 ] workflow_dispatch: workflow_call: @@ -38,7 +38,7 @@ jobs: ## Log in to Openshift with a token of service account - name: Log in to Openshift - uses: redhat-actions/oc-login@v1.2 + uses: redhat-actions/oc-login@v1.3 with: openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} @@ -65,7 +65,7 @@ jobs: uses: actions/checkout@v3 - name: Log in to Openshift - uses: redhat-actions/oc-login@v1.2 + uses: redhat-actions/oc-login@v1.3 with: openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} @@ -92,7 +92,7 @@ jobs: uses: actions/checkout@v3 - name: Log in to Openshift - uses: redhat-actions/oc-login@v1.2 + uses: redhat-actions/oc-login@v1.3 with: openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} openshift_token: ${{ 
secrets.OPENSHIFT_TOKEN }}
@@ -127,7 +127,7 @@ jobs:
         uses: actions/checkout@v3

       - name: Log in to Openshift
-        uses: redhat-actions/oc-login@v1.2
+        uses: redhat-actions/oc-login@v1.3
         with:
           openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }}
           openshift_token: ${{ secrets.OPENSHIFT_TOKEN }}
diff --git a/.github/workflows/dev-ci.yaml b/.github/workflows/dev-ci.yaml
index bcc52797..1cad9e78 100644
--- a/.github/workflows/dev-ci.yaml
+++ b/.github/workflows/dev-ci.yaml
@@ -50,7 +50,7 @@ jobs:
         uses: actions/checkout@v4.1.1

       - name: Log in to Openshift
-        uses: redhat-actions/oc-login@v1.2
+        uses: redhat-actions/oc-login@v1.3
         with:
           openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }}
           openshift_token: ${{ secrets.OPENSHIFT_TOKEN }}
diff --git a/.github/workflows/test-ci.yaml b/.github/workflows/test-ci.yaml
index 51532e78..cb6f4a45 100644
--- a/.github/workflows/test-ci.yaml
+++ b/.github/workflows/test-ci.yaml
@@ -45,7 +45,7 @@ jobs:
         uses: actions/checkout@v4.1.1

       - name: Log in to Openshift
-        uses: redhat-actions/oc-login@v1.2
+        uses: redhat-actions/oc-login@v1.3
         with:
           openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }}
           openshift_token: ${{ secrets.OPENSHIFT_TOKEN }}

From b906d15ae5b530b9c2a1099808b82303ac2306e9 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Tue, 26 Mar 2024 11:35:41 -0700
Subject: [PATCH 089/152] add noCache forcePull for frontend build

---
 openshift/templates/frontend/frontend-bc-docker.yaml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/openshift/templates/frontend/frontend-bc-docker.yaml b/openshift/templates/frontend/frontend-bc-docker.yaml
index d20a9b33..50bb040b 100644
--- a/openshift/templates/frontend/frontend-bc-docker.yaml
+++ b/openshift/templates/frontend/frontend-bc-docker.yaml
@@ -80,7 +80,9 @@ objects:
             secretKeyRef:
               name: artifacts-default-idxprm
               key: password
-      type: Docker
+      type: Docker
+      noCache: true
+      forcePull: true
   successfulBuildsHistoryLimit: 5
   triggers:
   - imageChange: {}

From 7ac5ef1bf9b3f699760495b39a0a6811e9479f2f Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Tue, 26 Mar 2024 12:32:09 -0700
Subject: [PATCH 090/152] only build frontend

---
 .github/workflows/dev-ci.yaml | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/.github/workflows/dev-ci.yaml b/.github/workflows/dev-ci.yaml
index 1cad9e78..9c82116e 100644
--- a/.github/workflows/dev-ci.yaml
+++ b/.github/workflows/dev-ci.yaml
@@ -57,13 +57,13 @@ jobs:
           insecure_skip_tls_verify: true
           namespace: ${{ env.TOOLS_NAMESPACE }}

-      - name: Build CTHUB Backend
-        run: |
-          cd openshift/templates/backend
-          oc process -f ./backend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }}
-          sleep 5s
-          oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1
-          oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
+      # - name: Build CTHUB Backend
+      #   run: |
+      #     cd openshift/templates/backend
+      #     oc process -f ./backend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }}
+      #     sleep 5s
+      #     oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1
+      #     oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}

       - name: Build CTHUB Frontend
         run: |
@@ -97,10 +97,10 @@ jobs:
         with:
           cmd: yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml

-      - name: Update backend tag
-        uses: mikefarah/yq@v4.40.5
-        with:
-          cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml
+      # - name: Update backend tag
+      #   uses: mikefarah/yq@v4.40.5
+      #   with:
+      #     cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml

       - name: GitHub Commit & Push
         run: |

From 0ab45cdef9bbcd8c02d2d6420e725391248c33e5 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Tue, 26 Mar 2024 13:32:29 -0700
Subject: [PATCH 091/152] update frontend docker build

---
 openshift/templates/frontend/frontend-bc-docker.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/openshift/templates/frontend/frontend-bc-docker.yaml b/openshift/templates/frontend/frontend-bc-docker.yaml
index 50bb040b..4a1146d2 100644
--- a/openshift/templates/frontend/frontend-bc-docker.yaml
+++ b/openshift/templates/frontend/frontend-bc-docker.yaml
@@ -80,9 +80,9 @@ objects:
             secretKeyRef:
               name: artifacts-default-idxprm
               key: password
+      noCache: true
+      forcePull: true
       type: Docker
-      noCache: true
-      forcePull: true
   successfulBuildsHistoryLimit: 5
   triggers:
   - imageChange: {}

From 135dc51b275cae13e19792d81604079adc8e43c9 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Tue, 26 Mar 2024 14:14:13 -0700
Subject: [PATCH 092/152] update docker build

---
 frontend/Dockerfile-Openshift | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/frontend/Dockerfile-Openshift b/frontend/Dockerfile-Openshift
index c58d43a9..a20d497b 100644
--- a/frontend/Dockerfile-Openshift
+++ b/frontend/Dockerfile-Openshift
@@ -16,11 +16,11 @@ RUN pwd && \
     ls -l node_modules
 RUN yes | npm run build

-# Stage 2: Copy the JS React SPA into the Nginx HTML directory
-FROM artifacts.developer.gov.bc.ca/docker-remote/bitnami/nginx:1.24.0
-COPY ./nginx.conf /opt/bitnami/nginx/conf/
-COPY --from=builder /usr/src/app/public /app
-RUN ls -l /app
-EXPOSE 8080
-CMD ["nginx", "-g", "daemon off;"]
+# # Stage 2: Copy the JS React SPA into the Nginx HTML directory
+# FROM artifacts.developer.gov.bc.ca/docker-remote/bitnami/nginx:1.24.0
+# COPY ./nginx.conf /opt/bitnami/nginx/conf/
+# COPY --from=builder /usr/src/app/public /app
+# RUN ls -l /app
+# EXPOSE 8080
+# CMD ["nginx", "-g", "daemon off;"]

From abb058c780fecb71e3033377caa36ce96cd68790 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Tue, 26 Mar 2024 14:25:34 -0700
Subject: [PATCH 093/152] update build folder

---
 frontend/Dockerfile-Openshift | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/frontend/Dockerfile-Openshift b/frontend/Dockerfile-Openshift
index a20d497b..13f50d19 100644
--- a/frontend/Dockerfile-Openshift
+++ b/frontend/Dockerfile-Openshift
@@ -16,11 +16,11 @@ RUN pwd && \
     ls -l node_modules
 RUN yes | npm run build

-# # Stage 2: Copy the JS React SPA into the Nginx HTML directory
-# FROM artifacts.developer.gov.bc.ca/docker-remote/bitnami/nginx:1.24.0
-# COPY ./nginx.conf /opt/bitnami/nginx/conf/
-# COPY --from=builder /usr/src/app/public /app
-# RUN ls -l /app
-# EXPOSE 8080
-# CMD ["nginx", "-g", "daemon off;"]
+# Stage 2: Copy the JS React SPA into the Nginx HTML directory
+FROM artifacts.developer.gov.bc.ca/docker-remote/bitnami/nginx:1.24.0
+COPY ./nginx.conf /opt/bitnami/nginx/conf/
+COPY --from=builder /usr/src/app/build /app
+RUN ls -l /app
+EXPOSE 8080
+CMD ["nginx", "-g", "daemon off;"]

From f0d49c1f5964b8511af790c3ecd1e535a54590f1 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Tue, 26 Mar 2024 15:24:17 -0700
Subject: [PATCH 094/152] re-enable the backend build

---
 .github/workflows/dev-ci.yaml | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/.github/workflows/dev-ci.yaml b/.github/workflows/dev-ci.yaml
index 9c82116e..1cad9e78 100644
--- a/.github/workflows/dev-ci.yaml
+++ b/.github/workflows/dev-ci.yaml
@@ -57,13 +57,13 @@ jobs:
           insecure_skip_tls_verify: true
           namespace: ${{ env.TOOLS_NAMESPACE }}

-      # - name: Build CTHUB Backend
-      #   run: |
-      #     cd openshift/templates/backend
-      #     oc process -f ./backend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }}
-      #     sleep 5s
-      #     oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1
-      #     oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
+      - name: Build CTHUB Backend
+        run: |
+          cd openshift/templates/backend
+          oc process -f ./backend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }}
+          sleep 5s
+          oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1
+          oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}

       - name: Build CTHUB Frontend
         run: |
@@ -97,10 +97,10 @@ jobs:
         with:
           cmd: yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml

-      # - name: Update backend tag
-      #   uses: mikefarah/yq@v4.40.5
-      #   with:
-      #     cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml
+      - name: Update backend tag
+        uses: mikefarah/yq@v4.40.5
+        with:
+          cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml

       - name: GitHub Commit & Push
         run: |

From a6d5ba6ba58abbf1bb408b60390c20db4be276ec Mon Sep 17 00:00:00 2001
From: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com>
Date: Tue, 26 Mar 2024 15:55:13 -0700
Subject: [PATCH 095/152] Add Superset (#123)

---
 .../templates/redis/cthub-dev-values.yaml     | 1935 +++++++++++++++++
 .../templates/redis/cthub-test-values.yaml    | 1935 +++++++++++++++++
 openshift/templates/redis/readme.md           |   18 +
 .../templates/redis/the-default-values.yaml   | 1935 +++++++++++++++++
 openshift/templates/superset/Dockerfile       |    7 -
 .../superset-bc.yaml}                         |   34 +-
 openshift/templates/superset/README.md        |   10 -
 .../superset/cthub-superset-dev.yaml          |  888 ++++++++
 .../superset/cthub-superset-test.yaml         |  867 ++++++++
 .../templates/superset/docker/Dockerfile      |    5 +
openshift/templates/superset/readme.md | 28 + openshift/templates/superset/redis/Dockerfile | 1 - .../templates/superset/redis/redis-dc.yaml | 126 -- .../superset/redis/superset-bc-redis.yaml | 55 - openshift/templates/superset/superset-bc.yaml | 49 - .../templates/superset/superset-dc-redis.yaml | 126 -- .../templates/superset/superset/Dockerfile | 10 - .../superset/superset/scripts/README.md | 75 - .../superset/scripts/docker-bootstrap.sh | 51 - .../superset/superset/scripts/docker-ci.sh | 26 - .../superset/scripts/docker-frontend.sh | 26 - .../superset/superset/scripts/docker-init.sh | 79 - .../superset/scripts/frontend-mem-nag.sh | 49 - .../scripts/pythonpath_dev/.gitignore | 23 - .../scripts/pythonpath_dev/superset_config.py | 114 - .../superset_config_local.example | 27 - .../superset/superset/scripts/run-server.sh | 32 - .../superset/superset/superset-bc.yaml | 55 - .../superset/superset/superset-dc-beat.yaml | 135 -- .../superset/superset-dc-superset.yaml | 213 -- .../superset/superset/superset-dc-worker.yaml | 132 -- .../superset/superset/superset-dc.yaml | 195 -- .../superset/the-default-values.yaml | 815 +++++++ 33 files changed, 8444 insertions(+), 1632 deletions(-) create mode 100644 openshift/templates/redis/cthub-dev-values.yaml create mode 100644 openshift/templates/redis/cthub-test-values.yaml create mode 100644 openshift/templates/redis/readme.md create mode 100644 openshift/templates/redis/the-default-values.yaml delete mode 100644 openshift/templates/superset/Dockerfile rename openshift/templates/superset/{superset/superset-bc-superset.yaml => Openshift/superset-bc.yaml} (58%) delete mode 100644 openshift/templates/superset/README.md create mode 100644 openshift/templates/superset/cthub-superset-dev.yaml create mode 100644 openshift/templates/superset/cthub-superset-test.yaml create mode 100644 openshift/templates/superset/docker/Dockerfile create mode 100644 openshift/templates/superset/readme.md delete mode 100644 openshift/templates/superset/redis/Dockerfile delete mode 100644 openshift/templates/superset/redis/redis-dc.yaml delete mode 100644 openshift/templates/superset/redis/superset-bc-redis.yaml delete mode 100644 openshift/templates/superset/superset-bc.yaml delete mode 100644 openshift/templates/superset/superset-dc-redis.yaml delete mode 100644 openshift/templates/superset/superset/Dockerfile delete mode 100644 openshift/templates/superset/superset/scripts/README.md delete mode 100755 openshift/templates/superset/superset/scripts/docker-bootstrap.sh delete mode 100755 openshift/templates/superset/superset/scripts/docker-ci.sh delete mode 100755 openshift/templates/superset/superset/scripts/docker-frontend.sh delete mode 100755 openshift/templates/superset/superset/scripts/docker-init.sh delete mode 100755 openshift/templates/superset/superset/scripts/frontend-mem-nag.sh delete mode 100644 openshift/templates/superset/superset/scripts/pythonpath_dev/.gitignore delete mode 100644 openshift/templates/superset/superset/scripts/pythonpath_dev/superset_config.py delete mode 100644 openshift/templates/superset/superset/scripts/pythonpath_dev/superset_config_local.example delete mode 100644 openshift/templates/superset/superset/scripts/run-server.sh delete mode 100644 openshift/templates/superset/superset/superset-bc.yaml delete mode 100644 openshift/templates/superset/superset/superset-dc-beat.yaml delete mode 100644 openshift/templates/superset/superset/superset-dc-superset.yaml delete mode 100644 openshift/templates/superset/superset/superset-dc-worker.yaml delete 
mode 100644 openshift/templates/superset/superset/superset-dc.yaml create mode 100644 openshift/templates/superset/the-default-values.yaml diff --git a/openshift/templates/redis/cthub-dev-values.yaml b/openshift/templates/redis/cthub-dev-values.yaml new file mode 100644 index 00000000..315dde91 --- /dev/null +++ b/openshift/templates/redis/cthub-dev-values.yaml @@ -0,0 +1,1935 @@ +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.redis.password Global Redis® password (overrides `auth.password`) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "netapp-block-standard" + redis: + password: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "cthub-redis-dev" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param secretAnnotations Annotations to add to secret +## +secretAnnotations: {} +## @param clusterDomain Kubernetes cluster domain name +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## @param useHostnames Use hostnames internally when announcing replication. If false, the hostname will be resolved to an IP address +## +useHostnames: true +## @param nameResolutionThreshold Failure threshold for internal hostnames resolution +## +nameResolutionThreshold: 5 +## @param nameResolutionTimeout Timeout seconds between probes for internal hostnames resolution +## +nameResolutionTimeout: 5 + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section Redis® Image parameters +## + +## Bitnami Redis® image +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## @param image.registry [default: REGISTRY_NAME] Redis® image registry +## @param image.repository [default: REPOSITORY_NAME/redis] Redis® image repository +## @skip image.tag Redis® image tag (immutable tags are recommended) +## @param image.digest Redis® image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag +## @param image.pullPolicy Redis® image pull policy +## @param image.pullSecrets Redis® image pull secrets +## @param image.debug Enable image debug mode +## +image: + registry: docker.io + repository: bitnami/redis + tag: 7.2.2-debian-11-r0 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + +## @section Redis® common configuration parameters +## https://github.com/bitnami/containers/tree/main/bitnami/redis#configuration +## + +## @param architecture Redis® architecture. Allowed values: `standalone` or `replication` +## +architecture: replication +## Redis® Authentication parameters +## ref: https://github.com/bitnami/containers/tree/main/bitnami/redis#setting-the-server-password-on-first-run +## +auth: + ## @param auth.enabled Enable password authentication + ## + enabled: true + ## @param auth.sentinel Enable password authentication on sentinels too + ## + sentinel: true + ## @param auth.password Redis® password + ## Defaults to a random 10-character alphanumeric string if not set + ## + password: "" + ## @param auth.existingSecret The name of an existing secret with Redis® credentials + ## NOTE: When it's set, the previous `auth.password` parameter is ignored + ## + existingSecret: "cthub-redis-dev-secret" + ## @param auth.existingSecretPasswordKey Password key to be retrieved from existing secret + ## NOTE: ignored unless `auth.existingSecret` parameter is set + ## + existingSecretPasswordKey: "redis-password" + ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable + ## + usePasswordFiles: false + +## @param commonConfiguration [string] Common configuration to be added into the ConfigMap +## ref: https://redis.io/topics/config +## +commonConfiguration: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" +## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for Redis® nodes +## +existingConfigmap: "" + +## @section Redis® master configuration parameters +## + +master: + ## @param master.count Number of Redis® master instances to deploy (experimental, requires additional configuration) + ## + count: 1 + ## @param master.configuration Configuration for Redis® master nodes + ## ref: https://redis.io/topics/config + ## + configuration: "" + ## @param master.disableCommands Array with Redis® commands to disable on master nodes + ## Commands will be completely disabled by renaming each to an empty string. 
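+  ## For illustration only (generic Redis rename-command behaviour, not a
+  ## chart default beyond the two entries below): once a command has been
+  ## renamed to an empty string, a client calling it sees something like
+  ##   127.0.0.1:6379> FLUSHDB
+  ##   (error) ERR unknown command 'FLUSHDB'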
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + ## @param master.command Override default container command (useful when using custom images) + ## + command: [] + ## @param master.args Override default container args (useful when using custom images) + ## + args: [] + ## @param master.enableServiceLinks Whether information about services should be injected into pod's environment variable + ## + enableServiceLinks: true + ## @param master.preExecCmds Additional commands to run prior to starting Redis® master + ## + preExecCmds: [] + ## @param master.extraFlags Array with additional command line flags for Redis® master + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param master.extraEnvVars Array with extra environment variables to add to Redis® master nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® master nodes + ## + extraEnvVarsCM: "" + ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® master nodes + ## + extraEnvVarsSecret: "" + ## @param master.containerPorts.redis Container port to open on Redis® master nodes + ## + containerPorts: + redis: 6379 + ## Configure extra options for Redis® containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param master.startupProbe.enabled Enable startupProbe on Redis® master nodes + ## @param master.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param master.startupProbe.periodSeconds Period seconds for startupProbe + ## @param master.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param master.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param master.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.livenessProbe.enabled Enable livenessProbe on Redis® master nodes + ## @param master.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param master.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param master.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param master.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param master.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.readinessProbe.enabled Enable readinessProbe on Redis® master nodes + ## @param master.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param master.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param master.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param master.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param master.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 
1 + successThreshold: 1 + failureThreshold: 5 + ## @param master.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param master.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param master.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis® master resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param master.resources.limits The resources limits for the Redis® master containers + ## @param master.resources.requests The requested resources for the Redis® master containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.podSecurityContext.enabled Enabled Redis® master pods' Security Context + ## @param master.podSecurityContext.fsGroup Set Redis® master pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1010180000 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.containerSecurityContext.enabled Enabled Redis® master containers' Security Context + ## @param master.containerSecurityContext.runAsUser Set Redis® master containers' Security Context runAsUser + ## @param master.containerSecurityContext.runAsGroup Set Redis® master containers' Security Context runAsGroup + ## @param master.containerSecurityContext.runAsNonRoot Set Redis® master containers' Security Context runAsNonRoot + ## @param master.containerSecurityContext.allowPrivilegeEscalation Is it possible to escalate Redis® pod(s) privileges + ## @param master.containerSecurityContext.seccompProfile.type Set Redis® master containers' Security Context seccompProfile + ## @param master.containerSecurityContext.capabilities.drop Set Redis® master containers' Security Context capabilities to drop + ## + containerSecurityContext: + enabled: true + runAsUser: 1010180000 + runAsGroup: 1010180000 + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + ## @param master.kind Use either Deployment or StatefulSet (default) + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ + ## + kind: StatefulSet + ## @param master.schedulerName Alternate scheduler for Redis® master pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param master.updateStrategy.type Redis® master statefulset strategy type + ## @skip master.updateStrategy.rollingUpdate + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate, OnDelete (statefulset), Recreate (deployment) + ## + type: RollingUpdate + ## @param master.minReadySeconds How many seconds a pod needs to be ready before killing the next, during update + ## + minReadySeconds: 0 + ## @param master.priorityClassName Redis® master pods' priorityClassName + ## + priorityClassName: "" + ## @param master.hostAliases Redis® master pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + 
hostAliases: [] + ## @param master.podLabels Extra labels for Redis® master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param master.podAnnotations Annotations for Redis® master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param master.shareProcessNamespace Share a single process namespace between all of the containers in Redis® master pods + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param master.podAffinityPreset Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param master.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node master.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param master.nodeAffinityPreset.type Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param master.nodeAffinityPreset.key Node label key to match. Ignored if `master.affinity` is set + ## + key: "" + ## @param master.nodeAffinityPreset.values Node label values to match. Ignored if `master.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param master.affinity Affinity for Redis® master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `master.podAffinityPreset`, `master.podAntiAffinityPreset`, and `master.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param master.nodeSelector Node labels for Redis® master pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param master.tolerations Tolerations for Redis® master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param master.topologySpreadConstraints Spread Constraints for Redis® master pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. + ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: [] + ## @param master.dnsPolicy DNS Policy for Redis® master pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. + ## dnsPolicy: ClusterFirst + ## + dnsPolicy: "" + ## @param master.dnsConfig DNS Configuration for Redis® master pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. 
+ ## dnsConfig: + ## options: + ## - name: ndots + ## value: "4" + ## - name: single-request-reopen + ## + dnsConfig: {} + ## @param master.lifecycleHooks for the Redis® master container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param master.extraVolumes Optionally specify extra list of additional volumes for the Redis® master pod(s) + ## + extraVolumes: [] + ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® master container(s) + ## + extraVolumeMounts: [] + ## @param master.sidecars Add additional sidecar containers to the Redis® master pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param master.initContainers Add additional init containers to the Redis® master pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence on Redis® master nodes using Persistent Volume Claims + ## + enabled: true + ## @param master.persistence.medium Provide a medium for `emptyDir` volumes. + ## + medium: "" + ## @param master.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. + ## + sizeLimit: "" + ## @param master.persistence.path The path the volume will be mounted at on Redis® master containers + ## NOTE: Useful when using different Redis® images + ## + path: /data + ## @param master.persistence.subPath The subdirectory of the volume to mount on Redis® master containers + ## NOTE: Useful in dev environments + ## + subPath: "" + ## @param master.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis® master containers + ## + subPathExpr: "" + ## @param master.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume size + ## + size: 2Gi + ## @param master.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param master.persistence.labels Additional custom labels for the PVC + ## + labels: {} + ## @param master.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param master.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param master.persistence.existingClaim Use a existing PVC which must be created manually before bound + ## NOTE: requires master.persistence.enabled: true + ## + existingClaim: "" + ## persistentVolumeClaimRetentionPolicy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## @param master.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are 
deleted during the lifecycle of a StatefulSet + ## @param master.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param master.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + persistentVolumeClaimRetentionPolicy: + enabled: false + whenScaled: Retain + whenDeleted: Retain + ## Redis® master service parameters + ## + service: + ## @param master.service.type Redis® master service type + ## + type: ClusterIP + ## @param master.service.ports.redis Redis® master service port + ## + ports: + redis: 6379 + ## @param master.service.nodePorts.redis Node port for Redis® master + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + redis: "" + ## @param master.service.externalTrafficPolicy Redis® master service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param master.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param master.service.internalTrafficPolicy Redis® master service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/ + ## + internalTrafficPolicy: Cluster + ## @param master.service.clusterIP Redis® master service Cluster IP + ## + clusterIP: "" + ## @param master.service.loadBalancerIP Redis® master service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param master.service.loadBalancerSourceRanges Redis® master service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param master.service.externalIPs Redis® master service External IPs + ## https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## e.g. + ## externalIPs: + ## - 10.10.10.1 + ## - 201.22.30.1 + ## + externalIPs: [] + ## @param master.service.annotations Additional custom annotations for Redis® master service + ## + annotations: {} + ## @param master.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param master.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param master.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-master pods + ## + terminationGracePeriodSeconds: 30 + ## ServiceAccount configuration + ## + serviceAccount: + ## @param master.serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: false + ## @param master.serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param master.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param master.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + +## @section Redis® replicas configuration parameters +## + +replica: + ## @param replica.replicaCount Number of Redis® replicas to deploy + ## + replicaCount: 2 + ## @param replica.configuration Configuration for Redis® replicas nodes + ## ref: https://redis.io/topics/config + ## + configuration: "" + ## @param replica.disableCommands Array with Redis® commands to disable on replicas nodes + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + ## @param replica.command Override default container command (useful when using custom images) + ## + command: [] + ## @param replica.args Override default container args (useful when using custom images) + ## + args: [] + ## @param replica.enableServiceLinks Whether information about services should be injected into pod's environment variable + ## + enableServiceLinks: true + ## @param replica.preExecCmds Additional commands to run prior to starting Redis® replicas + ## + preExecCmds: [] + ## @param replica.extraFlags Array with additional command line flags for Redis® replicas + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param replica.extraEnvVars Array with extra environment variables to add to Redis® replicas nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param replica.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® replicas nodes + ## + extraEnvVarsCM: "" + ## @param replica.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® replicas nodes + ## + extraEnvVarsSecret: "" + ## @param replica.externalMaster.enabled Use external master for bootstrapping + ## @param replica.externalMaster.host External master host to bootstrap from + ## @param replica.externalMaster.port Port for Redis service external master host + ## + externalMaster: + enabled: false + host: "" + port: 6379 + ## @param replica.containerPorts.redis Container port to open on Redis® replicas nodes + ## + containerPorts: + redis: 6379 + ## Configure extra options for Redis® containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param replica.startupProbe.enabled Enable startupProbe on Redis® replicas nodes + ## @param replica.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param replica.startupProbe.periodSeconds Period seconds for startupProbe + ## @param replica.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param replica.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param replica.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 
10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 22 + ## @param replica.livenessProbe.enabled Enable livenessProbe on Redis® replicas nodes + ## @param replica.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param replica.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param replica.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param replica.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param replica.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.readinessProbe.enabled Enable readinessProbe on Redis® replicas nodes + ## @param replica.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param replica.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param replica.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param replica.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param replica.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param replica.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param replica.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis® replicas resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param replica.resources.limits The resources limits for the Redis® replicas containers + ## @param replica.resources.requests The requested resources for the Redis® replicas containers + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+    limits: {}
+    #   cpu: 250m
+    #   memory: 256Mi
+    requests: {}
+    #   cpu: 250m
+    #   memory: 256Mi
+  ## Configure Pods Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+  ## @param replica.podSecurityContext.enabled Enable Redis® replicas pods' Security Context
+  ## @param replica.podSecurityContext.fsGroup Set Redis® replicas pod's Security Context fsGroup
+  ##
+  podSecurityContext:
+    enabled: true
+    fsGroup: 1010180000
+  ## Configure Container Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+  ## @param replica.containerSecurityContext.enabled Enable Redis® replicas containers' Security Context
+  ## @param replica.containerSecurityContext.runAsUser Set Redis® replicas containers' Security Context runAsUser
+  ## @param replica.containerSecurityContext.runAsGroup Set Redis® replicas containers' Security Context runAsGroup
+  ## @param replica.containerSecurityContext.runAsNonRoot Set Redis® replicas containers' Security Context runAsNonRoot
+  ## @param replica.containerSecurityContext.allowPrivilegeEscalation Set Redis® replicas pod's Security Context allowPrivilegeEscalation
+  ## @param replica.containerSecurityContext.seccompProfile.type Set Redis® replicas containers' Security Context seccompProfile
+  ## @param replica.containerSecurityContext.capabilities.drop Set Redis® replicas containers' Security Context capabilities to drop
+  ##
+  containerSecurityContext:
+    enabled: true
+    runAsUser: 1010180000
+    runAsGroup: 1010180000
+    runAsNonRoot: true
+    allowPrivilegeEscalation: false
+    seccompProfile:
+      type: RuntimeDefault
+    capabilities:
+      drop:
+        - ALL
+  ## @param replica.schedulerName Alternate scheduler for Redis® replicas pods
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  schedulerName: ""
+  ## @param replica.updateStrategy.type Redis® replicas statefulset strategy type
+  ## @skip replica.updateStrategy.rollingUpdate
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+  ##
+  updateStrategy:
+    ## StrategyType
+    ## Can be set to RollingUpdate, OnDelete (statefulset), Recreate (deployment)
+    ##
+    type: RollingUpdate
+  ## @param replica.minReadySeconds How many seconds a pod needs to be ready before killing the next, during update
+  ##
+  minReadySeconds: 0
+  ## @param replica.priorityClassName Redis® replicas pods' priorityClassName
+  ##
+  priorityClassName: ""
+  ## @param replica.podManagementPolicy podManagementPolicy to manage scaling operation of Redis® replicas pods
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
+  ##
+  podManagementPolicy: ""
+  ## @param replica.hostAliases Redis® replicas pods host aliases
+  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+  ##
+  hostAliases: []
+  ## @param replica.podLabels Extra labels for Redis® replicas pods
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+  ##
+  podLabels: {}
+  ## @param replica.podAnnotations Annotations for Redis® replicas pods
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+  ##
+  podAnnotations: {}
+  ## @param replica.shareProcessNamespace Share a single process namespace between all of the containers in Redis® replicas pods
+  ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param replica.podAffinityPreset Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param replica.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param replica.nodeAffinityPreset.type Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param replica.nodeAffinityPreset.key Node label key to match. Ignored if `replica.affinity` is set + ## + key: "" + ## @param replica.nodeAffinityPreset.values Node label values to match. Ignored if `replica.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param replica.affinity Affinity for Redis® replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `replica.podAffinityPreset`, `replica.podAntiAffinityPreset`, and `replica.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param replica.nodeSelector Node labels for Redis® replicas pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param replica.tolerations Tolerations for Redis® replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param replica.topologySpreadConstraints Spread Constraints for Redis® replicas pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. + ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: [] + ## @param replica.dnsPolicy DNS Policy for Redis® replica pods + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. + ## dnsPolicy: ClusterFirst + ## + dnsPolicy: "" + ## @param replica.dnsConfig DNS Configuration for Redis® replica pods + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. 
+  ## dnsConfig:
+  ##   options:
+  ##   - name: ndots
+  ##     value: "4"
+  ##   - name: single-request-reopen
+  ##
+  dnsConfig: {}
+  ## @param replica.lifecycleHooks for the Redis® replica container(s) to automate configuration before or after startup
+  ##
+  lifecycleHooks: {}
+  ## @param replica.extraVolumes Optionally specify extra list of additional volumes for the Redis® replicas pod(s)
+  ##
+  extraVolumes: []
+  ## @param replica.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® replicas container(s)
+  ##
+  extraVolumeMounts: []
+  ## @param replica.sidecars Add additional sidecar containers to the Redis® replicas pod(s)
+  ## e.g:
+  ## sidecars:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     ports:
+  ##       - name: portname
+  ##         containerPort: 1234
+  ##
+  sidecars: []
+  ## @param replica.initContainers Add additional init containers to the Redis® replicas pod(s)
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+  ## e.g:
+  ## initContainers:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     command: ['sh', '-c', 'echo "hello world"']
+  ##
+  initContainers: []
+  ## Persistence Parameters
+  ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    ## @param replica.persistence.enabled Enable persistence on Redis® replicas nodes using Persistent Volume Claims
+    ##
+    enabled: true
+    ## @param replica.persistence.medium Provide a medium for `emptyDir` volumes.
+    ##
+    medium: ""
+    ## @param replica.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes.
+    ##
+    sizeLimit: ""
+    ## @param replica.persistence.path The path the volume will be mounted at on Redis® replicas containers
+    ## NOTE: Useful when using different Redis® images
+    ##
+    path: /data
+    ## @param replica.persistence.subPath The subdirectory of the volume to mount on Redis® replicas containers
+    ## NOTE: Useful in dev environments
+    ##
+    subPath: ""
+    ## @param replica.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis® replicas containers
+    ##
+    subPathExpr: ""
+    ## @param replica.persistence.storageClass Persistent Volume storage class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner
+    ##
+    storageClass: ""
+    ## @param replica.persistence.accessModes Persistent Volume access modes
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## @param replica.persistence.size Persistent Volume size
+    ##
+    size: 2Gi
+    ## @param replica.persistence.annotations Additional custom annotations for the PVC
+    ##
+    annotations: {}
+    ## @param replica.persistence.labels Additional custom labels for the PVC
+    ##
+    labels: {}
+    ## @param replica.persistence.selector Additional labels to match for the PVC
+    ## e.g:
+    ## selector:
+    ##   matchLabels:
+    ##     app: my-app
+    ##
+    selector: {}
+    ## @param replica.persistence.dataSource Custom PVC data source
+    ##
+    dataSource: {}
+    ## @param replica.persistence.existingClaim Use an existing PVC, which must be created manually before being bound
+    ## NOTE: requires replica.persistence.enabled: true
+    ##
+    existingClaim: ""
+  ## persistentVolumeClaimRetentionPolicy
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
+  ## @param 
replica.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet + ## @param replica.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param replica.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + persistentVolumeClaimRetentionPolicy: + enabled: false + whenScaled: Retain + whenDeleted: Retain + ## Redis® replicas service parameters + ## + service: + ## @param replica.service.type Redis® replicas service type + ## + type: ClusterIP + ## @param replica.service.ports.redis Redis® replicas service port + ## + ports: + redis: 6379 + ## @param replica.service.nodePorts.redis Node port for Redis® replicas + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + redis: "" + ## @param replica.service.externalTrafficPolicy Redis® replicas service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param replica.service.internalTrafficPolicy Redis® replicas service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/ + ## + internalTrafficPolicy: Cluster + ## @param replica.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param replica.service.clusterIP Redis® replicas service Cluster IP + ## + clusterIP: "" + ## @param replica.service.loadBalancerIP Redis® replicas service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param replica.service.loadBalancerSourceRanges Redis® replicas service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param replica.service.annotations Additional custom annotations for Redis® replicas service + ## + annotations: {} + ## @param replica.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param replica.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param replica.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-replicas pods + ## + terminationGracePeriodSeconds: 30 + ## Autoscaling configuration + ## + autoscaling: + ## @param replica.autoscaling.enabled Enable replica autoscaling settings + ## + enabled: false + ## @param replica.autoscaling.minReplicas Minimum replicas for the pod autoscaling + ## + minReplicas: 1 + ## @param replica.autoscaling.maxReplicas Maximum replicas for the pod autoscaling + ## + maxReplicas: 11 + ## @param replica.autoscaling.targetCPU Percentage of CPU to consider when autoscaling + ## + targetCPU: "" + ## @param replica.autoscaling.targetMemory Percentage of Memory to consider when autoscaling + ## + targetMemory: "" + ## ServiceAccount configuration + ## + serviceAccount: + ## @param replica.serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: false + ## @param replica.serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param replica.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param replica.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## @section Redis® Sentinel configuration parameters +## + +sentinel: + ## @param sentinel.enabled Use Redis® Sentinel on Redis® pods. + ## IMPORTANT: this will disable the master and replicas services and + ## create a single Redis® service exposing both the Redis and Sentinel ports + ## + enabled: false + ## Bitnami Redis® Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## @param sentinel.image.registry [default: REGISTRY_NAME] Redis® Sentinel image registry + ## @param sentinel.image.repository [default: REPOSITORY_NAME/redis-sentinel] Redis® Sentinel image repository + ## @skip sentinel.image.tag Redis® Sentinel image tag (immutable tags are recommended) + ## @param sentinel.image.digest Redis® Sentinel image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag
+  ## @param sentinel.image.pullPolicy Redis® Sentinel image pull policy
+  ## @param sentinel.image.pullSecrets Redis® Sentinel image pull secrets
+  ## @param sentinel.image.debug Enable image debug mode
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/redis-sentinel
+    tag: 7.2.2-debian-11-r0
+    digest: ""
+    ## Specify an imagePullPolicy
+    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+    ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+    ##
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## e.g:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+    ## Enable debug mode
+    ##
+    debug: false
+  ## @param sentinel.annotations Additional custom annotations for Redis® Sentinel resource
+  ##
+  annotations: {}
+  ## @param sentinel.masterSet Master set name
+  ##
+  masterSet: mymaster
+  ## @param sentinel.quorum Sentinel Quorum
+  ##
+  quorum: 2
+  ## @param sentinel.getMasterTimeout Amount of time to allow before get_sentinel_master_info() times out.
+  ##
+  getMasterTimeout: 99
+  ## @param sentinel.automateClusterRecovery Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it.
+  ## This also prevents any new replica from starting until the last remaining replica is elected as master to guarantee that it is the one to be elected by Sentinel, and not a newly started replica with no data.
+  ## NOTE: This feature requires a "downAfterMilliseconds" value less than or equal to 2000.
+  ##
+  automateClusterRecovery: false
+  ## @param sentinel.redisShutdownWaitFailover Whether the Redis® master container waits for the failover at shutdown (in addition to the Redis® Sentinel container). 
+  ##
+  redisShutdownWaitFailover: true
+  ## Sentinel timing restrictions
+  ## @param sentinel.downAfterMilliseconds Timeout for detecting a Redis® node is down
+  ## @param sentinel.failoverTimeout Timeout for performing an election failover
+  ##
+  downAfterMilliseconds: 60000
+  failoverTimeout: 180000
+  ## @param sentinel.parallelSyncs Number of replicas that can be reconfigured in parallel to use the new master after a failover
+  ##
+  parallelSyncs: 1
+  ## @param sentinel.configuration Configuration for Redis® Sentinel nodes
+  ## ref: https://redis.io/topics/sentinel
+  ##
+  configuration: ""
+  ## @param sentinel.command Override default container command (useful when using custom images)
+  ##
+  command: []
+  ## @param sentinel.args Override default container args (useful when using custom images)
+  ##
+  args: []
+  ## @param sentinel.enableServiceLinks Whether information about services should be injected into pod's environment variable
+  ##
+  enableServiceLinks: true
+  ## @param sentinel.preExecCmds Additional commands to run prior to starting Redis® Sentinel
+  ##
+  preExecCmds: []
+  ## @param sentinel.extraEnvVars Array with extra environment variables to add to Redis® Sentinel nodes
+  ## e.g:
+  ## extraEnvVars:
+  ##   - name: FOO
+  ##     value: "bar"
+  ##
+  extraEnvVars: []
+  ## @param sentinel.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® Sentinel nodes
+  ##
+  extraEnvVarsCM: ""
+  ## @param sentinel.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® Sentinel nodes
+  ##
+  extraEnvVarsSecret: ""
+  ## @param sentinel.externalMaster.enabled Use external master for bootstrapping
+  ## @param sentinel.externalMaster.host External master host to bootstrap from
+  ## @param sentinel.externalMaster.port Port for Redis service external master host
+  ##
+  externalMaster:
+    enabled: false
+    host: ""
+    port: 6379
+  ## @param sentinel.containerPorts.sentinel Container port to open on Redis® Sentinel nodes
+  ##
+  containerPorts:
+    sentinel: 26379
+  ## Configure extra options for Redis® containers' liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+  ## @param sentinel.startupProbe.enabled Enable startupProbe on Redis® Sentinel nodes
+  ## @param sentinel.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+  ## @param sentinel.startupProbe.periodSeconds Period seconds for startupProbe
+  ## @param sentinel.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+  ## @param sentinel.startupProbe.failureThreshold Failure threshold for startupProbe
+  ## @param sentinel.startupProbe.successThreshold Success threshold for startupProbe
+  ##
+  startupProbe:
+    enabled: true
+    initialDelaySeconds: 10
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 22
+  ## @param sentinel.livenessProbe.enabled Enable livenessProbe on Redis® Sentinel nodes
+  ## @param sentinel.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+  ## @param sentinel.livenessProbe.periodSeconds Period seconds for livenessProbe
+  ## @param sentinel.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+  ## @param sentinel.livenessProbe.failureThreshold Failure threshold for livenessProbe
+  ## @param sentinel.livenessProbe.successThreshold Success threshold for livenessProbe
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 20
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    
failureThreshold: 6
+  ## @param sentinel.readinessProbe.enabled Enable readinessProbe on Redis® Sentinel nodes
+  ## @param sentinel.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+  ## @param sentinel.readinessProbe.periodSeconds Period seconds for readinessProbe
+  ## @param sentinel.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+  ## @param sentinel.readinessProbe.failureThreshold Failure threshold for readinessProbe
+  ## @param sentinel.readinessProbe.successThreshold Success threshold for readinessProbe
+  ##
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 20
+    periodSeconds: 5
+    timeoutSeconds: 1
+    successThreshold: 1
+    failureThreshold: 6
+  ## @param sentinel.customStartupProbe Custom startupProbe that overrides the default one
+  ##
+  customStartupProbe: {}
+  ## @param sentinel.customLivenessProbe Custom livenessProbe that overrides the default one
+  ##
+  customLivenessProbe: {}
+  ## @param sentinel.customReadinessProbe Custom readinessProbe that overrides the default one
+  ##
+  customReadinessProbe: {}
+  ## Persistence parameters
+  ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    ## @param sentinel.persistence.enabled Enable persistence on Redis® sentinel nodes using Persistent Volume Claims (Experimental)
+    ##
+    enabled: false
+    ## @param sentinel.persistence.storageClass Persistent Volume storage class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner
+    ##
+    storageClass: ""
+    ## @param sentinel.persistence.accessModes Persistent Volume access modes
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## @param sentinel.persistence.size Persistent Volume size
+    ##
+    size: 100Mi
+    ## @param sentinel.persistence.annotations Additional custom annotations for the PVC
+    ##
+    annotations: {}
+    ## @param sentinel.persistence.labels Additional custom labels for the PVC
+    ##
+    labels: {}
+    ## @param sentinel.persistence.selector Additional labels to match for the PVC
+    ## e.g:
+    ## selector:
+    ##   matchLabels:
+    ##     app: my-app
+    ##
+    selector: {}
+    ## @param sentinel.persistence.dataSource Custom PVC data source
+    ##
+    dataSource: {}
+    ## @param sentinel.persistence.medium Provide a medium for `emptyDir` volumes.
+    ##
+    medium: ""
+    ## @param sentinel.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. 
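+    ## e.g. (illustrative value only, not a chart default; caps the scratch space when the volume falls back to emptyDir):
+    ## sizeLimit: 128Mi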
+    ##
+    sizeLimit: ""
+  ## persistentVolumeClaimRetentionPolicy
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
+  ## @param sentinel.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet
+  ## @param sentinel.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced
+  ## @param sentinel.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted
+  ##
+  persistentVolumeClaimRetentionPolicy:
+    enabled: false
+    whenScaled: Retain
+    whenDeleted: Retain
+  ## Redis® Sentinel resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param sentinel.resources.limits The resources limits for the Redis® Sentinel containers
+  ## @param sentinel.resources.requests The requested resources for the Redis® Sentinel containers
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## Configure Container Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+  ## @param sentinel.containerSecurityContext.enabled Enable Redis® Sentinel containers' Security Context
+  ## @param sentinel.containerSecurityContext.runAsUser Set Redis® Sentinel containers' Security Context runAsUser
+  ## @param sentinel.containerSecurityContext.runAsGroup Set Redis® Sentinel containers' Security Context runAsGroup
+  ## @param sentinel.containerSecurityContext.runAsNonRoot Set Redis® Sentinel containers' Security Context runAsNonRoot
+  ## @param sentinel.containerSecurityContext.allowPrivilegeEscalation Set Redis® Sentinel containers' Security Context allowPrivilegeEscalation
+  ## @param sentinel.containerSecurityContext.seccompProfile.type Set Redis® Sentinel containers' Security Context seccompProfile
+  ## @param sentinel.containerSecurityContext.capabilities.drop Set Redis® Sentinel containers' Security Context capabilities to drop
+  ##
+  containerSecurityContext:
+    enabled: true
+    runAsUser: 1010180000
+    runAsGroup: 1010180000
+    runAsNonRoot: true
+    allowPrivilegeEscalation: false
+    seccompProfile:
+      type: RuntimeDefault
+    capabilities:
+      drop:
+        - ALL
+  ## @param sentinel.lifecycleHooks for the Redis® sentinel container(s) to automate configuration before or after startup
+  ##
+  lifecycleHooks: {}
+  ## @param sentinel.extraVolumes Optionally specify extra list of additional volumes for the Redis® Sentinel
+  ##
+  extraVolumes: []
+  ## @param sentinel.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® Sentinel container(s)
+  ##
+  extraVolumeMounts: []
+  ## Redis® Sentinel service parameters
+  ##
+  service:
+    ## @param sentinel.service.type Redis® Sentinel service type
+    ##
+    type: ClusterIP
+    ## @param sentinel.service.ports.redis Redis® service port for Redis®
+    ## @param sentinel.service.ports.sentinel Redis® service port for Redis® Sentinel
+    ##
+    ports:
+      redis: 6379
+      sentinel: 26379
+    ## @param sentinel.service.nodePorts.redis Node port for Redis®
+    ## @param sentinel.service.nodePorts.sentinel Node port for Sentinel
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ## NOTE: choose port between <30000-32767>
+    ## NOTE: By leaving these values blank, they will be generated by ports-configmap
+    ## If setting manually, please leave at least replica.replicaCount + 1 in between 
sentinel.service.nodePorts.redis and sentinel.service.nodePorts.sentinel to take into account the ports that will be created while incrementing that base port + ## + nodePorts: + redis: "" + sentinel: "" + ## @param sentinel.service.externalTrafficPolicy Redis® Sentinel service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param sentinel.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param sentinel.service.clusterIP Redis® Sentinel service Cluster IP + ## + clusterIP: "" + ## @param sentinel.service.loadBalancerIP Redis® Sentinel service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param sentinel.service.loadBalancerSourceRanges Redis® Sentinel service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param sentinel.service.annotations Additional custom annotations for Redis® Sentinel service + ## + annotations: {} + ## @param sentinel.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param sentinel.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param sentinel.service.headless.annotations Annotations for the headless service. + ## + annotations: {} + ## @param sentinel.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-node pods + ## + terminationGracePeriodSeconds: 30 + +## @section Other Parameters +## + +## @param serviceBindings.enabled Create secret for service binding (Experimental) +## Ref: https://servicebinding.io/service-provider/ +## +serviceBindings: + enabled: false + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the ports + ## Redis® is listening on. When true, Redis® will accept connections from any source + ## (with the correct destination port). 
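+  ## NOTE: the following is an assumption based on the usual Bitnami chart convention, not taken from this file:
+  ## when allowExternal is false, only pods carrying the release's generated client label can reach Redis®, e.g.:
+  ##   metadata:
+  ##     labels:
+  ##       cthub-redis-test-client: "true"
+  ## The exact label name depends on the release fullname, so treat this value as illustrative.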
+ ## + allowExternal: true + ## @param networkPolicy.extraIngress Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param networkPolicy.extraEgress Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param networkPolicy.ingressNSMatchLabels Labels to match to allow traffic from other namespaces + ## @param networkPolicy.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + metrics: + ## @param networkPolicy.metrics.allowExternal Don't require client label for connections for metrics endpoint + ## When set to false, only pods with the correct client label will have network access to the metrics port + ## + allowExternal: true + ## @param networkPolicy.metrics.ingressNSMatchLabels Labels to match to allow traffic from other namespaces to metrics endpoint + ## @param networkPolicy.metrics.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces to metrics endpoint + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## @param podSecurityPolicy.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later + ## + create: false + ## @param podSecurityPolicy.enabled Enable PodSecurityPolicy's RBAC rules + ## + enabled: false +## RBAC configuration +## +rbac: + ## @param rbac.create Specifies whether RBAC resources should be created + ## + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## ServiceAccount configuration +## +serviceAccount: + ## @param serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Redis® Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Specifies whether a PodDisruptionBudget should be created + ## + create: false + ## @param pdb.minAvailable Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Max number of pods that can be unavailable after the eviction + ## + maxUnavailable: "" +## TLS configuration +## +tls: + ## @param tls.enabled Enable TLS traffic + ## + enabled: false + ## @param tls.authClients Require clients to authenticate + ## + authClients: true + ## @param tls.autoGenerated Enable autogenerated certificates + ## + autoGenerated: false + ## @param tls.existingSecret The name of the existing secret that contains the TLS certificates + ## + existingSecret: "" + ## @param tls.certificatesSecret DEPRECATED. Use existingSecret instead. + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate Key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## + certCAFilename: "" + ## @param tls.dhParamsFilename File containing DH params (in order to support DH based ciphers) + ## + dhParamsFilename: "" + +## @section Metrics Parameters +## + +metrics: + ## @param metrics.enabled Start a sidecar prometheus exporter to expose Redis® metrics + ## + enabled: false + ## Bitnami Redis® Exporter image + ## ref: https://hub.docker.com/r/bitnami/redis-exporter/tags/ + ## @param metrics.image.registry [default: REGISTRY_NAME] Redis® Exporter image registry + ## @param metrics.image.repository [default: REPOSITORY_NAME/redis-exporter] Redis® Exporter image repository + ## @skip metrics.image.tag Redis® Exporter image tag (immutable tags are recommended) + ## @param metrics.image.digest Redis® Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param metrics.image.pullPolicy Redis® Exporter image pull policy + ## @param metrics.image.pullSecrets Redis® Exporter image pull secrets + ## + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.55.0-debian-11-r0 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## e.g:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## Configure extra options for Redis® containers' liveness, readiness & startup probes
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
+  ## @param metrics.startupProbe.enabled Enable startupProbe on the Redis® exporter container
+  ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+  ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe
+  ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+  ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe
+  ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe
+  ##
+  startupProbe:
+    enabled: false
+    initialDelaySeconds: 10
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  ## @param metrics.livenessProbe.enabled Enable livenessProbe on the Redis® exporter container
+  ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+  ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe
+  ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+  ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe
+  ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 10
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  ## @param metrics.readinessProbe.enabled Enable readinessProbe on the Redis® exporter container
+  ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+  ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe
+  ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+  ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe
+  ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe
+  ##
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 5
+    periodSeconds: 10
+    timeoutSeconds: 1
+    successThreshold: 1
+    failureThreshold: 3
+  ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one
+  ##
+  customStartupProbe: {}
+  ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one
+  ##
+  customLivenessProbe: {}
+  ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one
+  ##
+  customReadinessProbe: {}
+  ## @param metrics.command Override default metrics container init command (useful when using custom images)
+  ##
+  command: []
+  ## @param metrics.redisTargetHost A way to specify an alternative Redis® hostname
+  ## Useful for certificate CN/SAN matching
+  ##
+  redisTargetHost: "localhost"
+  ## @param metrics.extraArgs Extra arguments for Redis® exporter
+  ## e.g.:
+  ## extraArgs:
+  ##   check-keys: myKey,myOtherKey
+  ##
+  extraArgs: {}
+  ## @param metrics.extraEnvVars Array with extra environment variables to add to Redis® exporter
+  ## e.g:
+  ## extraEnvVars:
+  ##   - name: FOO
+  ##     value: "bar"
+  ##
+  extraEnvVars: []
+  ## Configure Container Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+  ## @param metrics.containerSecurityContext.enabled Enable Redis® exporter containers' Security Context
+  ## @param metrics.containerSecurityContext.runAsUser Set Redis® exporter containers' Security Context runAsUser
+  ## @param metrics.containerSecurityContext.runAsGroup Set Redis® exporter containers' Security Context runAsGroup
+  ## @param metrics.containerSecurityContext.runAsNonRoot Set Redis® exporter containers' Security Context runAsNonRoot
+  ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set Redis® exporter containers' Security Context allowPrivilegeEscalation
+  ## @param metrics.containerSecurityContext.seccompProfile.type Set Redis® exporter containers' Security Context seccompProfile
+  ## @param metrics.containerSecurityContext.capabilities.drop Set Redis® exporter containers' Security Context capabilities to drop
+  ##
+  containerSecurityContext:
+    enabled: true
+    runAsUser: 1010180000
+    runAsGroup: 1010180000
+    runAsNonRoot: true
+    allowPrivilegeEscalation: false
+    seccompProfile:
+      type: RuntimeDefault
+    capabilities:
+      drop:
+        - ALL
+  ## @param metrics.extraVolumes Optionally specify extra list of additional volumes for the Redis® metrics sidecar
+  ##
+  extraVolumes: []
+  ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® metrics sidecar
+  ##
+  extraVolumeMounts: []
+  ## Redis® exporter resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param metrics.resources.limits The resources limits for the Redis® exporter container
+  ## @param metrics.resources.requests The requested resources for the Redis® exporter container
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## @param metrics.podLabels Extra labels for Redis® exporter pods
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+  ##
+  podLabels: {}
+  ## @param metrics.podAnnotations [object] Annotations for Redis® exporter pods
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+  ##
+  podAnnotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "9121"
+  ## Redis® exporter service parameters
+  ##
+  service:
+    ## @param metrics.service.type Redis® exporter service type
+    ##
+    type: ClusterIP
+    ## @param metrics.service.port Redis® exporter service port
+    ##
+    port: 9121
+    ## @param metrics.service.externalTrafficPolicy Redis® exporter service external traffic policy
+    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+    ##
+    externalTrafficPolicy: Cluster
+    ## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
+    ##
+    extraPorts: []
+    ## @param metrics.service.loadBalancerIP Redis® exporter service Load Balancer IP
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    loadBalancerIP: ""
+    ## @param metrics.service.loadBalancerSourceRanges Redis® exporter service Load Balancer sources
+    ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+    ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param metrics.service.annotations Additional custom annotations for Redis® exporter service + ## + annotations: {} + ## @param metrics.service.clusterIP Redis® exporter service Cluster IP + ## + clusterIP: "" + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. + ## + relabellings: [] + ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.serviceMonitor.podTargetLabels Labels from the Kubernetes pod to be transferred to the created metrics + ## + podTargetLabels: [] + ## @param metrics.serviceMonitor.sampleLimit Limit of how many samples should be scraped from every Pod + ## + sampleLimit: false + ## @param metrics.serviceMonitor.targetLimit Limit of how many targets should be scraped + ## + targetLimit: false + ## Prometheus Pod Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#podmonitor + ## + podMonitor: + ## @param metrics.podMonitor.enabled Create PodMonitor resource(s) for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.podMonitor.namespace The namespace in which the PodMonitor will be created + ## + namespace: "" + ## @param metrics.podMonitor.interval The interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.podMonitor.scrapeTimeout The timeout after which the scrape is ended + ## + scrapeTimeout: "" + ## @param metrics.podMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. + ## + relabellings: [] + ## @param metrics.podMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. 
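+    ## e.g. (a hypothetical relabeling, written with Prometheus Operator RelabelConfig fields; keeps only the redis_up series):
+    ## metricRelabelings:
+    ##   - sourceLabels: [__name__]
+    ##     regex: redis_up
+    ##     action: keep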
+ ## + metricRelabelings: [] + ## @param metrics.podMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.podMonitor.additionalLabels Additional labels that can be used so PodMonitor resource(s) can be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.podMonitor.podTargetLabels Labels from the Kubernetes pod to be transferred to the created metrics + ## + podTargetLabels: [] + ## @param metrics.podMonitor.sampleLimit Limit of how many samples should be scraped from every Pod + ## + sampleLimit: false + ## @param metrics.podMonitor.targetLimit Limit of how many targets should be scraped + ## + targetLimit: false + + ## Custom PrometheusRule to be defined + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.prometheusRule.namespace The namespace in which the prometheusRule will be created + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels for the prometheusRule + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules Custom Prometheus rules + ## e.g: + ## rules: + ## - alert: RedisDown + ## expr: redis_up{service="{{ template "common.names.fullname" . }}-metrics"} == 0 + ## for: 2m + ## labels: + ## severity: error + ## annotations: + ## summary: Redis® instance {{ "{{ $labels.instance }}" }} down + ## description: Redis® instance {{ "{{ $labels.instance }}" }} is down + ## - alert: RedisMemoryHigh + ## expr: > + ## redis_memory_used_bytes{service="{{ template "common.names.fullname" . }}-metrics"} * 100 + ## / + ## redis_memory_max_bytes{service="{{ template "common.names.fullname" . }}-metrics"} + ## > 90 + ## for: 2m + ## labels: + ## severity: error + ## annotations: + ## summary: Redis® instance {{ "{{ $labels.instance }}" }} is using too much memory + ## description: | + ## Redis® instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + ## - alert: RedisKeyEviction + ## expr: | + ## increase(redis_evicted_keys_total{service="{{ template "common.names.fullname" . }}-metrics"}[5m]) > 0 + ## for: 1s + ## labels: + ## severity: error + ## annotations: + ## summary: Redis® instance {{ "{{ $labels.instance }}" }} has evicted keys + ## description: | + ## Redis® instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. 
+ ## + rules: [] + +## @section Init Container Parameters +## + +## 'volumePermissions' init container parameters +## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values +## based on the *podSecurityContext/*containerSecurityContext parameters +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` + ## + enabled: false + ## OS Shell + Utility image + ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/ + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] OS Shell + Utility image repository + ## @skip volumePermissions.image.tag OS Shell + Utility image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy OS Shell + Utility image pull policy + ## @param volumePermissions.image.pullSecrets OS Shell + Utility image pull secrets + ## + image: + registry: docker.io + repository: bitnami/os-shell + tag: 11-debian-11-r90 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits The resources limits for the init container + ## @param volumePermissions.resources.requests The requested resources for the init container + ## + resources: + limits: {} + requests: {} + ## Init container Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser + ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the + ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed) + ## + containerSecurityContext: + runAsUser: 0 + +## init-sysctl container parameters +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +## +sysctl: + ## @param sysctl.enabled Enable init container to modify Kernel settings + ## + enabled: false + ## OS Shell + Utility image + ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/ + ## @param sysctl.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry + ## @param sysctl.image.repository [default: REPOSITORY_NAME/os-shell] OS Shell + Utility image repository + ## @skip sysctl.image.tag OS Shell + Utility image tag (immutable tags are recommended) + ## @param sysctl.image.digest OS Shell + Utility image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param sysctl.image.pullPolicy OS Shell + Utility image pull policy + ## @param sysctl.image.pullSecrets OS Shell + Utility image pull secrets + ## + image: + registry: docker.io + repository: bitnami/os-shell + tag: 11-debian-11-r90 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param sysctl.command Override default init-sysctl container command (useful when using custom images) + ## + command: [] + ## @param sysctl.mountHostSys Mount the host `/sys` folder to `/host-sys` + ## + mountHostSys: false + ## Init container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sysctl.resources.limits The resources limits for the init container + ## @param sysctl.resources.requests The requested resources for the init container + ## + resources: + limits: {} + requests: {} + +## @section useExternalDNS Parameters +## +## @param useExternalDNS.enabled Enable various syntax that would enable external-dns to work. Note this requires a working installation of `external-dns` to be usable. +## @param useExternalDNS.additionalAnnotations Extra annotations to be utilized when `external-dns` is enabled. +## @param useExternalDNS.annotationKey The annotation key utilized when `external-dns` is enabled. Setting this to `false` will disable annotations. +## @param useExternalDNS.suffix The DNS suffix utilized when `external-dns` is enabled. Note that we prepend the suffix with the full name of the release. +## +useExternalDNS: + enabled: false + suffix: "" + annotationKey: external-dns.alpha.kubernetes.io/ + additionalAnnotations: {} diff --git a/openshift/templates/redis/cthub-test-values.yaml b/openshift/templates/redis/cthub-test-values.yaml new file mode 100644 index 00000000..379257b3 --- /dev/null +++ b/openshift/templates/redis/cthub-test-values.yaml @@ -0,0 +1,1935 @@ +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.redis.password Global Redis® password (overrides `auth.password`) +## +global: + imageRegistry: "" + ## E.g. 
+  ## imagePullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  imagePullSecrets: []
+  storageClass: "netapp-block-standard"
+  redis:
+    password: ""
+
+## @section Common parameters
+##
+
+## @param kubeVersion Override Kubernetes version
+##
+kubeVersion: ""
+## @param nameOverride String to partially override common.names.fullname
+##
+nameOverride: ""
+## @param fullnameOverride String to fully override common.names.fullname
+##
+fullnameOverride: "cthub-redis-test"
+## @param commonLabels Labels to add to all deployed objects
+##
+commonLabels: {}
+## @param commonAnnotations Annotations to add to all deployed objects
+##
+commonAnnotations: {}
+## @param secretAnnotations Annotations to add to secret
+##
+secretAnnotations: {}
+## @param clusterDomain Kubernetes cluster domain name
+##
+clusterDomain: cluster.local
+## @param extraDeploy Array of extra objects to deploy with the release
+##
+extraDeploy: []
+## @param useHostnames Use hostnames internally when announcing replication. If false, the hostname will be resolved to an IP address
+##
+useHostnames: true
+## @param nameResolutionThreshold Failure threshold for internal hostnames resolution
+##
+nameResolutionThreshold: 5
+## @param nameResolutionTimeout Timeout seconds between probes for internal hostnames resolution
+##
+nameResolutionTimeout: 5
+
+## Enable diagnostic mode in the deployment
+##
+diagnosticMode:
+  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
+  ##
+  enabled: false
+  ## @param diagnosticMode.command Command to override all containers in the deployment
+  ##
+  command:
+    - sleep
+  ## @param diagnosticMode.args Args to override all containers in the deployment
+  ##
+  args:
+    - infinity
+
+## @section Redis® Image parameters
+##
+
+## Bitnami Redis® image
+## ref: https://hub.docker.com/r/bitnami/redis/tags/
+## @param image.registry [default: REGISTRY_NAME] Redis® image registry
+## @param image.repository [default: REPOSITORY_NAME/redis] Redis® image repository
+## @skip image.tag Redis® image tag (immutable tags are recommended)
+## @param image.digest Redis® image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+## @param image.pullPolicy Redis® image pull policy
+## @param image.pullSecrets Redis® image pull secrets
+## @param image.debug Enable image debug mode
+##
+image:
+  registry: docker.io
+  repository: bitnami/redis
+  tag: 7.2.2-debian-11-r0
+  digest: ""
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ## e.g:
+  ## pullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  pullSecrets: []
+  ## Enable debug mode
+  ##
+  debug: false
+
+## @section Redis® common configuration parameters
+## https://github.com/bitnami/containers/tree/main/bitnami/redis#configuration
+##
+
+## @param architecture Redis® architecture. 
Allowed values: `standalone` or `replication` +## +architecture: replication +## Redis® Authentication parameters +## ref: https://github.com/bitnami/containers/tree/main/bitnami/redis#setting-the-server-password-on-first-run +## +auth: + ## @param auth.enabled Enable password authentication + ## + enabled: true + ## @param auth.sentinel Enable password authentication on sentinels too + ## + sentinel: true + ## @param auth.password Redis® password + ## Defaults to a random 10-character alphanumeric string if not set + ## + password: "" + ## @param auth.existingSecret The name of an existing secret with Redis® credentials + ## NOTE: When it's set, the previous `auth.password` parameter is ignored + ## + existingSecret: "cthub-redis-test-secret" + ## @param auth.existingSecretPasswordKey Password key to be retrieved from existing secret + ## NOTE: ignored unless `auth.existingSecret` parameter is set + ## + existingSecretPasswordKey: "redis-password" + ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable + ## + usePasswordFiles: false + +## @param commonConfiguration [string] Common configuration to be added into the ConfigMap +## ref: https://redis.io/topics/config +## +commonConfiguration: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" +## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for Redis® nodes +## +existingConfigmap: "" + +## @section Redis® master configuration parameters +## + +master: + ## @param master.count Number of Redis® master instances to deploy (experimental, requires additional configuration) + ## + count: 1 + ## @param master.configuration Configuration for Redis® master nodes + ## ref: https://redis.io/topics/config + ## + configuration: "" + ## @param master.disableCommands Array with Redis® commands to disable on master nodes + ## Commands will be completely disabled by renaming each to an empty string. 
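+  ## e.g. the effect is equivalent to redis.conf entries of the form (sketch):
+  ##   rename-command FLUSHDB ""
+  ##   rename-command FLUSHALL ""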
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + ## @param master.command Override default container command (useful when using custom images) + ## + command: [] + ## @param master.args Override default container args (useful when using custom images) + ## + args: [] + ## @param master.enableServiceLinks Whether information about services should be injected into pod's environment variable + ## + enableServiceLinks: true + ## @param master.preExecCmds Additional commands to run prior to starting Redis® master + ## + preExecCmds: [] + ## @param master.extraFlags Array with additional command line flags for Redis® master + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param master.extraEnvVars Array with extra environment variables to add to Redis® master nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® master nodes + ## + extraEnvVarsCM: "" + ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® master nodes + ## + extraEnvVarsSecret: "" + ## @param master.containerPorts.redis Container port to open on Redis® master nodes + ## + containerPorts: + redis: 6379 + ## Configure extra options for Redis® containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param master.startupProbe.enabled Enable startupProbe on Redis® master nodes + ## @param master.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param master.startupProbe.periodSeconds Period seconds for startupProbe + ## @param master.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param master.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param master.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.livenessProbe.enabled Enable livenessProbe on Redis® master nodes + ## @param master.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param master.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param master.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param master.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param master.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.readinessProbe.enabled Enable readinessProbe on Redis® master nodes + ## @param master.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param master.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param master.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param master.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param master.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 
1 + successThreshold: 1 + failureThreshold: 5 + ## @param master.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param master.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param master.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis® master resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param master.resources.limits The resources limits for the Redis® master containers + ## @param master.resources.requests The requested resources for the Redis® master containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.podSecurityContext.enabled Enabled Redis® master pods' Security Context + ## @param master.podSecurityContext.fsGroup Set Redis® master pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1010150000 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.containerSecurityContext.enabled Enabled Redis® master containers' Security Context + ## @param master.containerSecurityContext.runAsUser Set Redis® master containers' Security Context runAsUser + ## @param master.containerSecurityContext.runAsGroup Set Redis® master containers' Security Context runAsGroup + ## @param master.containerSecurityContext.runAsNonRoot Set Redis® master containers' Security Context runAsNonRoot + ## @param master.containerSecurityContext.allowPrivilegeEscalation Is it possible to escalate Redis® pod(s) privileges + ## @param master.containerSecurityContext.seccompProfile.type Set Redis® master containers' Security Context seccompProfile + ## @param master.containerSecurityContext.capabilities.drop Set Redis® master containers' Security Context capabilities to drop + ## + containerSecurityContext: + enabled: true + runAsUser: 1010150000 + runAsGroup: 1010150000 + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + ## @param master.kind Use either Deployment or StatefulSet (default) + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ + ## + kind: StatefulSet + ## @param master.schedulerName Alternate scheduler for Redis® master pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param master.updateStrategy.type Redis® master statefulset strategy type + ## @skip master.updateStrategy.rollingUpdate + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate, OnDelete (statefulset), Recreate (deployment) + ## + type: RollingUpdate + ## @param master.minReadySeconds How many seconds a pod needs to be ready before killing the next, during update + ## + minReadySeconds: 0 + ## @param master.priorityClassName Redis® master pods' priorityClassName + ## + priorityClassName: "" + ## @param master.hostAliases Redis® master pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + 
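+  ## Note: on OpenShift the runAsUser/runAsGroup/fsGroup values used above must
+  ## fall inside the project's assigned UID range; one way to confirm the range
+  ## (sketch, substitute the real namespace name):
+  ##   oc get namespace <namespace> \
+  ##     -o jsonpath='{.metadata.annotations.openshift\.io/sa\.scc\.uid-range}'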
hostAliases: [] + ## @param master.podLabels Extra labels for Redis® master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param master.podAnnotations Annotations for Redis® master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param master.shareProcessNamespace Share a single process namespace between all of the containers in Redis® master pods + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param master.podAffinityPreset Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param master.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node master.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param master.nodeAffinityPreset.type Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param master.nodeAffinityPreset.key Node label key to match. Ignored if `master.affinity` is set + ## + key: "" + ## @param master.nodeAffinityPreset.values Node label values to match. Ignored if `master.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param master.affinity Affinity for Redis® master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `master.podAffinityPreset`, `master.podAntiAffinityPreset`, and `master.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param master.nodeSelector Node labels for Redis® master pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param master.tolerations Tolerations for Redis® master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param master.topologySpreadConstraints Spread Constraints for Redis® master pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. + ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: [] + ## @param master.dnsPolicy DNS Policy for Redis® master pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. + ## dnsPolicy: ClusterFirst + ## + dnsPolicy: "" + ## @param master.dnsConfig DNS Configuration for Redis® master pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. 
+ ## dnsConfig: + ## options: + ## - name: ndots + ## value: "4" + ## - name: single-request-reopen + ## + dnsConfig: {} + ## @param master.lifecycleHooks for the Redis® master container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param master.extraVolumes Optionally specify extra list of additional volumes for the Redis® master pod(s) + ## + extraVolumes: [] + ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® master container(s) + ## + extraVolumeMounts: [] + ## @param master.sidecars Add additional sidecar containers to the Redis® master pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param master.initContainers Add additional init containers to the Redis® master pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence on Redis® master nodes using Persistent Volume Claims + ## + enabled: true + ## @param master.persistence.medium Provide a medium for `emptyDir` volumes. + ## + medium: "" + ## @param master.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. + ## + sizeLimit: "" + ## @param master.persistence.path The path the volume will be mounted at on Redis® master containers + ## NOTE: Useful when using different Redis® images + ## + path: /data + ## @param master.persistence.subPath The subdirectory of the volume to mount on Redis® master containers + ## NOTE: Useful in dev environments + ## + subPath: "" + ## @param master.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis® master containers + ## + subPathExpr: "" + ## @param master.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume size + ## + size: 2Gi + ## @param master.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param master.persistence.labels Additional custom labels for the PVC + ## + labels: {} + ## @param master.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param master.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param master.persistence.existingClaim Use a existing PVC which must be created manually before bound + ## NOTE: requires master.persistence.enabled: true + ## + existingClaim: "" + ## persistentVolumeClaimRetentionPolicy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## @param master.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are 
deleted during the lifecycle of a StatefulSet + ## @param master.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param master.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + persistentVolumeClaimRetentionPolicy: + enabled: false + whenScaled: Retain + whenDeleted: Retain + ## Redis® master service parameters + ## + service: + ## @param master.service.type Redis® master service type + ## + type: ClusterIP + ## @param master.service.ports.redis Redis® master service port + ## + ports: + redis: 6379 + ## @param master.service.nodePorts.redis Node port for Redis® master + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + redis: "" + ## @param master.service.externalTrafficPolicy Redis® master service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param master.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param master.service.internalTrafficPolicy Redis® master service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/ + ## + internalTrafficPolicy: Cluster + ## @param master.service.clusterIP Redis® master service Cluster IP + ## + clusterIP: "" + ## @param master.service.loadBalancerIP Redis® master service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param master.service.loadBalancerSourceRanges Redis® master service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param master.service.externalIPs Redis® master service External IPs + ## https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## e.g. + ## externalIPs: + ## - 10.10.10.1 + ## - 201.22.30.1 + ## + externalIPs: [] + ## @param master.service.annotations Additional custom annotations for Redis® master service + ## + annotations: {} + ## @param master.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param master.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param master.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-master pods + ## + terminationGracePeriodSeconds: 30 + ## ServiceAccount configuration + ## + serviceAccount: + ## @param master.serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: false + ## @param master.serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param master.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param master.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + +## @section Redis® replicas configuration parameters +## + +replica: + ## @param replica.replicaCount Number of Redis® replicas to deploy + ## + replicaCount: 2 + ## @param replica.configuration Configuration for Redis® replicas nodes + ## ref: https://redis.io/topics/config + ## + configuration: "" + ## @param replica.disableCommands Array with Redis® commands to disable on replicas nodes + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + ## @param replica.command Override default container command (useful when using custom images) + ## + command: [] + ## @param replica.args Override default container args (useful when using custom images) + ## + args: [] + ## @param replica.enableServiceLinks Whether information about services should be injected into pod's environment variable + ## + enableServiceLinks: true + ## @param replica.preExecCmds Additional commands to run prior to starting Redis® replicas + ## + preExecCmds: [] + ## @param replica.extraFlags Array with additional command line flags for Redis® replicas + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param replica.extraEnvVars Array with extra environment variables to add to Redis® replicas nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param replica.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® replicas nodes + ## + extraEnvVarsCM: "" + ## @param replica.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® replicas nodes + ## + extraEnvVarsSecret: "" + ## @param replica.externalMaster.enabled Use external master for bootstrapping + ## @param replica.externalMaster.host External master host to bootstrap from + ## @param replica.externalMaster.port Port for Redis service external master host + ## + externalMaster: + enabled: false + host: "" + port: 6379 + ## @param replica.containerPorts.redis Container port to open on Redis® replicas nodes + ## + containerPorts: + redis: 6379 + ## Configure extra options for Redis® containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param replica.startupProbe.enabled Enable startupProbe on Redis® replicas nodes + ## @param replica.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param replica.startupProbe.periodSeconds Period seconds for startupProbe + ## @param replica.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param replica.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param replica.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 
10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 22 + ## @param replica.livenessProbe.enabled Enable livenessProbe on Redis® replicas nodes + ## @param replica.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param replica.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param replica.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param replica.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param replica.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.readinessProbe.enabled Enable readinessProbe on Redis® replicas nodes + ## @param replica.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param replica.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param replica.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param replica.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param replica.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param replica.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param replica.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis® replicas resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param replica.resources.limits The resources limits for the Redis® replicas containers + ## @param replica.resources.requests The requested resources for the Redis® replicas containers + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
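+    # On OpenShift it can also help to compare any values chosen here against
+    # the project's quota before deploying, e.g. (sketch):
+    #   oc get resourcequota -n <namespace>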
    limits: {}
+    #   cpu: 250m
+    #   memory: 256Mi
+    requests: {}
+    #   cpu: 250m
+    #   memory: 256Mi
+  ## Configure Pods Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+  ## @param replica.podSecurityContext.enabled Enabled Redis® replicas pods' Security Context
+  ## @param replica.podSecurityContext.fsGroup Set Redis® replicas pod's Security Context fsGroup
+  ##
+  podSecurityContext:
+    enabled: true
+    fsGroup: 1010150000
+  ## Configure Container Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+  ## @param replica.containerSecurityContext.enabled Enabled Redis® replicas containers' Security Context
+  ## @param replica.containerSecurityContext.runAsUser Set Redis® replicas containers' Security Context runAsUser
+  ## @param replica.containerSecurityContext.runAsGroup Set Redis® replicas containers' Security Context runAsGroup
+  ## @param replica.containerSecurityContext.runAsNonRoot Set Redis® replicas containers' Security Context runAsNonRoot
+  ## @param replica.containerSecurityContext.allowPrivilegeEscalation Set Redis® replicas pod's Security Context allowPrivilegeEscalation
+  ## @param replica.containerSecurityContext.seccompProfile.type Set Redis® replicas containers' Security Context seccompProfile
+  ## @param replica.containerSecurityContext.capabilities.drop Set Redis® replicas containers' Security Context capabilities to drop
+  ##
+  containerSecurityContext:
+    enabled: true
+    runAsUser: 1010150000
+    runAsGroup: 1010150000
+    runAsNonRoot: true
+    allowPrivilegeEscalation: false
+    seccompProfile:
+      type: RuntimeDefault
+    capabilities:
+      drop:
+        - ALL
+  ## @param replica.schedulerName Alternate scheduler for Redis® replicas pods
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  schedulerName: ""
+  ## @param replica.updateStrategy.type Redis® replicas statefulset strategy type
+  ## @skip replica.updateStrategy.rollingUpdate
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+  ##
+  updateStrategy:
+    ## StrategyType
+    ## Can be set to RollingUpdate, OnDelete (statefulset), Recreate (deployment)
+    ##
+    type: RollingUpdate
+  ## @param replica.minReadySeconds How many seconds a pod needs to be ready before killing the next, during update
+  ##
+  minReadySeconds: 0
+  ## @param replica.priorityClassName Redis® replicas pods' priorityClassName
+  ##
+  priorityClassName: ""
+  ## @param replica.podManagementPolicy podManagementPolicy to manage scaling operation of Redis® replicas pods
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
+  ##
+  podManagementPolicy: ""
+  ## @param replica.hostAliases Redis® replicas pods host aliases
+  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+  ##
+  hostAliases: []
+  ## @param replica.podLabels Extra labels for Redis® replicas pods
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+  ##
+  podLabels: {}
+  ## @param replica.podAnnotations Annotations for Redis® replicas pods
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+  ##
+  podAnnotations: {}
+  ## @param replica.shareProcessNamespace Share a single process namespace between all of the containers in Redis® replicas pods
+  ## ref:
https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param replica.podAffinityPreset Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param replica.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param replica.nodeAffinityPreset.type Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param replica.nodeAffinityPreset.key Node label key to match. Ignored if `replica.affinity` is set + ## + key: "" + ## @param replica.nodeAffinityPreset.values Node label values to match. Ignored if `replica.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param replica.affinity Affinity for Redis® replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `replica.podAffinityPreset`, `replica.podAntiAffinityPreset`, and `replica.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param replica.nodeSelector Node labels for Redis® replicas pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param replica.tolerations Tolerations for Redis® replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param replica.topologySpreadConstraints Spread Constraints for Redis® replicas pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. + ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: [] + ## @param replica.dnsPolicy DNS Policy for Redis® replica pods + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. + ## dnsPolicy: ClusterFirst + ## + dnsPolicy: "" + ## @param replica.dnsConfig DNS Configuration for Redis® replica pods + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. 
+ ## dnsConfig: + ## options: + ## - name: ndots + ## value: "4" + ## - name: single-request-reopen + ## + dnsConfig: {} + ## @param replica.lifecycleHooks for the Redis® replica container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param replica.extraVolumes Optionally specify extra list of additional volumes for the Redis® replicas pod(s) + ## + extraVolumes: [] + ## @param replica.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® replicas container(s) + ## + extraVolumeMounts: [] + ## @param replica.sidecars Add additional sidecar containers to the Redis® replicas pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param replica.initContainers Add additional init containers to the Redis® replicas pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Persistence Parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param replica.persistence.enabled Enable persistence on Redis® replicas nodes using Persistent Volume Claims + ## + enabled: true + ## @param replica.persistence.medium Provide a medium for `emptyDir` volumes. + ## + medium: "" + ## @param replica.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. + ## + sizeLimit: "" + ## @param replica.persistence.path The path the volume will be mounted at on Redis® replicas containers + ## NOTE: Useful when using different Redis® images + ## + path: /data + ## @param replica.persistence.subPath The subdirectory of the volume to mount on Redis® replicas containers + ## NOTE: Useful in dev environments + ## + subPath: "" + ## @param replica.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis® replicas containers + ## + subPathExpr: "" + ## @param replica.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param replica.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param replica.persistence.size Persistent Volume size + ## + size: 2Gi + ## @param replica.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param replica.persistence.labels Additional custom labels for the PVC + ## + labels: {} + ## @param replica.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param replica.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param replica.persistence.existingClaim Use a existing PVC which must be created manually before bound + ## NOTE: requires replica.persistence.enabled: true + ## + existingClaim: "" + ## persistentVolumeClaimRetentionPolicy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## @param 
replica.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet + ## @param replica.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param replica.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + persistentVolumeClaimRetentionPolicy: + enabled: false + whenScaled: Retain + whenDeleted: Retain + ## Redis® replicas service parameters + ## + service: + ## @param replica.service.type Redis® replicas service type + ## + type: ClusterIP + ## @param replica.service.ports.redis Redis® replicas service port + ## + ports: + redis: 6379 + ## @param replica.service.nodePorts.redis Node port for Redis® replicas + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + redis: "" + ## @param replica.service.externalTrafficPolicy Redis® replicas service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param replica.service.internalTrafficPolicy Redis® replicas service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/ + ## + internalTrafficPolicy: Cluster + ## @param replica.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param replica.service.clusterIP Redis® replicas service Cluster IP + ## + clusterIP: "" + ## @param replica.service.loadBalancerIP Redis® replicas service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param replica.service.loadBalancerSourceRanges Redis® replicas service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param replica.service.annotations Additional custom annotations for Redis® replicas service + ## + annotations: {} + ## @param replica.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param replica.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param replica.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-replicas pods + ## + terminationGracePeriodSeconds: 30 + ## Autoscaling configuration + ## + autoscaling: + ## @param replica.autoscaling.enabled Enable replica autoscaling settings + ## + enabled: false + ## @param replica.autoscaling.minReplicas Minimum replicas for the pod autoscaling + ## + minReplicas: 1 + ## @param replica.autoscaling.maxReplicas Maximum replicas for the pod autoscaling + ## + maxReplicas: 11 + ## @param replica.autoscaling.targetCPU Percentage of CPU to consider when autoscaling + ## + targetCPU: "" + ## @param replica.autoscaling.targetMemory Percentage of Memory to consider when autoscaling + ## + targetMemory: "" + ## ServiceAccount configuration + ## + serviceAccount: + ## @param replica.serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: false + ## @param replica.serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param replica.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param replica.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## @section Redis® Sentinel configuration parameters +## + +sentinel: + ## @param sentinel.enabled Use Redis® Sentinel on Redis® pods. + ## IMPORTANT: this will disable the master and replicas services and + ## create a single Redis® service exposing both the Redis and Sentinel ports + ## + enabled: false + ## Bitnami Redis® Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## @param sentinel.image.registry [default: REGISTRY_NAME] Redis® Sentinel image registry + ## @param sentinel.image.repository [default: REPOSITORY_NAME/redis-sentinel] Redis® Sentinel image repository + ## @skip sentinel.image.tag Redis® Sentinel image tag (immutable tags are recommended) + ## @param sentinel.image.digest Redis® Sentinel image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag
+  ## @param sentinel.image.pullPolicy Redis® Sentinel image pull policy
+  ## @param sentinel.image.pullSecrets Redis® Sentinel image pull secrets
+  ## @param sentinel.image.debug Enable image debug mode
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/redis-sentinel
+    tag: 7.2.2-debian-11-r0
+    digest: ""
+    ## Specify an imagePullPolicy
+    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+    ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+    ##
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## e.g:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+    ## Enable debug mode
+    ##
+    debug: false
+  ## @param sentinel.annotations Additional custom annotations for Redis® Sentinel resource
+  ##
+  annotations: {}
+  ## @param sentinel.masterSet Master set name
+  ##
+  masterSet: mymaster
+  ## @param sentinel.quorum Sentinel Quorum
+  ##
+  quorum: 2
+  ## @param sentinel.getMasterTimeout Amount of time to allow before get_sentinel_master_info() times out.
+  ##
+  getMasterTimeout: 99
+  ## @param sentinel.automateClusterRecovery Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it.
+  ## This also prevents any new replica from starting until the last remaining replica is elected as master to guarantee that it is the one to be elected by Sentinel, and not a newly started replica with no data.
+  ## NOTE: This feature requires a "downAfterMilliseconds" value less than or equal to 2000.
+  ##
+  automateClusterRecovery: false
+  ## @param sentinel.redisShutdownWaitFailover Whether the Redis® master container waits for the failover at shutdown (in addition to the Redis® Sentinel container).
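+  ## Note: quorum only sets how many Sentinels must agree that the master is
+  ## unreachable; authorizing the failover itself still requires a majority of
+  ## all Sentinels. e.g. with 3 Sentinels and quorum: 2, two agreeing Sentinels
+  ## can trigger a failover, whereas with only 2 Sentinels the loss of either
+  ## one leaves no majority and no failover can be authorized.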
+ ## + redisShutdownWaitFailover: true + ## Sentinel timing restrictions + ## @param sentinel.downAfterMilliseconds Timeout for detecting a Redis® node is down + ## @param sentinel.failoverTimeout Timeout for performing a election failover + ## + downAfterMilliseconds: 60000 + failoverTimeout: 180000 + ## @param sentinel.parallelSyncs Number of replicas that can be reconfigured in parallel to use the new master after a failover + ## + parallelSyncs: 1 + ## @param sentinel.configuration Configuration for Redis® Sentinel nodes + ## ref: https://redis.io/topics/sentinel + ## + configuration: "" + ## @param sentinel.command Override default container command (useful when using custom images) + ## + command: [] + ## @param sentinel.args Override default container args (useful when using custom images) + ## + args: [] + ## @param sentinel.enableServiceLinks Whether information about services should be injected into pod's environment variable + ## + enableServiceLinks: true + ## @param sentinel.preExecCmds Additional commands to run prior to starting Redis® Sentinel + ## + preExecCmds: [] + ## @param sentinel.extraEnvVars Array with extra environment variables to add to Redis® Sentinel nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param sentinel.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® Sentinel nodes + ## + extraEnvVarsCM: "" + ## @param sentinel.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® Sentinel nodes + ## + extraEnvVarsSecret: "" + ## @param sentinel.externalMaster.enabled Use external master for bootstrapping + ## @param sentinel.externalMaster.host External master host to bootstrap from + ## @param sentinel.externalMaster.port Port for Redis service external master host + ## + externalMaster: + enabled: false + host: "" + port: 6379 + ## @param sentinel.containerPorts.sentinel Container port to open on Redis® Sentinel nodes + ## + containerPorts: + sentinel: 26379 + ## Configure extra options for Redis® containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param sentinel.startupProbe.enabled Enable startupProbe on Redis® Sentinel nodes + ## @param sentinel.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param sentinel.startupProbe.periodSeconds Period seconds for startupProbe + ## @param sentinel.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param sentinel.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param sentinel.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 22 + ## @param sentinel.livenessProbe.enabled Enable livenessProbe on Redis® Sentinel nodes + ## @param sentinel.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param sentinel.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param sentinel.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param sentinel.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param sentinel.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + 
failureThreshold: 6 + ## @param sentinel.readinessProbe.enabled Enable readinessProbe on Redis® Sentinel nodes + ## @param sentinel.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param sentinel.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param sentinel.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param sentinel.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param sentinel.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 6 + ## @param sentinel.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param sentinel.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param sentinel.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param sentinel.persistence.enabled Enable persistence on Redis® sentinel nodes using Persistent Volume Claims (Experimental) + ## + enabled: false + ## @param sentinel.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param sentinel.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param sentinel.persistence.size Persistent Volume size + ## + size: 100Mi + ## @param sentinel.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param sentinel.persistence.labels Additional custom labels for the PVC + ## + labels: {} + ## @param sentinel.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param sentinel.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param sentinel.persistence.medium Provide a medium for `emptyDir` volumes. + ## + medium: "" + ## @param sentinel.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. 
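+    ## Note: while sentinel.persistence stays disabled (as above), a restarted
+    ## Sentinel rebuilds its view of the master and its peers from the running
+    ## cluster rather than from disk.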
+ ## + sizeLimit: "" + ## persistentVolumeClaimRetentionPolicy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## @param sentinel.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet + ## @param sentinel.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param sentinel.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + persistentVolumeClaimRetentionPolicy: + enabled: false + whenScaled: Retain + whenDeleted: Retain + ## Redis® Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sentinel.resources.limits The resources limits for the Redis® Sentinel containers + ## @param sentinel.resources.requests The requested resources for the Redis® Sentinel containers + ## + resources: + limits: {} + requests: {} + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param sentinel.containerSecurityContext.enabled Enabled Redis® Sentinel containers' Security Context + ## @param sentinel.containerSecurityContext.runAsUser Set Redis® Sentinel containers' Security Context runAsUser + ## @param sentinel.containerSecurityContext.runAsGroup Set Redis® Sentinel containers' Security Context runAsGroup + ## @param sentinel.containerSecurityContext.runAsNonRoot Set Redis® Sentinel containers' Security Context runAsNonRoot + ## @param sentinel.containerSecurityContext.allowPrivilegeEscalation Set Redis® Sentinel containers' Security Context allowPrivilegeEscalation + ## @param sentinel.containerSecurityContext.seccompProfile.type Set Redis® Sentinel containers' Security Context seccompProfile + ## @param sentinel.containerSecurityContext.capabilities.drop Set Redis® Sentinel containers' Security Context capabilities to drop + ## + containerSecurityContext: + enabled: true + runAsUser: 1010150000 + runAsGroup: 1010150000 + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + ## @param sentinel.lifecycleHooks for the Redis® sentinel container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param sentinel.extraVolumes Optionally specify extra list of additional volumes for the Redis® Sentinel + ## + extraVolumes: [] + ## @param sentinel.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® Sentinel container(s) + ## + extraVolumeMounts: [] + ## Redis® Sentinel service parameters + ## + service: + ## @param sentinel.service.type Redis® Sentinel service type + ## + type: ClusterIP + ## @param sentinel.service.ports.redis Redis® service port for Redis® + ## @param sentinel.service.ports.sentinel Redis® service port for Redis® Sentinel + ## + ports: + redis: 6379 + sentinel: 26379 + ## @param sentinel.service.nodePorts.redis Node port for Redis® + ## @param sentinel.service.nodePorts.sentinel Node port for Sentinel + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## NOTE: By leaving these values blank, they will be generated by ports-configmap + ## If setting manually, please leave at least replica.replicaCount + 1 in between 
sentinel.service.nodePorts.redis and sentinel.service.nodePorts.sentinel to take into account the ports that will be created while incrementing that base port + ## + nodePorts: + redis: "" + sentinel: "" + ## @param sentinel.service.externalTrafficPolicy Redis® Sentinel service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param sentinel.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param sentinel.service.clusterIP Redis® Sentinel service Cluster IP + ## + clusterIP: "" + ## @param sentinel.service.loadBalancerIP Redis® Sentinel service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param sentinel.service.loadBalancerSourceRanges Redis® Sentinel service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param sentinel.service.annotations Additional custom annotations for Redis® Sentinel service + ## + annotations: {} + ## @param sentinel.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param sentinel.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param sentinel.service.headless.annotations Annotations for the headless service. + ## + annotations: {} + ## @param sentinel.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-node pods + ## + terminationGracePeriodSeconds: 30 + +## @section Other Parameters +## + +## @param serviceBindings.enabled Create secret for service binding (Experimental) +## Ref: https://servicebinding.io/service-provider/ +## +serviceBindings: + enabled: false + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the ports + ## Redis® is listening on. When true, Redis® will accept connections from any source + ## (with the correct destination port). 
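+  ## e.g. if allowExternal were set to false, client pods would need the
+  ## chart's client label before they could connect; with the fullnameOverride
+  ## used in this file that label is expected to be (assumption based on the
+  ## chart's standard NetworkPolicy selector):
+  ##   metadata:
+  ##     labels:
+  ##       cthub-redis-test-client: "true"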
+ ## + allowExternal: true + ## @param networkPolicy.extraIngress Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param networkPolicy.extraEgress Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param networkPolicy.ingressNSMatchLabels Labels to match to allow traffic from other namespaces + ## @param networkPolicy.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + metrics: + ## @param networkPolicy.metrics.allowExternal Don't require client label for connections for metrics endpoint + ## When set to false, only pods with the correct client label will have network access to the metrics port + ## + allowExternal: true + ## @param networkPolicy.metrics.ingressNSMatchLabels Labels to match to allow traffic from other namespaces to metrics endpoint + ## @param networkPolicy.metrics.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces to metrics endpoint + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## @param podSecurityPolicy.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later + ## + create: false + ## @param podSecurityPolicy.enabled Enable PodSecurityPolicy's RBAC rules + ## + enabled: false +## RBAC configuration +## +rbac: + ## @param rbac.create Specifies whether RBAC resources should be created + ## + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## ServiceAccount configuration +## +serviceAccount: + ## @param serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Redis® Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Specifies whether a PodDisruptionBudget should be created + ## + create: false + ## @param pdb.minAvailable Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Max number of pods that can be unavailable after the eviction + ## + maxUnavailable: "" +## TLS configuration +## +tls: + ## @param tls.enabled Enable TLS traffic + ## + enabled: false + ## @param tls.authClients Require clients to authenticate + ## + authClients: true + ## @param tls.autoGenerated Enable autogenerated certificates + ## + autoGenerated: false + ## @param tls.existingSecret The name of the existing secret that contains the TLS certificates + ## + existingSecret: "" + ## @param tls.certificatesSecret DEPRECATED. Use existingSecret instead. + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate Key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## + certCAFilename: "" + ## @param tls.dhParamsFilename File containing DH params (in order to support DH based ciphers) + ## + dhParamsFilename: "" + +## @section Metrics Parameters +## + +metrics: + ## @param metrics.enabled Start a sidecar prometheus exporter to expose Redis® metrics + ## + enabled: false + ## Bitnami Redis® Exporter image + ## ref: https://hub.docker.com/r/bitnami/redis-exporter/tags/ + ## @param metrics.image.registry [default: REGISTRY_NAME] Redis® Exporter image registry + ## @param metrics.image.repository [default: REPOSITORY_NAME/redis-exporter] Redis® Exporter image repository + ## @skip metrics.image.tag Redis® Exporter image tag (immutable tags are recommended) + ## @param metrics.image.digest Redis® Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param metrics.image.pullPolicy Redis® Exporter image pull policy + ## @param metrics.image.pullSecrets Redis® Exporter image pull secrets + ## + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.55.0-debian-11-r0 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Configure extra options for Redis® containers' liveness, readiness & startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## @param metrics.startupProbe.enabled Enable startupProbe on Redis® replicas nodes + ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe + ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param metrics.livenessProbe.enabled Enable livenessProbe on Redis® replicas nodes + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param metrics.readinessProbe.enabled Enable readinessProbe on Redis® replicas nodes + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param metrics.command Override default metrics container init command (useful when using custom images) + ## + command: [] + ## @param metrics.redisTargetHost A way to specify an alternative Redis® hostname + ## Useful for certificate CN/SAN matching + ## + redisTargetHost: "localhost" + ## @param metrics.extraArgs Extra arguments for Redis® exporter, for example: + ## e.g.: + ## extraArgs: + ## check-keys: myKey,myOtherKey + ## + extraArgs: {} + ## @param metrics.extraEnvVars Array with extra environment variables to add to Redis® exporter + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod 
+ ## @param metrics.containerSecurityContext.enabled Enabled Redis® exporter containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set Redis® exporter containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsGroup Set Redis® exporter containers' Security Context runAsGroup + ## @param metrics.containerSecurityContext.runAsNonRoot Set Redis® exporter containers' Security Context runAsNonRoot + ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set Redis® exporter containers' Security Context allowPrivilegeEscalation + ## @param metrics.containerSecurityContext.seccompProfile.type Set Redis® exporter containers' Security Context seccompProfile + ## @param metrics.containerSecurityContext.capabilities.drop Set Redis® exporter containers' Security Context capabilities to drop + ## + containerSecurityContext: + enabled: true + runAsUser: 1010150000 + runAsGroup: 1010150000 + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + ## @param metrics.extraVolumes Optionally specify extra list of additional volumes for the Redis® metrics sidecar + ## + extraVolumes: [] + ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® metrics sidecar + ## + extraVolumeMounts: [] + ## Redis® exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.resources.limits The resources limits for the Redis® exporter container + ## @param metrics.resources.requests The requested resources for the Redis® exporter container + ## + resources: + limits: {} + requests: {} + ## @param metrics.podLabels Extra labels for Redis® exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param metrics.podAnnotations [object] Annotations for Redis® exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + ## Redis® exporter service parameters + ## + service: + ## @param metrics.service.type Redis® exporter service type + ## + type: ClusterIP + ## @param metrics.service.port Redis® exporter service port + ## + port: 9121 + ## @param metrics.service.externalTrafficPolicy Redis® exporter service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param metrics.service.loadBalancerIP Redis® exporter service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param metrics.service.loadBalancerSourceRanges Redis® exporter service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param metrics.service.annotations Additional custom annotations for Redis® exporter service + ## + annotations: {} + ## @param metrics.service.clusterIP Redis® exporter service Cluster IP + ## + clusterIP: "" + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. + ## + relabellings: [] + ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.serviceMonitor.podTargetLabels Labels from the Kubernetes pod to be transferred to the created metrics + ## + podTargetLabels: [] + ## @param metrics.serviceMonitor.sampleLimit Limit of how many samples should be scraped from every Pod + ## + sampleLimit: false + ## @param metrics.serviceMonitor.targetLimit Limit of how many targets should be scraped + ## + targetLimit: false + ## Prometheus Pod Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#podmonitor + ## + podMonitor: + ## @param metrics.podMonitor.enabled Create PodMonitor resource(s) for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.podMonitor.namespace The namespace in which the PodMonitor will be created + ## + namespace: "" + ## @param metrics.podMonitor.interval The interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.podMonitor.scrapeTimeout The timeout after which the scrape is ended + ## + scrapeTimeout: "" + ## @param metrics.podMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. + ## + relabellings: [] + ## @param metrics.podMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. 
+ ## + metricRelabelings: [] + ## @param metrics.podMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.podMonitor.additionalLabels Additional labels that can be used so PodMonitor resource(s) can be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.podMonitor.podTargetLabels Labels from the Kubernetes pod to be transferred to the created metrics + ## + podTargetLabels: [] + ## @param metrics.podMonitor.sampleLimit Limit of how many samples should be scraped from every Pod + ## + sampleLimit: false + ## @param metrics.podMonitor.targetLimit Limit of how many targets should be scraped + ## + targetLimit: false + + ## Custom PrometheusRule to be defined + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.prometheusRule.namespace The namespace in which the prometheusRule will be created + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels for the prometheusRule + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules Custom Prometheus rules + ## e.g: + ## rules: + ## - alert: RedisDown + ## expr: redis_up{service="{{ template "common.names.fullname" . }}-metrics"} == 0 + ## for: 2m + ## labels: + ## severity: error + ## annotations: + ## summary: Redis® instance {{ "{{ $labels.instance }}" }} down + ## description: Redis® instance {{ "{{ $labels.instance }}" }} is down + ## - alert: RedisMemoryHigh + ## expr: > + ## redis_memory_used_bytes{service="{{ template "common.names.fullname" . }}-metrics"} * 100 + ## / + ## redis_memory_max_bytes{service="{{ template "common.names.fullname" . }}-metrics"} + ## > 90 + ## for: 2m + ## labels: + ## severity: error + ## annotations: + ## summary: Redis® instance {{ "{{ $labels.instance }}" }} is using too much memory + ## description: | + ## Redis® instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + ## - alert: RedisKeyEviction + ## expr: | + ## increase(redis_evicted_keys_total{service="{{ template "common.names.fullname" . }}-metrics"}[5m]) > 0 + ## for: 1s + ## labels: + ## severity: error + ## annotations: + ## summary: Redis® instance {{ "{{ $labels.instance }}" }} has evicted keys + ## description: | + ## Redis® instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. 
+ ## + rules: [] + +## @section Init Container Parameters +## + +## 'volumePermissions' init container parameters +## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values +## based on the *podSecurityContext/*containerSecurityContext parameters +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` + ## + enabled: false + ## OS Shell + Utility image + ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/ + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] OS Shell + Utility image repository + ## @skip volumePermissions.image.tag OS Shell + Utility image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy OS Shell + Utility image pull policy + ## @param volumePermissions.image.pullSecrets OS Shell + Utility image pull secrets + ## + image: + registry: docker.io + repository: bitnami/os-shell + tag: 11-debian-11-r90 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits The resources limits for the init container + ## @param volumePermissions.resources.requests The requested resources for the init container + ## + resources: + limits: {} + requests: {} + ## Init container Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser + ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the + ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed) + ## + containerSecurityContext: + runAsUser: 0 + +## init-sysctl container parameters +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +## +sysctl: + ## @param sysctl.enabled Enable init container to modify Kernel settings + ## + enabled: false + ## OS Shell + Utility image + ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/ + ## @param sysctl.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry + ## @param sysctl.image.repository [default: REPOSITORY_NAME/os-shell] OS Shell + Utility image repository + ## @skip sysctl.image.tag OS Shell + Utility image tag (immutable tags are recommended) + ## @param sysctl.image.digest OS Shell + Utility image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag
+  ## @param sysctl.image.pullPolicy OS Shell + Utility image pull policy
+  ## @param sysctl.image.pullSecrets OS Shell + Utility image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/os-shell
+    tag: 11-debian-11-r90
+    digest: ""
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## e.g:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## @param sysctl.command Override default init-sysctl container command (useful when using custom images)
+  ##
+  command: []
+  ## @param sysctl.mountHostSys Mount the host `/sys` folder to `/host-sys`
+  ##
+  mountHostSys: false
+  ## Init container's resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param sysctl.resources.limits The resources limits for the init container
+  ## @param sysctl.resources.requests The requested resources for the init container
+  ##
+  resources:
+    limits: {}
+    requests: {}
+
+## @section useExternalDNS Parameters
+##
+## @param useExternalDNS.enabled Enable various syntax that would enable external-dns to work. Note this requires a working installation of `external-dns` to be usable.
+## @param useExternalDNS.additionalAnnotations Extra annotations to be utilized when `external-dns` is enabled.
+## @param useExternalDNS.annotationKey The annotation key utilized when `external-dns` is enabled. Setting this to `false` will disable annotations.
+## @param useExternalDNS.suffix The DNS suffix utilized when `external-dns` is enabled. Note that we prepend the suffix with the full name of the release.
+##
+useExternalDNS:
+  enabled: false
+  suffix: ""
+  annotationKey: external-dns.alpha.kubernetes.io/
+  additionalAnnotations: {}
diff --git a/openshift/templates/redis/readme.md b/openshift/templates/redis/readme.md
new file mode 100644
index 00000000..538af9d9
--- /dev/null
+++ b/openshift/templates/redis/readme.md
@@ -0,0 +1,26 @@
+# Redis
+
+## Source
+
+* https://artifacthub.io/packages/helm/bitnami/redis
+
+* https://github.com/bitnami/charts/tree/main/bitnami/redis
+
+### Install and Version
+
+Add the Bitnami repository, then install or upgrade each release with its override values file, pinning the chart version:
+
+```
+helm repo add bitnami https://charts.bitnami.com/bitnami
+
+helm -n 30b186-dev upgrade --install -f ./cthub-dev-values.yaml cthub-redis-dev bitnami/redis --version 18.2.0
+helm -n 30b186-test upgrade --install -f ./cthub-test-values.yaml cthub-redis-test bitnami/redis --version 18.2.0
+```
+
+To remove a release:
+
+```
+helm -n 30b186-dev uninstall cthub-redis-dev
+```
+
+The chart names pods after the release, e.g. replica pod `cthub-redis-dev-replicas-1`.
diff --git a/openshift/templates/redis/the-default-values.yaml b/openshift/templates/redis/the-default-values.yaml
new file mode 100644
index 00000000..38147b56
--- /dev/null
+++ b/openshift/templates/redis/the-default-values.yaml
@@ -0,0 +1,1935 @@
+# Copyright VMware, Inc.
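+#
+# NOTE: this file is a verbatim copy of the bitnami/redis chart's default
+# values, kept for reference only; the settings actually deployed come from
+# the override files passed to helm (cthub-dev-values.yaml /
+# cthub-test-values.yaml, see readme.md). A minimal override file might look
+# like the sketch below -- the values shown are illustrative, not the real
+# CTHub settings:
+#
+#   architecture: replication
+#   auth:
+#     existingSecret: cthub-redis   # hypothetical secret name
+#   master:
+#     persistence:
+#       size: 1Gi
+#   replica:
+#     replicaCount: 2
+#     persistence:
+#       size: 1Gi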
+# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.redis.password Global Redis® password (overrides `auth.password`) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + redis: + password: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param secretAnnotations Annotations to add to secret +## +secretAnnotations: {} +## @param clusterDomain Kubernetes cluster domain name +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## @param useHostnames Use hostnames internally when announcing replication. If false, the hostname will be resolved to an IP address +## +useHostnames: true +## @param nameResolutionThreshold Failure threshold for internal hostnames resolution +## +nameResolutionThreshold: 5 +## @param nameResolutionTimeout Timeout seconds between probes for internal hostnames resolution +## +nameResolutionTimeout: 5 + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section Redis® Image parameters +## + +## Bitnami Redis® image +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## @param image.registry [default: REGISTRY_NAME] Redis® image registry +## @param image.repository [default: REPOSITORY_NAME/redis] Redis® image repository +## @skip image.tag Redis® image tag (immutable tags are recommended) +## @param image.digest Redis® image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy Redis® image pull policy +## @param image.pullSecrets Redis® image pull secrets +## @param image.debug Enable image debug mode +## +image: + registry: docker.io + repository: bitnami/redis + tag: 7.2.2-debian-11-r0 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + +## @section Redis® common configuration parameters +## https://github.com/bitnami/containers/tree/main/bitnami/redis#configuration +## + +## @param architecture Redis® architecture. Allowed values: `standalone` or `replication` +## +architecture: replication +## Redis® Authentication parameters +## ref: https://github.com/bitnami/containers/tree/main/bitnami/redis#setting-the-server-password-on-first-run +## +auth: + ## @param auth.enabled Enable password authentication + ## + enabled: true + ## @param auth.sentinel Enable password authentication on sentinels too + ## + sentinel: true + ## @param auth.password Redis® password + ## Defaults to a random 10-character alphanumeric string if not set + ## + password: "" + ## @param auth.existingSecret The name of an existing secret with Redis® credentials + ## NOTE: When it's set, the previous `auth.password` parameter is ignored + ## + existingSecret: "" + ## @param auth.existingSecretPasswordKey Password key to be retrieved from existing secret + ## NOTE: ignored unless `auth.existingSecret` parameter is set + ## + existingSecretPasswordKey: "" + ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable + ## + usePasswordFiles: false + +## @param commonConfiguration [string] Common configuration to be added into the ConfigMap +## ref: https://redis.io/topics/config +## +commonConfiguration: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" +## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for Redis® nodes +## +existingConfigmap: "" + +## @section Redis® master configuration parameters +## + +master: + ## @param master.count Number of Redis® master instances to deploy (experimental, requires additional configuration) + ## + count: 1 + ## @param master.configuration Configuration for Redis® master nodes + ## ref: https://redis.io/topics/config + ## + configuration: "" + ## @param master.disableCommands Array with Redis® commands to disable on master nodes + ## Commands will be completely disabled by renaming each to an empty string. 
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + ## @param master.command Override default container command (useful when using custom images) + ## + command: [] + ## @param master.args Override default container args (useful when using custom images) + ## + args: [] + ## @param master.enableServiceLinks Whether information about services should be injected into pod's environment variable + ## + enableServiceLinks: true + ## @param master.preExecCmds Additional commands to run prior to starting Redis® master + ## + preExecCmds: [] + ## @param master.extraFlags Array with additional command line flags for Redis® master + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param master.extraEnvVars Array with extra environment variables to add to Redis® master nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® master nodes + ## + extraEnvVarsCM: "" + ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® master nodes + ## + extraEnvVarsSecret: "" + ## @param master.containerPorts.redis Container port to open on Redis® master nodes + ## + containerPorts: + redis: 6379 + ## Configure extra options for Redis® containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param master.startupProbe.enabled Enable startupProbe on Redis® master nodes + ## @param master.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param master.startupProbe.periodSeconds Period seconds for startupProbe + ## @param master.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param master.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param master.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.livenessProbe.enabled Enable livenessProbe on Redis® master nodes + ## @param master.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param master.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param master.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param master.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param master.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.readinessProbe.enabled Enable readinessProbe on Redis® master nodes + ## @param master.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param master.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param master.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param master.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param master.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 
1 + successThreshold: 1 + failureThreshold: 5 + ## @param master.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param master.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param master.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis® master resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param master.resources.limits The resources limits for the Redis® master containers + ## @param master.resources.requests The requested resources for the Redis® master containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.podSecurityContext.enabled Enabled Redis® master pods' Security Context + ## @param master.podSecurityContext.fsGroup Set Redis® master pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.containerSecurityContext.enabled Enabled Redis® master containers' Security Context + ## @param master.containerSecurityContext.runAsUser Set Redis® master containers' Security Context runAsUser + ## @param master.containerSecurityContext.runAsGroup Set Redis® master containers' Security Context runAsGroup + ## @param master.containerSecurityContext.runAsNonRoot Set Redis® master containers' Security Context runAsNonRoot + ## @param master.containerSecurityContext.allowPrivilegeEscalation Is it possible to escalate Redis® pod(s) privileges + ## @param master.containerSecurityContext.seccompProfile.type Set Redis® master containers' Security Context seccompProfile + ## @param master.containerSecurityContext.capabilities.drop Set Redis® master containers' Security Context capabilities to drop + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsGroup: 0 + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + ## @param master.kind Use either Deployment or StatefulSet (default) + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ + ## + kind: StatefulSet + ## @param master.schedulerName Alternate scheduler for Redis® master pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param master.updateStrategy.type Redis® master statefulset strategy type + ## @skip master.updateStrategy.rollingUpdate + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate, OnDelete (statefulset), Recreate (deployment) + ## + type: RollingUpdate + ## @param master.minReadySeconds How many seconds a pod needs to be ready before killing the next, during update + ## + minReadySeconds: 0 + ## @param master.priorityClassName Redis® master pods' priorityClassName + ## + priorityClassName: "" + ## @param master.hostAliases Redis® master pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param 
master.podLabels Extra labels for Redis® master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param master.podAnnotations Annotations for Redis® master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param master.shareProcessNamespace Share a single process namespace between all of the containers in Redis® master pods + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param master.podAffinityPreset Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param master.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node master.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param master.nodeAffinityPreset.type Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param master.nodeAffinityPreset.key Node label key to match. Ignored if `master.affinity` is set + ## + key: "" + ## @param master.nodeAffinityPreset.values Node label values to match. Ignored if `master.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param master.affinity Affinity for Redis® master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `master.podAffinityPreset`, `master.podAntiAffinityPreset`, and `master.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param master.nodeSelector Node labels for Redis® master pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param master.tolerations Tolerations for Redis® master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param master.topologySpreadConstraints Spread Constraints for Redis® master pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. + ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: [] + ## @param master.dnsPolicy DNS Policy for Redis® master pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. + ## dnsPolicy: ClusterFirst + ## + dnsPolicy: "" + ## @param master.dnsConfig DNS Configuration for Redis® master pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. 
+ ## dnsConfig: + ## options: + ## - name: ndots + ## value: "4" + ## - name: single-request-reopen + ## + dnsConfig: {} + ## @param master.lifecycleHooks for the Redis® master container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param master.extraVolumes Optionally specify extra list of additional volumes for the Redis® master pod(s) + ## + extraVolumes: [] + ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® master container(s) + ## + extraVolumeMounts: [] + ## @param master.sidecars Add additional sidecar containers to the Redis® master pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param master.initContainers Add additional init containers to the Redis® master pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence on Redis® master nodes using Persistent Volume Claims + ## + enabled: true + ## @param master.persistence.medium Provide a medium for `emptyDir` volumes. + ## + medium: "" + ## @param master.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. + ## + sizeLimit: "" + ## @param master.persistence.path The path the volume will be mounted at on Redis® master containers + ## NOTE: Useful when using different Redis® images + ## + path: /data + ## @param master.persistence.subPath The subdirectory of the volume to mount on Redis® master containers + ## NOTE: Useful in dev environments + ## + subPath: "" + ## @param master.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis® master containers + ## + subPathExpr: "" + ## @param master.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume size + ## + size: 8Gi + ## @param master.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param master.persistence.labels Additional custom labels for the PVC + ## + labels: {} + ## @param master.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param master.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param master.persistence.existingClaim Use a existing PVC which must be created manually before bound + ## NOTE: requires master.persistence.enabled: true + ## + existingClaim: "" + ## persistentVolumeClaimRetentionPolicy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## @param master.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are 
deleted during the lifecycle of a StatefulSet + ## @param master.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param master.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + persistentVolumeClaimRetentionPolicy: + enabled: false + whenScaled: Retain + whenDeleted: Retain + ## Redis® master service parameters + ## + service: + ## @param master.service.type Redis® master service type + ## + type: ClusterIP + ## @param master.service.ports.redis Redis® master service port + ## + ports: + redis: 6379 + ## @param master.service.nodePorts.redis Node port for Redis® master + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + redis: "" + ## @param master.service.externalTrafficPolicy Redis® master service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param master.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param master.service.internalTrafficPolicy Redis® master service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/ + ## + internalTrafficPolicy: Cluster + ## @param master.service.clusterIP Redis® master service Cluster IP + ## + clusterIP: "" + ## @param master.service.loadBalancerIP Redis® master service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param master.service.loadBalancerSourceRanges Redis® master service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param master.service.externalIPs Redis® master service External IPs + ## https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## e.g. + ## externalIPs: + ## - 10.10.10.1 + ## - 201.22.30.1 + ## + externalIPs: [] + ## @param master.service.annotations Additional custom annotations for Redis® master service + ## + annotations: {} + ## @param master.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param master.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param master.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-master pods + ## + terminationGracePeriodSeconds: 30 + ## ServiceAccount configuration + ## + serviceAccount: + ## @param master.serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: false + ## @param master.serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param master.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param master.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + +## @section Redis® replicas configuration parameters +## + +replica: + ## @param replica.replicaCount Number of Redis® replicas to deploy + ## + replicaCount: 3 + ## @param replica.configuration Configuration for Redis® replicas nodes + ## ref: https://redis.io/topics/config + ## + configuration: "" + ## @param replica.disableCommands Array with Redis® commands to disable on replicas nodes + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + ## @param replica.command Override default container command (useful when using custom images) + ## + command: [] + ## @param replica.args Override default container args (useful when using custom images) + ## + args: [] + ## @param replica.enableServiceLinks Whether information about services should be injected into pod's environment variable + ## + enableServiceLinks: true + ## @param replica.preExecCmds Additional commands to run prior to starting Redis® replicas + ## + preExecCmds: [] + ## @param replica.extraFlags Array with additional command line flags for Redis® replicas + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param replica.extraEnvVars Array with extra environment variables to add to Redis® replicas nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param replica.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® replicas nodes + ## + extraEnvVarsCM: "" + ## @param replica.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® replicas nodes + ## + extraEnvVarsSecret: "" + ## @param replica.externalMaster.enabled Use external master for bootstrapping + ## @param replica.externalMaster.host External master host to bootstrap from + ## @param replica.externalMaster.port Port for Redis service external master host + ## + externalMaster: + enabled: false + host: "" + port: 6379 + ## @param replica.containerPorts.redis Container port to open on Redis® replicas nodes + ## + containerPorts: + redis: 6379 + ## Configure extra options for Redis® containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param replica.startupProbe.enabled Enable startupProbe on Redis® replicas nodes + ## @param replica.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param replica.startupProbe.periodSeconds Period seconds for startupProbe + ## @param replica.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param replica.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param replica.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 
10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 22 + ## @param replica.livenessProbe.enabled Enable livenessProbe on Redis® replicas nodes + ## @param replica.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param replica.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param replica.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param replica.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param replica.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.readinessProbe.enabled Enable readinessProbe on Redis® replicas nodes + ## @param replica.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param replica.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param replica.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param replica.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param replica.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param replica.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param replica.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis® replicas resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param replica.resources.limits The resources limits for the Redis® replicas containers + ## @param replica.resources.requests The requested resources for the Redis® replicas containers + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+    limits: {}
+    #   cpu: 250m
+    #   memory: 256Mi
+    requests: {}
+    #   cpu: 250m
+    #   memory: 256Mi
+  ## Configure Pods Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+  ## @param replica.podSecurityContext.enabled Enabled Redis® replicas pods' Security Context
+  ## @param replica.podSecurityContext.fsGroup Set Redis® replicas pod's Security Context fsGroup
+  ##
+  podSecurityContext:
+    enabled: true
+    fsGroup: 1001
+  ## Configure Container Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+  ## @param replica.containerSecurityContext.enabled Enabled Redis® replicas containers' Security Context
+  ## @param replica.containerSecurityContext.runAsUser Set Redis® replicas containers' Security Context runAsUser
+  ## @param replica.containerSecurityContext.runAsGroup Set Redis® replicas containers' Security Context runAsGroup
+  ## @param replica.containerSecurityContext.runAsNonRoot Set Redis® replicas containers' Security Context runAsNonRoot
+  ## @param replica.containerSecurityContext.allowPrivilegeEscalation Set Redis® replicas pod's Security Context allowPrivilegeEscalation
+  ## @param replica.containerSecurityContext.seccompProfile.type Set Redis® replicas containers' Security Context seccompProfile
+  ## @param replica.containerSecurityContext.capabilities.drop Set Redis® replicas containers' Security Context capabilities to drop
+  ##
+  containerSecurityContext:
+    enabled: true
+    runAsUser: 1001
+    runAsGroup: 0
+    runAsNonRoot: true
+    allowPrivilegeEscalation: false
+    seccompProfile:
+      type: RuntimeDefault
+    capabilities:
+      drop:
+        - ALL
+  ## @param replica.schedulerName Alternate scheduler for Redis® replicas pods
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  schedulerName: ""
+  ## @param replica.updateStrategy.type Redis® replicas statefulset strategy type
+  ## @skip replica.updateStrategy.rollingUpdate
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+  ##
+  updateStrategy:
+    ## StrategyType
+    ## Can be set to RollingUpdate, OnDelete (statefulset), Recreate (deployment)
+    ##
+    type: RollingUpdate
+  ## @param replica.minReadySeconds How many seconds a pod needs to be ready before killing the next, during update
+  ##
+  minReadySeconds: 0
+  ## @param replica.priorityClassName Redis® replicas pods' priorityClassName
+  ##
+  priorityClassName: ""
+  ## @param replica.podManagementPolicy podManagementPolicy to manage scaling operation of Redis® replicas pods
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
+  ##
+  podManagementPolicy: ""
+  ## @param replica.hostAliases Redis® replicas pods host aliases
+  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+  ##
+  hostAliases: []
+  ## @param replica.podLabels Extra labels for Redis® replicas pods
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+  ##
+  podLabels: {}
+  ## @param replica.podAnnotations Annotations for Redis® replicas pods
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+  ##
+  podAnnotations: {}
+  ## @param replica.shareProcessNamespace Share a single process namespace between all of the containers in Redis® replicas pods
+  ## ref:
https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param replica.podAffinityPreset Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param replica.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param replica.nodeAffinityPreset.type Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param replica.nodeAffinityPreset.key Node label key to match. Ignored if `replica.affinity` is set + ## + key: "" + ## @param replica.nodeAffinityPreset.values Node label values to match. Ignored if `replica.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param replica.affinity Affinity for Redis® replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `replica.podAffinityPreset`, `replica.podAntiAffinityPreset`, and `replica.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param replica.nodeSelector Node labels for Redis® replicas pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param replica.tolerations Tolerations for Redis® replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param replica.topologySpreadConstraints Spread Constraints for Redis® replicas pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. + ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: [] + ## @param replica.dnsPolicy DNS Policy for Redis® replica pods + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. + ## dnsPolicy: ClusterFirst + ## + dnsPolicy: "" + ## @param replica.dnsConfig DNS Configuration for Redis® replica pods + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. 
+  ## dnsConfig:
+  ##   options:
+  ##   - name: ndots
+  ##     value: "4"
+  ##   - name: single-request-reopen
+  ##
+  dnsConfig: {}
+  ## @param replica.lifecycleHooks for the Redis® replica container(s) to automate configuration before or after startup
+  ##
+  lifecycleHooks: {}
+  ## @param replica.extraVolumes Optionally specify extra list of additional volumes for the Redis® replicas pod(s)
+  ##
+  extraVolumes: []
+  ## @param replica.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® replicas container(s)
+  ##
+  extraVolumeMounts: []
+  ## @param replica.sidecars Add additional sidecar containers to the Redis® replicas pod(s)
+  ## e.g:
+  ## sidecars:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     ports:
+  ##       - name: portname
+  ##         containerPort: 1234
+  ##
+  sidecars: []
+  ## @param replica.initContainers Add additional init containers to the Redis® replicas pod(s)
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+  ## e.g:
+  ## initContainers:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     command: ['sh', '-c', 'echo "hello world"']
+  ##
+  initContainers: []
+  ## Persistence Parameters
+  ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    ## @param replica.persistence.enabled Enable persistence on Redis® replicas nodes using Persistent Volume Claims
+    ##
+    enabled: true
+    ## @param replica.persistence.medium Provide a medium for `emptyDir` volumes.
+    ##
+    medium: ""
+    ## @param replica.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes.
+    ##
+    sizeLimit: ""
+    ## @param replica.persistence.path The path the volume will be mounted at on Redis® replicas containers
+    ## NOTE: Useful when using different Redis® images
+    ##
+    path: /data
+    ## @param replica.persistence.subPath The subdirectory of the volume to mount on Redis® replicas containers
+    ## NOTE: Useful in dev environments
+    ##
+    subPath: ""
+    ## @param replica.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis® replicas containers
+    ##
+    subPathExpr: ""
+    ## @param replica.persistence.storageClass Persistent Volume storage class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner
+    ##
+    storageClass: ""
+    ## @param replica.persistence.accessModes Persistent Volume access modes
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## @param replica.persistence.size Persistent Volume size
+    ##
+    size: 8Gi
+    ## @param replica.persistence.annotations Additional custom annotations for the PVC
+    ##
+    annotations: {}
+    ## @param replica.persistence.labels Additional custom labels for the PVC
+    ##
+    labels: {}
+    ## @param replica.persistence.selector Additional labels to match for the PVC
+    ## e.g:
+    ## selector:
+    ##   matchLabels:
+    ##     app: my-app
+    ##
+    selector: {}
+    ## @param replica.persistence.dataSource Custom PVC data source
+    ##
+    dataSource: {}
+    ## @param replica.persistence.existingClaim Use an existing PVC which must be created manually before being bound
+    ## NOTE: requires replica.persistence.enabled: true
+    ##
+    existingClaim: ""
+  ## persistentVolumeClaimRetentionPolicy
+  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
+  ## @param
replica.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet + ## @param replica.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param replica.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + persistentVolumeClaimRetentionPolicy: + enabled: false + whenScaled: Retain + whenDeleted: Retain + ## Redis® replicas service parameters + ## + service: + ## @param replica.service.type Redis® replicas service type + ## + type: ClusterIP + ## @param replica.service.ports.redis Redis® replicas service port + ## + ports: + redis: 6379 + ## @param replica.service.nodePorts.redis Node port for Redis® replicas + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + redis: "" + ## @param replica.service.externalTrafficPolicy Redis® replicas service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param replica.service.internalTrafficPolicy Redis® replicas service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/ + ## + internalTrafficPolicy: Cluster + ## @param replica.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param replica.service.clusterIP Redis® replicas service Cluster IP + ## + clusterIP: "" + ## @param replica.service.loadBalancerIP Redis® replicas service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param replica.service.loadBalancerSourceRanges Redis® replicas service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param replica.service.annotations Additional custom annotations for Redis® replicas service + ## + annotations: {} + ## @param replica.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param replica.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param replica.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-replicas pods + ## + terminationGracePeriodSeconds: 30 + ## Autoscaling configuration + ## + autoscaling: + ## @param replica.autoscaling.enabled Enable replica autoscaling settings + ## + enabled: false + ## @param replica.autoscaling.minReplicas Minimum replicas for the pod autoscaling + ## + minReplicas: 1 + ## @param replica.autoscaling.maxReplicas Maximum replicas for the pod autoscaling + ## + maxReplicas: 11 + ## @param replica.autoscaling.targetCPU Percentage of CPU to consider when autoscaling + ## + targetCPU: "" + ## @param replica.autoscaling.targetMemory Percentage of Memory to consider when autoscaling + ## + targetMemory: "" + ## ServiceAccount configuration + ## + serviceAccount: + ## @param replica.serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: false + ## @param replica.serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param replica.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param replica.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## @section Redis® Sentinel configuration parameters +## + +sentinel: + ## @param sentinel.enabled Use Redis® Sentinel on Redis® pods. + ## IMPORTANT: this will disable the master and replicas services and + ## create a single Redis® service exposing both the Redis and Sentinel ports + ## + enabled: false + ## Bitnami Redis® Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## @param sentinel.image.registry [default: REGISTRY_NAME] Redis® Sentinel image registry + ## @param sentinel.image.repository [default: REPOSITORY_NAME/redis-sentinel] Redis® Sentinel image repository + ## @skip sentinel.image.tag Redis® Sentinel image tag (immutable tags are recommended) + ## @param sentinel.image.digest Redis® Sentinel image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param sentinel.image.pullPolicy Redis® Sentinel image pull policy + ## @param sentinel.image.pullSecrets Redis® Sentinel image pull secrets + ## @param sentinel.image.debug Enable image debug mode + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + tag: 7.2.2-debian-11-r0 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + ## @param sentinel.annotations Additional custom annotations for Redis® Sentinel resource + ## + annotations: {} + ## @param sentinel.masterSet Master set name + ## + masterSet: mymaster + ## @param sentinel.quorum Sentinel Quorum + ## + quorum: 2 + ## @param sentinel.getMasterTimeout Amount of time to allow before get_sentinel_master_info() times out. + ## + getMasterTimeout: 99 + ## @param sentinel.automateClusterRecovery Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it. + ## This also prevents any new replica from starting until the last remaining replica is elected as master to guarantee that it is the one to be elected by Sentinel, and not a newly started replica with no data. + ## NOTE: This feature requires a "downAfterMilliseconds" value less or equal to 2000. + ## + automateClusterRecovery: false + ## @param sentinel.redisShutdownWaitFailover Whether the Redis® master container waits for the failover at shutdown (in addition to the Redis® Sentinel container). 
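+  ## e.g., a minimal sketch of how the failover knobs above combine (values
+  ## here are illustrative assumptions, not chart defaults): automated cluster
+  ## recovery is only valid together with the short down-detection window
+  ## noted above (downAfterMilliseconds <= 2000).
+  ## sentinel:
+  ##   enabled: true
+  ##   quorum: 2
+  ##   downAfterMilliseconds: 2000
+  ##   automateClusterRecovery: true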
+  ##
+  redisShutdownWaitFailover: true
+  ## Sentinel timing restrictions
+  ## @param sentinel.downAfterMilliseconds Timeout for detecting a Redis® node is down
+  ## @param sentinel.failoverTimeout Timeout for performing an election failover
+  ##
+  downAfterMilliseconds: 60000
+  failoverTimeout: 180000
+  ## @param sentinel.parallelSyncs Number of replicas that can be reconfigured in parallel to use the new master after a failover
+  ##
+  parallelSyncs: 1
+  ## @param sentinel.configuration Configuration for Redis® Sentinel nodes
+  ## ref: https://redis.io/topics/sentinel
+  ##
+  configuration: ""
+  ## @param sentinel.command Override default container command (useful when using custom images)
+  ##
+  command: []
+  ## @param sentinel.args Override default container args (useful when using custom images)
+  ##
+  args: []
+  ## @param sentinel.enableServiceLinks Whether information about services should be injected into the pod's environment variables
+  ##
+  enableServiceLinks: true
+  ## @param sentinel.preExecCmds Additional commands to run prior to starting Redis® Sentinel
+  ##
+  preExecCmds: []
+  ## @param sentinel.extraEnvVars Array with extra environment variables to add to Redis® Sentinel nodes
+  ## e.g:
+  ## extraEnvVars:
+  ##   - name: FOO
+  ##     value: "bar"
+  ##
+  extraEnvVars: []
+  ## @param sentinel.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® Sentinel nodes
+  ##
+  extraEnvVarsCM: ""
+  ## @param sentinel.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® Sentinel nodes
+  ##
+  extraEnvVarsSecret: ""
+  ## @param sentinel.externalMaster.enabled Use external master for bootstrapping
+  ## @param sentinel.externalMaster.host External master host to bootstrap from
+  ## @param sentinel.externalMaster.port Port for Redis service external master host
+  ##
+  externalMaster:
+    enabled: false
+    host: ""
+    port: 6379
+  ## @param sentinel.containerPorts.sentinel Container port to open on Redis® Sentinel nodes
+  ##
+  containerPorts:
+    sentinel: 26379
+  ## Configure extra options for Redis® containers' liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+  ## @param sentinel.startupProbe.enabled Enable startupProbe on Redis® Sentinel nodes
+  ## @param sentinel.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+  ## @param sentinel.startupProbe.periodSeconds Period seconds for startupProbe
+  ## @param sentinel.startupProbe.timeoutSeconds Timeout seconds for startupProbe
+  ## @param sentinel.startupProbe.failureThreshold Failure threshold for startupProbe
+  ## @param sentinel.startupProbe.successThreshold Success threshold for startupProbe
+  ##
+  startupProbe:
+    enabled: true
+    initialDelaySeconds: 10
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 22
+  ## @param sentinel.livenessProbe.enabled Enable livenessProbe on Redis® Sentinel nodes
+  ## @param sentinel.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+  ## @param sentinel.livenessProbe.periodSeconds Period seconds for livenessProbe
+  ## @param sentinel.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+  ## @param sentinel.livenessProbe.failureThreshold Failure threshold for livenessProbe
+  ## @param sentinel.livenessProbe.successThreshold Success threshold for livenessProbe
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 20
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 6
+  ## @param sentinel.readinessProbe.enabled Enable readinessProbe on Redis® Sentinel nodes
+  ## @param sentinel.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+  ## @param sentinel.readinessProbe.periodSeconds Period seconds for readinessProbe
+  ## @param sentinel.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+  ## @param sentinel.readinessProbe.failureThreshold Failure threshold for readinessProbe
+  ## @param sentinel.readinessProbe.successThreshold Success threshold for readinessProbe
+  ##
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 20
+    periodSeconds: 5
+    timeoutSeconds: 1
+    successThreshold: 1
+    failureThreshold: 6
+  ## @param sentinel.customStartupProbe Custom startupProbe that overrides the default one
+  ##
+  customStartupProbe: {}
+  ## @param sentinel.customLivenessProbe Custom livenessProbe that overrides the default one
+  ##
+  customLivenessProbe: {}
+  ## @param sentinel.customReadinessProbe Custom readinessProbe that overrides the default one
+  ##
+  customReadinessProbe: {}
+  ## Persistence parameters
+  ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    ## @param sentinel.persistence.enabled Enable persistence on Redis® sentinel nodes using Persistent Volume Claims (Experimental)
+    ##
+    enabled: false
+    ## @param sentinel.persistence.storageClass Persistent Volume storage class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner
+    ##
+    storageClass: ""
+    ## @param sentinel.persistence.accessModes Persistent Volume access modes
+    ##
+    accessModes:
+      - ReadWriteOnce
+    ## @param sentinel.persistence.size Persistent Volume size
+    ##
+    size: 100Mi
+    ## @param sentinel.persistence.annotations Additional custom annotations for the PVC
+    ##
+    annotations: {}
+    ## @param sentinel.persistence.labels Additional custom labels for the PVC
+    ##
+    labels: {}
+    ## @param sentinel.persistence.selector Additional labels to match for the PVC
+    ## e.g:
+    ## selector:
+    ##   matchLabels:
+    ##     app: my-app
+    ##
+    selector: {}
+    ## @param sentinel.persistence.dataSource Custom PVC data source
+    ##
+    dataSource: {}
+    ## @param sentinel.persistence.medium Provide a medium for `emptyDir` volumes.
+    ##
+    medium: ""
+    ## @param sentinel.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes.
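+    ## e.g., a sketch for the non-persistent case (values are illustrative
+    ## assumptions, not defaults): a RAM-backed emptyDir capped at 64Mi.
+    ## persistence:
+    ##   enabled: false
+    ##   medium: Memory
+    ##   sizeLimit: 64Mi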
+ ## + sizeLimit: "" + ## persistentVolumeClaimRetentionPolicy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## @param sentinel.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet + ## @param sentinel.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## @param sentinel.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + persistentVolumeClaimRetentionPolicy: + enabled: false + whenScaled: Retain + whenDeleted: Retain + ## Redis® Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sentinel.resources.limits The resources limits for the Redis® Sentinel containers + ## @param sentinel.resources.requests The requested resources for the Redis® Sentinel containers + ## + resources: + limits: {} + requests: {} + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param sentinel.containerSecurityContext.enabled Enabled Redis® Sentinel containers' Security Context + ## @param sentinel.containerSecurityContext.runAsUser Set Redis® Sentinel containers' Security Context runAsUser + ## @param sentinel.containerSecurityContext.runAsGroup Set Redis® Sentinel containers' Security Context runAsGroup + ## @param sentinel.containerSecurityContext.runAsNonRoot Set Redis® Sentinel containers' Security Context runAsNonRoot + ## @param sentinel.containerSecurityContext.allowPrivilegeEscalation Set Redis® Sentinel containers' Security Context allowPrivilegeEscalation + ## @param sentinel.containerSecurityContext.seccompProfile.type Set Redis® Sentinel containers' Security Context seccompProfile + ## @param sentinel.containerSecurityContext.capabilities.drop Set Redis® Sentinel containers' Security Context capabilities to drop + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsGroup: 0 + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + ## @param sentinel.lifecycleHooks for the Redis® sentinel container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param sentinel.extraVolumes Optionally specify extra list of additional volumes for the Redis® Sentinel + ## + extraVolumes: [] + ## @param sentinel.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® Sentinel container(s) + ## + extraVolumeMounts: [] + ## Redis® Sentinel service parameters + ## + service: + ## @param sentinel.service.type Redis® Sentinel service type + ## + type: ClusterIP + ## @param sentinel.service.ports.redis Redis® service port for Redis® + ## @param sentinel.service.ports.sentinel Redis® service port for Redis® Sentinel + ## + ports: + redis: 6379 + sentinel: 26379 + ## @param sentinel.service.nodePorts.redis Node port for Redis® + ## @param sentinel.service.nodePorts.sentinel Node port for Sentinel + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## NOTE: By leaving these values blank, they will be generated by ports-configmap + ## If setting manually, please leave at least replica.replicaCount + 1 in between 
sentinel.service.nodePorts.redis and sentinel.service.nodePorts.sentinel to take into account the ports that will be created while incrementing that base port + ## + nodePorts: + redis: "" + sentinel: "" + ## @param sentinel.service.externalTrafficPolicy Redis® Sentinel service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param sentinel.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param sentinel.service.clusterIP Redis® Sentinel service Cluster IP + ## + clusterIP: "" + ## @param sentinel.service.loadBalancerIP Redis® Sentinel service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param sentinel.service.loadBalancerSourceRanges Redis® Sentinel service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param sentinel.service.annotations Additional custom annotations for Redis® Sentinel service + ## + annotations: {} + ## @param sentinel.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param sentinel.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param sentinel.service.headless.annotations Annotations for the headless service. + ## + annotations: {} + ## @param sentinel.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-node pods + ## + terminationGracePeriodSeconds: 30 + +## @section Other Parameters +## + +## @param serviceBindings.enabled Create secret for service binding (Experimental) +## Ref: https://servicebinding.io/service-provider/ +## +serviceBindings: + enabled: false + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the ports + ## Redis® is listening on. When true, Redis® will accept connections from any source + ## (with the correct destination port). 
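+  ## e.g., a locked-down sketch (illustrative; the exact client label key is
+  ## generated by the chart and is assumed here to follow its usual
+  ## "<fullname>-client" convention):
+  ## allowExternal: false
+  ## Client pods would then need to carry a label such as
+  ## <fullname>-client: "true" to reach the Redis® ports.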
+ ## + allowExternal: true + ## @param networkPolicy.extraIngress Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param networkPolicy.extraEgress Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param networkPolicy.ingressNSMatchLabels Labels to match to allow traffic from other namespaces + ## @param networkPolicy.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + metrics: + ## @param networkPolicy.metrics.allowExternal Don't require client label for connections for metrics endpoint + ## When set to false, only pods with the correct client label will have network access to the metrics port + ## + allowExternal: true + ## @param networkPolicy.metrics.ingressNSMatchLabels Labels to match to allow traffic from other namespaces to metrics endpoint + ## @param networkPolicy.metrics.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces to metrics endpoint + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## @param podSecurityPolicy.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later + ## + create: false + ## @param podSecurityPolicy.enabled Enable PodSecurityPolicy's RBAC rules + ## + enabled: false +## RBAC configuration +## +rbac: + ## @param rbac.create Specifies whether RBAC resources should be created + ## + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## ServiceAccount configuration +## +serviceAccount: + ## @param serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Redis® Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Specifies whether a PodDisruptionBudget should be created + ## + create: false + ## @param pdb.minAvailable Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Max number of pods that can be unavailable after the eviction + ## + maxUnavailable: "" +## TLS configuration +## +tls: + ## @param tls.enabled Enable TLS traffic + ## + enabled: false + ## @param tls.authClients Require clients to authenticate + ## + authClients: true + ## @param tls.autoGenerated Enable autogenerated certificates + ## + autoGenerated: false + ## @param tls.existingSecret The name of the existing secret that contains the TLS certificates + ## + existingSecret: "" + ## @param tls.certificatesSecret DEPRECATED. Use existingSecret instead. + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate Key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## + certCAFilename: "" + ## @param tls.dhParamsFilename File containing DH params (in order to support DH based ciphers) + ## + dhParamsFilename: "" + +## @section Metrics Parameters +## + +metrics: + ## @param metrics.enabled Start a sidecar prometheus exporter to expose Redis® metrics + ## + enabled: false + ## Bitnami Redis® Exporter image + ## ref: https://hub.docker.com/r/bitnami/redis-exporter/tags/ + ## @param metrics.image.registry [default: REGISTRY_NAME] Redis® Exporter image registry + ## @param metrics.image.repository [default: REPOSITORY_NAME/redis-exporter] Redis® Exporter image repository + ## @skip metrics.image.tag Redis® Exporter image tag (immutable tags are recommended) + ## @param metrics.image.digest Redis® Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param metrics.image.pullPolicy Redis® Exporter image pull policy + ## @param metrics.image.pullSecrets Redis® Exporter image pull secrets + ## + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.55.0-debian-11-r0 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Configure extra options for Redis® containers' liveness, readiness & startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## @param metrics.startupProbe.enabled Enable startupProbe on Redis® replicas nodes + ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe + ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param metrics.livenessProbe.enabled Enable livenessProbe on Redis® replicas nodes + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param metrics.readinessProbe.enabled Enable readinessProbe on Redis® replicas nodes + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param metrics.command Override default metrics container init command (useful when using custom images) + ## + command: [] + ## @param metrics.redisTargetHost A way to specify an alternative Redis® hostname + ## Useful for certificate CN/SAN matching + ## + redisTargetHost: "localhost" + ## @param metrics.extraArgs Extra arguments for Redis® exporter, for example: + ## e.g.: + ## extraArgs: + ## check-keys: myKey,myOtherKey + ## + extraArgs: {} + ## @param metrics.extraEnvVars Array with extra environment variables to add to Redis® exporter + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod 
+ ## @param metrics.containerSecurityContext.enabled Enabled Redis® exporter containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set Redis® exporter containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsGroup Set Redis® exporter containers' Security Context runAsGroup + ## @param metrics.containerSecurityContext.runAsNonRoot Set Redis® exporter containers' Security Context runAsNonRoot + ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set Redis® exporter containers' Security Context allowPrivilegeEscalation + ## @param metrics.containerSecurityContext.seccompProfile.type Set Redis® exporter containers' Security Context seccompProfile + ## @param metrics.containerSecurityContext.capabilities.drop Set Redis® exporter containers' Security Context capabilities to drop + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsGroup: 0 + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + ## @param metrics.extraVolumes Optionally specify extra list of additional volumes for the Redis® metrics sidecar + ## + extraVolumes: [] + ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® metrics sidecar + ## + extraVolumeMounts: [] + ## Redis® exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.resources.limits The resources limits for the Redis® exporter container + ## @param metrics.resources.requests The requested resources for the Redis® exporter container + ## + resources: + limits: {} + requests: {} + ## @param metrics.podLabels Extra labels for Redis® exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param metrics.podAnnotations [object] Annotations for Redis® exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + ## Redis® exporter service parameters + ## + service: + ## @param metrics.service.type Redis® exporter service type + ## + type: ClusterIP + ## @param metrics.service.port Redis® exporter service port + ## + port: 9121 + ## @param metrics.service.externalTrafficPolicy Redis® exporter service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param metrics.service.loadBalancerIP Redis® exporter service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param metrics.service.loadBalancerSourceRanges Redis® exporter service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param metrics.service.annotations Additional custom annotations for Redis® exporter service + ## + annotations: {} + ## @param metrics.service.clusterIP Redis® exporter service Cluster IP + ## + clusterIP: "" + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. + ## + relabellings: [] + ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.serviceMonitor.podTargetLabels Labels from the Kubernetes pod to be transferred to the created metrics + ## + podTargetLabels: [] + ## @param metrics.serviceMonitor.sampleLimit Limit of how many samples should be scraped from every Pod + ## + sampleLimit: false + ## @param metrics.serviceMonitor.targetLimit Limit of how many targets should be scraped + ## + targetLimit: false + ## Prometheus Pod Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#podmonitor + ## + podMonitor: + ## @param metrics.podMonitor.enabled Create PodMonitor resource(s) for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.podMonitor.namespace The namespace in which the PodMonitor will be created + ## + namespace: "" + ## @param metrics.podMonitor.interval The interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.podMonitor.scrapeTimeout The timeout after which the scrape is ended + ## + scrapeTimeout: "" + ## @param metrics.podMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. + ## + relabellings: [] + ## @param metrics.podMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. 
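+    ## e.g., a sketch using the standard Prometheus Operator relabel fields
+    ## (the metric name below is an assumption, purely for illustration):
+    ## metricRelabelings:
+    ##   - sourceLabels: [__name__]
+    ##     regex: go_gc_duration_seconds.*
+    ##     action: drop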
+ ## + metricRelabelings: [] + ## @param metrics.podMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.podMonitor.additionalLabels Additional labels that can be used so PodMonitor resource(s) can be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.podMonitor.podTargetLabels Labels from the Kubernetes pod to be transferred to the created metrics + ## + podTargetLabels: [] + ## @param metrics.podMonitor.sampleLimit Limit of how many samples should be scraped from every Pod + ## + sampleLimit: false + ## @param metrics.podMonitor.targetLimit Limit of how many targets should be scraped + ## + targetLimit: false + + ## Custom PrometheusRule to be defined + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.prometheusRule.namespace The namespace in which the prometheusRule will be created + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels for the prometheusRule + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules Custom Prometheus rules + ## e.g: + ## rules: + ## - alert: RedisDown + ## expr: redis_up{service="{{ template "common.names.fullname" . }}-metrics"} == 0 + ## for: 2m + ## labels: + ## severity: error + ## annotations: + ## summary: Redis® instance {{ "{{ $labels.instance }}" }} down + ## description: Redis® instance {{ "{{ $labels.instance }}" }} is down + ## - alert: RedisMemoryHigh + ## expr: > + ## redis_memory_used_bytes{service="{{ template "common.names.fullname" . }}-metrics"} * 100 + ## / + ## redis_memory_max_bytes{service="{{ template "common.names.fullname" . }}-metrics"} + ## > 90 + ## for: 2m + ## labels: + ## severity: error + ## annotations: + ## summary: Redis® instance {{ "{{ $labels.instance }}" }} is using too much memory + ## description: | + ## Redis® instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + ## - alert: RedisKeyEviction + ## expr: | + ## increase(redis_evicted_keys_total{service="{{ template "common.names.fullname" . }}-metrics"}[5m]) > 0 + ## for: 1s + ## labels: + ## severity: error + ## annotations: + ## summary: Redis® instance {{ "{{ $labels.instance }}" }} has evicted keys + ## description: | + ## Redis® instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. 
+ ## + rules: [] + +## @section Init Container Parameters +## + +## 'volumePermissions' init container parameters +## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values +## based on the *podSecurityContext/*containerSecurityContext parameters +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` + ## + enabled: false + ## OS Shell + Utility image + ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/ + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] OS Shell + Utility image repository + ## @skip volumePermissions.image.tag OS Shell + Utility image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy OS Shell + Utility image pull policy + ## @param volumePermissions.image.pullSecrets OS Shell + Utility image pull secrets + ## + image: + registry: docker.io + repository: bitnami/os-shell + tag: 11-debian-11-r90 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits The resources limits for the init container + ## @param volumePermissions.resources.requests The requested resources for the init container + ## + resources: + limits: {} + requests: {} + ## Init container Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser + ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the + ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed) + ## + containerSecurityContext: + runAsUser: 0 + +## init-sysctl container parameters +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +## +sysctl: + ## @param sysctl.enabled Enable init container to modify Kernel settings + ## + enabled: false + ## OS Shell + Utility image + ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/ + ## @param sysctl.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry + ## @param sysctl.image.repository [default: REPOSITORY_NAME/os-shell] OS Shell + Utility image repository + ## @skip sysctl.image.tag OS Shell + Utility image tag (immutable tags are recommended) + ## @param sysctl.image.digest OS Shell + Utility image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param sysctl.image.pullPolicy OS Shell + Utility image pull policy + ## @param sysctl.image.pullSecrets OS Shell + Utility image pull secrets + ## + image: + registry: docker.io + repository: bitnami/os-shell + tag: 11-debian-11-r90 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param sysctl.command Override default init-sysctl container command (useful when using custom images) + ## + command: [] + ## @param sysctl.mountHostSys Mount the host `/sys` folder to `/host-sys` + ## + mountHostSys: false + ## Init container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sysctl.resources.limits The resources limits for the init container + ## @param sysctl.resources.requests The requested resources for the init container + ## + resources: + limits: {} + requests: {} + +## @section useExternalDNS Parameters +## +## @param useExternalDNS.enabled Enable various syntax that would enable external-dns to work. Note this requires a working installation of `external-dns` to be usable. +## @param useExternalDNS.additionalAnnotations Extra annotations to be utilized when `external-dns` is enabled. +## @param useExternalDNS.annotationKey The annotation key utilized when `external-dns` is enabled. Setting this to `false` will disable annotations. +## @param useExternalDNS.suffix The DNS suffix utilized when `external-dns` is enabled. Note that we prepend the suffix with the full name of the release. 
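+## e.g., a sketch (domain and TTL are assumptions) for a cluster where
+## external-dns is already running; per the notes above this would publish
+## records like "<release fullname>.example.org":
+## useExternalDNS:
+##   enabled: true
+##   suffix: example.org
+##   additionalAnnotations:
+##     external-dns.alpha.kubernetes.io/ttl: "10"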
+## +useExternalDNS: + enabled: false + suffix: "" + annotationKey: external-dns.alpha.kubernetes.io/ + additionalAnnotations: {} diff --git a/openshift/templates/superset/Dockerfile b/openshift/templates/superset/Dockerfile deleted file mode 100644 index 2baf0606..00000000 --- a/openshift/templates/superset/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM artifacts.developer.gov.bc.ca/docker-remote/apache/superset -USER root -RUN pip install psycopg2-binary sqlalchemy-redshift \ - && chgrp -R root /app/superset_home \ - && chmod -R g+w /app/superset_home -COPY ./scripts /app/docker -USER superset \ No newline at end of file diff --git a/openshift/templates/superset/superset/superset-bc-superset.yaml b/openshift/templates/superset/Openshift/superset-bc.yaml similarity index 58% rename from openshift/templates/superset/superset/superset-bc-superset.yaml rename to openshift/templates/superset/Openshift/superset-bc.yaml index 42af6810..bd376578 100644 --- a/openshift/templates/superset/superset/superset-bc-superset.yaml +++ b/openshift/templates/superset/Openshift/superset-bc.yaml @@ -1,49 +1,51 @@ -apiVersion: template.openshift.io/v1 +--- kind: Template +apiVersion: template.openshift.io/v1 metadata: creationTimestamp: null - name: superset-superset-bc + name: superset objects: - apiVersion: image.openshift.io/v1 kind: ImageStream metadata: - annotations: - description: Keeps track of changes in the superset image - creationTimestamp: null name: superset + creationTimestamp: + labels: + shared: "true" spec: lookupPolicy: local: false + status: + dockerImageRepository: "" - apiVersion: build.openshift.io/v1 kind: BuildConfig metadata: creationTimestamp: null - name: superset + name: superset-bc spec: failedBuildsHistoryLimit: 5 nodeSelector: null output: to: kind: ImageStreamTag - name: superset:20211213 - namespace: 30b186-tools + name: superset:3.0.1-authlib postCommit: {} resources: {} runPolicy: Serial source: - contextDir: openshift/templates/superset/superset + contextDir: openshift/templates/superset/docker git: - ref: superset-0.1.0 - uri: https://github.com/bcgov/cthub.git + ref: "superset-0.2.0" + uri: "https://github.com/bcgov/cthub.git" type: Git strategy: dockerStrategy: - forcePull: true - noCache: true - pullSecret: - name: artifacts-pull-default-idxprm + imageOptimizationPolicy: SkipLayers type: Docker successfulBuildsHistoryLimit: 5 - triggers: [] + triggers: + - imageChange: {} + type: ImageChange + - type: ConfigChange status: lastVersion: 0 diff --git a/openshift/templates/superset/README.md b/openshift/templates/superset/README.md deleted file mode 100644 index 7541692f..00000000 --- a/openshift/templates/superset/README.md +++ /dev/null @@ -1,10 +0,0 @@ -## Create Superset -* superset/scripts folder, will be copied to /app/docker -* redis folder folder, build and deploy redis -* Dockerfile the dockerfile -* superset-bc.yaml, build superset image, the superset, worker and beat all use this image -* superset-dc-beat.yaml deploy superset beat -* superset-dc-worker.yaml deploy superset worker -* superset-dc-superset.yaml deploy superset - - diff --git a/openshift/templates/superset/cthub-superset-dev.yaml b/openshift/templates/superset/cthub-superset-dev.yaml new file mode 100644 index 00000000..e7c9a3f6 --- /dev/null +++ b/openshift/templates/superset/cthub-superset-dev.yaml @@ -0,0 +1,888 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Default values for superset.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# A README is automatically generated from this file to document it, using helm-docs (see https://github.com/norwoodj/helm-docs)
+# To update it, install helm-docs and run helm-docs from the root of this chart
+
+# -- Provide a name to override the name of the chart
+nameOverride: ~
+# -- Provide a name to override the full names of resources
+fullnameOverride: cthub-superset-dev
+
+# -- User ID directive. This user must have enough permissions to run the bootstrap script
+# Running containers as root is not recommended in production. Change this to another UID - e.g. 1000 to be more secure
+runAsUser: 1010180000
+
+# -- Specify service account name to be used
+serviceAccountName: ~
+serviceAccount:
+  # -- Create custom service account for Superset. If create: true and serviceAccountName is not provided, `superset.fullname` will be used.
+  create: false
+  annotations: {}
+
+# -- Install additional packages and do any other bootstrap configuration in this script
+# For production clusters it's recommended to build your own image with this step done in CI
+# @default -- see `values.yaml`
+# if [ ! -f ~/bootstrap ]; then echo "Running Superset with uid {{ .Values.runAsUser }}" > /tmp/bootstrap; fi
+bootstrapScript: |
+  #!/bin/bash
+  if [ ! -f ~/bootstrap ]; then echo "Running Superset with uid {{ .Values.runAsUser }}" > /tmp/bootstrap; fi
+
+# -- The name of the secret which we will use to generate a superset_config.py file
+# Note: this secret must have the key superset_config.py in it and can include other files as well
+configFromSecret: '{{ template "superset.fullname" . }}-config'
+
+# -- The name of the secret which we will use to populate env vars in deployed pods
+# This can be useful for secret keys, etc.
+envFromSecret: '{{ template "superset.fullname" . }}-env'
+# -- This can be a list of templated strings
+envFromSecrets: []
+
+# -- Extra environment variables that will be passed into pods
+extraEnv: {}
+  # Different gunicorn settings, refer to the gunicorn documentation
+  # https://docs.gunicorn.org/en/stable/settings.html#
+  # These variables are used as Flags at the gunicorn startup
+  # https://github.com/apache/superset/blob/master/docker/run-server.sh#L22
+  # Extend timeout to allow long running queries.
+  # GUNICORN_TIMEOUT: 300
+  # Increase the gunicorn worker amount, can improve performance drastically
+  # See: https://docs.gunicorn.org/en/stable/design.html#how-many-workers
+  # SERVER_WORKER_AMOUNT: 4
+  # WORKER_MAX_REQUESTS: 0
+  # WORKER_MAX_REQUESTS_JITTER: 0
+  # SERVER_THREADS_AMOUNT: 20
+  # GUNICORN_KEEPALIVE: 2
+  # SERVER_LIMIT_REQUEST_LINE: 0
+  # SERVER_LIMIT_REQUEST_FIELD_SIZE: 0
+
+  # OAUTH_HOME_DOMAIN: ..
+  # # If a whitelist is not set, any address that can use your OAuth2 endpoint will be able to login.
+  # # This includes any random Gmail address if your OAuth2 Web App is set to External.
+  # OAUTH_WHITELIST_REGEX: ...
+
+# -- Extra environment variables in RAW format that will be passed into pods
+extraEnvRaw: []
+  # Load DB password from other secret (e.g. for zalando operator)
+  # - name: DB_PASS
+  #   valueFrom:
+  #     secretKeyRef:
+  #       name: superset.superset-postgres.credentials.postgresql.acid.zalan.do
+  #       key: password
+
+# -- Extra environment variables to pass as secrets
+extraSecretEnv: {}
+  # MAPBOX_API_KEY: ...
+  # # Google API Keys: https://console.cloud.google.com/apis/credentials
+  # GOOGLE_KEY: ...
+  # GOOGLE_SECRET: ...
+
+# -- Extra files to mount on `/app/pythonpath`
+  # import_datasources.yaml: |
+  #   databases:
+  #   - allow_file_upload: true
+  #     allow_ctas: true
+  #     allow_cvas: true
+  #     database_name: example-db
+  #     extra: "{\r\n    \"metadata_params\": {},\r\n    \"engine_params\": {},\r\n    \"\
+  #       metadata_cache_timeout\": {},\r\n    \"schemas_allowed_for_file_upload\": []\r\n\
+  #       }"
+  #     sqlalchemy_uri: example://example-db.local
+  #     tables: []
+extraConfigs: {}
+
+# -- Extra files to mount on `/app/pythonpath` as secrets
+extraSecrets:
+  custom_sso_security_manager.py: |
+    import logging
+    from superset.security import SupersetSecurityManager
+
+    class CustomSsoSecurityManager(SupersetSecurityManager):
+
+      def oauth_user_info(self, provider, response=None):
+        logging.info("Oauth2 provider: {0}.".format(provider))
+        if provider == 'egaSSO':
+          # As an example, this line requests a GET to base_url + '/' + userDetails with Bearer Authentication,
+          # and expects that the authorization server checks the token and responds with the user details
+          me = self.appbuilder.sm.oauth_remotes[provider].get('userDetails').data
+          logging.info("user_data: {0}".format(me))
+          return { 'name' : me['name'], 'email' : me['email'], 'id' : me['user_name'], 'username' : me['user_name'], 'first_name':'', 'last_name':''}
+
+
+extraVolumes: []
+  # - name: customConfig
+  #   configMap:
+  #     name: '{{ template "superset.fullname" . }}-custom-config'
+  # - name: additionalSecret
+  #   secret:
+  #     secretName: my-secret
+  #     defaultMode: 0600
+
+extraVolumeMounts: []
+  # - name: customConfig
+  #   mountPath: /mnt/config
+  #   readOnly: true
+  # - name: additionalSecret
+  #   mountPath: /mnt/secret
+
+# -- A dictionary of overrides to append at the end of superset_config.py - the name does not matter
+# WARNING: the order is not guaranteed
+# Files can be passed as helm --set-file configOverrides.my-override=my-file.py
+configOverrides:
+  enable_oauth: |
+
+    from flask_appbuilder.security.manager import AUTH_OAUTH
+
+    # Set the authentication type to OAuth
+    AUTH_TYPE = AUTH_OAUTH
+
+    OAUTH_PROVIDERS = [
+      { 'name':'egaSSO',
+        'token_key':'access_token', # Name of the token in the response of access_token_url
+        'icon':'fa-address-card',   # Icon for the provider
+        'remote_app': {
+          'client_id':'cthub-on-gold-cluster-3974',  # Client Id (Identify Superset application)
+          'client_secret': None, # Secret for this Client Id (Identify Superset application)
+          'client_kwargs':{
+            'scope': 'read'  # Scope for the Authorization
+          },
+          'access_token_method':'POST',  # HTTP Method to call access_token_url
+          'access_token_params':{  # Additional parameters for calls to access_token_url
+            'client_id':'cthub-on-gold-cluster-3974'
+          },
+          'jwks_uri': None, # may be required to generate token
+          'access_token_headers':{  # Additional headers for calls to access_token_url
+            'Authorization': 'Basic Base64EncodedClientIdAndSecret'
+          },
+          'api_base_url':'https://dev.loginproxy.gov.bc.ca/auth/realms/standard/protocol/openid-connect',
+          'access_token_url':'https://dev.loginproxy.gov.bc.ca/auth/realms/standard/protocol/openid-connect/token',
+          'authorize_url':'https://dev.loginproxy.gov.bc.ca/auth/realms/standard/protocol/openid-connect/auth'
+        }
+      }
+    ]
+
+    # Will allow user self registration, allowing to create Flask users from Authorized User
+    AUTH_USER_REGISTRATION = True
+
+    # The default user self registration role
+    AUTH_USER_REGISTRATION_ROLE = "Public"
+
+    from custom_sso_security_manager import CustomSsoSecurityManager
+    CUSTOM_SECURITY_MANAGER = CustomSsoSecurityManager
+
+  # extend_timeout: |
+  #   # Extend timeout to allow long running queries.
+  #   SUPERSET_WEBSERVER_TIMEOUT = ...
+  # enable_oauth: |
+  #   from flask_appbuilder.security.manager import (AUTH_DB, AUTH_OAUTH)
+  #   AUTH_TYPE = AUTH_OAUTH
+  #   OAUTH_PROVIDERS = [
+  #     {
+  #       "name": "google",
+  #       "whitelist": [ os.getenv("OAUTH_WHITELIST_REGEX", "") ],
+  #       "icon": "fa-google",
+  #       "token_key": "access_token",
+  #       "remote_app": {
+  #         "client_id": os.environ.get("GOOGLE_KEY"),
+  #         "client_secret": os.environ.get("GOOGLE_SECRET"),
+  #         "api_base_url": "https://www.googleapis.com/oauth2/v2/",
+  #         "client_kwargs": {"scope": "email profile"},
+  #         "request_token_url": None,
+  #         "access_token_url": "https://accounts.google.com/o/oauth2/token",
+  #         "authorize_url": "https://accounts.google.com/o/oauth2/auth",
+  #         "authorize_params": {"hd": os.getenv("OAUTH_HOME_DOMAIN", "")}
+  #       }
+  #     }
+  #   ]
+  #   # Map Authlib roles to superset roles
+  #   AUTH_ROLE_ADMIN = 'Admin'
+  #   AUTH_ROLE_PUBLIC = 'Public'
+  #   # Will allow user self registration, allowing to create Flask users from Authorized User
+  #   AUTH_USER_REGISTRATION = True
+  #   # The default user self registration role
+  #   AUTH_USER_REGISTRATION_ROLE = "Admin"
+  # secret: |
+  #   # Generate your own secret key for encryption.
Use openssl rand -base64 42 to generate a good key + # SECRET_KEY = 'YOUR_OWN_RANDOM_GENERATED_SECRET_KEY' + +# -- Same as above but the values are files +configOverridesFiles: {} + # extend_timeout: extend_timeout.py + # enable_oauth: enable_oauth.py + +configMountPath: "/app/pythonpath" + +extraConfigMountPath: "/app/configs" + +image: + #repository: apachesuperset.docker.scarf.sh/apache/superset + repository: image-registry.openshift-image-registry.svc:5000/30b186-dev/superset + #tag: "" + tag: 3.0.1-authlib + # pullPolicy: IfNotPresent + pullPolicy: Always + +imagePullSecrets: [] + +initImage: + repository: apache/superset + tag: dockerize + # pullPolicy: IfNotPresent + pullPolicy: Always + +service: + type: ClusterIP + port: 8088 + annotations: {} + # cloud.google.com/load-balancer-type: "Internal" + loadBalancerIP: ~ + nodePort: + # -- (int) + http: nil + +ingress: + enabled: false + ingressClassName: ~ + annotations: {} + # kubernetes.io/tls-acme: "true" + ## Extend timeout to allow long running queries. + # nginx.ingress.kubernetes.io/proxy-connect-timeout: "300" + # nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + # nginx.ingress.kubernetes.io/proxy-send-timeout: "300" + path: / + pathType: ImplementationSpecific + hosts: + - chart-example.local + tls: [] + extraHostsRaw: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # The limits below will apply to all Superset components. To set individual resource limitations refer to the pod specific values below. + # The pod specific values will overwrite anything that is set here. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +# -- Custom hostAliases for all superset pods +## https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/ +hostAliases: [] +# - hostnames: +# - nodns.my.lan +# ip: 18.27.36.45 + +# Superset node configuration +supersetNode: + replicaCount: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 2 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + + # -- Startup command + # @default -- See `values.yaml` + command: + - "/bin/sh" + - "-c" + - ". 
{{ .Values.configMountPath }}/superset_bootstrap.sh; /usr/bin/run-server.sh"
+  connections:
+    # -- Change in case of bringing your own redis and then also set redis.enabled:false
+    # redis_host: '{{ .Release.Name }}-redis-headless'
+    redis_host: toBeOverrided
+    redis_password: toBeOverrided
+    redis_port: "6379"
+    # You need to change the configuration below in case you bring your own PostgreSQL instance, and also set postgresql.enabled:false
+    # db_host: '{{ .Release.Name }}-postgresql'
+    db_host: toBeOverrided
+    db_port: "5432"
+    db_user: toBeOverrided
+    db_pass: toBeOverrided
+    db_name: superset
+  env: {}
+  # -- If true, forces deployment to reload on each upgrade
+  forceReload: false
+  # -- Init containers
+  # @default -- a container waiting for postgres
+  initContainers:
+    - name: wait-for-postgres
+      image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}"
+      imagePullPolicy: "{{ .Values.initImage.pullPolicy }}"
+      envFrom:
+        - secretRef:
+            name: "{{ tpl .Values.envFromSecret . }}"
+      command:
+        - /bin/sh
+        - -c
+        - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -timeout 120s
+
+  # -- Launch additional containers into supersetNode pod
+  extraContainers: []
+  # -- Annotations to be added to supersetNode deployment
+  deploymentAnnotations: {}
+  # -- Labels to be added to supersetNode deployment
+  deploymentLabels: {}
+  # -- Affinity to be added to supersetNode deployment
+  affinity: {}
+  # -- TopologySpreadConstraints to be added to supersetNode deployments
+  topologySpreadConstraints: []
+  # -- Annotations to be added to supersetNode pods
+  podAnnotations: {}
+  # -- Labels to be added to supersetNode pods
+  podLabels: {}
+  startupProbe:
+    httpGet:
+      path: /health
+      port: http
+    initialDelaySeconds: 15
+    timeoutSeconds: 1
+    failureThreshold: 60
+    periodSeconds: 5
+    successThreshold: 1
+  livenessProbe:
+    httpGet:
+      path: /health
+      port: http
+    initialDelaySeconds: 15
+    timeoutSeconds: 1
+    failureThreshold: 3
+    periodSeconds: 15
+    successThreshold: 1
+  readinessProbe:
+    httpGet:
+      path: /health
+      port: http
+    initialDelaySeconds: 15
+    timeoutSeconds: 1
+    failureThreshold: 3
+    periodSeconds: 15
+    successThreshold: 1
+  # -- Resource settings for the supersetNode pods - these settings might overwrite existing values from the global resources object defined above.
+  resources:
+    limits:
+      cpu: 200m
+      memory: 500Mi
+    requests:
+      cpu: 100m
+      memory: 250Mi
+  podSecurityContext: {}
+  containerSecurityContext: {}
+  strategy: {}
+    # type: RollingUpdate
+    # rollingUpdate:
+    #   maxSurge: 25%
+    #   maxUnavailable: 25%
+
+# Superset Celery worker configuration
+supersetWorker:
+  replicaCount: 1
+  autoscaling:
+    enabled: false
+    minReplicas: 1
+    maxReplicas: 2
+    targetCPUUtilizationPercentage: 80
+    # targetMemoryUtilizationPercentage: 80
+
+  # -- Worker startup command
+  # @default -- a `celery worker` command
+  command:
+    - "/bin/sh"
+    - "-c"
+    - ". {{ .Values.configMountPath }}/superset_bootstrap.sh; celery --app=superset.tasks.celery_app:app worker"
+  # -- If true, forces deployment to reload on each upgrade
+  forceReload: false
+  # -- Init container
+  # @default -- a container waiting for postgres and redis
+  initContainers:
+    - name: wait-for-postgres-redis
+      image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}"
+      imagePullPolicy: "{{ .Values.initImage.pullPolicy }}"
+      envFrom:
+        - secretRef:
+            name: "{{ tpl .Values.envFromSecret . }}"
+      command:
+        - /bin/sh
+        - -c
+        - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -wait "tcp://$REDIS_HOST:$REDIS_PORT" -timeout 120s
+  # -- Launch additional containers into supersetWorker pod
+  extraContainers: []
+  # -- Annotations to be added to supersetWorker deployment
+  deploymentAnnotations: {}
+  # -- Labels to be added to supersetWorker deployment
+  deploymentLabels: {}
+  # -- Affinity to be added to supersetWorker deployment
+  affinity: {}
+  # -- TopologySpreadConstraints to be added to supersetWorker deployments
+  topologySpreadConstraints: []
+  # -- Annotations to be added to supersetWorker pods
+  podAnnotations: {}
+  # -- Labels to be added to supersetWorker pods
+  podLabels: {}
+  # -- Resource settings for the supersetWorker pods - these settings might overwrite existing values from the global resources object defined above.
+  resources:
+    limits:
+      cpu: 400m
+      memory: 4Gi
+    requests:
+      cpu: 200m
+      memory: 2Gi
+  podSecurityContext: {}
+  containerSecurityContext: {}
+  strategy: {}
+    # type: RollingUpdate
+    # rollingUpdate:
+    #   maxSurge: 25%
+    #   maxUnavailable: 25%
+  livenessProbe:
+    exec:
+      # -- Liveness probe command
+      # @default -- a `celery inspect ping` command
+      command:
+        - sh
+        - -c
+        - celery -A superset.tasks.celery_app:app inspect ping -d celery@$HOSTNAME
+    initialDelaySeconds: 120
+    timeoutSeconds: 60
+    failureThreshold: 3
+    periodSeconds: 60
+    successThreshold: 1
+  # -- No startup/readiness probes by default since we don't really care about its startup time (it doesn't serve traffic)
+  startupProbe: {}
+  # -- No startup/readiness probes by default since we don't really care about its startup time (it doesn't serve traffic)
+  readinessProbe: {}
+
+# Superset beat configuration (to trigger scheduled jobs like reports)
+supersetCeleryBeat:
+  # -- This is only required if you intend to use alerts and reports
+  enabled: false
+  # -- Command
+  # @default -- a `celery beat` command
+  command:
+    - "/bin/sh"
+    - "-c"
+    - ". {{ .Values.configMountPath }}/superset_bootstrap.sh; celery --app=superset.tasks.celery_app:app beat --pidfile /tmp/celerybeat.pid --schedule /tmp/celerybeat-schedule"
+  # -- If true, forces deployment to reload on each upgrade
+  forceReload: false
+  # -- List of init containers
+  # @default -- a container waiting for postgres and redis
+  initContainers:
+    - name: wait-for-postgres-redis
+      image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}"
+      imagePullPolicy: "{{ .Values.initImage.pullPolicy }}"
+      envFrom:
+        - secretRef:
+            name: "{{ tpl .Values.envFromSecret . }}"
+      command:
+        - /bin/sh
+        - -c
+        - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -wait "tcp://$REDIS_HOST:$REDIS_PORT" -timeout 120s
+  # -- Annotations to be added to supersetCeleryBeat deployment
+  deploymentAnnotations: {}
+  # -- Affinity to be added to supersetCeleryBeat deployment
+  affinity: {}
+  # -- TopologySpreadConstraints to be added to supersetCeleryBeat deployments
+  topologySpreadConstraints: []
+  # -- Annotations to be added to supersetCeleryBeat pods
+  podAnnotations: {}
+  # -- Labels to be added to supersetCeleryBeat pods
+  podLabels: {}
+  # -- Resource settings for the CeleryBeat pods - these settings might overwrite existing values from the global resources object defined above.
+  resources: {}
+  #  limits:
+  #    cpu: 100m
+  #    memory: 128Mi
+  #  requests:
+  #    cpu: 100m
+  #    memory: 128Mi
+  podSecurityContext: {}
+  containerSecurityContext: {}
+
+supersetCeleryFlower:
+  # -- Enables a Celery flower deployment (management UI to monitor celery jobs)
+  # WARNING: on superset 1.x, this requires a Superset image that has `flower<1.0.0` installed (which is NOT the case of the default images)
+  # flower>=1.0.0 requires Celery 5+ which Superset 1.5 does not support
+  enabled: false
+  replicaCount: 1
+  # -- Command
+  # @default -- a `celery flower` command
+  command:
+    - "/bin/sh"
+    - "-c"
+    - "celery --app=superset.tasks.celery_app:app flower"
+  service:
+    type: ClusterIP
+    annotations: {}
+    loadBalancerIP: ~
+    port: 5555
+    nodePort:
+      # -- (int)
+      http: nil
+  startupProbe:
+    httpGet:
+      path: /api/workers
+      port: flower
+    initialDelaySeconds: 5
+    timeoutSeconds: 1
+    failureThreshold: 60
+    periodSeconds: 5
+    successThreshold: 1
+  livenessProbe:
+    httpGet:
+      path: /api/workers
+      port: flower
+    initialDelaySeconds: 5
+    timeoutSeconds: 1
+    failureThreshold: 3
+    periodSeconds: 5
+    successThreshold: 1
+  readinessProbe:
+    httpGet:
+      path: /api/workers
+      port: flower
+    initialDelaySeconds: 5
+    timeoutSeconds: 1
+    failureThreshold: 3
+    periodSeconds: 5
+    successThreshold: 1
+  # -- List of init containers
+  # @default -- a container waiting for postgres and redis
+  initContainers:
+    - name: wait-for-postgres-redis
+      image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}"
+      imagePullPolicy: "{{ .Values.initImage.pullPolicy }}"
+      envFrom:
+        - secretRef:
+            name: "{{ tpl .Values.envFromSecret . }}"
+      command:
+        - /bin/sh
+        - -c
+        - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -wait "tcp://$REDIS_HOST:$REDIS_PORT" -timeout 120s
+  # -- Annotations to be added to supersetCeleryFlower deployment
+  deploymentAnnotations: {}
+  # -- Affinity to be added to supersetCeleryFlower deployment
+  affinity: {}
+  # -- TopologySpreadConstraints to be added to supersetCeleryFlower deployments
+  topologySpreadConstraints: []
+  # -- Annotations to be added to supersetCeleryFlower pods
+  podAnnotations: {}
+  # -- Labels to be added to supersetCeleryFlower pods
+  podLabels: {}
+  # -- Resource settings for the supersetCeleryFlower pods - these settings might overwrite existing values from the global resources object defined above.
+ resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + podSecurityContext: {} + containerSecurityContext: {} + +supersetWebsockets: + # -- This is only required if you intend to use `GLOBAL_ASYNC_QUERIES` in `ws` mode + # see https://github.com/apache/superset/blob/master/CONTRIBUTING.md#async-chart-queries + enabled: false + replicaCount: 1 + ingress: + path: /ws + pathType: Prefix + image: + # -- There is no official image (yet), this one is community-supported + repository: oneacrefund/superset-websocket + tag: latest + pullPolicy: IfNotPresent + # -- The config.json to pass to the server, see https://github.com/apache/superset/tree/master/superset-websocket + # Note that the configuration can also read from environment variables (which will have priority), see https://github.com/apache/superset/blob/master/superset-websocket/src/config.ts for a list of supported variables + # @default -- see `values.yaml` + config: + { + "port": 8080, + "logLevel": "debug", + "logToFile": false, + "logFilename": "app.log", + "statsd": { "host": "127.0.0.1", "port": 8125, "globalTags": [] }, + "redis": + { + "port": 6379, + "host": "127.0.0.1", + "password": "", + "db": 0, + "ssl": false, + }, + "redisStreamPrefix": "async-events-", + "jwtSecret": "CHANGE-ME", + "jwtCookieName": "async-token", + } + service: + type: ClusterIP + annotations: {} + loadBalancerIP: ~ + port: 8080 + nodePort: + # -- (int) + http: nil + command: [] + resources: {} + deploymentAnnotations: {} + # -- Affinity to be added to supersetWebsockets deployment + affinity: {} + # -- TopologySpreadConstrains to be added to supersetWebsockets deployments + topologySpreadConstraints: [] + podAnnotations: {} + podLabels: {} + strategy: {} + podSecurityContext: {} + containerSecurityContext: {} + startupProbe: + httpGet: + path: /health + port: ws + initialDelaySeconds: 5 + timeoutSeconds: 1 + failureThreshold: 60 + periodSeconds: 5 + successThreshold: 1 + livenessProbe: + httpGet: + path: /health + port: ws + initialDelaySeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + periodSeconds: 5 + successThreshold: 1 + readinessProbe: + httpGet: + path: /health + port: ws + initialDelaySeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + periodSeconds: 5 + successThreshold: 1 + +init: + # Configure resources + # Warning: fab command consumes a lot of ram and can + # cause the process to be killed due to OOM if it exceeds limit + # Make sure you are giving a strong password for the admin user creation( else make sure you are changing after setup) + # Also change the admin email to your own custom email. + resources: {} + # limits: + # cpu: + # memory: + # requests: + # cpu: + # memory: + # -- Command + # @default -- a `superset_init.sh` command + command: + - "/bin/sh" + - "-c" + - ". {{ .Values.configMountPath }}/superset_bootstrap.sh; . {{ .Values.configMountPath }}/superset_init.sh" + enabled: true + jobAnnotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": "before-hook-creation" + loadExamples: false + createAdmin: true + adminUser: + username: admin + firstname: Superset + lastname: Admin + email: admin@superset.com + password: admin + # -- List of initContainers + # @default -- a container waiting for postgres + initContainers: + - name: wait-for-postgres + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: "{{ tpl .Values.envFromSecret . 
}}" + command: + - /bin/sh + - -c + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -timeout 120s + # -- A Superset init script + # @default -- a script to create admin user and initialize roles + initscript: |- + #!/bin/sh + set -eu + echo "Upgrading DB schema..." + superset db upgrade + echo "Initializing roles..." + superset init + {{ if .Values.init.createAdmin }} + echo "Creating admin user..." + superset fab create-admin \ + --username {{ .Values.init.adminUser.username }} \ + --firstname {{ .Values.init.adminUser.firstname }} \ + --lastname {{ .Values.init.adminUser.lastname }} \ + --email {{ .Values.init.adminUser.email }} \ + --password {{ .Values.init.adminUser.password }} \ + || true + {{- end }} + {{ if .Values.init.loadExamples }} + echo "Loading examples..." + superset load_examples + {{- end }} + if [ -f "{{ .Values.extraConfigMountPath }}/import_datasources.yaml" ]; then + echo "Importing database connections.... " + superset import_datasources -p {{ .Values.extraConfigMountPath }}/import_datasources.yaml + fi + # -- Launch additional containers into init job pod + extraContainers: [] + ## Annotations to be added to init job pods + podAnnotations: {} + podSecurityContext: {} + # fsGroup: 1010180000 + containerSecurityContext: {} + # runAsUser: 1010180000 + # runAsGroup: 1010180000 + # runAsNonRoot: true + # allowPrivilegeEscalation: false + # seccompProfile: + # type: RuntimeDefault + # capabilities: + # drop: + # - ALL + ## Tolerations to be added to init job pods + tolerations: [] + ## Affinity to be added to init job pods + affinity: {} + # -- TopologySpreadConstrains to be added to init job + topologySpreadConstraints: [] + +# -- Configuration values for the postgresql dependency. +# ref: https://github.com/bitnami/charts/tree/main/bitnami/postgresql +# @default -- see `values.yaml` +postgresql: + ## + ## Use the PostgreSQL chart dependency. + ## Set to false if bringing your own PostgreSQL. + enabled: false + + ## Authentication parameters + auth: + ## The name of an existing secret that contains the postgres password. + existingSecret: + ## PostgreSQL name for a custom user to create + username: superset + ## PostgreSQL password for the custom user to create. Ignored if `auth.existingSecret` with key `password` is provided + password: superset + ## PostgreSQL name for a custom database to create + database: superset + + image: + tag: "14.6.0-debian-11-r13" + + ## PostgreSQL Primary parameters + primary: + ## + ## Persistent Volume Storage configuration. + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes + persistence: + ## + ## Enable PostgreSQL persistence using Persistent Volume Claims. + enabled: true + ## + ## Persistent class + # storageClass: classname + ## + ## Access modes: + accessModes: + - ReadWriteOnce + ## PostgreSQL port + service: + ports: + postgresql: "5432" + +# -- Configuration values for the Redis dependency. +# ref: https://github.com/bitnami/charts/blob/master/bitnami/redis +# More documentation can be found here: https://artifacthub.io/packages/helm/bitnami/redis +# @default -- see `values.yaml` +redis: + ## + ## Use the redis chart dependency. + ## + ## If you are bringing your own redis, you can set the host in supersetNode.connections.redis_host + ## + ## Set to false if bringing your own redis. 
+ enabled: false + ## + ## Set architecture to standalone/replication + architecture: standalone + ## + ## Auth configuration: + ## + auth: + ## Enable password authentication + enabled: false + ## The name of an existing secret that contains the redis password. + existingSecret: "" + ## Name of the key containing the secret. + existingSecretKey: "" + ## Redis password + password: superset + ## + ## Master configuration + ## + master: + ## + ## Image configuration + # image: + ## + ## docker registry secret names (list) + # pullSecrets: nil + ## + ## Configure persistance + persistence: + ## + ## Use a PVC to persist data. + enabled: false + ## + ## Persistent class + # storageClass: classname + ## + ## Access mode: + accessModes: + - ReadWriteOnce + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +# -- TopologySpreadConstrains to be added to all deployments +topologySpreadConstraints: [] diff --git a/openshift/templates/superset/cthub-superset-test.yaml b/openshift/templates/superset/cthub-superset-test.yaml new file mode 100644 index 00000000..a19a0f9a --- /dev/null +++ b/openshift/templates/superset/cthub-superset-test.yaml @@ -0,0 +1,867 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Default values for superset. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# A README is automatically generated from this file to document it, using helm-docs (see https://github.com/norwoodj/helm-docs) +# To update it, install helm-docs and run helm-docs from the root of this chart + +# -- Provide a name to override the name of the chart +nameOverride: ~ +# -- Provide a name to override the full names of resources +fullnameOverride: cthub-superset-test + +# -- User ID directive. This user must have enough permissions to run the bootstrap script +# Running containers as root is not recommended in production. Change this to another UID - e.g. 1000 to be more secure +runAsUser: 1010150000 + +# -- Specify service account name to be used +serviceAccountName: ~ +serviceAccount: + # -- Create custom service account for Superset. If create: true and serviceAccountName is not provided, `superset.fullname` will be used. + create: false + annotations: {} + +# -- Install additional packages and do any other bootstrap configuration in this script +# For production clusters it's recommended to build own image with this step done in CI +# @default -- see `values.yaml` +# if [ ! 
-f ~/bootstrap ]; then echo "Running Superset with uid {{ .Values.runAsUser }}" > /tmp/bootstrap; fi +bootstrapScript: | + #!/bin/bash + + +# -- The name of the secret which we will use to generate a superset_config.py file +# Note: this secret must have the key superset_config.py in it and can include other files as well +configFromSecret: '{{ template "superset.fullname" . }}-config' + +# -- The name of the secret which we will use to populate env vars in deployed pods +# This can be useful for secret keys, etc. +envFromSecret: '{{ template "superset.fullname" . }}-env' +# -- This can be a list of templated strings +envFromSecrets: [] + +# -- Extra environment variables that will be passed into pods +extraEnv: {} + # Different gunicorn settings, refer to the gunicorn documentation + # https://docs.gunicorn.org/en/stable/settings.html# + # These variables are used as Flags at the gunicorn startup + # https://github.com/apache/superset/blob/master/docker/run-server.sh#L22 + # Extend timeout to allow long running queries. + # GUNICORN_TIMEOUT: 300 + # Increase the gunicorn worker amount, can improve performance drastically + # See: https://docs.gunicorn.org/en/stable/design.html#how-many-workers + # SERVER_WORKER_AMOUNT: 4 + # WORKER_MAX_REQUESTS: 0 + # WORKER_MAX_REQUESTS_JITTER: 0 + # SERVER_THREADS_AMOUNT: 20 + # GUNICORN_KEEPALIVE: 2 + # SERVER_LIMIT_REQUEST_LINE: 0 + # SERVER_LIMIT_REQUEST_FIELD_SIZE: 0 + + # OAUTH_HOME_DOMAIN: .. + # # If a whitelist is not set, any address that can use your OAuth2 endpoint will be able to login. + # # this includes any random Gmail address if your OAuth2 Web App is set to External. + # OAUTH_WHITELIST_REGEX: ... + +# -- Extra environment variables in RAW format that will be passed into pods +extraEnvRaw: [] + # Load DB password from other secret (e.g. for zalando operator) + # - name: DB_PASS + # valueFrom: + # secretKeyRef: + # name: superset.superset-postgres.credentials.postgresql.acid.zalan.do + # key: password + +# -- Extra environment variables to pass as secrets +extraSecretEnv: {} + # MAPBOX_API_KEY: ... + # # Google API Keys: https://console.cloud.google.com/apis/credentials + # GOOGLE_KEY: ... + # GOOGLE_SECRET: ... + +# -- Extra files to mount on `/app/pythonpath` +extraConfigs: {} + # import_datasources.yaml: | + # databases: + # - allow_file_upload: true + # allow_ctas: true + # allow_cvas: true + # database_name: example-db + # extra: "{\r\n \"metadata_params\": {},\r\n \"engine_params\": {},\r\n \"\ + # metadata_cache_timeout\": {},\r\n \"schemas_allowed_for_file_upload\": []\r\n\ + # }" + # sqlalchemy_uri: example://example-db.local + # tables: [] + +# -- Extra files to mount on `/app/pythonpath` as secrets +extraSecrets: {} + +extraVolumes: [] + # - name: customConfig + # configMap: + # name: '{{ template "superset.fullname" . }}-custom-config' + # - name: additionalSecret + # secret: + # secretName: my-secret + # defaultMode: 0600 + +extraVolumeMounts: [] + # - name: customConfig + # mountPath: /mnt/config + # readOnly: true + # - name: additionalSecret: + # mountPath: /mnt/secret + +# -- A dictionary of overrides to append at the end of superset_config.py - the name does not matter +# WARNING: the order is not guaranteed +# Files can be passed as helm --set-file configOverrides.my-override=my-file.py +configOverrides: {} + # extend_timeout: | + # # Extend timeout to allow long running queries. + # SUPERSET_WEBSERVER_TIMEOUT = ... 
+ # enable_oauth: | + # from flask_appbuilder.security.manager import (AUTH_DB, AUTH_OAUTH) + # AUTH_TYPE = AUTH_OAUTH + # OAUTH_PROVIDERS = [ + # { + # "name": "google", + # "whitelist": [ os.getenv("OAUTH_WHITELIST_REGEX", "") ], + # "icon": "fa-google", + # "token_key": "access_token", + # "remote_app": { + # "client_id": os.environ.get("GOOGLE_KEY"), + # "client_secret": os.environ.get("GOOGLE_SECRET"), + # "api_base_url": "https://www.googleapis.com/oauth2/v2/", + # "client_kwargs": {"scope": "email profile"}, + # "request_token_url": None, + # "access_token_url": "https://accounts.google.com/o/oauth2/token", + # "authorize_url": "https://accounts.google.com/o/oauth2/auth", + # "authorize_params": {"hd": os.getenv("OAUTH_HOME_DOMAIN", "")} + # } + # } + # ] + # # Map Authlib roles to superset roles + # AUTH_ROLE_ADMIN = 'Admin' + # AUTH_ROLE_PUBLIC = 'Public' + # # Will allow user self registration, allowing to create Flask users from Authorized User + # AUTH_USER_REGISTRATION = True + # # The default user self registration role + # AUTH_USER_REGISTRATION_ROLE = "Admin" + # secret: | + # # Generate your own secret key for encryption. Use openssl rand -base64 42 to generate a good key + # SECRET_KEY = 'YOUR_OWN_RANDOM_GENERATED_SECRET_KEY' + +# -- Same as above but the values are files +configOverridesFiles: {} + # extend_timeout: extend_timeout.py + # enable_oauth: enable_oauth.py + +configMountPath: "/app/pythonpath" + +extraConfigMountPath: "/app/configs" + +image: + repository: apachesuperset.docker.scarf.sh/apache/superset + tag: "" + pullPolicy: IfNotPresent + +imagePullSecrets: [] + +initImage: + repository: apache/superset + tag: dockerize + pullPolicy: IfNotPresent + +service: + type: ClusterIP + port: 8088 + annotations: {} + # cloud.google.com/load-balancer-type: "Internal" + loadBalancerIP: ~ + nodePort: + # -- (int) + http: nil + +ingress: + enabled: false + ingressClassName: ~ + annotations: {} + # kubernetes.io/tls-acme: "true" + ## Extend timeout to allow long running queries. + # nginx.ingress.kubernetes.io/proxy-connect-timeout: "300" + # nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + # nginx.ingress.kubernetes.io/proxy-send-timeout: "300" + path: / + pathType: ImplementationSpecific + hosts: + - chart-example.local + tls: [] + extraHostsRaw: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # The limits below will apply to all Superset components. To set individual resource limitations refer to the pod specific values below. + # The pod specific values will overwrite anything that is set here. 
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+# -- Custom hostAliases for all superset pods
+## https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/
+hostAliases: []
+# - hostnames:
+#   - nodns.my.lan
+#   ip: 18.27.36.45
+
+# Superset node configuration
+supersetNode:
+  replicaCount: 1
+  autoscaling:
+    enabled: false
+    minReplicas: 1
+    maxReplicas: 2
+    targetCPUUtilizationPercentage: 80
+    # targetMemoryUtilizationPercentage: 80
+
+  # -- Startup command
+  # @default -- See `values.yaml`
+  command:
+    - "/bin/sh"
+    - "-c"
+    - ". {{ .Values.configMountPath }}/superset_bootstrap.sh; /usr/bin/run-server.sh"
+  connections:
+    # -- Change in case of bringing your own redis and then also set redis.enabled:false
+    # redis_host: '{{ .Release.Name }}-redis-headless'
+    redis_host: toBeOverrided
+    redis_password: toBeOverrided
+    redis_port: "6379"
+    # You need to change the configuration below in case you bring your own PostgreSQL instance, and also set postgresql.enabled:false
+    # db_host: '{{ .Release.Name }}-postgresql'
+    db_host: toBeOverrided
+    db_port: "5432"
+    db_user: toBeOverrided
+    db_pass: toBeOverrided
+    db_name: superset
+  env: {}
+  # -- If true, forces deployment to reload on each upgrade
+  forceReload: false
+  # -- Init containers
+  # @default -- a container waiting for postgres
+  initContainers:
+    - name: wait-for-postgres
+      image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}"
+      imagePullPolicy: "{{ .Values.initImage.pullPolicy }}"
+      envFrom:
+        - secretRef:
+            name: "{{ tpl .Values.envFromSecret . }}"
+      command:
+        - /bin/sh
+        - -c
+        - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -timeout 120s
+
+  # -- Launch additional containers into supersetNode pod
+  extraContainers: []
+  # -- Annotations to be added to supersetNode deployment
+  deploymentAnnotations: {}
+  # -- Labels to be added to supersetNode deployment
+  deploymentLabels: {}
+  # -- Affinity to be added to supersetNode deployment
+  affinity: {}
+  # -- TopologySpreadConstraints to be added to supersetNode deployments
+  topologySpreadConstraints: []
+  # -- Annotations to be added to supersetNode pods
+  podAnnotations: {}
+  # -- Labels to be added to supersetNode pods
+  podLabels: {}
+  startupProbe:
+    httpGet:
+      path: /health
+      port: http
+    initialDelaySeconds: 15
+    timeoutSeconds: 1
+    failureThreshold: 60
+    periodSeconds: 5
+    successThreshold: 1
+  livenessProbe:
+    httpGet:
+      path: /health
+      port: http
+    initialDelaySeconds: 15
+    timeoutSeconds: 1
+    failureThreshold: 3
+    periodSeconds: 15
+    successThreshold: 1
+  readinessProbe:
+    httpGet:
+      path: /health
+      port: http
+    initialDelaySeconds: 15
+    timeoutSeconds: 1
+    failureThreshold: 3
+    periodSeconds: 15
+    successThreshold: 1
+  # -- Resource settings for the supersetNode pods - these settings might overwrite existing values from the global resources object defined above.
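+  # When tuning the limits below, the gunicorn settings documented under extraEnv
+  # above (SERVER_WORKER_AMOUNT, GUNICORN_TIMEOUT) are usually sized against them;
+  # a sketch, with illustrative values only:
+  # extraEnv:
+  #   SERVER_WORKER_AMOUNT: 2
+  #   GUNICORN_TIMEOUT: 300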
+ resources: + limits: + cpu: 200m + memory: 500Mi + requests: + cpu: 100m + memory: 250Mi + podSecurityContext: {} + # fsGroup: 1010150000 + containerSecurityContext: {} + # runAsUser: 1010150000 + # runAsGroup: 1010150000 + # runAsNonRoot: true + # allowPrivilegeEscalation: false + # seccompProfile: + # type: RuntimeDefault + # capabilities: + # drop: + # - ALL + strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 25% + # maxUnavailable: 25% + +# Superset Celery worker configuration +supersetWorker: + replicaCount: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 2 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + + # -- Worker startup command + # @default -- a `celery worker` command + command: + - "/bin/sh" + - "-c" + - "pwd; ls -l; . {{ .Values.configMountPath }}/superset_bootstrap.sh; celery --app=superset.tasks.celery_app:app worker" + # -- If true, forces deployment to reload on each upgrade + forceReload: false + # -- Init container + # @default -- a container waiting for postgres and redis + initContainers: + - name: wait-for-postgres-redis + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: "{{ tpl .Values.envFromSecret . }}" + command: + - /bin/sh + - -c + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -wait "tcp://$REDIS_HOST:$REDIS_PORT" -timeout 120s + # -- Launch additional containers into supersetWorker pod + extraContainers: [] + # -- Annotations to be added to supersetWorker deployment + deploymentAnnotations: {} + # -- Labels to be added to supersetWorker deployment + deploymentLabels: {} + # -- Affinity to be added to supersetWorker deployment + affinity: {} + # -- TopologySpreadConstrains to be added to supersetWorker deployments + topologySpreadConstraints: [] + # -- Annotations to be added to supersetWorker pods + podAnnotations: {} + # -- Labels to be added to supersetWorker pods + podLabels: {} + # -- Resource settings for the supersetWorker pods - these settings overwrite might existing values from the global resources object defined above. + resources: + limits: + cpu: 400m + memory: 4Gi + requests: + cpu: 200m + memory: 2Gi + podSecurityContext: {} + containerSecurityContext: {} + + strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 25% + # maxUnavailable: 25% + livenessProbe: + exec: + # -- Liveness probe command + # @default -- a `celery inspect ping` command + command: + - sh + - -c + - celery -A superset.tasks.celery_app:app inspect ping -d celery@$HOSTNAME + initialDelaySeconds: 120 + timeoutSeconds: 60 + failureThreshold: 3 + periodSeconds: 60 + successThreshold: 1 + # -- No startup/readiness probes by default since we don't really care about its startup time (it doesn't serve traffic) + startupProbe: {} + # -- No startup/readiness probes by default since we don't really care about its startup time (it doesn't serve traffic) + readinessProbe: {} + +# Superset beat configuration (to trigger scheduled jobs like reports) +supersetCeleryBeat: + # -- This is only required if you intend to use alerts and reports + enabled: false + # -- Command + # @default -- a `celery beat` command + command: + - "/bin/sh" + - "-c" + - ". 
{{ .Values.configMountPath }}/superset_bootstrap.sh; celery --app=superset.tasks.celery_app:app beat --pidfile /tmp/celerybeat.pid --schedule /tmp/celerybeat-schedule" + # -- If true, forces deployment to reload on each upgrade + forceReload: false + # -- List of init containers + # @default -- a container waiting for postgres + initContainers: + - name: wait-for-postgres-redis + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: "{{ tpl .Values.envFromSecret . }}" + command: + - /bin/sh + - -c + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -wait "tcp://$REDIS_HOST:$REDIS_PORT" -timeout 120s + # -- Annotations to be added to supersetCeleryBeat deployment + deploymentAnnotations: {} + # -- Affinity to be added to supersetCeleryBeat deployment + affinity: {} + # -- TopologySpreadConstrains to be added to supersetCeleryBeat deployments + topologySpreadConstraints: [] + # -- Annotations to be added to supersetCeleryBeat pods + podAnnotations: {} + # -- Labels to be added to supersetCeleryBeat pods + podLabels: {} + # -- Resource settings for the CeleryBeat pods - these settings overwrite might existing values from the global resources object defined above. + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + podSecurityContext: + fsGroup: 1010150000 + containerSecurityContext: + runAsUser: 1010150000 + runAsGroup: 1010150000 + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + + +supersetCeleryFlower: + # -- Enables a Celery flower deployment (management UI to monitor celery jobs) + # WARNING: on superset 1.x, this requires a Superset image that has `flower<1.0.0` installed (which is NOT the case of the default images) + # flower>=1.0.0 requires Celery 5+ which Superset 1.5 does not support + enabled: false + replicaCount: 1 + # -- Command + # @default -- a `celery flower` command + command: + - "/bin/sh" + - "-c" + - "celery --app=superset.tasks.celery_app:app flower" + service: + type: ClusterIP + annotations: {} + loadBalancerIP: ~ + port: 5555 + nodePort: + # -- (int) + http: nil + startupProbe: + httpGet: + path: /api/workers + port: flower + initialDelaySeconds: 5 + timeoutSeconds: 1 + failureThreshold: 60 + periodSeconds: 5 + successThreshold: 1 + livenessProbe: + httpGet: + path: /api/workers + port: flower + initialDelaySeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + periodSeconds: 5 + successThreshold: 1 + readinessProbe: + httpGet: + path: /api/workers + port: flower + initialDelaySeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + periodSeconds: 5 + successThreshold: 1 + # -- List of init containers + # @default -- a container waiting for postgres and redis + initContainers: + - name: wait-for-postgres-redis + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: "{{ tpl .Values.envFromSecret . 
}}" + command: + - /bin/sh + - -c + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -wait "tcp://$REDIS_HOST:$REDIS_PORT" -timeout 120s + # -- Annotations to be added to supersetCeleryFlower deployment + deploymentAnnotations: {} + # -- Affinity to be added to supersetCeleryFlower deployment + affinity: {} + # -- TopologySpreadConstrains to be added to supersetCeleryFlower deployments + topologySpreadConstraints: [] + # -- Annotations to be added to supersetCeleryFlower pods + podAnnotations: {} + # -- Labels to be added to supersetCeleryFlower pods + podLabels: {} + # -- Resource settings for the CeleryBeat pods - these settings overwrite might existing values from the global resources object defined above. + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + podSecurityContext: {} + # fsGroup: 1010150000 + containerSecurityContext: {} + # runAsUser: 1010150000 + # runAsGroup: 1010150000 + # runAsNonRoot: true + # allowPrivilegeEscalation: false + # seccompProfile: + # type: RuntimeDefault + # capabilities: + # drop: + # - ALL + +supersetWebsockets: + # -- This is only required if you intend to use `GLOBAL_ASYNC_QUERIES` in `ws` mode + # see https://github.com/apache/superset/blob/master/CONTRIBUTING.md#async-chart-queries + enabled: false + replicaCount: 1 + ingress: + path: /ws + pathType: Prefix + image: + # -- There is no official image (yet), this one is community-supported + repository: oneacrefund/superset-websocket + tag: latest + pullPolicy: IfNotPresent + # -- The config.json to pass to the server, see https://github.com/apache/superset/tree/master/superset-websocket + # Note that the configuration can also read from environment variables (which will have priority), see https://github.com/apache/superset/blob/master/superset-websocket/src/config.ts for a list of supported variables + # @default -- see `values.yaml` + config: + { + "port": 8080, + "logLevel": "debug", + "logToFile": false, + "logFilename": "app.log", + "statsd": { "host": "127.0.0.1", "port": 8125, "globalTags": [] }, + "redis": + { + "port": 6379, + "host": "127.0.0.1", + "password": "", + "db": 0, + "ssl": false, + }, + "redisStreamPrefix": "async-events-", + "jwtSecret": "CHANGE-ME", + "jwtCookieName": "async-token", + } + service: + type: ClusterIP + annotations: {} + loadBalancerIP: ~ + port: 8080 + nodePort: + # -- (int) + http: nil + command: [] + resources: {} + deploymentAnnotations: {} + # -- Affinity to be added to supersetWebsockets deployment + affinity: {} + # -- TopologySpreadConstrains to be added to supersetWebsockets deployments + topologySpreadConstraints: [] + podAnnotations: {} + podLabels: {} + strategy: {} + podSecurityContext: {} + # fsGroup: 1010150000 + containerSecurityContext: {} + # runAsUser: 1010150000 + # runAsGroup: 1010150000 + # runAsNonRoot: true + # allowPrivilegeEscalation: false + # seccompProfile: + # type: RuntimeDefault + # capabilities: + # drop: + # - ALL + startupProbe: + httpGet: + path: /health + port: ws + initialDelaySeconds: 5 + timeoutSeconds: 1 + failureThreshold: 60 + periodSeconds: 5 + successThreshold: 1 + livenessProbe: + httpGet: + path: /health + port: ws + initialDelaySeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + periodSeconds: 5 + successThreshold: 1 + readinessProbe: + httpGet: + path: /health + port: ws + initialDelaySeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + periodSeconds: 5 + successThreshold: 1 + +init: + # Configure resources + # Warning: fab command consumes a lot of ram and 
can + # cause the process to be killed due to OOM if it exceeds limit + # Make sure you are giving a strong password for the admin user creation( else make sure you are changing after setup) + # Also change the admin email to your own custom email. + resources: {} + # limits: + # cpu: + # memory: + # requests: + # cpu: + # memory: + # -- Command + # @default -- a `superset_init.sh` command + command: + - "/bin/sh" + - "-c" + - ". {{ .Values.configMountPath }}/superset_bootstrap.sh; . {{ .Values.configMountPath }}/superset_init.sh" + enabled: true + jobAnnotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": "before-hook-creation" + loadExamples: false + createAdmin: true + adminUser: + username: admin + firstname: Superset + lastname: Admin + email: admin@superset.com + password: admin + # -- List of initContainers + # @default -- a container waiting for postgres + initContainers: + - name: wait-for-postgres + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: "{{ tpl .Values.envFromSecret . }}" + command: + - /bin/sh + - -c + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -timeout 120s + # -- A Superset init script + # @default -- a script to create admin user and initialize roles + initscript: |- + #!/bin/sh + set -eu + echo "Upgrading DB schema..." + superset db upgrade + echo "Initializing roles..." + superset init + {{ if .Values.init.createAdmin }} + echo "Creating admin user..." + superset fab create-admin \ + --username {{ .Values.init.adminUser.username }} \ + --firstname {{ .Values.init.adminUser.firstname }} \ + --lastname {{ .Values.init.adminUser.lastname }} \ + --email {{ .Values.init.adminUser.email }} \ + --password {{ .Values.init.adminUser.password }} \ + || true + {{- end }} + {{ if .Values.init.loadExamples }} + echo "Loading examples..." + superset load_examples + {{- end }} + if [ -f "{{ .Values.extraConfigMountPath }}/import_datasources.yaml" ]; then + echo "Importing database connections.... " + superset import_datasources -p {{ .Values.extraConfigMountPath }}/import_datasources.yaml + fi + # -- Launch additional containers into init job pod + extraContainers: [] + ## Annotations to be added to init job pods + podAnnotations: {} + podSecurityContext: {} + # fsGroup: 1010150000 + containerSecurityContext: {} + # runAsUser: 1010150000 + # runAsGroup: 1010150000 + # runAsNonRoot: true + # allowPrivilegeEscalation: false + # seccompProfile: + # type: RuntimeDefault + # capabilities: + # drop: + # - ALL + ## Tolerations to be added to init job pods + tolerations: [] + ## Affinity to be added to init job pods + affinity: {} + # -- TopologySpreadConstrains to be added to init job + topologySpreadConstraints: [] + +# -- Configuration values for the postgresql dependency. +# ref: https://github.com/bitnami/charts/tree/main/bitnami/postgresql +# @default -- see `values.yaml` +postgresql: + ## + ## Use the PostgreSQL chart dependency. + ## Set to false if bringing your own PostgreSQL. + enabled: false + + ## Authentication parameters + auth: + ## The name of an existing secret that contains the postgres password. + existingSecret: + ## PostgreSQL name for a custom user to create + username: superset + ## PostgreSQL password for the custom user to create. 
Ignored if `auth.existingSecret` with key `password` is provided
+    password: superset
+    ## PostgreSQL name for a custom database to create
+    database: superset
+
+  image:
+    tag: "14.6.0-debian-11-r13"
+
+  ## PostgreSQL Primary parameters
+  primary:
+    ##
+    ## Persistent Volume Storage configuration.
+    ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes
+    persistence:
+      ##
+      ## Enable PostgreSQL persistence using Persistent Volume Claims.
+      enabled: true
+      ##
+      ## Persistent class
+      # storageClass: classname
+      ##
+      ## Access modes:
+      accessModes:
+        - ReadWriteOnce
+    ## PostgreSQL port
+    service:
+      ports:
+        postgresql: "5432"
+
+# -- Configuration values for the Redis dependency.
+# ref: https://github.com/bitnami/charts/blob/master/bitnami/redis
+# More documentation can be found here: https://artifacthub.io/packages/helm/bitnami/redis
+# @default -- see `values.yaml`
+redis:
+  ##
+  ## Use the redis chart dependency.
+  ##
+  ## If you are bringing your own redis, you can set the host in supersetNode.connections.redis_host
+  ##
+  ## Set to false if bringing your own redis.
+  enabled: false
+  ##
+  ## Set architecture to standalone/replication
+  architecture: standalone
+  ##
+  ## Auth configuration:
+  ##
+  auth:
+    ## Enable password authentication
+    enabled: false
+    ## The name of an existing secret that contains the redis password.
+    existingSecret: ""
+    ## Name of the key containing the secret.
+    existingSecretKey: ""
+    ## Redis password
+    password: superset
+  ##
+  ## Master configuration
+  ##
+  master:
+    ##
+    ## Image configuration
+    # image:
+    ##
+    ## docker registry secret names (list)
+    # pullSecrets: nil
+    ##
+    ## Configure persistence
+    persistence:
+      ##
+      ## Use a PVC to persist data.
+      enabled: false
+      ##
+      ## Persistent class
+      # storageClass: classname
+      ##
+      ## Access mode:
+      accessModes:
+        - ReadWriteOnce
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+# -- TopologySpreadConstraints to be added to all deployments
+topologySpreadConstraints: []
diff --git a/openshift/templates/superset/docker/Dockerfile b/openshift/templates/superset/docker/Dockerfile
new file mode 100644
index 00000000..ff259323
--- /dev/null
+++ b/openshift/templates/superset/docker/Dockerfile
@@ -0,0 +1,5 @@
+FROM apachesuperset.docker.scarf.sh/apache/superset:3.0.1
+USER root
+RUN pip install --upgrade pip
+RUN pip install authlib
+USER superset
\ No newline at end of file
diff --git a/openshift/templates/superset/readme.md b/openshift/templates/superset/readme.md
new file mode 100644
index 00000000..9450d684
--- /dev/null
+++ b/openshift/templates/superset/readme.md
@@ -0,0 +1,28 @@
+
+https://artifacthub.io/packages/helm/superset/superset
+
+helm repo add superset http://apache.github.io/superset/
+
+helm upgrade --install cthub-superset-dev superset/superset --version 0.10.14 \
+  --set supersetNode.connections.redis_host=cthub-redis-dev-headless \
+  --set supersetNode.connections.redis_password=xxx \
+  --set supersetNode.connections.db_host=cthub-crunchy-dev-pgbouncer \
+  --set supersetNode.connections.db_user=xxx \
+  --set supersetNode.connections.db_pass=xxx
+
+Escape any special characters in the set values, e.g. supersetNode.connections.redis_password=xxxxx\\
+
+Before installing:
+
+create the superset user and the superset database in crunchy
+
+update the patroni secret to add superset_username and superset_password
+
diff --git a/openshift/templates/superset/redis/Dockerfile b/openshift/templates/superset/redis/Dockerfile
deleted file mode 100644
index 02bca664..00000000
--- a/openshift/templates/superset/redis/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM 
artifacts.developer.gov.bc.ca/docker-remote/redis:6.2.6 \ No newline at end of file diff --git a/openshift/templates/superset/redis/redis-dc.yaml b/openshift/templates/superset/redis/redis-dc.yaml deleted file mode 100644 index 3a4ebc3d..00000000 --- a/openshift/templates/superset/redis/redis-dc.yaml +++ /dev/null @@ -1,126 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: null - name: redis-dc -parameters: - - name: ENV_NAME - value: test - required: true - - name: CPU_REQUEST - value: 50M - displayName: Requested CPU - description: Requested CPU - required: true - - name: CPU_LIMIT - value: 200M - displayName: CPU upper limit - description: CPU upper limit - required: true - - name: MEMORY_REQUEST - value: 50M - displayName: Requested memory - description: Requested memory - required: true - - name: MEMORY_LIMIT - value: 500M - displayName: Memory upper limit - description: Memory upper limit - required: true - - name: REPLICAS - description: | - The number of replicas to use. - displayName: REPLICAS - value: "1" -objects: -- apiVersion: v1 - kind: Service - metadata: - annotations: - openshift.io/generated-by: OpenShiftWebConsole - creationTimestamp: null - labels: - app: redis - name: redis - spec: - ports: - - name: redis - port: 6379 - protocol: TCP - targetPort: 6379 - selector: - app: redis - sessionAffinity: None - type: ClusterIP - status: - loadBalancer: {} -- kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: redis - annotations: - volume.beta.kubernetes.io/storage-class: netapp-block-standard - template.openshift.io.bcgov/create: 'true' - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi -- apiVersion: apps.openshift.io/v1 - kind: DeploymentConfig - metadata: - creationTimestamp: null - labels: - app: redis - name: redis - spec: - strategy: - type: Recreate - recreateParams: - timeoutSeconds: 600 - resources: {} - activeDeadlineSeconds: 21600 - triggers: - - type: ConfigChange - replicas: 1 - revisionHistoryLimit: 10 - test: false - selector: - app: redis - template: - metadata: - creationTimestamp: null - labels: - app: redis - spec: - volumes: - - name: redis - persistentVolumeClaim: - claimName: redis - containers: - - name: redis - image: >- - image-registry.openshift-image-registry.svc:5000/30b186-tools/redis:6.2.6 - ports: - - containerPort: 6379 - protocol: TCP - resources: {} - volumeMounts: - - name: redis - mountPath: /data - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - imagePullPolicy: Always - restartPolicy: Always - terminationGracePeriodSeconds: 30 - dnsPolicy: ClusterFirst - securityContext: {} - schedulerName: default-scheduler - status: - availableReplicas: 0 - latestVersion: 0 - observedGeneration: 0 - replicas: 0 - unavailableReplicas: 0 - updatedReplicas: 0 \ No newline at end of file diff --git a/openshift/templates/superset/redis/superset-bc-redis.yaml b/openshift/templates/superset/redis/superset-bc-redis.yaml deleted file mode 100644 index 66b4cdf9..00000000 --- a/openshift/templates/superset/redis/superset-bc-redis.yaml +++ /dev/null @@ -1,55 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: null - name: superset-redis-bc -objects: - - apiVersion: image.openshift.io/v1 - kind: ImageStream - metadata: - annotations: - description: Keeps track of changes in the redis image - creationTimestamp: null - name: redis - spec: - lookupPolicy: - local: false - - apiVersion: build.openshift.io/v1 - 
kind: BuildConfig - metadata: - creationTimestamp: null - name: redis - spec: - failedBuildsHistoryLimit: 5 - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: redis:6.2.6 - namespace: 30b186-tools - postCommit: {} - resources: - limits: - cpu: 40m - memory: 100Mi - requests: - cpu: 20m - memory: 50Mi - runPolicy: Serial - source: - contextDir: openshift/templates/superset/redis - git: - ref: superset-0.1.0 - uri: https://github.com/bcgov/cthub.git - type: Git - strategy: - dockerStrategy: - forcePull: true - noCache: true - pullSecret: - name: artifacts-pull-default-idxprm - type: Docker - successfulBuildsHistoryLimit: 5 - triggers: [] - status: - lastVersion: 0 diff --git a/openshift/templates/superset/superset-bc.yaml b/openshift/templates/superset/superset-bc.yaml deleted file mode 100644 index 60bf3916..00000000 --- a/openshift/templates/superset/superset-bc.yaml +++ /dev/null @@ -1,49 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: null - name: superset-bc -objects: -- apiVersion: image.openshift.io/v1 - kind: ImageStream - metadata: - annotations: - description: Keeps track of changes in the metabase image - creationTimestamp: null - name: superset - spec: - lookupPolicy: - local: false -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - creationTimestamp: null - name: superset - spec: - failedBuildsHistoryLimit: 5 - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: superset:v1.0 - namespace: 30b186-tools - postCommit: {} - resources: {} - runPolicy: Serial - source: - contextDir: openshift/templates/superset - git: - ref: superset-0.1.0 - uri: https://github.com/bcgov/cthub.git - type: Git - strategy: - dockerStrategy: - forcePull: true - noCache: true - pullSecret: - name: artifacts-pull-default-idxprm - type: Docker - successfulBuildsHistoryLimit: 5 - triggers: [] - status: - lastVersion: 0 diff --git a/openshift/templates/superset/superset-dc-redis.yaml b/openshift/templates/superset/superset-dc-redis.yaml deleted file mode 100644 index aa6f7e8e..00000000 --- a/openshift/templates/superset/superset-dc-redis.yaml +++ /dev/null @@ -1,126 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: null - name: redis-dc -parameters: - - name: ENV_NAME - value: test - required: true - - name: CPU_REQUEST - value: 50M - displayName: Requested CPU - description: Requested CPU - required: true - - name: CPU_LIMIT - value: 200M - displayName: CPU upper limit - description: CPU upper limit - required: true - - name: MEMORY_REQUEST - value: 50M - displayName: Requested memory - description: Requested memory - required: true - - name: MEMORY_LIMIT - value: 500M - displayName: Memory upper limit - description: Memory upper limit - required: true - - name: REPLICAS - description: | - The number of replicas to use. 
- displayName: REPLICAS - value: "1" -objects: -- apiVersion: v1 - kind: Service - metadata: - annotations: - openshift.io/generated-by: OpenShiftWebConsole - creationTimestamp: null - labels: - app: redis - name: redis - spec: - ports: - - name: redis - port: 6379 - protocol: TCP - targetPort: 6379 - selector: - app: redis - sessionAffinity: None - type: ClusterIP - status: - loadBalancer: {} -- kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: redis - annotations: - volume.beta.kubernetes.io/storage-class: netapp-block-standard - template.openshift.io.bcgov/create: 'true' - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi -- apiVersion: apps.openshift.io/v1 - kind: DeploymentConfig - metadata: - creationTimestamp: null - labels: - app: redis - name: redis - spec: - strategy: - type: Recreate - recreateParams: - timeoutSeconds: 600 - resources: {} - activeDeadlineSeconds: 21600 - triggers: - - type: ConfigChange - replicas: 1 - revisionHistoryLimit: 10 - test: false - selector: - app: redis - template: - metadata: - creationTimestamp: null - labels: - app: redis - spec: - volumes: - - name: redis - persistentVolumeClaim: - claimName: redis - containers: - - name: redis - image: >- - image-registry.openshift-image-registry.svc:5000/30b186-tools/redis:6.2.6 - ports: - - containerPort: 6379 - protocol: TCP - resources: {} - volumeMounts: - - name: redis - mountPath: /data - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - imagePullPolicy: Always - restartPolicy: Always - terminationGracePeriodSeconds: 30 - dnsPolicy: ClusterFirst - securityContext: {} - schedulerName: default-scheduler - status: - availableReplicas: 0 - latestVersion: 0 - observedGeneration: 0 - replicas: 0 - unavailableReplicas: 0 - updatedReplicas: 0 diff --git a/openshift/templates/superset/superset/Dockerfile b/openshift/templates/superset/superset/Dockerfile deleted file mode 100644 index ed2a08ee..00000000 --- a/openshift/templates/superset/superset/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM artifacts.developer.gov.bc.ca/docker-remote/apache/superset:latest -USER root -# RUN pip install --upgrade pip \ -# pip install psycopg2-binary sqlalchemy-redshift -RUN chgrp -R root /app/superset_home \ - && chmod -R g+w /app/superset_home -RUN mkdir /app/docker -COPY ./scripts /app/docker -ENTRYPOINT ["/app/docker/docker-bootstrap.sh"] -CMD ["app-gunicorn"] \ No newline at end of file diff --git a/openshift/templates/superset/superset/scripts/README.md b/openshift/templates/superset/superset/scripts/README.md deleted file mode 100644 index c867121d..00000000 --- a/openshift/templates/superset/superset/scripts/README.md +++ /dev/null @@ -1,75 +0,0 @@ - - -# Getting Started with Superset using Docker - -Docker is an easy way to get started with Superset. - -## Prerequisites - -1. Docker! [link](https://www.docker.com/get-started) -2. Docker-compose [link](https://docs.docker.com/compose/install/) - -## Configuration - -The `/app/pythonpath` folder is mounted from [`./docker/pythonpath_dev`](./pythonpath_dev) -which contains a base configuration [`./docker/pythonpath_dev/superset_config.py`](./pythonpath_dev/superset_config.py) -intended for use with local development. 
- -### Local overrides - -In order to override configuration settings locally, simply make a copy of [`./docker/pythonpath_dev/superset_config_local.example`](./pythonpath_dev/superset_config_local.example) -into `./docker/pythonpath_dev/superset_config_docker.py` (git ignored) and fill in your overrides. - -### Local packages - -If you want to add Python packages in order to test things like databases locally, you can simply add a local requirements.txt (`./docker/requirements-local.txt`) -and rebuild your Docker stack. - -Steps: - -1. Create `./docker/requirements-local.txt` -2. Add your new packages -3. Rebuild docker-compose - 1. `docker-compose down -v` - 2. `docker-compose up` - -## Initializing Database - -The database will initialize itself upon startup via the init container ([`superset-init`](./docker-init.sh)). This may take a minute. - -## Normal Operation - -To run the container, simply run: `docker-compose up` - -After waiting several minutes for Superset initialization to finish, you can open a browser and view [`http://localhost:8088`](http://localhost:8088) -to start your journey. - -## Developing - -While running, the container server will reload on modification of the Superset Python and JavaScript source code. -Don't forget to reload the page to take the new frontend into account though. - -## Production - -It is possible to run Superset in non-development mode by using [`docker-compose-non-dev.yml`](../docker-compose-non-dev.yml). This file excludes the volumes needed for development and uses [`./docker/.env-non-dev`](./.env-non-dev) which sets the variable `SUPERSET_ENV` to `production`. - -## Resource Constraints - -If you are attempting to build on macOS and it exits with 137 you need to increase your Docker resources. See instructions [here](https://docs.docker.com/docker-for-mac/#advanced) (search for memory) diff --git a/openshift/templates/superset/superset/scripts/docker-bootstrap.sh b/openshift/templates/superset/superset/scripts/docker-bootstrap.sh deleted file mode 100755 index 6f6ed6c9..00000000 --- a/openshift/templates/superset/superset/scripts/docker-bootstrap.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -set -eo pipefail - -REQUIREMENTS_LOCAL="/app/docker/requirements-local.txt" -# If Cypress run – overwrite the password for admin and export env variables -if [ "$CYPRESS_CONFIG" == "true" ]; then - export SUPERSET_CONFIG=tests.integration_tests.superset_test_config - export SUPERSET_TESTENV=true - export ENABLE_REACT_CRUD_VIEWS=true - export SUPERSET__SQLALCHEMY_DATABASE_URI=postgresql+psycopg2://superset:superset@db:5432/superset -fi -# -# Make sure we have dev requirements installed -# -if [ -f "${REQUIREMENTS_LOCAL}" ]; then - echo "Installing local overrides at ${REQUIREMENTS_LOCAL}" - pip install -r "${REQUIREMENTS_LOCAL}" -else - echo "Skipping local overrides" -fi - -if [[ "${1}" == "worker" ]]; then - echo "Starting Celery worker..." - celery --app=superset.tasks.celery_app:app worker -O fair -l INFO -elif [[ "${1}" == "beat" ]]; then - echo "Starting Celery beat..." - celery --app=superset.tasks.celery_app:app beat --pidfile /tmp/celerybeat.pid -l INFO -s "${SUPERSET_HOME}"/celerybeat-schedule -elif [[ "${1}" == "app" ]]; then - echo "Starting web app..." - flask run -p 8088 --with-threads --reload --debugger --host=0.0.0.0 -elif [[ "${1}" == "app-gunicorn" ]]; then - echo "Starting web app..." - /usr/bin/run-server.sh -fi diff --git a/openshift/templates/superset/superset/scripts/docker-ci.sh b/openshift/templates/superset/superset/scripts/docker-ci.sh deleted file mode 100755 index 9e97cbba..00000000 --- a/openshift/templates/superset/superset/scripts/docker-ci.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -/app/docker/docker-init.sh - -# TODO: copy config overrides from ENV vars - -# TODO: run celery in detached state -export SERVER_THREADS_AMOUNT=8 -# start up the web server - -/usr/bin/run-server.sh diff --git a/openshift/templates/superset/superset/scripts/docker-frontend.sh b/openshift/templates/superset/superset/scripts/docker-frontend.sh deleted file mode 100755 index 4c0d01e0..00000000 --- a/openshift/templates/superset/superset/scripts/docker-frontend.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -set -e - -cd /app/superset-frontend -npm install -g npm@7 -npm install -f --no-optional --global webpack webpack-cli -npm install -f --no-optional - -echo "Running frontend" -npm run dev diff --git a/openshift/templates/superset/superset/scripts/docker-init.sh b/openshift/templates/superset/superset/scripts/docker-init.sh deleted file mode 100755 index d5ead503..00000000 --- a/openshift/templates/superset/superset/scripts/docker-init.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -set -e - -# -# Always install local overrides first -# -/app/docker/docker-bootstrap.sh - -STEP_CNT=4 - -echo_step() { -cat < str: - """Get the environment variable or raise exception.""" - try: - return os.environ[var_name] - except KeyError: - if default is not None: - return default - else: - error_msg = "The environment variable {} was missing, abort...".format( - var_name - ) - raise EnvironmentError(error_msg) - - -DATABASE_DIALECT = get_env_variable("DATABASE_DIALECT") -DATABASE_USER = get_env_variable("DATABASE_USER") -DATABASE_PASSWORD = get_env_variable("DATABASE_PASSWORD") -DATABASE_HOST = get_env_variable("DATABASE_HOST") -DATABASE_PORT = get_env_variable("DATABASE_PORT") -DATABASE_DB = get_env_variable("DATABASE_DB") - -# The SQLAlchemy connection string. 
-SQLALCHEMY_DATABASE_URI = "%s://%s:%s@%s:%s/%s" % ( - DATABASE_DIALECT, - DATABASE_USER, - DATABASE_PASSWORD, - DATABASE_HOST, - DATABASE_PORT, - DATABASE_DB, -) - -REDIS_HOST = get_env_variable("REDIS_HOST") -REDIS_PORT = get_env_variable("REDIS_PORT") -REDIS_CELERY_DB = get_env_variable("REDIS_CELERY_DB", "0") -REDIS_RESULTS_DB = get_env_variable("REDIS_RESULTS_DB", "1") - -RESULTS_BACKEND = FileSystemCache("/app/superset_home/sqllab") - - -class CeleryConfig(object): - BROKER_URL = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_CELERY_DB}" - CELERY_IMPORTS = ("superset.sql_lab", "superset.tasks") - CELERY_RESULT_BACKEND = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_RESULTS_DB}" - CELERYD_LOG_LEVEL = "DEBUG" - CELERYD_PREFETCH_MULTIPLIER = 1 - CELERY_ACKS_LATE = False - CELERYBEAT_SCHEDULE = { - "reports.scheduler": { - "task": "reports.scheduler", - "schedule": crontab(minute="*", hour="*"), - }, - "reports.prune_log": { - "task": "reports.prune_log", - "schedule": crontab(minute=10, hour=0), - }, - } - - -CELERY_CONFIG = CeleryConfig - -FEATURE_FLAGS = {"ALERT_REPORTS": True} -ALERT_REPORTS_NOTIFICATION_DRY_RUN = True -WEBDRIVER_BASEURL = "http://superset:8088/" -# The base URL for the email report hyperlinks. -WEBDRIVER_BASEURL_USER_FRIENDLY = WEBDRIVER_BASEURL - -SQLLAB_CTAS_NO_LIMIT = True - -# -# Optionally import superset_config_docker.py (which will have been included on -# the PYTHONPATH) in order to allow for local settings to be overridden -# -try: - import superset_config_docker - from superset_config_docker import * # noqa - - logger.info( - f"Loaded your Docker configuration at " f"[{superset_config_docker.__file__}]" - ) -except ImportError: - logger.info("Using default Docker config...") diff --git a/openshift/templates/superset/superset/scripts/pythonpath_dev/superset_config_local.example b/openshift/templates/superset/superset/scripts/pythonpath_dev/superset_config_local.example deleted file mode 100644 index dfa03bb7..00000000 --- a/openshift/templates/superset/superset/scripts/pythonpath_dev/superset_config_local.example +++ /dev/null @@ -1,27 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# -# This is an example "local" configuration file. 
In order to set/override config -# options that ONLY apply to your local environment, simply copy/rename this file -# to docker/pythonpath/superset_config_docker.py -# It ends up being imported by docker/superset_config.py which is loaded by -# superset/config.py -# - -SQLALCHEMY_DATABASE_URI = "postgresql+psycopg2://pguser:pgpwd@some.host/superset" -SQLALCHEMY_ECHO = True diff --git a/openshift/templates/superset/superset/scripts/run-server.sh b/openshift/templates/superset/superset/scripts/run-server.sh deleted file mode 100644 index 5519ff5d..00000000 --- a/openshift/templates/superset/superset/scripts/run-server.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -HYPHEN_SYMBOL='-' - -gunicorn \ - --bind "${SUPERSET_BIND_ADDRESS:-0.0.0.0}:${SUPERSET_PORT:-8088}" \ - --access-logfile "${ACCESS_LOG_FILE:-$HYPHEN_SYMBOL}" \ - --error-logfile "${ERROR_LOG_FILE:-$HYPHEN_SYMBOL}" \ - --workers ${SERVER_WORKER_AMOUNT:-1} \ - --worker-class ${SERVER_WORKER_CLASS:-gthread} \ - --threads ${SERVER_THREADS_AMOUNT:-20} \ - --timeout ${GUNICORN_TIMEOUT:-60} \ - --limit-request-line ${SERVER_LIMIT_REQUEST_LINE:-0} \ - --limit-request-field_size ${SERVER_LIMIT_REQUEST_FIELD_SIZE:-0} \ - "${FLASK_APP}" diff --git a/openshift/templates/superset/superset/superset-bc.yaml b/openshift/templates/superset/superset/superset-bc.yaml deleted file mode 100644 index e1bf5e48..00000000 --- a/openshift/templates/superset/superset/superset-bc.yaml +++ /dev/null @@ -1,55 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: null - name: superset-bc -objects: - - apiVersion: image.openshift.io/v1 - kind: ImageStream - metadata: - annotations: - description: Keeps track of changes in the superset image - creationTimestamp: null - name: superset - spec: - lookupPolicy: - local: false - - apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - creationTimestamp: null - name: superset - spec: - failedBuildsHistoryLimit: 5 - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: superset:20211213 - namespace: 30b186-tools - postCommit: {} - resources: - limits: - cpu: '1' - memory: 500Mi - requests: - cpu: 500m - memory: 250Mi - runPolicy: Serial - source: - contextDir: openshift/templates/superset/superset - git: - ref: superset2-0.1.0 - uri: https://github.com/bcgov/cthub.git - type: Git - strategy: - dockerStrategy: - forcePull: true - noCache: true - pullSecret: - name: artifacts-pull-default-idxprm - type: Docker - successfulBuildsHistoryLimit: 5 - triggers: [] - status: - lastVersion: 0 diff --git a/openshift/templates/superset/superset/superset-dc-beat.yaml b/openshift/templates/superset/superset/superset-dc-beat.yaml deleted file mode 100644 index 
473f9ab1..00000000 --- a/openshift/templates/superset/superset/superset-dc-beat.yaml +++ /dev/null @@ -1,135 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: null - name: superset-dc-beat -parameters: - - name: ENV_NAME - required: true - - name: CPU_REQUEST - value: 40m - displayName: Requested CPU - description: Requested CPU - required: true - - name: CPU_LIMIT - value: 80m - displayName: CPU upper limit - description: CPU upper limit - required: true - - name: MEMORY_REQUEST - value: 200Mi - displayName: Requested memory - description: Requested memory - required: true - - name: MEMORY_LIMIT - value: 400Mi - displayName: Memory upper limit - description: Memory upper limit - required: true - - name: REPLICAS - description: | - The number of replicas to use. - displayName: REPLICAS - value: "1" -objects: -- apiVersion: apps.openshift.io/v1 - kind: DeploymentConfig - metadata: - creationTimestamp: null - labels: - app: superset-beat - name: superset-beat - spec: - strategy: - type: Recreate - recreateParams: - timeoutSeconds: 600 - resources: {} - activeDeadlineSeconds: 21600 - triggers: - - type: ConfigChange - replicas: 1 - revisionHistoryLimit: 10 - test: false - selector: - app: superset-beat - template: - metadata: - creationTimestamp: null - labels: - app: superset-beat - spec: - volumes: - - name: superset-home - persistentVolumeClaim: - claimName: superset-home - containers: - - name: superset-beat - image: >- - image-registry.openshift-image-registry.svc:5000/30b186-tools/superset:20211213 - args: ["beat"] - resources: - limits: - cpu: ${CPU_LIMIT} - memory: ${MEMORY_LIMIT} - requests: - cpu: ${CPU_REQUEST} - memory: ${MEMORY_REQUEST} - env: - - name: COMPOSE_PROJECT_NAME - value: superset - - name: DATABASE_DB - valueFrom: - secretKeyRef: - key: superset-db-name - name: patroni-creds-${ENV_NAME} - - name: DATABASE_HOST - value: patroni-master-test - - name: DATABASE_USER - valueFrom: - secretKeyRef: - key: superset-db-username - name: patroni-creds-${ENV_NAME} - - name: DATABASE_PASSWORD - valueFrom: - secretKeyRef: - key: superset-db-password - name: patroni-creds-${ENV_NAME} - - name: DATABASE_PORT - value: '5432' - - name: DATABASE_DIALECT - value: postgresql - - name: PYTHONPATH - value: '/app/pythonpath:/app/docker/pythonpath_dev' - - name: REDIS_HOST - value: redis - - name: REDIS_PORT - value: '6379' - - name: FLASK_ENV - value: production - - name: SUPERSET_ENV - value: production - - name: SUPERSET_LOAD_EXAMPLES - value: 'no' - - name: CYPRESS_CONFIG - value: 'false' - - name: SUPERSET_PORT - value: '8088' - volumeMounts: - - name: superset-home - mountPath: /app/superset_home - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - imagePullPolicy: Always - restartPolicy: Always - terminationGracePeriodSeconds: 30 - dnsPolicy: ClusterFirst - securityContext: {} - schedulerName: default-scheduler - status: - availableReplicas: 0 - latestVersion: 0 - observedGeneration: 0 - replicas: 0 - unavailableReplicas: 0 - updatedReplicas: 0 diff --git a/openshift/templates/superset/superset/superset-dc-superset.yaml b/openshift/templates/superset/superset/superset-dc-superset.yaml deleted file mode 100644 index 80da7675..00000000 --- a/openshift/templates/superset/superset/superset-dc-superset.yaml +++ /dev/null @@ -1,213 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: null - name: superset-dc -parameters: - - name: ENV_NAME - value: test - required: true - - name: 
CPU_REQUEST - value: 200m - displayName: Requested CPU - description: Requested CPU - required: true - - name: CPU_LIMIT - value: 400m - displayName: CPU upper limit - description: CPU upper limit - required: true - - name: MEMORY_REQUEST - value: 700Mi - displayName: Requested memory - description: Requested memory - required: true - - name: MEMORY_LIMIT - value: 1400Mi - displayName: Memory upper limit - description: Memory upper limit - required: true - - name: REPLICAS - description: | - The number of replicas to use. - displayName: REPLICAS - value: "1" -objects: -- apiVersion: v1 - kind: Service - metadata: - annotations: - openshift.io/generated-by: OpenShiftWebConsole - creationTimestamp: null - labels: - app: superset - name: superset - spec: - ports: - - name: superset-web - port: 8088 - protocol: TCP - targetPort: 8088 - selector: - app: superset - sessionAffinity: None - type: ClusterIP - status: - loadBalancer: {} -- apiVersion: v1 - kind: Service - metadata: - creationTimestamp: null - labels: - cluster-name: patroni-${ENV_NAME} - name: patroni-master-${ENV_NAME}-superset - spec: - ports: - - port: 5432 - protocol: TCP - targetPort: 5432 - selector: - cluster-name: patroni-${ENV_NAME} - role: master - app.kubernetes.io/name: patroni - sessionAffinity: None - type: ClusterIP - status: -- apiVersion: route.openshift.io/v1 - kind: Route - metadata: - annotations: - openshift.io/host.generated: "true" - creationTimestamp: null - labels: - app: superset - name: superset - spec: - host: superset-${ENV_NAME}.apps.silver.devops.gov.bc.ca - path: / - port: - targetPort: superset-web - tls: - insecureEdgeTerminationPolicy: Redirect - termination: edge - to: - kind: Service - name: superset - weight: 100 - wildcardPolicy: None -- kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: superset-home - annotations: - volume.beta.kubernetes.io/storage-class: netapp-file-standard - template.openshift.io.bcgov/create: 'true' - spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 2Gi -- apiVersion: apps.openshift.io/v1 - kind: DeploymentConfig - metadata: - creationTimestamp: null - labels: - app: superset - name: superset - spec: - strategy: - type: Recreate - recreateParams: - timeoutSeconds: 600 - resources: {} - activeDeadlineSeconds: 21600 - triggers: - - type: ConfigChange - replicas: ${{REPLICAS}} - revisionHistoryLimit: 10 - test: false - selector: - app: superset - template: - metadata: - creationTimestamp: null - labels: - app: superset - spec: - volumes: - - name: superset-home - persistentVolumeClaim: - claimName: superset-home - containers: - - name: superset-app-gunicorn - image: >- - image-registry.openshift-image-registry.svc:5000/30b186-tools/superset:20211213 - ports: - - containerPort: 8088 - protocol: TCP - resources: - limits: - cpu: ${CPU_LIMIT} - memory: ${MEMORY_LIMIT} - requests: - cpu: ${CPU_REQUEST} - memory: ${MEMORY_REQUEST} - env: - - name: COMPOSE_PROJECT_NAME - value: superset - - name: DATABASE_DB - valueFrom: - secretKeyRef: - key: superset-db-name - name: patroni-creds-${ENV_NAME} - - name: DATABASE_HOST - value: patroni-master-test - - name: DATABASE_USER - valueFrom: - secretKeyRef: - key: superset-db-username - name: patroni-creds-${ENV_NAME} - - name: DATABASE_PASSWORD - valueFrom: - secretKeyRef: - key: superset-db-password - name: patroni-creds-${ENV_NAME} - - name: DATABASE_PORT - value: '5432' - - name: DATABASE_DIALECT - value: postgresql - - name: PYTHONPATH - value: '/app/pythonpath:/app/docker/pythonpath_dev' - - 
name: REDIS_HOST - value: redis - - name: REDIS_PORT - value: '6379' - - name: FLASK_ENV - value: production - - name: SUPERSET_ENV - value: production - - name: SUPERSET_LOAD_EXAMPLES - value: 'no' - - name: CYPRESS_CONFIG - value: 'false' - - name: SUPERSET_PORT - value: '8088' - volumeMounts: - - name: superset-home - mountPath: /app/superset_home - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - imagePullPolicy: Always - restartPolicy: Always - terminationGracePeriodSeconds: 30 - dnsPolicy: ClusterFirst - securityContext: {} - schedulerName: default-scheduler - status: - availableReplicas: 0 - latestVersion: 0 - observedGeneration: 0 - replicas: 0 - unavailableReplicas: 0 - updatedReplicas: 0 diff --git a/openshift/templates/superset/superset/superset-dc-worker.yaml b/openshift/templates/superset/superset/superset-dc-worker.yaml deleted file mode 100644 index 54193111..00000000 --- a/openshift/templates/superset/superset/superset-dc-worker.yaml +++ /dev/null @@ -1,132 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: null - name: superset-dc-worker -parameters: - - name: ENV_NAME - required: true - - name: CPU_REQUEST - value: 100m - displayName: Requested CPU - description: Requested CPU - required: true - - name: CPU_LIMIT - value: 200m - displayName: CPU upper limit - description: CPU upper limit - required: true - - name: MEMORY_REQUEST - value: 2200Mi - displayName: Requested memory - description: Requested memory - required: true - - name: MEMORY_LIMIT - value: 4400Mi - displayName: Memory upper limit - description: Memory upper limit - required: true - - name: REPLICAS - description: | - The number of replicas to use. - displayName: REPLICAS - value: "1" -objects: -- apiVersion: apps.openshift.io/v1 - kind: DeploymentConfig - metadata: - creationTimestamp: null - labels: - app: superset-worker - name: superset-worker - spec: - strategy: - type: Recreate - recreateParams: - timeoutSeconds: 600 - resources: {} - activeDeadlineSeconds: 21600 - triggers: - - type: ConfigChange - replicas: 1 - revisionHistoryLimit: 10 - test: false - selector: - app: superset-worker - template: - metadata: - creationTimestamp: null - labels: - app: superset-worker - spec: - volumes: - - name: superset-home - persistentVolumeClaim: - claimName: superset-home - containers: - - name: superset-worker - image: >- - image-registry.openshift-image-registry.svc:5000/30b186-tools/superset:20211213 - args: ["worker"] - resources: - limits: - cpu: ${CPU_LIMIT} - memory: ${MEMORY_LIMIT} - requests: - cpu: ${CPU_REQUEST} - memory: ${MEMORY_REQUEST} - env: - - name: COMPOSE_PROJECT_NAME - value: superset - - name: DATABASE_DB - valueFrom: - secretKeyRef: - key: superset-db-name - name: patroni-creds-${ENV_NAME} - - name: DATABASE_HOST - value: patroni-master-test - - name: DATABASE_USER - valueFrom: - secretKeyRef: - key: superset-db-username - name: patroni-creds-${ENV_NAME} - - name: DATABASE_PASSWORD - valueFrom: - secretKeyRef: - key: superset-db-password - name: patroni-creds-${ENV_NAME} - - name: DATABASE_PORT - value: '5432' - - name: DATABASE_DIALECT - value: postgresql - - name: PYTHONPATH - value: '/app/pythonpath:/app/docker/pythonpath_dev' - - name: REDIS_HOST - value: redis - - name: REDIS_PORT - value: '6379' - - name: FLASK_ENV - value: production - - name: SUPERSET_ENV - value: production - - name: SUPERSET_LOAD_EXAMPLES - value: 'no' - - name: CYPRESS_CONFIG - value: 'false' - - name: SUPERSET_PORT - value: '8088' - 
volumeMounts: - - name: superset-home - mountPath: /app/superset_home - restartPolicy: Always - terminationGracePeriodSeconds: 30 - dnsPolicy: ClusterFirst - securityContext: {} - schedulerName: default-scheduler - status: - availableReplicas: 0 - latestVersion: 0 - observedGeneration: 0 - replicas: 0 - unavailableReplicas: 0 - updatedReplicas: 0 diff --git a/openshift/templates/superset/superset/superset-dc.yaml b/openshift/templates/superset/superset/superset-dc.yaml deleted file mode 100644 index 3dd95918..00000000 --- a/openshift/templates/superset/superset/superset-dc.yaml +++ /dev/null @@ -1,195 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: null - name: superset-dc -parameters: - - name: ENV_NAME - value: test - required: true - - name: CPU_REQUEST - value: 50M - displayName: Requested CPU - description: Requested CPU - required: true - - name: CPU_LIMIT - value: 200M - displayName: CPU upper limit - description: CPU upper limit - required: true - - name: MEMORY_REQUEST - value: 50M - displayName: Requested memory - description: Requested memory - required: true - - name: MEMORY_LIMIT - value: 500M - displayName: Memory upper limit - description: Memory upper limit - required: true - - name: REPLICAS - description: | - The number of replicas to use. - displayName: REPLICAS - value: "1" -objects: -# - apiVersion: v1 -# kind: ServiceAccount -# metadata: -# name: cthub-superset -# labels: -# app: cthub-superset -# service: cthub-superset -# - apiVersion: v1 -# kind: Service -# metadata: -# annotations: -# openshift.io/generated-by: OpenShiftWebConsole -# creationTimestamp: null -# labels: -# app: cthub-superset -# name: cthub-superset -# spec: -# ports: -# - name: superset-web -# port: 8080 -# protocol: TCP -# targetPort: 8080 -# selector: -# deploymentconfig: cthub-superset -# sessionAffinity: None -# type: ClusterIP -# status: -# loadBalancer: {} -# - apiVersion: route.openshift.io/v1 -# kind: Route -# metadata: -# annotations: -# openshift.io/host.generated: "true" -# creationTimestamp: null -# labels: -# app: cthub-superset -# name: cthub-superset -# spec: -# host: superset-${ENV_NAME}.apps.silver.devops.gov.bc.ca -# path: / -# port: -# targetPort: superset-web -# tls: -# insecureEdgeTerminationPolicy: Redirect -# termination: edge -# to: -# kind: Service -# name: cthub-superset -# weight: 100 -# wildcardPolicy: None -- kind: PersistentVolumeClaim - apiVersion: v1 - metadata: - name: superset-home - annotations: - volume.beta.kubernetes.io/storage-class: netapp-file-standard - template.openshift.io.bcgov/create: 'true' - spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 2Gi -- apiVersion: apps.openshift.io/v1 - kind: DeploymentConfig - metadata: - creationTimestamp: null - labels: - app: superset - name: superset - spec: - strategy: - type: Recreate - recreateParams: - timeoutSeconds: 600 - resources: {} - activeDeadlineSeconds: 21600 - triggers: - - type: ConfigChange - replicas: 1 - revisionHistoryLimit: 10 - test: false - selector: - app: superset - template: - metadata: - creationTimestamp: null - labels: - app: superset - spec: - volumes: - - name: superset-home - persistentVolumeClaim: - claimName: superset-home - containers: - - name: superset - image: >- - image-registry.openshift-image-registry.svc:5000/30b186-tools/superset:20211213 - ports: - - containerPort: 8088 - protocol: TCP - resources: {} - env: - - name: COMPOSE_PROJECT_NAME - value: superset - - name: DATABASE_DB - valueFrom: - secretKeyRef: 
- key: superset-db-name - name: patroni-creds-${ENV_NAME} - - name: DATABASE_HOST - value: patroni-master-test - - name: DATABASE_USER - valueFrom: - secretKeyRef: - key: superset-db-username - name: patroni-creds-${ENV_NAME} - - name: DATABASE_PASSWORD - valueFrom: - secretKeyRef: - key: superset-db-password - name: patroni-creds-${ENV_NAME} - - name: DATABASE_PORT - value: '5432' - - name: DATABASE_DIALECT - value: postgresql - - name: PYTHONPATH - value: '/app/pythonpath:/app/docker/pythonpath_dev' - - name: REDIS_HOST - value: localhost - - name: REDIS_PORT - value: '6379' - - name: FLASK_ENV - value: production - - name: SUPERSET_ENV - value: production - - name: SUPERSET_LOAD_EXAMPLES - value: 'no' - - name: CYPRESS_CONFIG - value: 'false' - - name: SUPERSET_PORT - value: '8088' - volumeMounts: - - name: superset-home - mountPath: /app/superset_home - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - imagePullPolicy: Always - restartPolicy: Always - terminationGracePeriodSeconds: 30 - dnsPolicy: ClusterFirst - securityContext: {} - schedulerName: default-scheduler - status: - availableReplicas: 0 - latestVersion: 0 - observedGeneration: 0 - replicas: 0 - unavailableReplicas: 0 - updatedReplicas: 0 diff --git a/openshift/templates/superset/the-default-values.yaml b/openshift/templates/superset/the-default-values.yaml new file mode 100644 index 00000000..67f685bf --- /dev/null +++ b/openshift/templates/superset/the-default-values.yaml @@ -0,0 +1,815 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Default values for superset. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# A README is automatically generated from this file to document it, using helm-docs (see https://github.com/norwoodj/helm-docs) +# To update it, install helm-docs and run helm-docs from the root of this chart + +# -- Provide a name to override the name of the chart +nameOverride: ~ +# -- Provide a name to override the full names of resources +fullnameOverride: ~ + +# -- User ID directive. This user must have enough permissions to run the bootstrap script +# Running containers as root is not recommended in production. Change this to another UID - e.g. 1000 to be more secure +runAsUser: 0 + +# -- Specify service account name to be used +serviceAccountName: ~ +serviceAccount: + # -- Create custom service account for Superset. If create: true and serviceAccountName is not provided, `superset.fullname` will be used. + create: false + annotations: {} + +# -- Install additional packages and do any other bootstrap configuration in this script +# For production clusters it's recommended to build own image with this step done in CI +# @default -- see `values.yaml` +bootstrapScript: | + #!/bin/bash + if [ ! 
-f ~/bootstrap ]; then echo "Running Superset with uid {{ .Values.runAsUser }}" > ~/bootstrap; fi + +# -- The name of the secret which we will use to generate a superset_config.py file +# Note: this secret must have the key superset_config.py in it and can include other files as well +configFromSecret: '{{ template "superset.fullname" . }}-config' + +# -- The name of the secret which we will use to populate env vars in deployed pods +# This can be useful for secret keys, etc. +envFromSecret: '{{ template "superset.fullname" . }}-env' +# -- This can be a list of templated strings +envFromSecrets: [] + +# -- Extra environment variables that will be passed into pods +extraEnv: {} + # Different gunicorn settings, refer to the gunicorn documentation + # https://docs.gunicorn.org/en/stable/settings.html# + # These variables are used as Flags at the gunicorn startup + # https://github.com/apache/superset/blob/master/docker/run-server.sh#L22 + # Extend timeout to allow long running queries. + # GUNICORN_TIMEOUT: 300 + # Increase the gunicorn worker amount, can improve performance drastically + # See: https://docs.gunicorn.org/en/stable/design.html#how-many-workers + # SERVER_WORKER_AMOUNT: 4 + # WORKER_MAX_REQUESTS: 0 + # WORKER_MAX_REQUESTS_JITTER: 0 + # SERVER_THREADS_AMOUNT: 20 + # GUNICORN_KEEPALIVE: 2 + # SERVER_LIMIT_REQUEST_LINE: 0 + # SERVER_LIMIT_REQUEST_FIELD_SIZE: 0 + + # OAUTH_HOME_DOMAIN: .. + # # If a whitelist is not set, any address that can use your OAuth2 endpoint will be able to login. + # # this includes any random Gmail address if your OAuth2 Web App is set to External. + # OAUTH_WHITELIST_REGEX: ... + +# -- Extra environment variables in RAW format that will be passed into pods +extraEnvRaw: [] + # Load DB password from other secret (e.g. for zalando operator) + # - name: DB_PASS + # valueFrom: + # secretKeyRef: + # name: superset.superset-postgres.credentials.postgresql.acid.zalan.do + # key: password + +# -- Extra environment variables to pass as secrets +extraSecretEnv: {} + # MAPBOX_API_KEY: ... + # # Google API Keys: https://console.cloud.google.com/apis/credentials + # GOOGLE_KEY: ... + # GOOGLE_SECRET: ... + +# -- Extra files to mount on `/app/pythonpath` +extraConfigs: {} + # import_datasources.yaml: | + # databases: + # - allow_file_upload: true + # allow_ctas: true + # allow_cvas: true + # database_name: example-db + # extra: "{\r\n \"metadata_params\": {},\r\n \"engine_params\": {},\r\n \"\ + # metadata_cache_timeout\": {},\r\n \"schemas_allowed_for_file_upload\": []\r\n\ + # }" + # sqlalchemy_uri: example://example-db.local + # tables: [] + +# -- Extra files to mount on `/app/pythonpath` as secrets +extraSecrets: {} + +extraVolumes: [] + # - name: customConfig + # configMap: + # name: '{{ template "superset.fullname" . }}-custom-config' + # - name: additionalSecret + # secret: + # secretName: my-secret + # defaultMode: 0600 + +extraVolumeMounts: [] + # - name: customConfig + # mountPath: /mnt/config + # readOnly: true + # - name: additionalSecret: + # mountPath: /mnt/secret + +# -- A dictionary of overrides to append at the end of superset_config.py - the name does not matter +# WARNING: the order is not guaranteed +# Files can be passed as helm --set-file configOverrides.my-override=my-file.py +configOverrides: {} + # extend_timeout: | + # # Extend timeout to allow long running queries. + # SUPERSET_WEBSERVER_TIMEOUT = ... 
+ # enable_oauth: | + # from flask_appbuilder.security.manager import (AUTH_DB, AUTH_OAUTH) + # AUTH_TYPE = AUTH_OAUTH + # OAUTH_PROVIDERS = [ + # { + # "name": "google", + # "whitelist": [ os.getenv("OAUTH_WHITELIST_REGEX", "") ], + # "icon": "fa-google", + # "token_key": "access_token", + # "remote_app": { + # "client_id": os.environ.get("GOOGLE_KEY"), + # "client_secret": os.environ.get("GOOGLE_SECRET"), + # "api_base_url": "https://www.googleapis.com/oauth2/v2/", + # "client_kwargs": {"scope": "email profile"}, + # "request_token_url": None, + # "access_token_url": "https://accounts.google.com/o/oauth2/token", + # "authorize_url": "https://accounts.google.com/o/oauth2/auth", + # "authorize_params": {"hd": os.getenv("OAUTH_HOME_DOMAIN", "")} + # } + # } + # ] + # # Map Authlib roles to superset roles + # AUTH_ROLE_ADMIN = 'Admin' + # AUTH_ROLE_PUBLIC = 'Public' + # # Will allow user self registration, allowing to create Flask users from Authorized User + # AUTH_USER_REGISTRATION = True + # # The default user self registration role + # AUTH_USER_REGISTRATION_ROLE = "Admin" + # secret: | + # # Generate your own secret key for encryption. Use openssl rand -base64 42 to generate a good key + # SECRET_KEY = 'YOUR_OWN_RANDOM_GENERATED_SECRET_KEY' + +# -- Same as above but the values are files +configOverridesFiles: {} + # extend_timeout: extend_timeout.py + # enable_oauth: enable_oauth.py + +configMountPath: "/app/pythonpath" + +extraConfigMountPath: "/app/configs" + +image: + repository: apachesuperset.docker.scarf.sh/apache/superset + tag: "" + pullPolicy: IfNotPresent + +imagePullSecrets: [] + +initImage: + repository: apache/superset + tag: dockerize + pullPolicy: IfNotPresent + +service: + type: ClusterIP + port: 8088 + annotations: {} + # cloud.google.com/load-balancer-type: "Internal" + loadBalancerIP: ~ + nodePort: + # -- (int) + http: nil + +ingress: + enabled: false + ingressClassName: ~ + annotations: {} + # kubernetes.io/tls-acme: "true" + ## Extend timeout to allow long running queries. + # nginx.ingress.kubernetes.io/proxy-connect-timeout: "300" + # nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + # nginx.ingress.kubernetes.io/proxy-send-timeout: "300" + path: / + pathType: ImplementationSpecific + hosts: + - chart-example.local + tls: [] + extraHostsRaw: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # The limits below will apply to all Superset components. To set individual resource limitations refer to the pod specific values below. + # The pod specific values will overwrite anything that is set here. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +# -- Custom hostAliases for all superset pods +## https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/ +hostAliases: [] +# - hostnames: +# - nodns.my.lan +# ip: 18.27.36.45 + +# Superset node configuration +supersetNode: + replicaCount: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + + # -- Startup command + # @default -- See `values.yaml` + command: + - "/bin/sh" + - "-c" + - ". {{ .Values.configMountPath }}/superset_bootstrap.sh; /usr/bin/run-server.sh" + connections: + # -- Change in case of bringing your own redis and then also set redis.enabled:false + redis_host: '{{ .Release.Name }}-redis-headless' + # redis_password: superset + redis_port: "6379" + # You need to change below configuration incase bringing own PostgresSQL instance and also set postgresql.enabled:false + db_host: '{{ .Release.Name }}-postgresql' + db_port: "5432" + db_user: superset + db_pass: superset + db_name: superset + env: {} + # -- If true, forces deployment to reload on each upgrade + forceReload: false + # -- Init containers + # @default -- a container waiting for postgres + initContainers: + - name: wait-for-postgres + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: "{{ tpl .Values.envFromSecret . }}" + command: + - /bin/sh + - -c + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -timeout 120s + + # -- Launch additional containers into supersetNode pod + extraContainers: [] + # -- Annotations to be added to supersetNode deployment + deploymentAnnotations: {} + # -- Labels to be added to supersetNode deployment + deploymentLabels: {} + # -- Affinity to be added to supersetNode deployment + affinity: {} + # -- TopologySpreadConstrains to be added to supersetNode deployments + topologySpreadConstraints: [] + # -- Annotations to be added to supersetNode pods + podAnnotations: {} + # -- Labels to be added to supersetNode pods + podLabels: {} + startupProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 15 + timeoutSeconds: 1 + failureThreshold: 60 + periodSeconds: 5 + successThreshold: 1 + livenessProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 15 + timeoutSeconds: 1 + failureThreshold: 3 + periodSeconds: 15 + successThreshold: 1 + readinessProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 15 + timeoutSeconds: 1 + failureThreshold: 3 + periodSeconds: 15 + successThreshold: 1 + # -- Resource settings for the supersetNode pods - these settings overwrite might existing values from the global resources object defined above. + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + podSecurityContext: {} + containerSecurityContext: {} + strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 25% + # maxUnavailable: 25% + +# Superset Celery worker configuration +supersetWorker: + replicaCount: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + + # -- Worker startup command + # @default -- a `celery worker` command + command: + - "/bin/sh" + - "-c" + - ". 
{{ .Values.configMountPath }}/superset_bootstrap.sh; celery --app=superset.tasks.celery_app:app worker" + # -- If true, forces deployment to reload on each upgrade + forceReload: false + # -- Init container + # @default -- a container waiting for postgres and redis + initContainers: + - name: wait-for-postgres-redis + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: "{{ tpl .Values.envFromSecret . }}" + command: + - /bin/sh + - -c + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -wait "tcp://$REDIS_HOST:$REDIS_PORT" -timeout 120s + # -- Launch additional containers into supersetWorker pod + extraContainers: [] + # -- Annotations to be added to supersetWorker deployment + deploymentAnnotations: {} + # -- Labels to be added to supersetWorker deployment + deploymentLabels: {} + # -- Affinity to be added to supersetWorker deployment + affinity: {} + # -- TopologySpreadConstrains to be added to supersetWorker deployments + topologySpreadConstraints: [] + # -- Annotations to be added to supersetWorker pods + podAnnotations: {} + # -- Labels to be added to supersetWorker pods + podLabels: {} + # -- Resource settings for the supersetWorker pods - these settings overwrite might existing values from the global resources object defined above. + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + podSecurityContext: {} + containerSecurityContext: {} + strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 25% + # maxUnavailable: 25% + livenessProbe: + exec: + # -- Liveness probe command + # @default -- a `celery inspect ping` command + command: + - sh + - -c + - celery -A superset.tasks.celery_app:app inspect ping -d celery@$HOSTNAME + initialDelaySeconds: 120 + timeoutSeconds: 60 + failureThreshold: 3 + periodSeconds: 60 + successThreshold: 1 + # -- No startup/readiness probes by default since we don't really care about its startup time (it doesn't serve traffic) + startupProbe: {} + # -- No startup/readiness probes by default since we don't really care about its startup time (it doesn't serve traffic) + readinessProbe: {} + +# Superset beat configuration (to trigger scheduled jobs like reports) +supersetCeleryBeat: + # -- This is only required if you intend to use alerts and reports + enabled: false + # -- Command + # @default -- a `celery beat` command + command: + - "/bin/sh" + - "-c" + - ". {{ .Values.configMountPath }}/superset_bootstrap.sh; celery --app=superset.tasks.celery_app:app beat --pidfile /tmp/celerybeat.pid --schedule /tmp/celerybeat-schedule" + # -- If true, forces deployment to reload on each upgrade + forceReload: false + # -- List of init containers + # @default -- a container waiting for postgres + initContainers: + - name: wait-for-postgres-redis + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: "{{ tpl .Values.envFromSecret . 
}}" + command: + - /bin/sh + - -c + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -wait "tcp://$REDIS_HOST:$REDIS_PORT" -timeout 120s + # -- Annotations to be added to supersetCeleryBeat deployment + deploymentAnnotations: {} + # -- Affinity to be added to supersetCeleryBeat deployment + affinity: {} + # -- TopologySpreadConstrains to be added to supersetCeleryBeat deployments + topologySpreadConstraints: [] + # -- Annotations to be added to supersetCeleryBeat pods + podAnnotations: {} + # -- Labels to be added to supersetCeleryBeat pods + podLabels: {} + # -- Resource settings for the CeleryBeat pods - these settings overwrite might existing values from the global resources object defined above. + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + podSecurityContext: {} + containerSecurityContext: {} + +supersetCeleryFlower: + # -- Enables a Celery flower deployment (management UI to monitor celery jobs) + # WARNING: on superset 1.x, this requires a Superset image that has `flower<1.0.0` installed (which is NOT the case of the default images) + # flower>=1.0.0 requires Celery 5+ which Superset 1.5 does not support + enabled: false + replicaCount: 1 + # -- Command + # @default -- a `celery flower` command + command: + - "/bin/sh" + - "-c" + - "celery --app=superset.tasks.celery_app:app flower" + service: + type: ClusterIP + annotations: {} + loadBalancerIP: ~ + port: 5555 + nodePort: + # -- (int) + http: nil + startupProbe: + httpGet: + path: /api/workers + port: flower + initialDelaySeconds: 5 + timeoutSeconds: 1 + failureThreshold: 60 + periodSeconds: 5 + successThreshold: 1 + livenessProbe: + httpGet: + path: /api/workers + port: flower + initialDelaySeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + periodSeconds: 5 + successThreshold: 1 + readinessProbe: + httpGet: + path: /api/workers + port: flower + initialDelaySeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + periodSeconds: 5 + successThreshold: 1 + # -- List of init containers + # @default -- a container waiting for postgres and redis + initContainers: + - name: wait-for-postgres-redis + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: "{{ tpl .Values.envFromSecret . }}" + command: + - /bin/sh + - -c + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -wait "tcp://$REDIS_HOST:$REDIS_PORT" -timeout 120s + # -- Annotations to be added to supersetCeleryFlower deployment + deploymentAnnotations: {} + # -- Affinity to be added to supersetCeleryFlower deployment + affinity: {} + # -- TopologySpreadConstrains to be added to supersetCeleryFlower deployments + topologySpreadConstraints: [] + # -- Annotations to be added to supersetCeleryFlower pods + podAnnotations: {} + # -- Labels to be added to supersetCeleryFlower pods + podLabels: {} + # -- Resource settings for the CeleryBeat pods - these settings overwrite might existing values from the global resources object defined above. 
+ resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + podSecurityContext: {} + containerSecurityContext: {} + +supersetWebsockets: + # -- This is only required if you intend to use `GLOBAL_ASYNC_QUERIES` in `ws` mode + # see https://github.com/apache/superset/blob/master/CONTRIBUTING.md#async-chart-queries + enabled: false + replicaCount: 1 + ingress: + path: /ws + pathType: Prefix + image: + # -- There is no official image (yet), this one is community-supported + repository: oneacrefund/superset-websocket + tag: latest + pullPolicy: IfNotPresent + # -- The config.json to pass to the server, see https://github.com/apache/superset/tree/master/superset-websocket + # Note that the configuration can also read from environment variables (which will have priority), see https://github.com/apache/superset/blob/master/superset-websocket/src/config.ts for a list of supported variables + # @default -- see `values.yaml` + config: + { + "port": 8080, + "logLevel": "debug", + "logToFile": false, + "logFilename": "app.log", + "statsd": { "host": "127.0.0.1", "port": 8125, "globalTags": [] }, + "redis": + { + "port": 6379, + "host": "127.0.0.1", + "password": "", + "db": 0, + "ssl": false, + }, + "redisStreamPrefix": "async-events-", + "jwtSecret": "CHANGE-ME", + "jwtCookieName": "async-token", + } + service: + type: ClusterIP + annotations: {} + loadBalancerIP: ~ + port: 8080 + nodePort: + # -- (int) + http: nil + command: [] + resources: {} + deploymentAnnotations: {} + # -- Affinity to be added to supersetWebsockets deployment + affinity: {} + # -- TopologySpreadConstrains to be added to supersetWebsockets deployments + topologySpreadConstraints: [] + podAnnotations: {} + podLabels: {} + strategy: {} + podSecurityContext: {} + containerSecurityContext: {} + startupProbe: + httpGet: + path: /health + port: ws + initialDelaySeconds: 5 + timeoutSeconds: 1 + failureThreshold: 60 + periodSeconds: 5 + successThreshold: 1 + livenessProbe: + httpGet: + path: /health + port: ws + initialDelaySeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + periodSeconds: 5 + successThreshold: 1 + readinessProbe: + httpGet: + path: /health + port: ws + initialDelaySeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + periodSeconds: 5 + successThreshold: 1 + +init: + # Configure resources + # Warning: fab command consumes a lot of ram and can + # cause the process to be killed due to OOM if it exceeds limit + # Make sure you are giving a strong password for the admin user creation( else make sure you are changing after setup) + # Also change the admin email to your own custom email. + resources: {} + # limits: + # cpu: + # memory: + # requests: + # cpu: + # memory: + # -- Command + # @default -- a `superset_init.sh` command + command: + - "/bin/sh" + - "-c" + - ". {{ .Values.configMountPath }}/superset_bootstrap.sh; . {{ .Values.configMountPath }}/superset_init.sh" + enabled: true + jobAnnotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": "before-hook-creation" + loadExamples: false + createAdmin: true + adminUser: + username: admin + firstname: Superset + lastname: Admin + email: admin@superset.com + password: admin + # -- List of initContainers + # @default -- a container waiting for postgres + initContainers: + - name: wait-for-postgres + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: "{{ .Values.initImage.pullPolicy }}" + envFrom: + - secretRef: + name: "{{ tpl .Values.envFromSecret . 
}}" + command: + - /bin/sh + - -c + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -timeout 120s + # -- A Superset init script + # @default -- a script to create admin user and initialize roles + initscript: |- + #!/bin/sh + set -eu + echo "Upgrading DB schema..." + superset db upgrade + echo "Initializing roles..." + superset init + {{ if .Values.init.createAdmin }} + echo "Creating admin user..." + superset fab create-admin \ + --username {{ .Values.init.adminUser.username }} \ + --firstname {{ .Values.init.adminUser.firstname }} \ + --lastname {{ .Values.init.adminUser.lastname }} \ + --email {{ .Values.init.adminUser.email }} \ + --password {{ .Values.init.adminUser.password }} \ + || true + {{- end }} + {{ if .Values.init.loadExamples }} + echo "Loading examples..." + superset load_examples + {{- end }} + if [ -f "{{ .Values.extraConfigMountPath }}/import_datasources.yaml" ]; then + echo "Importing database connections.... " + superset import_datasources -p {{ .Values.extraConfigMountPath }}/import_datasources.yaml + fi + # -- Launch additional containers into init job pod + extraContainers: [] + ## Annotations to be added to init job pods + podAnnotations: {} + podSecurityContext: {} + containerSecurityContext: {} + ## Tolerations to be added to init job pods + tolerations: [] + ## Affinity to be added to init job pods + affinity: {} + # -- TopologySpreadConstrains to be added to init job + topologySpreadConstraints: [] + +# -- Configuration values for the postgresql dependency. +# ref: https://github.com/bitnami/charts/tree/main/bitnami/postgresql +# @default -- see `values.yaml` +postgresql: + ## + ## Use the PostgreSQL chart dependency. + ## Set to false if bringing your own PostgreSQL. + enabled: true + + ## Authentication parameters + auth: + ## The name of an existing secret that contains the postgres password. + existingSecret: + ## PostgreSQL name for a custom user to create + username: superset + ## PostgreSQL password for the custom user to create. Ignored if `auth.existingSecret` with key `password` is provided + password: superset + ## PostgreSQL name for a custom database to create + database: superset + + image: + tag: "14.6.0-debian-11-r13" + + ## PostgreSQL Primary parameters + primary: + ## + ## Persistent Volume Storage configuration. + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes + persistence: + ## + ## Enable PostgreSQL persistence using Persistent Volume Claims. + enabled: true + ## + ## Persistent class + # storageClass: classname + ## + ## Access modes: + accessModes: + - ReadWriteOnce + ## PostgreSQL port + service: + ports: + postgresql: "5432" + +# -- Configuration values for the Redis dependency. +# ref: https://github.com/bitnami/charts/blob/master/bitnami/redis +# More documentation can be found here: https://artifacthub.io/packages/helm/bitnami/redis +# @default -- see `values.yaml` +redis: + ## + ## Use the redis chart dependency. + ## + ## If you are bringing your own redis, you can set the host in supersetNode.connections.redis_host + ## + ## Set to false if bringing your own redis. + enabled: true + ## + ## Set architecture to standalone/replication + architecture: standalone + ## + ## Auth configuration: + ## + auth: + ## Enable password authentication + enabled: false + ## The name of an existing secret that contains the redis password. + existingSecret: "" + ## Name of the key containing the secret. 
+    existingSecretKey: ""
+    ## Redis password
+    password: superset
+  ##
+  ## Master configuration
+  ##
+  master:
+    ##
+    ## Image configuration
+    # image:
+    ##
+    ## docker registry secret names (list)
+    # pullSecrets: nil
+    ##
+    ## Configure persistence
+    persistence:
+      ##
+      ## Use a PVC to persist data.
+      enabled: false
+      ##
+      ## Persistent class
+      # storageClass: classname
+      ##
+      ## Access mode:
+      accessModes:
+        - ReadWriteOnce
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+# -- TopologySpreadConstraints to be added to all deployments
+topologySpreadConstraints: []

From e3f4e0d931bb139b96f5310881873840de6573fb Mon Sep 17 00:00:00 2001
From: tim738745 <98717409+tim738745@users.noreply.github.com>
Date: Wed, 27 Mar 2024 09:54:22 -0700
Subject: [PATCH 096/152] chore: 164 - metabase models on git (#256)

---
 README.md                                  |   10 +
 django/api/settings.py                     |   11 +
 django/metabase/apps.py                    |    5 +
 django/metabase/db_router.py               |   31 +
 django/metabase/migrations/0001_initial.py | 1136 ++++++++++++++++++++
 django/metabase/migrations/__init__.py     |    0
 django/metabase/models.py                  | 1130 +++++++++++++++++++
 docker-compose.yml                         |    5 +
 metabase.env                               |    6 +
 9 files changed, 2334 insertions(+)
 create mode 100644 django/metabase/apps.py
 create mode 100644 django/metabase/db_router.py
 create mode 100644 django/metabase/migrations/0001_initial.py
 create mode 100644 django/metabase/migrations/__init__.py
 create mode 100644 django/metabase/models.py
 create mode 100644 metabase.env

diff --git a/README.md b/README.md
index 948af573..666927c4 100644
--- a/README.md
+++ b/README.md
@@ -42,6 +42,16 @@ The Clean Transportation Data Hub provides an evidence base for the Clean Transp
 - The rebased commits will have different hashes than the old ones, so if you previously pushed `your_branch` to a remote you will have to `git push --force` in order not to end up with additional commits in your remote branch.
 - On Github, you can modify the base branch of a PR if you're rebasing from a branch based on a previous release branch to the latest release branch.
 
+# Metabase
+- Locally, create a database to store metabase's internals, and use/modify `metabase.env`, django's `settings.DATABASES` and `settings.DATABASE_ROUTERS` to point to said database.
+- You can create django data migrations to insert your custom queries into the metabase application database.
+- To create a data migration within the metabase django app:
+- ```python manage.py makemigrations --empty metabase```
+- Then, using `RunPython` and django's `QuerySet` API, you may read/insert/update/delete data from metabase's application database (a sketch of such a migration follows this README hunk).
+- For custom queries, the internal metabase table of interest would probably be `report_card` (the associated model is `ReportCard`).
+- To make your `RunPython` "script" cleaner, consider putting the actual queries themselves in separate sql files and reading from those in `RunPython`.
+- To uncouple metabase from django, simply remove metabase from `settings.INSTALLED_APPS`.
+
 # License
 The code is a fork from Richard's personal project. Please do not clone, copy or replicate this project unless you're authorized to do so.
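The README hunk above describes creating an empty migration in the metabase app and then using `RunPython` with django's `QuerySet` API against metabase's internal tables. A minimal sketch of such a data migration follows, assuming a hypothetical file name `django/metabase/migrations/0002_archive_example_card.py`, an illustrative card name, and `name`/`archived` fields on `ReportCard` (Metabase's `report_card` table has both columns):

```python
# django/metabase/migrations/0002_archive_example_card.py
# Hypothetical example; the file name, card name, and fields are illustrative.
from django.db import migrations


def archive_card(apps, schema_editor):
    # schema_editor.connection.alias is the database this migration is being
    # applied to; with --database=metabase that is the "metabase" alias.
    db_alias = schema_editor.connection.alias
    # Use the historical model registered by 0001_initial.
    ReportCard = apps.get_model("metabase", "ReportCard")
    # Assumed fields: report_card carries "name" and "archived" columns.
    ReportCard.objects.using(db_alias).filter(name="Example card").update(archived=True)


def unarchive_card(apps, schema_editor):
    # Reverse operation so the migration stays reversible.
    db_alias = schema_editor.connection.alias
    ReportCard = apps.get_model("metabase", "ReportCard")
    ReportCard.objects.using(db_alias).filter(name="Example card").update(archived=False)


class Migration(migrations.Migration):

    dependencies = [
        ("metabase", "0001_initial"),
    ]

    operations = [
        # RunPython forwards these hints to the database router, and
        # MetabaseRouter.allow_migrate only returns True for metabase
        # operations that carry is_metabase_data_migration.
        migrations.RunPython(
            archive_card,
            unarchive_card,
            hints={"is_metabase_data_migration": True},
        ),
    ]
```

Applying this with `python manage.py migrate metabase --database=metabase` runs the update against metabase's application database while `allow_migrate` keeps it off the default CTHUB database; the `.sql`-file pattern the README suggests would simply have `archive_card` read the query text from a file shipped alongside the migration.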
diff --git a/django/api/settings.py b/django/api/settings.py index 17f84e17..31dfbe1e 100644 --- a/django/api/settings.py +++ b/django/api/settings.py @@ -43,6 +43,7 @@ INSTALLED_APPS = [ 'api.apps.ApiConfig', 'tfrs.apps.ApiConfig', + 'metabase.apps.MetabaseConfig', 'corsheaders', 'django_filters', 'django.contrib.admin', @@ -96,8 +97,18 @@ 'HOST': os.getenv('DB_HOST', 'db'), 'PORT': os.getenv('DB_PORT', '5432'), }, + 'metabase': { + 'ENGINE': os.getenv('METABASE_DB_ENGINE', 'django.db.backends.postgresql'), + 'NAME': os.getenv('METABASE_DB_NAME', 'metabase'), + 'USER': os.getenv('METABASE_DB_USER', 'postgres'), + 'PASSWORD': os.getenv('METABASE_DB_PASSWORD', 'postgres'), + 'HOST': os.getenv('METABASE_DB_HOST', 'db'), + 'PORT': os.getenv('METABASE_DB_PORT', '5432'), + }, } +DATABASE_ROUTERS = ['metabase.db_router.MetabaseRouter',] + # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators diff --git a/django/metabase/apps.py b/django/metabase/apps.py new file mode 100644 index 00000000..947144ef --- /dev/null +++ b/django/metabase/apps.py @@ -0,0 +1,5 @@ +from django.apps import AppConfig + + +class MetabaseConfig(AppConfig): + name = 'metabase' \ No newline at end of file diff --git a/django/metabase/db_router.py b/django/metabase/db_router.py new file mode 100644 index 00000000..702f540b --- /dev/null +++ b/django/metabase/db_router.py @@ -0,0 +1,31 @@ +class MetabaseRouter: + + app_label = "metabase" + metabase_db = "metabase" + + def db_for_read(self, model, **hints): + """ + if reading a metabase model, read from the metabase db + """ + if model._meta.app_label == self.app_label: + return self.metabase_db + return None + + def db_for_write(self, model, **hints): + """ + if writing a metabase model instance, write to the metabase db + """ + if model._meta.app_label == self.app_label: + return self.metabase_db + return None + + def allow_migrate(self, db, app_label, model_name=None, **hints): + """ + if metabase migration, execute on metabase db + """ + is_metabase_data_migration = hints.get("is_metabase_data_migration", False) + if app_label == self.app_label: + if is_metabase_data_migration: + return True + return False + return None \ No newline at end of file diff --git a/django/metabase/migrations/0001_initial.py b/django/metabase/migrations/0001_initial.py new file mode 100644 index 00000000..581947b5 --- /dev/null +++ b/django/metabase/migrations/0001_initial.py @@ -0,0 +1,1136 @@ +# Generated by Django 3.1.6 on 2024-03-19 16:46 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ] + + # unmanaged models will not have any SQL generated for them when it comes to table creation, modification, or deletion + # all metabase models should be unmanaged + operations = [ + migrations.CreateModel( + name='Action', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('type', models.TextField()), + ('name', models.CharField(max_length=254)), + ('description', models.TextField(blank=True, null=True)), + ('parameters', models.TextField(blank=True, null=True)), + ('parameter_mappings', models.TextField(blank=True, null=True)), + ('visualization_settings', models.TextField(blank=True, null=True)), + ('public_uuid', models.CharField(blank=True, max_length=36, null=True, unique=True)), + ('archived', models.BooleanField()), + 
('entity_id', models.CharField(blank=True, max_length=21, null=True, unique=True)), + ], + options={ + 'db_table': 'action', + 'managed': False, + }, + ), + migrations.CreateModel( + name='Activity', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('topic', models.CharField(max_length=32)), + ('timestamp', models.DateTimeField()), + ('model', models.CharField(blank=True, max_length=16, null=True)), + ('model_id', models.IntegerField(blank=True, null=True)), + ('database_id', models.IntegerField(blank=True, null=True)), + ('table_id', models.IntegerField(blank=True, null=True)), + ('custom_id', models.CharField(blank=True, max_length=48, null=True)), + ('details', models.TextField()), + ], + options={ + 'db_table': 'activity', + 'managed': False, + }, + ), + migrations.CreateModel( + name='ApplicationPermissionsRevision', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('before', models.TextField()), + ('after', models.TextField()), + ('created_at', models.DateTimeField()), + ('remark', models.TextField(blank=True, null=True)), + ], + options={ + 'db_table': 'application_permissions_revision', + 'managed': False, + }, + ), + migrations.CreateModel( + name='BookmarkOrdering', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('type', models.CharField(max_length=255)), + ('item_id', models.IntegerField()), + ('ordering', models.IntegerField()), + ], + options={ + 'db_table': 'bookmark_ordering', + 'managed': False, + }, + ), + migrations.CreateModel( + name='CardBookmark', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('created_at', models.DateTimeField()), + ], + options={ + 'db_table': 'card_bookmark', + 'managed': False, + }, + ), + migrations.CreateModel( + name='CardLabel', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ], + options={ + 'db_table': 'card_label', + 'managed': False, + }, + ), + migrations.CreateModel( + name='Collection', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('name', models.TextField()), + ('description', models.TextField(blank=True, null=True)), + ('color', models.CharField(max_length=7)), + ('archived', models.BooleanField()), + ('location', models.CharField(max_length=254)), + ('slug', models.CharField(max_length=254)), + ('namespace', models.CharField(blank=True, max_length=254, null=True)), + ('authority_level', models.CharField(blank=True, max_length=255, null=True)), + ('entity_id', models.CharField(blank=True, max_length=21, null=True, unique=True)), + ('created_at', models.DateTimeField()), + ], + options={ + 'db_table': 'collection', + 'managed': False, + }, + ), + migrations.CreateModel( + name='CollectionBookmark', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('created_at', models.DateTimeField()), + ], + options={ + 'db_table': 'collection_bookmark', + 'managed': False, + }, + ), + migrations.CreateModel( + name='CollectionPermissionGraphRevision', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('before', models.TextField()), + ('after', models.TextField()), + ('created_at', models.DateTimeField()), + ('remark', models.TextField(blank=True, null=True)), + ], + options={ + 'db_table': 'collection_permission_graph_revision', + 'managed': False, + }, + ), + migrations.CreateModel( + name='ComputationJob', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('created_at', 
models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('type', models.CharField(max_length=254)), + ('status', models.CharField(max_length=254)), + ('context', models.TextField(blank=True, null=True)), + ('ended_at', models.DateTimeField(blank=True, null=True)), + ], + options={ + 'db_table': 'computation_job', + 'managed': False, + }, + ), + migrations.CreateModel( + name='ComputationJobResult', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('permanence', models.CharField(max_length=254)), + ('payload', models.TextField()), + ], + options={ + 'db_table': 'computation_job_result', + 'managed': False, + }, + ), + migrations.CreateModel( + name='CoreSession', + fields=[ + ('id', models.CharField(max_length=254, primary_key=True, serialize=False)), + ('created_at', models.DateTimeField()), + ('anti_csrf_token', models.TextField(blank=True, null=True)), + ], + options={ + 'db_table': 'core_session', + 'managed': False, + }, + ), + migrations.CreateModel( + name='CoreUser', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('email', models.TextField(unique=True)), + ('first_name', models.CharField(blank=True, max_length=254, null=True)), + ('last_name', models.CharField(blank=True, max_length=254, null=True)), + ('password', models.CharField(blank=True, max_length=254, null=True)), + ('password_salt', models.CharField(blank=True, max_length=254, null=True)), + ('date_joined', models.DateTimeField()), + ('last_login', models.DateTimeField(blank=True, null=True)), + ('is_superuser', models.BooleanField()), + ('is_active', models.BooleanField()), + ('reset_token', models.CharField(blank=True, max_length=254, null=True)), + ('reset_triggered', models.BigIntegerField(blank=True, null=True)), + ('is_qbnewb', models.BooleanField()), + ('google_auth', models.BooleanField()), + ('ldap_auth', models.BooleanField()), + ('login_attributes', models.TextField(blank=True, null=True)), + ('updated_at', models.DateTimeField(blank=True, null=True)), + ('sso_source', models.CharField(blank=True, max_length=254, null=True)), + ('locale', models.CharField(blank=True, max_length=5, null=True)), + ('is_datasetnewb', models.BooleanField()), + ('settings', models.TextField(blank=True, null=True)), + ], + options={ + 'db_table': 'core_user', + 'managed': False, + }, + ), + migrations.CreateModel( + name='DashboardBookmark', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('created_at', models.DateTimeField()), + ], + options={ + 'db_table': 'dashboard_bookmark', + 'managed': False, + }, + ), + migrations.CreateModel( + name='DashboardcardSeries', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('position', models.IntegerField()), + ], + options={ + 'db_table': 'dashboardcard_series', + 'managed': False, + }, + ), + migrations.CreateModel( + name='DashboardFavorite', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ], + options={ + 'db_table': 'dashboard_favorite', + 'managed': False, + }, + ), + migrations.CreateModel( + name='DataMigrations', + fields=[ + ('id', models.CharField(max_length=254, primary_key=True, serialize=False)), + ('timestamp', models.DateTimeField()), + ], + options={ + 'db_table': 'data_migrations', + 'managed': False, + }, + ), + migrations.CreateModel( + name='Dependency', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), 
+ ('model', models.CharField(max_length=32)), + ('model_id', models.IntegerField()), + ('dependent_on_model', models.CharField(max_length=32)), + ('dependent_on_id', models.IntegerField()), + ('created_at', models.DateTimeField()), + ], + options={ + 'db_table': 'dependency', + 'managed': False, + }, + ), + migrations.CreateModel( + name='Dimension', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('name', models.CharField(max_length=254)), + ('type', models.CharField(max_length=254)), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('entity_id', models.CharField(blank=True, max_length=21, null=True, unique=True)), + ], + options={ + 'db_table': 'dimension', + 'managed': False, + }, + ), + migrations.CreateModel( + name='ImplicitAction', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('kind', models.TextField()), + ], + options={ + 'db_table': 'implicit_action', + 'managed': False, + }, + ), + migrations.CreateModel( + name='Label', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('name', models.CharField(max_length=254)), + ('slug', models.CharField(max_length=254, unique=True)), + ('icon', models.CharField(blank=True, max_length=128, null=True)), + ], + options={ + 'db_table': 'label', + 'managed': False, + }, + ), + migrations.CreateModel( + name='LoginHistory', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('timestamp', models.DateTimeField()), + ('device_id', models.CharField(max_length=36)), + ('device_description', models.TextField()), + ('ip_address', models.TextField()), + ], + options={ + 'db_table': 'login_history', + 'managed': False, + }, + ), + migrations.CreateModel( + name='MetabaseDatabase', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('name', models.CharField(max_length=254)), + ('description', models.TextField(blank=True, null=True)), + ('details', models.TextField()), + ('engine', models.CharField(max_length=254)), + ('is_sample', models.BooleanField()), + ('is_full_sync', models.BooleanField()), + ('points_of_interest', models.TextField(blank=True, null=True)), + ('caveats', models.TextField(blank=True, null=True)), + ('metadata_sync_schedule', models.CharField(max_length=254)), + ('cache_field_values_schedule', models.CharField(max_length=254)), + ('timezone', models.CharField(blank=True, max_length=254, null=True)), + ('is_on_demand', models.BooleanField()), + ('options', models.TextField(blank=True, null=True)), + ('auto_run_queries', models.BooleanField()), + ('refingerprint', models.BooleanField(blank=True, null=True)), + ('cache_ttl', models.IntegerField(blank=True, null=True)), + ('initial_sync_status', models.CharField(max_length=32)), + ('settings', models.TextField(blank=True, null=True)), + ('dbms_version', models.TextField(blank=True, null=True)), + ], + options={ + 'db_table': 'metabase_database', + 'managed': False, + }, + ), + migrations.CreateModel( + name='MetabaseField', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('name', models.CharField(max_length=254)), + ('base_type', models.CharField(max_length=255)), + ('semantic_type', models.CharField(blank=True, max_length=255, null=True)), + ('active', models.BooleanField()), + 
('description', models.TextField(blank=True, null=True)), + ('preview_display', models.BooleanField()), + ('position', models.IntegerField()), + ('display_name', models.CharField(blank=True, max_length=254, null=True)), + ('visibility_type', models.CharField(max_length=32)), + ('fk_target_field_id', models.IntegerField(blank=True, null=True)), + ('last_analyzed', models.DateTimeField(blank=True, null=True)), + ('points_of_interest', models.TextField(blank=True, null=True)), + ('caveats', models.TextField(blank=True, null=True)), + ('fingerprint', models.TextField(blank=True, null=True)), + ('fingerprint_version', models.IntegerField()), + ('database_type', models.TextField()), + ('has_field_values', models.TextField(blank=True, null=True)), + ('settings', models.TextField(blank=True, null=True)), + ('database_position', models.IntegerField()), + ('custom_position', models.IntegerField()), + ('effective_type', models.CharField(blank=True, max_length=255, null=True)), + ('coercion_strategy', models.CharField(blank=True, max_length=255, null=True)), + ('nfc_path', models.CharField(blank=True, max_length=254, null=True)), + ('database_required', models.BooleanField()), + ('database_is_auto_increment', models.BooleanField()), + ], + options={ + 'db_table': 'metabase_field', + 'managed': False, + }, + ), + migrations.CreateModel( + name='MetabaseFieldvalues', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('values', models.TextField(blank=True, null=True)), + ('human_readable_values', models.TextField(blank=True, null=True)), + ('has_more_values', models.BooleanField(blank=True, null=True)), + ('type', models.CharField(max_length=32)), + ('hash_key', models.TextField(blank=True, null=True)), + ('last_used_at', models.DateTimeField()), + ], + options={ + 'db_table': 'metabase_fieldvalues', + 'managed': False, + }, + ), + migrations.CreateModel( + name='MetabaseTable', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('name', models.CharField(max_length=254)), + ('description', models.TextField(blank=True, null=True)), + ('entity_type', models.CharField(blank=True, max_length=254, null=True)), + ('active', models.BooleanField()), + ('display_name', models.CharField(blank=True, max_length=254, null=True)), + ('visibility_type', models.CharField(blank=True, max_length=254, null=True)), + ('schema', models.CharField(blank=True, max_length=254, null=True)), + ('points_of_interest', models.TextField(blank=True, null=True)), + ('caveats', models.TextField(blank=True, null=True)), + ('show_in_getting_started', models.BooleanField()), + ('field_order', models.CharField(max_length=254)), + ('initial_sync_status', models.CharField(max_length=32)), + ], + options={ + 'db_table': 'metabase_table', + 'managed': False, + }, + ), + migrations.CreateModel( + name='Metric', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('name', models.CharField(max_length=254)), + ('description', models.TextField(blank=True, null=True)), + ('archived', models.BooleanField()), + ('definition', models.TextField()), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('points_of_interest', models.TextField(blank=True, null=True)), + ('caveats', models.TextField(blank=True, null=True)), + ('how_is_this_calculated', models.TextField(blank=True, 
null=True)), + ('show_in_getting_started', models.BooleanField()), + ('entity_id', models.CharField(blank=True, max_length=21, null=True, unique=True)), + ], + options={ + 'db_table': 'metric', + 'managed': False, + }, + ), + migrations.CreateModel( + name='MetricImportantField', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ], + options={ + 'db_table': 'metric_important_field', + 'managed': False, + }, + ), + migrations.CreateModel( + name='ModerationReview', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('updated_at', models.DateTimeField()), + ('created_at', models.DateTimeField()), + ('status', models.CharField(blank=True, max_length=255, null=True)), + ('text', models.TextField(blank=True, null=True)), + ('moderated_item_id', models.IntegerField()), + ('moderated_item_type', models.CharField(max_length=255)), + ('moderator_id', models.IntegerField()), + ('most_recent', models.BooleanField()), + ], + options={ + 'db_table': 'moderation_review', + 'managed': False, + }, + ), + migrations.CreateModel( + name='NativeQuerySnippet', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('name', models.CharField(max_length=254, unique=True)), + ('description', models.TextField(blank=True, null=True)), + ('content', models.TextField()), + ('archived', models.BooleanField()), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('entity_id', models.CharField(blank=True, max_length=21, null=True, unique=True)), + ], + options={ + 'db_table': 'native_query_snippet', + 'managed': False, + }, + ), + migrations.CreateModel( + name='ParameterCard', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('updated_at', models.DateTimeField()), + ('created_at', models.DateTimeField()), + ('parameterized_object_type', models.CharField(max_length=32)), + ('parameterized_object_id', models.IntegerField()), + ('parameter_id', models.CharField(max_length=36)), + ('entity_id', models.CharField(blank=True, max_length=21, null=True, unique=True)), + ], + options={ + 'db_table': 'parameter_card', + 'managed': False, + }, + ), + migrations.CreateModel( + name='Permissions', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('object', models.CharField(max_length=254)), + ], + options={ + 'db_table': 'permissions', + 'managed': False, + }, + ), + migrations.CreateModel( + name='PermissionsGroup', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('name', models.CharField(max_length=255, unique=True)), + ], + options={ + 'db_table': 'permissions_group', + 'managed': False, + }, + ), + migrations.CreateModel( + name='PermissionsGroupMembership', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('is_group_manager', models.BooleanField()), + ], + options={ + 'db_table': 'permissions_group_membership', + 'managed': False, + }, + ), + migrations.CreateModel( + name='PermissionsRevision', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('before', models.TextField()), + ('after', models.TextField()), + ('created_at', models.DateTimeField()), + ('remark', models.TextField(blank=True, null=True)), + ], + options={ + 'db_table': 'permissions_revision', + 'managed': False, + }, + ), + migrations.CreateModel( + name='PersistedInfo', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('question_slug', models.TextField()), + ('table_name', 
models.TextField()), + ('definition', models.TextField(blank=True, null=True)), + ('query_hash', models.TextField(blank=True, null=True)), + ('active', models.BooleanField()), + ('state', models.TextField()), + ('refresh_begin', models.DateTimeField()), + ('refresh_end', models.DateTimeField(blank=True, null=True)), + ('state_change_at', models.DateTimeField(blank=True, null=True)), + ('error', models.TextField(blank=True, null=True)), + ('created_at', models.DateTimeField()), + ], + options={ + 'db_table': 'persisted_info', + 'managed': False, + }, + ), + migrations.CreateModel( + name='Pulse', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('name', models.CharField(blank=True, max_length=254, null=True)), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('skip_if_empty', models.BooleanField()), + ('alert_condition', models.CharField(blank=True, max_length=254, null=True)), + ('alert_first_only', models.BooleanField(blank=True, null=True)), + ('alert_above_goal', models.BooleanField(blank=True, null=True)), + ('collection_position', models.SmallIntegerField(blank=True, null=True)), + ('archived', models.BooleanField(blank=True, null=True)), + ('parameters', models.TextField()), + ('entity_id', models.CharField(blank=True, max_length=21, null=True, unique=True)), + ], + options={ + 'db_table': 'pulse', + 'managed': False, + }, + ), + migrations.CreateModel( + name='PulseCard', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('position', models.IntegerField()), + ('include_csv', models.BooleanField()), + ('include_xls', models.BooleanField()), + ('entity_id', models.CharField(blank=True, max_length=21, null=True, unique=True)), + ], + options={ + 'db_table': 'pulse_card', + 'managed': False, + }, + ), + migrations.CreateModel( + name='PulseChannel', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('channel_type', models.CharField(max_length=32)), + ('details', models.TextField()), + ('schedule_type', models.CharField(max_length=32)), + ('schedule_hour', models.IntegerField(blank=True, null=True)), + ('schedule_day', models.CharField(blank=True, max_length=64, null=True)), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('schedule_frame', models.CharField(blank=True, max_length=32, null=True)), + ('enabled', models.BooleanField()), + ('entity_id', models.CharField(blank=True, max_length=21, null=True, unique=True)), + ], + options={ + 'db_table': 'pulse_channel', + 'managed': False, + }, + ), + migrations.CreateModel( + name='PulseChannelRecipient', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ], + options={ + 'db_table': 'pulse_channel_recipient', + 'managed': False, + }, + ), + migrations.CreateModel( + name='QrtzCalendars', + fields=[ + ('sched_name', models.CharField(max_length=120, primary_key=True, serialize=False)), + ('calendar_name', models.CharField(max_length=200)), + ('calendar', models.BinaryField()), + ], + options={ + 'db_table': 'qrtz_calendars', + 'managed': False, + }, + ), + migrations.CreateModel( + name='QrtzFiredTriggers', + fields=[ + ('sched_name', models.CharField(max_length=120, primary_key=True, serialize=False)), + ('entry_id', models.CharField(max_length=95)), + ('trigger_name', models.CharField(max_length=200)), + ('trigger_group', models.CharField(max_length=200)), + ('instance_name', models.CharField(max_length=200)), + ('fired_time', models.BigIntegerField()), 
+ ('sched_time', models.BigIntegerField(blank=True, null=True)), + ('priority', models.IntegerField()), + ('state', models.CharField(max_length=16)), + ('job_name', models.CharField(blank=True, max_length=200, null=True)), + ('job_group', models.CharField(blank=True, max_length=200, null=True)), + ('is_nonconcurrent', models.BooleanField(blank=True, null=True)), + ('requests_recovery', models.BooleanField(blank=True, null=True)), + ], + options={ + 'db_table': 'qrtz_fired_triggers', + 'managed': False, + }, + ), + migrations.CreateModel( + name='QrtzJobDetails', + fields=[ + ('sched_name', models.CharField(max_length=120, primary_key=True, serialize=False)), + ('job_name', models.CharField(max_length=200)), + ('job_group', models.CharField(max_length=200)), + ('description', models.CharField(blank=True, max_length=250, null=True)), + ('job_class_name', models.CharField(max_length=250)), + ('is_durable', models.BooleanField()), + ('is_nonconcurrent', models.BooleanField()), + ('is_update_data', models.BooleanField()), + ('requests_recovery', models.BooleanField()), + ('job_data', models.BinaryField(blank=True, null=True)), + ], + options={ + 'db_table': 'qrtz_job_details', + 'managed': False, + }, + ), + migrations.CreateModel( + name='QrtzLocks', + fields=[ + ('sched_name', models.CharField(max_length=120, primary_key=True, serialize=False)), + ('lock_name', models.CharField(max_length=40)), + ], + options={ + 'db_table': 'qrtz_locks', + 'managed': False, + }, + ), + migrations.CreateModel( + name='QrtzPausedTriggerGrps', + fields=[ + ('sched_name', models.CharField(max_length=120, primary_key=True, serialize=False)), + ('trigger_group', models.CharField(max_length=200)), + ], + options={ + 'db_table': 'qrtz_paused_trigger_grps', + 'managed': False, + }, + ), + migrations.CreateModel( + name='QrtzSchedulerState', + fields=[ + ('sched_name', models.CharField(max_length=120, primary_key=True, serialize=False)), + ('instance_name', models.CharField(max_length=200)), + ('last_checkin_time', models.BigIntegerField()), + ('checkin_interval', models.BigIntegerField()), + ], + options={ + 'db_table': 'qrtz_scheduler_state', + 'managed': False, + }, + ), + migrations.CreateModel( + name='Query', + fields=[ + ('query_hash', models.BinaryField(primary_key=True, serialize=False)), + ('average_execution_time', models.IntegerField()), + ('query', models.TextField(blank=True, null=True)), + ], + options={ + 'db_table': 'query', + 'managed': False, + }, + ), + migrations.CreateModel( + name='QueryCache', + fields=[ + ('query_hash', models.BinaryField(primary_key=True, serialize=False)), + ('updated_at', models.DateTimeField()), + ('results', models.BinaryField()), + ], + options={ + 'db_table': 'query_cache', + 'managed': False, + }, + ), + migrations.CreateModel( + name='QueryExecution', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('hash', models.BinaryField()), + ('started_at', models.DateTimeField()), + ('running_time', models.IntegerField()), + ('result_rows', models.IntegerField()), + ('native', models.BooleanField()), + ('context', models.CharField(blank=True, max_length=32, null=True)), + ('error', models.TextField(blank=True, null=True)), + ('executor_id', models.IntegerField(blank=True, null=True)), + ('card_id', models.IntegerField(blank=True, null=True)), + ('dashboard_id', models.IntegerField(blank=True, null=True)), + ('pulse_id', models.IntegerField(blank=True, null=True)), + ('database_id', models.IntegerField(blank=True, null=True)), + ('cache_hit', 
models.BooleanField(blank=True, null=True)), + ], + options={ + 'db_table': 'query_execution', + 'managed': False, + }, + ), + migrations.CreateModel( + name='ReportCard', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('name', models.CharField(max_length=254)), + ('description', models.TextField(blank=True, null=True)), + ('display', models.CharField(max_length=254)), + ('dataset_query', models.TextField()), + ('visualization_settings', models.TextField()), + ('query_type', models.CharField(blank=True, max_length=16, null=True)), + ('archived', models.BooleanField()), + ('public_uuid', models.CharField(blank=True, max_length=36, null=True, unique=True)), + ('enable_embedding', models.BooleanField()), + ('embedding_params', models.TextField(blank=True, null=True)), + ('cache_ttl', models.IntegerField(blank=True, null=True)), + ('result_metadata', models.TextField(blank=True, null=True)), + ('collection_position', models.SmallIntegerField(blank=True, null=True)), + ('dataset', models.BooleanField()), + ('entity_id', models.CharField(blank=True, max_length=21, null=True, unique=True)), + ('parameters', models.TextField(blank=True, null=True)), + ('parameter_mappings', models.TextField(blank=True, null=True)), + ('collection_preview', models.BooleanField()), + ], + options={ + 'db_table': 'report_card', + 'managed': False, + }, + ), + migrations.CreateModel( + name='ReportCardfavorite', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ], + options={ + 'db_table': 'report_cardfavorite', + 'managed': False, + }, + ), + migrations.CreateModel( + name='ReportDashboard', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('name', models.CharField(max_length=254)), + ('description', models.TextField(blank=True, null=True)), + ('parameters', models.TextField()), + ('points_of_interest', models.TextField(blank=True, null=True)), + ('caveats', models.TextField(blank=True, null=True)), + ('show_in_getting_started', models.BooleanField()), + ('public_uuid', models.CharField(blank=True, max_length=36, null=True, unique=True)), + ('enable_embedding', models.BooleanField()), + ('embedding_params', models.TextField(blank=True, null=True)), + ('archived', models.BooleanField()), + ('position', models.IntegerField(blank=True, null=True)), + ('collection_position', models.SmallIntegerField(blank=True, null=True)), + ('cache_ttl', models.IntegerField(blank=True, null=True)), + ('entity_id', models.CharField(blank=True, max_length=21, null=True, unique=True)), + ], + options={ + 'db_table': 'report_dashboard', + 'managed': False, + }, + ), + migrations.CreateModel( + name='ReportDashboardcard', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('size_x', models.IntegerField()), + ('size_y', models.IntegerField()), + ('row', models.IntegerField()), + ('col', models.IntegerField()), + ('parameter_mappings', models.TextField()), + ('visualization_settings', models.TextField()), + ('entity_id', models.CharField(blank=True, max_length=21, null=True, unique=True)), + ], + options={ + 'db_table': 'report_dashboardcard', + 'managed': False, + }, + ), + 
migrations.CreateModel( + name='Revision', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('model', models.CharField(max_length=16)), + ('model_id', models.IntegerField()), + ('timestamp', models.DateTimeField()), + ('object', models.TextField()), + ('is_reversion', models.BooleanField()), + ('is_creation', models.BooleanField()), + ('message', models.TextField(blank=True, null=True)), + ], + options={ + 'db_table': 'revision', + 'managed': False, + }, + ), + migrations.CreateModel( + name='Sandboxes', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('attribute_remappings', models.TextField(blank=True, null=True)), + ], + options={ + 'db_table': 'sandboxes', + 'managed': False, + }, + ), + migrations.CreateModel( + name='Secret', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('version', models.IntegerField()), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField(blank=True, null=True)), + ('name', models.CharField(max_length=254)), + ('kind', models.CharField(max_length=254)), + ('source', models.CharField(blank=True, max_length=254, null=True)), + ('value', models.BinaryField()), + ], + options={ + 'db_table': 'secret', + 'managed': False, + }, + ), + migrations.CreateModel( + name='Segment', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('name', models.CharField(max_length=254)), + ('description', models.TextField(blank=True, null=True)), + ('archived', models.BooleanField()), + ('definition', models.TextField()), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('points_of_interest', models.TextField(blank=True, null=True)), + ('caveats', models.TextField(blank=True, null=True)), + ('show_in_getting_started', models.BooleanField()), + ('entity_id', models.CharField(blank=True, max_length=21, null=True, unique=True)), + ], + options={ + 'db_table': 'segment', + 'managed': False, + }, + ), + migrations.CreateModel( + name='Setting', + fields=[ + ('key', models.CharField(max_length=254, primary_key=True, serialize=False)), + ('value', models.TextField()), + ], + options={ + 'db_table': 'setting', + 'managed': False, + }, + ), + migrations.CreateModel( + name='TaskHistory', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('task', models.CharField(max_length=254)), + ('db_id', models.IntegerField(blank=True, null=True)), + ('started_at', models.DateTimeField()), + ('ended_at', models.DateTimeField()), + ('duration', models.IntegerField()), + ('task_details', models.TextField(blank=True, null=True)), + ], + options={ + 'db_table': 'task_history', + 'managed': False, + }, + ), + migrations.CreateModel( + name='Timeline', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('name', models.CharField(max_length=255)), + ('description', models.CharField(blank=True, max_length=255, null=True)), + ('icon', models.CharField(max_length=128)), + ('archived', models.BooleanField()), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ('default', models.BooleanField()), + ('entity_id', models.CharField(blank=True, max_length=21, null=True, unique=True)), + ], + options={ + 'db_table': 'timeline', + 'managed': False, + }, + ), + migrations.CreateModel( + name='TimelineEvent', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('name', models.CharField(max_length=255)), + ('description', 
models.CharField(blank=True, max_length=255, null=True)), + ('timestamp', models.DateTimeField()), + ('time_matters', models.BooleanField()), + ('timezone', models.CharField(max_length=255)), + ('icon', models.CharField(max_length=128)), + ('archived', models.BooleanField()), + ('created_at', models.DateTimeField()), + ('updated_at', models.DateTimeField()), + ], + options={ + 'db_table': 'timeline_event', + 'managed': False, + }, + ), + migrations.CreateModel( + name='ViewLog', + fields=[ + ('id', models.IntegerField(primary_key=True, serialize=False)), + ('model', models.CharField(max_length=16)), + ('model_id', models.IntegerField()), + ('timestamp', models.DateTimeField()), + ('metadata', models.TextField(blank=True, null=True)), + ], + options={ + 'db_table': 'view_log', + 'managed': False, + }, + ), + migrations.CreateModel( + name='HttpAction', + fields=[ + ('action', models.OneToOneField(on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='metabase.action')), + ('template', models.TextField()), + ('response_handle', models.TextField(blank=True, null=True)), + ('error_handle', models.TextField(blank=True, null=True)), + ], + options={ + 'db_table': 'http_action', + 'managed': False, + }, + ), + migrations.CreateModel( + name='QrtzTriggers', + fields=[ + ('sched_name', models.OneToOneField(db_column='sched_name', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='metabase.qrtzjobdetails')), + ('trigger_name', models.CharField(max_length=200)), + ('trigger_group', models.CharField(max_length=200)), + ('job_name', models.CharField(max_length=200)), + ('job_group', models.CharField(max_length=200)), + ('description', models.CharField(blank=True, max_length=250, null=True)), + ('next_fire_time', models.BigIntegerField(blank=True, null=True)), + ('prev_fire_time', models.BigIntegerField(blank=True, null=True)), + ('priority', models.IntegerField(blank=True, null=True)), + ('trigger_state', models.CharField(max_length=16)), + ('trigger_type', models.CharField(max_length=8)), + ('start_time', models.BigIntegerField()), + ('end_time', models.BigIntegerField(blank=True, null=True)), + ('calendar_name', models.CharField(blank=True, max_length=200, null=True)), + ('misfire_instr', models.SmallIntegerField(blank=True, null=True)), + ('job_data', models.BinaryField(blank=True, null=True)), + ], + options={ + 'db_table': 'qrtz_triggers', + 'managed': False, + }, + ), + migrations.CreateModel( + name='QueryAction', + fields=[ + ('action', models.OneToOneField(on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='metabase.action')), + ('dataset_query', models.TextField()), + ], + options={ + 'db_table': 'query_action', + 'managed': False, + }, + ), + migrations.CreateModel( + name='QrtzBlobTriggers', + fields=[ + ('sched_name', models.OneToOneField(db_column='sched_name', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='metabase.qrtztriggers')), + ('trigger_name', models.CharField(max_length=200)), + ('trigger_group', models.CharField(max_length=200)), + ('blob_data', models.BinaryField(blank=True, null=True)), + ], + options={ + 'db_table': 'qrtz_blob_triggers', + 'managed': False, + }, + ), + migrations.CreateModel( + name='QrtzCronTriggers', + fields=[ + ('sched_name', models.OneToOneField(db_column='sched_name', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='metabase.qrtztriggers')), + ('trigger_name', 
models.CharField(max_length=200)), + ('trigger_group', models.CharField(max_length=200)), + ('cron_expression', models.CharField(max_length=120)), + ('time_zone_id', models.CharField(blank=True, max_length=80, null=True)), + ], + options={ + 'db_table': 'qrtz_cron_triggers', + 'managed': False, + }, + ), + migrations.CreateModel( + name='QrtzSimpleTriggers', + fields=[ + ('sched_name', models.OneToOneField(db_column='sched_name', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='metabase.qrtztriggers')), + ('trigger_name', models.CharField(max_length=200)), + ('trigger_group', models.CharField(max_length=200)), + ('repeat_count', models.BigIntegerField()), + ('repeat_interval', models.BigIntegerField()), + ('times_triggered', models.BigIntegerField()), + ], + options={ + 'db_table': 'qrtz_simple_triggers', + 'managed': False, + }, + ), + migrations.CreateModel( + name='QrtzSimpropTriggers', + fields=[ + ('sched_name', models.OneToOneField(db_column='sched_name', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='metabase.qrtztriggers')), + ('trigger_name', models.CharField(max_length=200)), + ('trigger_group', models.CharField(max_length=200)), + ('str_prop_1', models.CharField(blank=True, max_length=512, null=True)), + ('str_prop_2', models.CharField(blank=True, max_length=512, null=True)), + ('str_prop_3', models.CharField(blank=True, max_length=512, null=True)), + ('int_prop_1', models.IntegerField(blank=True, null=True)), + ('int_prop_2', models.IntegerField(blank=True, null=True)), + ('long_prop_1', models.BigIntegerField(blank=True, null=True)), + ('long_prop_2', models.BigIntegerField(blank=True, null=True)), + ('dec_prop_1', models.DecimalField(blank=True, decimal_places=4, max_digits=13, null=True)), + ('dec_prop_2', models.DecimalField(blank=True, decimal_places=4, max_digits=13, null=True)), + ('bool_prop_1', models.BooleanField(blank=True, null=True)), + ('bool_prop_2', models.BooleanField(blank=True, null=True)), + ], + options={ + 'db_table': 'qrtz_simprop_triggers', + 'managed': False, + }, + ), + ] diff --git a/django/metabase/migrations/__init__.py b/django/metabase/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/django/metabase/models.py b/django/metabase/models.py new file mode 100644 index 00000000..8b29c27e --- /dev/null +++ b/django/metabase/models.py @@ -0,0 +1,1130 @@ +# This is an auto-generated Django model module. +# You'll have to do the following manually to clean this up: +# * Rearrange models' order +# * Make sure each model has one field with primary_key=True +# * Make sure each ForeignKey and OneToOneField has `on_delete` set to the desired behavior +# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table +# Feel free to rename the models, but don't rename db_table values or field names. 
+from django.db import models + + +class Action(models.Model): + id = models.IntegerField(primary_key=True) + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + type = models.TextField() + model = models.ForeignKey('ReportCard', models.DO_NOTHING) + name = models.CharField(max_length=254) + description = models.TextField(blank=True, null=True) + parameters = models.TextField(blank=True, null=True) + parameter_mappings = models.TextField(blank=True, null=True) + visualization_settings = models.TextField(blank=True, null=True) + public_uuid = models.CharField(unique=True, max_length=36, blank=True, null=True) + made_public_by = models.ForeignKey('CoreUser', models.DO_NOTHING, related_name="made_public_actions", blank=True, null=True) + creator = models.ForeignKey('CoreUser', models.DO_NOTHING, related_name="created_actions", blank=True, null=True) + archived = models.BooleanField() + entity_id = models.CharField(unique=True, max_length=21, blank=True, null=True) + + class Meta: + managed = False + db_table = 'action' + + +class Activity(models.Model): + id = models.IntegerField(primary_key=True) + topic = models.CharField(max_length=32) + timestamp = models.DateTimeField() + user = models.ForeignKey('CoreUser', models.DO_NOTHING, blank=True, null=True) + model = models.CharField(max_length=16, blank=True, null=True) + model_id = models.IntegerField(blank=True, null=True) + database_id = models.IntegerField(blank=True, null=True) + table_id = models.IntegerField(blank=True, null=True) + custom_id = models.CharField(max_length=48, blank=True, null=True) + details = models.TextField() + + class Meta: + managed = False + db_table = 'activity' + + +class ApplicationPermissionsRevision(models.Model): + id = models.IntegerField(primary_key=True) + before = models.TextField() + after = models.TextField() + user = models.ForeignKey('CoreUser', models.DO_NOTHING) + created_at = models.DateTimeField() + remark = models.TextField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'application_permissions_revision' + + +class BookmarkOrdering(models.Model): + id = models.IntegerField(primary_key=True) + user = models.ForeignKey('CoreUser', models.DO_NOTHING) + type = models.CharField(max_length=255) + item_id = models.IntegerField() + ordering = models.IntegerField() + + class Meta: + managed = False + db_table = 'bookmark_ordering' + unique_together = (('user', 'type', 'item_id'), ('user', 'ordering'),) + + +class CardBookmark(models.Model): + id = models.IntegerField(primary_key=True) + user = models.ForeignKey('CoreUser', models.DO_NOTHING) + card = models.ForeignKey('ReportCard', models.DO_NOTHING) + created_at = models.DateTimeField() + + class Meta: + managed = False + db_table = 'card_bookmark' + unique_together = (('user', 'card'),) + + +class CardLabel(models.Model): + id = models.IntegerField(primary_key=True) + card = models.ForeignKey('ReportCard', models.DO_NOTHING) + label = models.ForeignKey('Label', models.DO_NOTHING) + + class Meta: + managed = False + db_table = 'card_label' + unique_together = (('card', 'label'),) + + +class Collection(models.Model): + id = models.IntegerField(primary_key=True) + name = models.TextField() + description = models.TextField(blank=True, null=True) + color = models.CharField(max_length=7) + archived = models.BooleanField() + location = models.CharField(max_length=254) + personal_owner = models.OneToOneField('CoreUser', models.DO_NOTHING, blank=True, null=True) + slug = models.CharField(max_length=254) + namespace 
= models.CharField(max_length=254, blank=True, null=True) + authority_level = models.CharField(max_length=255, blank=True, null=True) + entity_id = models.CharField(unique=True, max_length=21, blank=True, null=True) + created_at = models.DateTimeField() + + class Meta: + managed = False + db_table = 'collection' + + +class CollectionBookmark(models.Model): + id = models.IntegerField(primary_key=True) + user = models.ForeignKey('CoreUser', models.DO_NOTHING) + collection = models.ForeignKey(Collection, models.DO_NOTHING) + created_at = models.DateTimeField() + + class Meta: + managed = False + db_table = 'collection_bookmark' + unique_together = (('user', 'collection'),) + + +class CollectionPermissionGraphRevision(models.Model): + id = models.IntegerField(primary_key=True) + before = models.TextField() + after = models.TextField() + user = models.ForeignKey('CoreUser', models.DO_NOTHING) + created_at = models.DateTimeField() + remark = models.TextField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'collection_permission_graph_revision' + + +class ComputationJob(models.Model): + id = models.IntegerField(primary_key=True) + creator = models.ForeignKey('CoreUser', models.DO_NOTHING, blank=True, null=True) + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + type = models.CharField(max_length=254) + status = models.CharField(max_length=254) + context = models.TextField(blank=True, null=True) + ended_at = models.DateTimeField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'computation_job' + + +class ComputationJobResult(models.Model): + id = models.IntegerField(primary_key=True) + job = models.ForeignKey(ComputationJob, models.DO_NOTHING) + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + permanence = models.CharField(max_length=254) + payload = models.TextField() + + class Meta: + managed = False + db_table = 'computation_job_result' + + +class CoreSession(models.Model): + id = models.CharField(primary_key=True, max_length=254) + user = models.ForeignKey('CoreUser', models.DO_NOTHING) + created_at = models.DateTimeField() + anti_csrf_token = models.TextField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'core_session' + + +class CoreUser(models.Model): + id = models.IntegerField(primary_key=True) + email = models.TextField(unique=True) # This field type is a guess. 
+ first_name = models.CharField(max_length=254, blank=True, null=True) + last_name = models.CharField(max_length=254, blank=True, null=True) + password = models.CharField(max_length=254, blank=True, null=True) + password_salt = models.CharField(max_length=254, blank=True, null=True) + date_joined = models.DateTimeField() + last_login = models.DateTimeField(blank=True, null=True) + is_superuser = models.BooleanField() + is_active = models.BooleanField() + reset_token = models.CharField(max_length=254, blank=True, null=True) + reset_triggered = models.BigIntegerField(blank=True, null=True) + is_qbnewb = models.BooleanField() + google_auth = models.BooleanField() + ldap_auth = models.BooleanField() + login_attributes = models.TextField(blank=True, null=True) + updated_at = models.DateTimeField(blank=True, null=True) + sso_source = models.CharField(max_length=254, blank=True, null=True) + locale = models.CharField(max_length=5, blank=True, null=True) + is_datasetnewb = models.BooleanField() + settings = models.TextField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'core_user' + + +class DashboardBookmark(models.Model): + id = models.IntegerField(primary_key=True) + user = models.ForeignKey(CoreUser, models.DO_NOTHING) + dashboard = models.ForeignKey('ReportDashboard', models.DO_NOTHING) + created_at = models.DateTimeField() + + class Meta: + managed = False + db_table = 'dashboard_bookmark' + unique_together = (('user', 'dashboard'),) + + +class DashboardFavorite(models.Model): + id = models.IntegerField(primary_key=True) + user = models.ForeignKey(CoreUser, models.DO_NOTHING) + dashboard = models.ForeignKey('ReportDashboard', models.DO_NOTHING) + + class Meta: + managed = False + db_table = 'dashboard_favorite' + unique_together = (('user', 'dashboard'),) + + +class DashboardcardSeries(models.Model): + id = models.IntegerField(primary_key=True) + dashboardcard = models.ForeignKey('ReportDashboardcard', models.DO_NOTHING) + card = models.ForeignKey('ReportCard', models.DO_NOTHING) + position = models.IntegerField() + + class Meta: + managed = False + db_table = 'dashboardcard_series' + + +class DataMigrations(models.Model): + id = models.CharField(primary_key=True, max_length=254) + timestamp = models.DateTimeField() + + class Meta: + managed = False + db_table = 'data_migrations' + + +# class Databasechangelog(models.Model): +# identifier = models.CharField(max_length=255) +# author = models.CharField(max_length=255) +# filename = models.CharField(max_length=255) +# dateexecuted = models.DateTimeField() +# orderexecuted = models.IntegerField() +# exectype = models.CharField(max_length=10) +# md5sum = models.CharField(max_length=35, blank=True, null=True) +# description = models.CharField(max_length=255, blank=True, null=True) +# comments = models.CharField(max_length=255, blank=True, null=True) +# tag = models.CharField(max_length=255, blank=True, null=True) +# liquibase = models.CharField(max_length=20, blank=True, null=True) +# contexts = models.CharField(max_length=255, blank=True, null=True) +# labels = models.CharField(max_length=255, blank=True, null=True) +# deployment_id = models.CharField(max_length=10, blank=True, null=True) + +# class Meta: +# managed = False +# db_table = 'databasechangelog' +# unique_together = (('identifier', 'author', 'filename'),) + + +# class Databasechangeloglock(models.Model): +# id = models.IntegerField(primary_key=True) +# locked = models.BooleanField() +# lockgranted = models.DateTimeField(blank=True, null=True) +# lockedby = 
models.CharField(max_length=255, blank=True, null=True) + +# class Meta: +# managed = False +# db_table = 'databasechangeloglock' + + +class Dependency(models.Model): + id = models.IntegerField(primary_key=True) + model = models.CharField(max_length=32) + model_id = models.IntegerField() + dependent_on_model = models.CharField(max_length=32) + dependent_on_id = models.IntegerField() + created_at = models.DateTimeField() + + class Meta: + managed = False + db_table = 'dependency' + + +class Dimension(models.Model): + id = models.IntegerField(primary_key=True) + field = models.OneToOneField('MetabaseField', models.DO_NOTHING, related_name="field_dimensions") + name = models.CharField(max_length=254) + type = models.CharField(max_length=254) + human_readable_field = models.ForeignKey('MetabaseField', models.DO_NOTHING, related_name="human_readable_field_dimensions", blank=True, null=True) + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + entity_id = models.CharField(unique=True, max_length=21, blank=True, null=True) + + class Meta: + managed = False + db_table = 'dimension' + + +class HttpAction(models.Model): + action = models.OneToOneField(Action, models.DO_NOTHING, primary_key=True) + template = models.TextField() + response_handle = models.TextField(blank=True, null=True) + error_handle = models.TextField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'http_action' + + +class ImplicitAction(models.Model): + action = models.ForeignKey(Action, models.DO_NOTHING) + kind = models.TextField() + + class Meta: + managed = False + db_table = 'implicit_action' + + +class Label(models.Model): + id = models.IntegerField(primary_key=True) + name = models.CharField(max_length=254) + slug = models.CharField(unique=True, max_length=254) + icon = models.CharField(max_length=128, blank=True, null=True) + + class Meta: + managed = False + db_table = 'label' + + +class LoginHistory(models.Model): + id = models.IntegerField(primary_key=True) + timestamp = models.DateTimeField() + user = models.ForeignKey(CoreUser, models.DO_NOTHING) + session = models.ForeignKey(CoreSession, models.DO_NOTHING, blank=True, null=True) + device_id = models.CharField(max_length=36) + device_description = models.TextField() + ip_address = models.TextField() + + class Meta: + managed = False + db_table = 'login_history' + + +class MetabaseDatabase(models.Model): + id = models.IntegerField(primary_key=True) + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + name = models.CharField(max_length=254) + description = models.TextField(blank=True, null=True) + details = models.TextField() + engine = models.CharField(max_length=254) + is_sample = models.BooleanField() + is_full_sync = models.BooleanField() + points_of_interest = models.TextField(blank=True, null=True) + caveats = models.TextField(blank=True, null=True) + metadata_sync_schedule = models.CharField(max_length=254) + cache_field_values_schedule = models.CharField(max_length=254) + timezone = models.CharField(max_length=254, blank=True, null=True) + is_on_demand = models.BooleanField() + options = models.TextField(blank=True, null=True) + auto_run_queries = models.BooleanField() + refingerprint = models.BooleanField(blank=True, null=True) + cache_ttl = models.IntegerField(blank=True, null=True) + initial_sync_status = models.CharField(max_length=32) + creator = models.ForeignKey(CoreUser, models.DO_NOTHING, blank=True, null=True) + settings = models.TextField(blank=True, null=True) + dbms_version = 
models.TextField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'metabase_database' + + +class MetabaseField(models.Model): + id = models.IntegerField(primary_key=True) + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + name = models.CharField(max_length=254) + base_type = models.CharField(max_length=255) + semantic_type = models.CharField(max_length=255, blank=True, null=True) + active = models.BooleanField() + description = models.TextField(blank=True, null=True) + preview_display = models.BooleanField() + position = models.IntegerField() + table = models.ForeignKey('MetabaseTable', models.DO_NOTHING) + parent = models.ForeignKey('self', models.DO_NOTHING, blank=True, null=True) + display_name = models.CharField(max_length=254, blank=True, null=True) + visibility_type = models.CharField(max_length=32) + fk_target_field_id = models.IntegerField(blank=True, null=True) + last_analyzed = models.DateTimeField(blank=True, null=True) + points_of_interest = models.TextField(blank=True, null=True) + caveats = models.TextField(blank=True, null=True) + fingerprint = models.TextField(blank=True, null=True) + fingerprint_version = models.IntegerField() + database_type = models.TextField() + has_field_values = models.TextField(blank=True, null=True) + settings = models.TextField(blank=True, null=True) + database_position = models.IntegerField() + custom_position = models.IntegerField() + effective_type = models.CharField(max_length=255, blank=True, null=True) + coercion_strategy = models.CharField(max_length=255, blank=True, null=True) + nfc_path = models.CharField(max_length=254, blank=True, null=True) + database_required = models.BooleanField() + database_is_auto_increment = models.BooleanField() + + class Meta: + managed = False + db_table = 'metabase_field' + unique_together = (('table', 'parent', 'name'), ('table', 'name'),) + + +class MetabaseFieldvalues(models.Model): + id = models.IntegerField(primary_key=True) + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + values = models.TextField(blank=True, null=True) + human_readable_values = models.TextField(blank=True, null=True) + field = models.ForeignKey(MetabaseField, models.DO_NOTHING) + has_more_values = models.BooleanField(blank=True, null=True) + type = models.CharField(max_length=32) + hash_key = models.TextField(blank=True, null=True) + last_used_at = models.DateTimeField() + + class Meta: + managed = False + db_table = 'metabase_fieldvalues' + + +class MetabaseTable(models.Model): + id = models.IntegerField(primary_key=True) + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + name = models.CharField(max_length=254) + description = models.TextField(blank=True, null=True) + entity_type = models.CharField(max_length=254, blank=True, null=True) + active = models.BooleanField() + db = models.ForeignKey(MetabaseDatabase, models.DO_NOTHING) + display_name = models.CharField(max_length=254, blank=True, null=True) + visibility_type = models.CharField(max_length=254, blank=True, null=True) + schema = models.CharField(max_length=254, blank=True, null=True) + points_of_interest = models.TextField(blank=True, null=True) + caveats = models.TextField(blank=True, null=True) + show_in_getting_started = models.BooleanField() + field_order = models.CharField(max_length=254) + initial_sync_status = models.CharField(max_length=32) + + class Meta: + managed = False + db_table = 'metabase_table' + unique_together = (('db', 'schema', 'name'), ('db', 'name'),) + 
+ +class Metric(models.Model): + id = models.IntegerField(primary_key=True) + table = models.ForeignKey(MetabaseTable, models.DO_NOTHING) + creator = models.ForeignKey(CoreUser, models.DO_NOTHING) + name = models.CharField(max_length=254) + description = models.TextField(blank=True, null=True) + archived = models.BooleanField() + definition = models.TextField() + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + points_of_interest = models.TextField(blank=True, null=True) + caveats = models.TextField(blank=True, null=True) + how_is_this_calculated = models.TextField(blank=True, null=True) + show_in_getting_started = models.BooleanField() + entity_id = models.CharField(unique=True, max_length=21, blank=True, null=True) + + class Meta: + managed = False + db_table = 'metric' + + +class MetricImportantField(models.Model): + id = models.IntegerField(primary_key=True) + metric = models.ForeignKey(Metric, models.DO_NOTHING) + field = models.ForeignKey(MetabaseField, models.DO_NOTHING) + + class Meta: + managed = False + db_table = 'metric_important_field' + unique_together = (('metric', 'field'),) + + +class ModerationReview(models.Model): + id = models.IntegerField(primary_key=True) + updated_at = models.DateTimeField() + created_at = models.DateTimeField() + status = models.CharField(max_length=255, blank=True, null=True) + text = models.TextField(blank=True, null=True) + moderated_item_id = models.IntegerField() + moderated_item_type = models.CharField(max_length=255) + moderator_id = models.IntegerField() + most_recent = models.BooleanField() + + class Meta: + managed = False + db_table = 'moderation_review' + + +class NativeQuerySnippet(models.Model): + id = models.IntegerField(primary_key=True) + name = models.CharField(unique=True, max_length=254) + description = models.TextField(blank=True, null=True) + content = models.TextField() + creator = models.ForeignKey(CoreUser, models.DO_NOTHING) + archived = models.BooleanField() + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + collection = models.ForeignKey(Collection, models.DO_NOTHING, blank=True, null=True) + entity_id = models.CharField(unique=True, max_length=21, blank=True, null=True) + + class Meta: + managed = False + db_table = 'native_query_snippet' + + +class ParameterCard(models.Model): + id = models.IntegerField(primary_key=True) + updated_at = models.DateTimeField() + created_at = models.DateTimeField() + card = models.ForeignKey('ReportCard', models.DO_NOTHING) + parameterized_object_type = models.CharField(max_length=32) + parameterized_object_id = models.IntegerField() + parameter_id = models.CharField(max_length=36) + entity_id = models.CharField(unique=True, max_length=21, blank=True, null=True) + + class Meta: + managed = False + db_table = 'parameter_card' + unique_together = (('parameterized_object_id', 'parameterized_object_type', 'parameter_id'),) + + +class Permissions(models.Model): + id = models.IntegerField(primary_key=True) + object = models.CharField(max_length=254) + group = models.ForeignKey('PermissionsGroup', models.DO_NOTHING) + + class Meta: + managed = False + db_table = 'permissions' + unique_together = (('group', 'object'),) + + +class PermissionsGroup(models.Model): + id = models.IntegerField(primary_key=True) + name = models.CharField(unique=True, max_length=255) + + class Meta: + managed = False + db_table = 'permissions_group' + + +class PermissionsGroupMembership(models.Model): + id = models.IntegerField(primary_key=True) + user = 
models.ForeignKey(CoreUser, models.DO_NOTHING) + group = models.ForeignKey(PermissionsGroup, models.DO_NOTHING) + is_group_manager = models.BooleanField() + + class Meta: + managed = False + db_table = 'permissions_group_membership' + unique_together = (('user', 'group'),) + + +class PermissionsRevision(models.Model): + id = models.IntegerField(primary_key=True) + before = models.TextField() + after = models.TextField() + user = models.ForeignKey(CoreUser, models.DO_NOTHING) + created_at = models.DateTimeField() + remark = models.TextField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'permissions_revision' + + +class PersistedInfo(models.Model): + id = models.IntegerField(primary_key=True) + database = models.ForeignKey(MetabaseDatabase, models.DO_NOTHING) + card = models.OneToOneField('ReportCard', models.DO_NOTHING) + question_slug = models.TextField() + table_name = models.TextField() + definition = models.TextField(blank=True, null=True) + query_hash = models.TextField(blank=True, null=True) + active = models.BooleanField() + state = models.TextField() + refresh_begin = models.DateTimeField() + refresh_end = models.DateTimeField(blank=True, null=True) + state_change_at = models.DateTimeField(blank=True, null=True) + error = models.TextField(blank=True, null=True) + created_at = models.DateTimeField() + creator = models.ForeignKey(CoreUser, models.DO_NOTHING, blank=True, null=True) + + class Meta: + managed = False + db_table = 'persisted_info' + + +class Pulse(models.Model): + id = models.IntegerField(primary_key=True) + creator = models.ForeignKey(CoreUser, models.DO_NOTHING) + name = models.CharField(max_length=254, blank=True, null=True) + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + skip_if_empty = models.BooleanField() + alert_condition = models.CharField(max_length=254, blank=True, null=True) + alert_first_only = models.BooleanField(blank=True, null=True) + alert_above_goal = models.BooleanField(blank=True, null=True) + collection = models.ForeignKey(Collection, models.DO_NOTHING, blank=True, null=True) + collection_position = models.SmallIntegerField(blank=True, null=True) + archived = models.BooleanField(blank=True, null=True) + dashboard = models.ForeignKey('ReportDashboard', models.DO_NOTHING, blank=True, null=True) + parameters = models.TextField() + entity_id = models.CharField(unique=True, max_length=21, blank=True, null=True) + + class Meta: + managed = False + db_table = 'pulse' + + +class PulseCard(models.Model): + id = models.IntegerField(primary_key=True) + pulse = models.ForeignKey(Pulse, models.DO_NOTHING) + card = models.ForeignKey('ReportCard', models.DO_NOTHING) + position = models.IntegerField() + include_csv = models.BooleanField() + include_xls = models.BooleanField() + dashboard_card = models.ForeignKey('ReportDashboardcard', models.DO_NOTHING, blank=True, null=True) + entity_id = models.CharField(unique=True, max_length=21, blank=True, null=True) + + class Meta: + managed = False + db_table = 'pulse_card' + + +class PulseChannel(models.Model): + id = models.IntegerField(primary_key=True) + pulse = models.ForeignKey(Pulse, models.DO_NOTHING) + channel_type = models.CharField(max_length=32) + details = models.TextField() + schedule_type = models.CharField(max_length=32) + schedule_hour = models.IntegerField(blank=True, null=True) + schedule_day = models.CharField(max_length=64, blank=True, null=True) + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + schedule_frame = 
models.CharField(max_length=32, blank=True, null=True) + enabled = models.BooleanField() + entity_id = models.CharField(unique=True, max_length=21, blank=True, null=True) + + class Meta: + managed = False + db_table = 'pulse_channel' + + +class PulseChannelRecipient(models.Model): + id = models.IntegerField(primary_key=True) + pulse_channel = models.ForeignKey(PulseChannel, models.DO_NOTHING) + user = models.ForeignKey(CoreUser, models.DO_NOTHING) + + class Meta: + managed = False + db_table = 'pulse_channel_recipient' + + +class QrtzBlobTriggers(models.Model): + sched_name = models.OneToOneField('QrtzTriggers', models.DO_NOTHING, db_column='sched_name', primary_key=True) + trigger_name = models.CharField(max_length=200) + trigger_group = models.CharField(max_length=200) + blob_data = models.BinaryField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'qrtz_blob_triggers' + unique_together = (('sched_name', 'trigger_name', 'trigger_group'),) + + +class QrtzCalendars(models.Model): + sched_name = models.CharField(primary_key=True, max_length=120) + calendar_name = models.CharField(max_length=200) + calendar = models.BinaryField() + + class Meta: + managed = False + db_table = 'qrtz_calendars' + unique_together = (('sched_name', 'calendar_name'),) + + +class QrtzCronTriggers(models.Model): + sched_name = models.OneToOneField('QrtzTriggers', models.DO_NOTHING, db_column='sched_name', primary_key=True) + trigger_name = models.CharField(max_length=200) + trigger_group = models.CharField(max_length=200) + cron_expression = models.CharField(max_length=120) + time_zone_id = models.CharField(max_length=80, blank=True, null=True) + + class Meta: + managed = False + db_table = 'qrtz_cron_triggers' + unique_together = (('sched_name', 'trigger_name', 'trigger_group'),) + + +class QrtzFiredTriggers(models.Model): + sched_name = models.CharField(primary_key=True, max_length=120) + entry_id = models.CharField(max_length=95) + trigger_name = models.CharField(max_length=200) + trigger_group = models.CharField(max_length=200) + instance_name = models.CharField(max_length=200) + fired_time = models.BigIntegerField() + sched_time = models.BigIntegerField(blank=True, null=True) + priority = models.IntegerField() + state = models.CharField(max_length=16) + job_name = models.CharField(max_length=200, blank=True, null=True) + job_group = models.CharField(max_length=200, blank=True, null=True) + is_nonconcurrent = models.BooleanField(blank=True, null=True) + requests_recovery = models.BooleanField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'qrtz_fired_triggers' + unique_together = (('sched_name', 'entry_id'),) + + +class QrtzJobDetails(models.Model): + sched_name = models.CharField(primary_key=True, max_length=120) + job_name = models.CharField(max_length=200) + job_group = models.CharField(max_length=200) + description = models.CharField(max_length=250, blank=True, null=True) + job_class_name = models.CharField(max_length=250) + is_durable = models.BooleanField() + is_nonconcurrent = models.BooleanField() + is_update_data = models.BooleanField() + requests_recovery = models.BooleanField() + job_data = models.BinaryField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'qrtz_job_details' + unique_together = (('sched_name', 'job_name', 'job_group'),) + + +class QrtzLocks(models.Model): + sched_name = models.CharField(primary_key=True, max_length=120) + lock_name = models.CharField(max_length=40) + + class Meta: + managed = False + db_table = 
'qrtz_locks' + unique_together = (('sched_name', 'lock_name'),) + + +class QrtzPausedTriggerGrps(models.Model): + sched_name = models.CharField(primary_key=True, max_length=120) + trigger_group = models.CharField(max_length=200) + + class Meta: + managed = False + db_table = 'qrtz_paused_trigger_grps' + unique_together = (('sched_name', 'trigger_group'),) + + +class QrtzSchedulerState(models.Model): + sched_name = models.CharField(primary_key=True, max_length=120) + instance_name = models.CharField(max_length=200) + last_checkin_time = models.BigIntegerField() + checkin_interval = models.BigIntegerField() + + class Meta: + managed = False + db_table = 'qrtz_scheduler_state' + unique_together = (('sched_name', 'instance_name'),) + + +class QrtzSimpleTriggers(models.Model): + sched_name = models.OneToOneField('QrtzTriggers', models.DO_NOTHING, db_column='sched_name', primary_key=True) + trigger_name = models.CharField(max_length=200) + trigger_group = models.CharField(max_length=200) + repeat_count = models.BigIntegerField() + repeat_interval = models.BigIntegerField() + times_triggered = models.BigIntegerField() + + class Meta: + managed = False + db_table = 'qrtz_simple_triggers' + unique_together = (('sched_name', 'trigger_name', 'trigger_group'),) + + +class QrtzSimpropTriggers(models.Model): + sched_name = models.OneToOneField('QrtzTriggers', models.DO_NOTHING, db_column='sched_name', primary_key=True) + trigger_name = models.CharField(max_length=200) + trigger_group = models.CharField(max_length=200) + str_prop_1 = models.CharField(max_length=512, blank=True, null=True) + str_prop_2 = models.CharField(max_length=512, blank=True, null=True) + str_prop_3 = models.CharField(max_length=512, blank=True, null=True) + int_prop_1 = models.IntegerField(blank=True, null=True) + int_prop_2 = models.IntegerField(blank=True, null=True) + long_prop_1 = models.BigIntegerField(blank=True, null=True) + long_prop_2 = models.BigIntegerField(blank=True, null=True) + dec_prop_1 = models.DecimalField(max_digits=13, decimal_places=4, blank=True, null=True) + dec_prop_2 = models.DecimalField(max_digits=13, decimal_places=4, blank=True, null=True) + bool_prop_1 = models.BooleanField(blank=True, null=True) + bool_prop_2 = models.BooleanField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'qrtz_simprop_triggers' + unique_together = (('sched_name', 'trigger_name', 'trigger_group'),) + + +class QrtzTriggers(models.Model): + sched_name = models.OneToOneField(QrtzJobDetails, models.DO_NOTHING, db_column='sched_name', primary_key=True) + trigger_name = models.CharField(max_length=200) + trigger_group = models.CharField(max_length=200) + job_name = models.CharField(max_length=200) + job_group = models.CharField(max_length=200) + description = models.CharField(max_length=250, blank=True, null=True) + next_fire_time = models.BigIntegerField(blank=True, null=True) + prev_fire_time = models.BigIntegerField(blank=True, null=True) + priority = models.IntegerField(blank=True, null=True) + trigger_state = models.CharField(max_length=16) + trigger_type = models.CharField(max_length=8) + start_time = models.BigIntegerField() + end_time = models.BigIntegerField(blank=True, null=True) + calendar_name = models.CharField(max_length=200, blank=True, null=True) + misfire_instr = models.SmallIntegerField(blank=True, null=True) + job_data = models.BinaryField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'qrtz_triggers' + unique_together = (('sched_name', 'trigger_name', 'trigger_group'),) 
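The qrtz_* models mirror the Quartz scheduler schema that Metabase embeds. Quartz keys these tables on composite primary keys, which Django cannot model directly, so inspectdb approximates them with one primary_key column plus unique_together; they should be treated as read-only, since writes through the ORM could collide on the key columns Django does not declare. Fire times are stored as epoch milliseconds in BigIntegerFields. A small sketch of decoding them, under the same assumed module path and database alias as above:

    # List waiting Quartz triggers with their next fire time. next_fire_time
    # is epoch milliseconds; import path and "metabase" alias are assumptions.
    from datetime import datetime, timezone

    from api.models.metabase import QrtzTriggers

    waiting = (
        QrtzTriggers.objects.using("metabase")
        .filter(trigger_state="WAITING", next_fire_time__isnull=False)
    )
    for trigger in waiting:
        fire_at = datetime.fromtimestamp(trigger.next_fire_time / 1000, tz=timezone.utc)
        print(trigger.trigger_name, trigger.trigger_group, fire_at.isoformat())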
+ + +class Query(models.Model): + query_hash = models.BinaryField(primary_key=True) + average_execution_time = models.IntegerField() + query = models.TextField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'query' + + +class QueryAction(models.Model): + action = models.OneToOneField(Action, models.DO_NOTHING, primary_key=True) + database = models.ForeignKey(MetabaseDatabase, models.DO_NOTHING) + dataset_query = models.TextField() + + class Meta: + managed = False + db_table = 'query_action' + + +class QueryCache(models.Model): + query_hash = models.BinaryField(primary_key=True) + updated_at = models.DateTimeField() + results = models.BinaryField() + + class Meta: + managed = False + db_table = 'query_cache' + + +class QueryExecution(models.Model): + id = models.IntegerField(primary_key=True) + hash = models.BinaryField() + started_at = models.DateTimeField() + running_time = models.IntegerField() + result_rows = models.IntegerField() + native = models.BooleanField() + context = models.CharField(max_length=32, blank=True, null=True) + error = models.TextField(blank=True, null=True) + executor_id = models.IntegerField(blank=True, null=True) + card_id = models.IntegerField(blank=True, null=True) + dashboard_id = models.IntegerField(blank=True, null=True) + pulse_id = models.IntegerField(blank=True, null=True) + database_id = models.IntegerField(blank=True, null=True) + cache_hit = models.BooleanField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'query_execution' + + +class ReportCard(models.Model): + id = models.IntegerField(primary_key=True) + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + name = models.CharField(max_length=254) + description = models.TextField(blank=True, null=True) + display = models.CharField(max_length=254) + dataset_query = models.TextField() + visualization_settings = models.TextField() + creator = models.ForeignKey(CoreUser, models.DO_NOTHING, related_name="creator_report_cards") + database = models.ForeignKey(MetabaseDatabase, models.DO_NOTHING) + table = models.ForeignKey(MetabaseTable, models.DO_NOTHING, blank=True, null=True) + query_type = models.CharField(max_length=16, blank=True, null=True) + archived = models.BooleanField() + collection = models.ForeignKey(Collection, models.DO_NOTHING, blank=True, null=True) + public_uuid = models.CharField(unique=True, max_length=36, blank=True, null=True) + made_public_by = models.ForeignKey(CoreUser, models.DO_NOTHING, blank=True, null=True, related_name="made_public_by_report_cards") + enable_embedding = models.BooleanField() + embedding_params = models.TextField(blank=True, null=True) + cache_ttl = models.IntegerField(blank=True, null=True) + result_metadata = models.TextField(blank=True, null=True) + collection_position = models.SmallIntegerField(blank=True, null=True) + dataset = models.BooleanField() + entity_id = models.CharField(unique=True, max_length=21, blank=True, null=True) + parameters = models.TextField(blank=True, null=True) + parameter_mappings = models.TextField(blank=True, null=True) + collection_preview = models.BooleanField() + + class Meta: + managed = False + db_table = 'report_card' + + +class ReportCardfavorite(models.Model): + id = models.IntegerField(primary_key=True) + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + card = models.ForeignKey(ReportCard, models.DO_NOTHING) + owner = models.ForeignKey(CoreUser, models.DO_NOTHING) + + class Meta: + managed = False + db_table = 
'report_cardfavorite' + unique_together = (('card', 'owner'),) + + +class ReportDashboard(models.Model): + id = models.IntegerField(primary_key=True) + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + name = models.CharField(max_length=254) + description = models.TextField(blank=True, null=True) + creator = models.ForeignKey(CoreUser, models.DO_NOTHING, related_name="creator_report_dashboards") + parameters = models.TextField() + points_of_interest = models.TextField(blank=True, null=True) + caveats = models.TextField(blank=True, null=True) + show_in_getting_started = models.BooleanField() + public_uuid = models.CharField(unique=True, max_length=36, blank=True, null=True) + made_public_by = models.ForeignKey(CoreUser, models.DO_NOTHING, related_name="made_public_by_report_dashboards", blank=True, null=True) + enable_embedding = models.BooleanField() + embedding_params = models.TextField(blank=True, null=True) + archived = models.BooleanField() + position = models.IntegerField(blank=True, null=True) + collection = models.ForeignKey(Collection, models.DO_NOTHING, blank=True, null=True) + collection_position = models.SmallIntegerField(blank=True, null=True) + cache_ttl = models.IntegerField(blank=True, null=True) + entity_id = models.CharField(unique=True, max_length=21, blank=True, null=True) + + class Meta: + managed = False + db_table = 'report_dashboard' + + +class ReportDashboardcard(models.Model): + id = models.IntegerField(primary_key=True) + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + size_x = models.IntegerField() + size_y = models.IntegerField() + row = models.IntegerField() + col = models.IntegerField() + card = models.ForeignKey(ReportCard, models.DO_NOTHING, blank=True, null=True) + dashboard = models.ForeignKey(ReportDashboard, models.DO_NOTHING) + parameter_mappings = models.TextField() + visualization_settings = models.TextField() + entity_id = models.CharField(unique=True, max_length=21, blank=True, null=True) + action = models.ForeignKey(Action, models.DO_NOTHING, blank=True, null=True) + + class Meta: + managed = False + db_table = 'report_dashboardcard' + + +class Revision(models.Model): + id = models.IntegerField(primary_key=True) + model = models.CharField(max_length=16) + model_id = models.IntegerField() + user = models.ForeignKey(CoreUser, models.DO_NOTHING) + timestamp = models.DateTimeField() + object = models.TextField() + is_reversion = models.BooleanField() + is_creation = models.BooleanField() + message = models.TextField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'revision' + + +class Sandboxes(models.Model): + id = models.IntegerField(primary_key=True) + group = models.ForeignKey(PermissionsGroup, models.DO_NOTHING) + table = models.ForeignKey(MetabaseTable, models.DO_NOTHING) + card = models.ForeignKey(ReportCard, models.DO_NOTHING, blank=True, null=True) + attribute_remappings = models.TextField(blank=True, null=True) + permission = models.ForeignKey(Permissions, models.DO_NOTHING, blank=True, null=True) + + class Meta: + managed = False + db_table = 'sandboxes' + unique_together = (('table', 'group'),) + + +class Secret(models.Model): + id = models.IntegerField(primary_key=True) + version = models.IntegerField() + creator = models.ForeignKey(CoreUser, models.DO_NOTHING, blank=True, null=True) + created_at = models.DateTimeField() + updated_at = models.DateTimeField(blank=True, null=True) + name = models.CharField(max_length=254) + kind = models.CharField(max_length=254) + 
source = models.CharField(max_length=254, blank=True, null=True) + value = models.BinaryField() + + class Meta: + managed = False + db_table = 'secret' + unique_together = (('id', 'version'),) + + +class Segment(models.Model): + id = models.IntegerField(primary_key=True) + table = models.ForeignKey(MetabaseTable, models.DO_NOTHING) + creator = models.ForeignKey(CoreUser, models.DO_NOTHING) + name = models.CharField(max_length=254) + description = models.TextField(blank=True, null=True) + archived = models.BooleanField() + definition = models.TextField() + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + points_of_interest = models.TextField(blank=True, null=True) + caveats = models.TextField(blank=True, null=True) + show_in_getting_started = models.BooleanField() + entity_id = models.CharField(unique=True, max_length=21, blank=True, null=True) + + class Meta: + managed = False + db_table = 'segment' + + +class Setting(models.Model): + key = models.CharField(primary_key=True, max_length=254) + value = models.TextField() + + class Meta: + managed = False + db_table = 'setting' + + +class TaskHistory(models.Model): + id = models.IntegerField(primary_key=True) + task = models.CharField(max_length=254) + db_id = models.IntegerField(blank=True, null=True) + started_at = models.DateTimeField() + ended_at = models.DateTimeField() + duration = models.IntegerField() + task_details = models.TextField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'task_history' + + +class Timeline(models.Model): + id = models.IntegerField(primary_key=True) + name = models.CharField(max_length=255) + description = models.CharField(max_length=255, blank=True, null=True) + icon = models.CharField(max_length=128) + collection = models.ForeignKey(Collection, models.DO_NOTHING, blank=True, null=True) + archived = models.BooleanField() + creator = models.ForeignKey(CoreUser, models.DO_NOTHING) + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + default = models.BooleanField() + entity_id = models.CharField(unique=True, max_length=21, blank=True, null=True) + + class Meta: + managed = False + db_table = 'timeline' + + +class TimelineEvent(models.Model): + id = models.IntegerField(primary_key=True) + timeline = models.ForeignKey(Timeline, models.DO_NOTHING) + name = models.CharField(max_length=255) + description = models.CharField(max_length=255, blank=True, null=True) + timestamp = models.DateTimeField() + time_matters = models.BooleanField() + timezone = models.CharField(max_length=255) + icon = models.CharField(max_length=128) + archived = models.BooleanField() + creator = models.ForeignKey(CoreUser, models.DO_NOTHING) + created_at = models.DateTimeField() + updated_at = models.DateTimeField() + + class Meta: + managed = False + db_table = 'timeline_event' + + +class ViewLog(models.Model): + id = models.IntegerField(primary_key=True) + user = models.ForeignKey(CoreUser, models.DO_NOTHING, blank=True, null=True) + model = models.CharField(max_length=16) + model_id = models.IntegerField() + timestamp = models.DateTimeField() + metadata = models.TextField(blank=True, null=True) + + class Meta: + managed = False + db_table = 'view_log' diff --git a/docker-compose.yml b/docker-compose.yml index 19371282..cd960728 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -19,6 +19,11 @@ services: image: metabase/metabase ports: - 3001:3000 + env_file: + - metabase.env + depends_on: + db: + condition: service_healthy minio: image: minio/minio hostname: 
"minio" diff --git a/metabase.env b/metabase.env new file mode 100644 index 00000000..ba77cd46 --- /dev/null +++ b/metabase.env @@ -0,0 +1,6 @@ +MB_DB_TYPE=postgres +MB_DB_DBNAME=metabase +MB_DB_PORT=5432 +MB_DB_USER=postgres +MB_DB_PASS=postgres +MB_DB_HOST=db \ No newline at end of file From c5c79286aba603bfc3c288dabc919470ef5a3d84 Mon Sep 17 00:00:00 2001 From: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com> Date: Thu, 28 Mar 2024 13:37:50 -0700 Subject: [PATCH 097/152] update workflow (#262) --- .github/workflows/dev-ci.yaml | 2 +- .github/workflows/test-ci.yaml | 13 +++++-------- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/.github/workflows/dev-ci.yaml b/.github/workflows/dev-ci.yaml index 1cad9e78..42bf9729 100644 --- a/.github/workflows/dev-ci.yaml +++ b/.github/workflows/dev-ci.yaml @@ -107,6 +107,6 @@ jobs: git config --global user.email "actions@github.com" git config --global user.name "GitHub Actions" git add cthub/values-dev.yaml - git commit -m "update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }}" + git commit -m "Update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }} on Dev" git push \ No newline at end of file diff --git a/.github/workflows/test-ci.yaml b/.github/workflows/test-ci.yaml index cb6f4a45..4c46f5af 100644 --- a/.github/workflows/test-ci.yaml +++ b/.github/workflows/test-ci.yaml @@ -54,17 +54,19 @@ jobs: - name: Build CTHUB Backend run: | - cd openshift/templates + cd openshift/templates/backend oc process -f ./backend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} sleep 5s oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 + oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} - name: Build CTHUB Frontend run: | - cd openshift/templates + cd openshift/templates/frontend oc process -f ./frontend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} sleep 5s oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-frontend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 + oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} deploy: @@ -85,11 +87,6 @@ jobs: minimum-approvals: 1 issue-title: "CTHUB release-${{ env.VERSION }} Test Deployment" - - name: Tag CTHUB images to Test - run: | - oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-$PRE_RELEASE ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-$PRE_RELEASE - oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-$PRE_RELEASE ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-$PRE_RELEASE - - name: Checkout Manifest repository uses: actions/checkout@v4.1.1 with: @@ -112,6 +109,6 @@ jobs: git config --global user.email "actions@github.com" git config --global user.name "GitHub Actions" git add cthub/values-test.yaml - git commit -m "update the image tag to ${{ env.VERSION }}-${{ 
env.PRE_RELEASE }}" + git commit -m "Update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }} on Test" git push \ No newline at end of file From b995aec8cc86e0978fa281a65c29aede5647e14b Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 28 Mar 2024 13:39:42 -0700 Subject: [PATCH 098/152] only apply on prod --- .github/workflows/build-release.yaml | 124 +++++++++++++-------------- 1 file changed, 62 insertions(+), 62 deletions(-) diff --git a/.github/workflows/build-release.yaml b/.github/workflows/build-release.yaml index 1d73d2dd..3e232aaf 100644 --- a/.github/workflows/build-release.yaml +++ b/.github/workflows/build-release.yaml @@ -52,74 +52,74 @@ jobs: npm install npm run build -- --pr=${{ env.PR_NUMBER }} --env=build - deploy-on-dev: - - name: Deploy CTHUB on Dev Environment - runs-on: ubuntu-latest - timeout-minutes: 60 - needs: build - - steps: - - - name: Check out repository - uses: actions/checkout@v3 - - - name: Log in to Openshift - uses: redhat-actions/oc-login@v1.3 - with: - openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} - openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} - insecure_skip_tls_verify: true - namespace: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev - - - name: Run BCDK deployment on CTHUB Dev environment - run: | - cd .pipeline - echo "Deploying CTHUB ${{ env.RELEASE_NAME }} on Dev" - npm install - npm run deploy -- --pr=${{ env.PR_NUMBER }} --env=dev - - deploy-on-test: - - name: Deploy CTHUB on Test Environment - runs-on: ubuntu-latest - timeout-minutes: 60 - needs: deploy-on-dev - - steps: - - - name: Check out repository - uses: actions/checkout@v3 - - - name: Log in to Openshift - uses: redhat-actions/oc-login@v1.3 - with: - openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} - openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} - insecure_skip_tls_verify: true - namespace: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-test - - - name: Ask for approval for CTHUB Test deployment - uses: trstringer/manual-approval@v1.6.0 - with: - secret: ${{ github.TOKEN }} - approvers: emi-hi,tim738745,kuanfandevops,JulianForeman - minimum-approvals: 1 - issue-title: "CTHUB ${{ env.RELEASE_NAME }} Test Deployment" - - - name: Run BCDK deployment on CTHUB Test environment - run: | - cd .pipeline - echo "Deploying CTHUB ${{ env.RELEASE_NAME }} on Test" - npm install - npm run deploy -- --pr=${{ env.PR_NUMBER }} --env=test + # deploy-on-dev: + + # name: Deploy CTHUB on Dev Environment + # runs-on: ubuntu-latest + # timeout-minutes: 60 + # needs: build + + # steps: + + # - name: Check out repository + # uses: actions/checkout@v3 + + # - name: Log in to Openshift + # uses: redhat-actions/oc-login@v1.3 + # with: + # openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} + # openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} + # insecure_skip_tls_verify: true + # namespace: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev + + # - name: Run BCDK deployment on CTHUB Dev environment + # run: | + # cd .pipeline + # echo "Deploying CTHUB ${{ env.RELEASE_NAME }} on Dev" + # npm install + # npm run deploy -- --pr=${{ env.PR_NUMBER }} --env=dev + + # deploy-on-test: + + # name: Deploy CTHUB on Test Environment + # runs-on: ubuntu-latest + # timeout-minutes: 60 + # needs: deploy-on-dev + + # steps: + + # - name: Check out repository + # uses: actions/checkout@v3 + + # - name: Log in to Openshift + # uses: redhat-actions/oc-login@v1.3 + # with: + # openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} + # openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} + # insecure_skip_tls_verify: true + # 
namespace: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-test + + # - name: Ask for approval for CTHUB Test deployment + # uses: trstringer/manual-approval@v1.6.0 + # with: + # secret: ${{ github.TOKEN }} + # approvers: emi-hi,tim738745,kuanfandevops,JulianForeman + # minimum-approvals: 1 + # issue-title: "CTHUB ${{ env.RELEASE_NAME }} Test Deployment" + + # - name: Run BCDK deployment on CTHUB Test environment + # run: | + # cd .pipeline + # echo "Deploying CTHUB ${{ env.RELEASE_NAME }} on Test" + # npm install + # npm run deploy -- --pr=${{ env.PR_NUMBER }} --env=test deploy-on-prod: name: Deploy CTHUB on Prod Environment runs-on: ubuntu-latest timeout-minutes: 60 - needs: deploy-on-test + # needs: deploy-on-test steps: From 167afa035581fecc3faf79f7bd929c5465e6c503 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 28 Mar 2024 13:53:19 -0700 Subject: [PATCH 099/152] update frontend build --- .github/workflows/test-ci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-ci.yaml b/.github/workflows/test-ci.yaml index 4c46f5af..e277a94f 100644 --- a/.github/workflows/test-ci.yaml +++ b/.github/workflows/test-ci.yaml @@ -63,7 +63,7 @@ jobs: - name: Build CTHUB Frontend run: | cd openshift/templates/frontend - oc process -f ./frontend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} + oc process -f ./frontend-bc-docker.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} sleep 5s oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-frontend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} From 4bb115fa7a84482cabca30f18c886ff34200c372 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 28 Mar 2024 14:11:26 -0700 Subject: [PATCH 100/152] tag after test approval --- .github/workflows/test-ci.yaml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test-ci.yaml b/.github/workflows/test-ci.yaml index e277a94f..ddf55013 100644 --- a/.github/workflows/test-ci.yaml +++ b/.github/workflows/test-ci.yaml @@ -58,7 +58,6 @@ jobs: oc process -f ./backend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} sleep 5s oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 - oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} - name: Build CTHUB Frontend run: | @@ -66,7 +65,6 @@ jobs: oc process -f ./frontend-bc-docker.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} sleep 5s oc -n ${{ 
env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-frontend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1
-          oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}

   deploy:
@@ -85,6 +85,11 @@ jobs:
           minimum-approvals: 1
           issue-title: "CTHUB release-${{ env.VERSION }} Test Deployment"

+      - name: Tag CTHUB images to Test
+        run: |
+          oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-$PRE_RELEASE ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-$PRE_RELEASE
+          oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-$PRE_RELEASE ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-$PRE_RELEASE
+
       - name: Checkout Manifest repository
         uses: actions/checkout@v4.1.1
         with:

From 73813fc7f93ab1f90f146c1c1627d2de22cfbc97 Mon Sep 17 00:00:00 2001
From: JulianForeman <71847719+JulianForeman@users.noreply.github.com>
Date: Thu, 28 Mar 2024 15:00:35 -0700
Subject: [PATCH 101/152] Feat: Uploader & Error Refactor #166 (#188)

* Uploader refactor to just use 1 function for all services. WIP working state for arc_project_tracking

* New service files

* Moving constants to their own file, changing dictionary to enums, separating import function into function steps, updating logic to support new data formats, adding a couple of helper functions

* Cleaning up logic, adding working upload state for data fleets & charger rebates

* Working state for all uploader services refactored to use general import function

* Fixing field type mapping and adding type casts & error exceptions for field types

* Individual cell error handling with response message formatting and code cleanup

* Making uploaded data store the IDIR of who uploaded it & updating the SUVI model

* Updating README with instructions for creating a user account locally.

* Excluding a list of datasets so they can be re-implemented as we get updated models for them

* Moving dataset config to constants, updating constants import to avoid namespace clashing, updating how alerts are displayed to use existing functionality
---
 django/README.md | 13 +-
 django/api/constants.py | 591 ++++++++++++++++++
 .../commands/import_arc_project_tracking.py | 4 +-
 .../api/migrations/0020_auto_20240311_2136.py | 33 +
 .../api/migrations/0021_auto_20240326_2152.py | 18 +
 django/api/models/data_fleets.py | 8 +-
 .../speciality_use_vehicle_incentives.py | 2 +-
 django/api/services/arc_project_tracking.py | 75 ---
 django/api/services/charger_rebates.py | 51 --
 django/api/services/data_fleets.py | 83 ---
 .../services/datasheet_template_generator.py | 213 ++-----
 django/api/services/hydrogen_fleets.py | 58 --
 django/api/services/hydrogen_fueling.py | 64 --
 django/api/services/public_charging.py | 53 --
 django/api/services/scrap_it.py | 41 --
 .../speciality_use_vehicle_incentives.py | 67 --
 django/api/services/spreadsheet_uploader.py | 170 +++++
 .../api/services/spreadsheet_uploader_prep.py | 89 +++
 django/api/viewsets/upload.py | 144 ++---
 frontend/src/uploads/UploadContainer.js | 81 +--
 20 files changed, 1046 insertions(+), 812 deletions(-)
 create mode 100644 django/api/constants.py
 create mode 100644 django/api/migrations/0020_auto_20240311_2136.py
 create mode 100644 django/api/migrations/0021_auto_20240326_2152.py
 delete mode 100644 django/api/services/arc_project_tracking.py
 delete mode 100644 django/api/services/charger_rebates.py
 delete mode 100644 django/api/services/data_fleets.py
 delete mode 100644 django/api/services/hydrogen_fleets.py
 delete mode 100644 django/api/services/hydrogen_fueling.py
 delete mode 100644 django/api/services/public_charging.py
 delete mode 100644 django/api/services/scrap_it.py
 delete mode 100644 django/api/services/speciality_use_vehicle_incentives.py
 create mode 100644 django/api/services/spreadsheet_uploader.py
 create mode 100644 django/api/services/spreadsheet_uploader_prep.py

diff --git a/django/README.md b/django/README.md
index 93b83f3f..f2da1074 100644
--- a/django/README.md
+++ b/django/README.md
@@ -47,4 +47,15 @@ use the same as above to load fixtures
 docker-compose exec api bash
 python manage.py loaddata api/fixtures/0001_add_ldv_rebates_datasets.json
-etc
\ No newline at end of file
+etc
+
+## Creating a User Account
+After running all the fixtures to create the dataset dropdown list and the user_permissions table,
+you will need to run a few SQL commands to allow your account to upload documents locally.
+
+insert into public.user (create_user, idir) values ('test', 'IDIR');
+insert into user_permission (create_user, permission_id, user_id) values ('test', 1, 1);
+insert into user_permission (create_user, permission_id, user_id) values ('test', 2, 1);
+
+Only after running these will you be able to upload into CTHUB locally.
+If you're encountering errors, make sure you've run the fixture that creates the user_permission table and that you're not missing any fields in the SQL. 
\ No newline at end of file diff --git a/django/api/constants.py b/django/api/constants.py new file mode 100644 index 00000000..d461598c --- /dev/null +++ b/django/api/constants.py @@ -0,0 +1,591 @@ +import datetime +from decimal import Decimal +from enum import Enum + +import pandas as pd + +from api.models.arc_project_tracking import ARCProjectTracking +from api.models.charger_rebates import ChargerRebates +from api.models.data_fleets import DataFleets +from api.models.hydrogen_fleets import HydrogenFleets +from api.models.hydrogen_fueling import HydrogrenFueling +from api.models.ldv_rebates import LdvRebates +from api.models.public_charging import PublicCharging +from api.models.scrap_it import ScrapIt +from api.models.speciality_use_vehicle_incentives import SpecialityUseVehicleIncentives +from api.services.spreadsheet_uploader_prep import prepare_arc_project_tracking, prepare_hydrogen_fleets, prepare_hydrogen_fueling, prepare_ldv_rebates, prepare_public_charging, prepare_scrap_it, prepare_speciality_use_vehicle_incentives + + + +class ARCProjectTrackingColumns(Enum): + FUNDING_CALL = "Funding Call" + PROPONENT = "Proponent" + REF_NUMBER = "Ref #" + PROJECT_TITLE = "Project Title" + PRIMARY_LOCATION = "Primary Location" + STATUS = "Status" + ARC_FUNDING = "ARC Funding" + FUNDS_ISSUED = "Funds Issued" + START_DATE = "Start Date" + COMPLETION_DATE = "Completion Date" + TOTAL_PROJECT_VALUE = "Total Project Value" + ZEV_SUB_SECTOR = "ZEV Sub-Sector" + ON_ROAD_OFF_ROAD = "On-Road/Off-Road" + FUEL_TYPE = "Fuel Type" + PUBLICLY_ANNOUNCED = "Publicly Announced" + +class ArcProjectTrackingColumnMapping(Enum): + funding_call = "Funding Call" + proponent = "Proponent" + reference_number = "Ref #" + project_title = "Project Title" + primary_location = "Primary Location" + status = "Status" + arc_funding = "ARC Funding" + funds_issued = "Funds Issued" + start_date = "Start Date" + completion_date = "Completion Date" + total_project_value = "Total Project Value" + zev_sub_sector = "ZEV Sub-Sector" + on_road_off_road = "On-Road/Off-Road" + fuel_type = "Fuel Type" + publicly_announced = "Publicly Announced" + +class EVChargingRebatesColumns(Enum): + ORGANIZATION = "Organization" + REGION = "Region" + CITY = "City" + ADDRESS = "Address" + NUMBER_OF_FAST_CHARGING_STATIONS = "Number of Fast Charging Stations" + IN_SERVICE_DATE = "In service date" + EXPECTED_IN_SERVICE_DATE = "Expected in service date" + ANNOUNCED = "Announced?" + BC_EMPR_FUNDING_ANTICIPATED = "B.C. (EMPR) Funding Anticipated (Max $25,000 per station, excludes MOTI stations) (Not all funding paid out yet as depends on station completion)" + NOTES = "Notes" + +class EVChargingRebatesColumnMapping(Enum): + organization = "Organization" + region = "Region" + city = "City" + address = "Address" + number_of_fast_charging_stations = "Number of Fast Charging Stations" + in_service_date = "In service date" + expected_in_service_date = "Expected in service date" + announced = "Announced?" + rebate_paid = "B.C. 
(EMPR) Funding Anticipated (Max $25,000 per station, excludes MOTI stations) (Not all funding paid out yet as depends on station completion)" + notes = "Notes" + + +class DataFleetsColumns(Enum): + CURRENT_STAGE = "Current Stage" + REBATE_VALUE = "Rebate Value" + LEGAL_NAME_OF_ORGANIZATION = "Legal Name of your Organization/Fleet: " + BUSINESS_CATEGORY = "Your business Category" + CITY = "City:*" + POSTAL_CODE = "Postal Code:*" + APPLICANT_FIRST_NAME = "Applicant First Name" + APPLICANT_LAST_NAME = "Applicant Last Name" + EMAIL_ADDRESS = "Email Address:*" + FLEET_SIZE_ALL = "Fleet Size All" + FLEET_SIZE_LIGHT_DUTY = "Fleet Size Light-duty" + TOTAL_NUMBER_OF_EVS = "Total number of EVs?" + TOTAL_NUMBER_OF_LIGHT_DUTY_EVS = "Total number of light-duty EVs?" + PHEVS = "PHEV's" + EVSES = "EVSE's?" + AVERAGE_DAILY_TRAVEL_DISTANCE = "Average daily travel distance?" + WHICH_COMPONENT_ARE_YOU_APPLYING_FOR = "Which component are you applying for?*" + ESTIMATED_COST = "Estimated cost" + WHICH_TYPE_OF_CHARGER_ARE_YOU_INSTALLING = "Which type of charger are you installing?" + HOW_MANY_LEVEL_2_CHARGING_STATIONS = "How many Level 2 Charging Stations are you applying for" + HOW_MANY_LEVEL_3_DC_FAST_CHARGING_STATIONS = "How many Level 3/DC Fast Charging Stations are you applying for" + APPLICATION_FORM_FLEETS_COMPLETION_DATE_TIME = '"Application Form Fleets" completion date/time' + PRE_APPROVAL_DATE = "Pre-Approval Date" + DEADLINE = "Deadline" + APPLICATION_NUMBER = "Application Number" + POTENTIAL_REBATE = "Potential Rebate" + +class DataFleetsColumnMapping(Enum): + current_stage = "Current Stage" + rebate_value = "Rebate Value" + legal_name_of_organization_fleet = "Legal Name of your Organization/Fleet: " + business_category = "Your business Category" + city = "City:*" + postal_code = "Postal Code:*" + applicant_first_name = "Applicant First Name" + applicant_last_name = "Applicant Last Name" + email_address = "Email Address:*" + fleet_size_all = "Fleet Size All" + fleet_size_light_duty = "Fleet Size Light-duty" + total_number_of_evs = "Total number of EVs?" + total_number_of_light_duty_evs = "Total number of light-duty EVs?" + phev = "PHEV's" + evse = "EVSE's?" + average_daily_travel_distance = "Average daily travel distance?" + component_being_applied_for = "Which component are you applying for?*" + estimated_cost = "Estimated cost" + type_of_charger_being_installed = "Which type of charger are you installing?" 
+ number_of_level_2_charging_stations_being_applied_for = "How many Level 2 Charging Stations are you applying for" + number_of_level_3_dc_fast_charging_stations_being_applied_for = "How many Level 3/DC Fast Charging Stations are you applying for" + application_form_fleets_completion_date_time = '"Application Form Fleets" completion date/time' + pre_approval_date = "Pre-Approval Date" + deadline = "Deadline" + application_number = "Application Number" + potential_rebate = "Potential Rebate" + +class HydrogenFleetsColumns(Enum): + APPLICATION_NUMBER = "Application #" + FLEET_NUMBER = "Fleet #" + APPLICATION_DATE = "Application Date" + ORGANIZATION_NAME = "Organization Name" + FLEET_NAME = "Fleet Name" + STREET_ADDRESS = "Street Address" + CITY = "City" + POSTAL_CODE = "Postal Code" + VIN = "VIN" + MAKE = "Make" + MODEL = "Model" + YEAR = "Year" + PURCHASE_DATE = "Purchase Date" + DEALER_NAME = "Dealer Name" + REBATE_AMOUNT = "Rebate Amount" + +class HydrogenFleetsColumnMapping(Enum): + application_number = "Application #" + fleet_number = "Fleet #" + application_date = "Application Date" + organization_name = "Organization Name" + fleet_name = "Fleet Name" + street_address = "Street Address" + city = "City" + postal_code = "Postal Code" + vin = "VIN" + make = "Make" + model = "Model" + year = "Year" + purchase_date = "Purchase Date" + dealer_name = "Dealer Name" + rebate_amount = "Rebate Amount" + +class HydrogenFuelingColumns(Enum): + STATION_NUMBER = "Station Number" + RFP_CLOSE_DATE = "RFP Close Date" + STATION_NAME = "Station Name" + STREET_ADDRESS = "Street Address" + CITY = "City" + POSTAL_CODE = "Postal Code" + PROPONENT = "Proponent" + LOCATION_PARTNER = "Location Partner (Shell/7-11/etc.)" + CAPITAL_FUNDING_AWARDED = "Capital Funding Awarded" + OM_FUNDING_POTENTIAL = "O&M Funding Potential" + DAILY_CAPACITY = "Daily Capacity (kg/day)" + BAR_700 = "700 Bar" + BAR_350 = "350 Bar" + STATUS = "Status" + NUMBER_OF_FUELLING_POSITIONS = "# of Fuelling Positions" + OPERATIONAL_DATE = "Operational Date " + OPENING_DATE = "Opening Date" + TOTAL_CAPITAL_COST = "Total Capital Cost" + +class HydrogenFuelingColumnMapping(Enum): + station_number = "Station Number" + rfp_close_date = "RFP Close Date" + station_name = "Station Name" + street_address = "Street Address" + city = "City" + postal_code = "Postal Code" + proponent = "Proponent" + location_partner = "Location Partner (Shell/7-11/etc.)" + capital_funding_awarded = "Capital Funding Awarded" + om_funding_potential = "O&M Funding Potential" + daily_capacity = "Daily Capacity (kg/day)" + bar_700 = "700 Bar" + bar_350 = "350 Bar" + status = "Status" + number_of_fueling_positions = "# of Fuelling Positions" + operational_date = "Operational Date " + opening_date = "Opening Date" + total_capital_cost = "Total Capital Cost" + +class LDVRebatesColumns(Enum): + CASL_CONSENT = "CASL Consent" + DATE_APPROVED = "DATE APPROVED" + SUBMISSION_ID = "Submission ID" + SUBMISSION_DATE = "Submission Date" + COMPANY_NAME = "Company Name" + CITY = "City" + APPLICANT_NAME = "Applicant Name" + APPLICANT_ADDRESS_1 = "Applicant Address 1" + APPLICANT_ADDRESS_2 = "Applicant Address 2" + APPLICANT_CITY = "Applicant City" + APPLICANT_POSTAL_CODE = "Applicant Postal Code" + APPLICANT_PHONE = "Applicant Phone" + APPLICANT_EMAIL = "Applicant Email" + APPLICANT_USE = "Applicant Use" + APPLICANT_TYPE = "Applicant Type" + BUSINESS_NAME = "Business Name" + BUSINESS_NUMBER = "Business Number" + DRIVERS_LICENSE = "Drivers License" + PROVINCE = "Province" + MSRP = "MSRP" + 
OTHER_INCENTIVES = "Other Incentives" + DOCUMENT_TYPE = "Document Type" + VEHICLE = "Vehicle" + INCENTIVE_AMOUNT = "Incentive Amount" + VIN = "VIN#" + DELIVERED = "Delivered" + CONSENT_TO_CONTACT = "Consent to Contact" + +class LdvRebatesColumnMapping(Enum): + casl_consent = "CASL Consent" + date_approved = "DATE APPROVED" + submission_id = "Submission ID" + submission_date = "Submission Date" + company_name = "Company Name" + city = "City" + applicant_name = "Applicant Name" + applicant_address_1 = "Applicant Address 1" + applicant_address_2 = "Applicant Address 2" + applicant_city = "Applicant City" + applicant_postal_code = "Applicant Postal Code" + applicant_phone = "Applicant Phone" + applicant_email = "Applicant Email" + applicant_use = "Applicant Use" + applicant_type = "Applicant Type" + business_name = "Business Name" + business_number = "Business Number" + drivers_license = "Drivers License" + province = "Province" + msrp = "MSRP" + other_incentives = "Other Incentives" + document_type = "Document Type" + vehicle = "Vehicle" + incentive_amount = "Incentive Amount" + vin = "VIN#" + delivered = "Delivered" + consent_to_contact = "Consent to Contact" + + +class PublicChargingColumns(Enum): + APPLICANT_NAME = "Applicant Name" + ADDRESS = "Address" + CHARGING_STATION_INFO = "Charging Station Info" + GT_25KW_LT_50KW = ">25kW; <50kW" + GT_50KW_LT_100KW = ">50kW; <100kW" + GT_100KW = ">100kW " + LEVEL_2_UNITS_STATIONS = "Level 2 (# of units/stations)" + LEVEL_2_PORTS = "Level 2 (# of ports)" + ESTIMATED_BUDGET = "Estimated Budget" + ADJUSTED_REBATE = "Adjusted Rebate " + REBATE_PERCENT_MAXIMUM = "Rebate % Maximum " + PILOT_PROJECT = "Pilot Project (Y/N)" + REGION = "Region" + ORGANIZATION_TYPE = "Organization Type" + PROJECT_STATUS = "Project Status" + REVIEW_NUMBER = "Review Number" + PAID_OUT_REBATE_AMOUNT = "Paid out rebate amount" + +class PublicChargingColumnMapping(Enum): + applicant_name = "Applicant Name" + address = "Address" + charging_station_info = "Charging Station Info" + between_25kw_and_50kw = ">25kW; <50kW" + between_50kw_and_100kw = ">50kW; <100kW" + over_100kw = ">100kW " + level_2_units = "Level 2 (# of units/stations)" + level_2_ports = "Level 2 (# of ports)" + estimated_budget = "Estimated Budget" + adjusted_rebate = "Adjusted Rebate " + rebate_percent_maximum = "Rebate % Maximum " + pilot_project = "Pilot Project (Y/N)" + region = "Region" + organization_type = "Organization Type" + project_status = "Project Status" + review_number = "Review Number" + rebate_paid = "Paid out rebate amount" + +class ScrapItColumns(Enum): + APPROVAL_NUM = "Approval Num" + APP_RECVD_DATE = "App Recv'd Date" + COMPLETION_DATE = "Completion Date" + POSTAL_CODE = "Postal Code" + VIN = "VIN" + APP_CITY_FUEL = "App City Fuel" + INCENTIVE_TYPE = "Incentive Type" + INCENTIVE_COST = "Incentive Cost" + CHEQUE_NUMBER = "Cheque #" + BUDGET_CODE = "Budget Code" + SCRAP_DATE = "Scrap Date" + +class ScrapItColumnMapping(Enum): + approval_number = "Approval Num" + application_received_date = "App Recv'd Date" + completion_date = "Completion Date" + postal_code = "Postal Code" + vin = "VIN" + application_city_fuel = "App City Fuel" + incentive_type = "Incentive Type" + incentive_cost = "Incentive Cost" + cheque_number = "Cheque #" + budget_code = "Budget Code" + scrap_date = "Scrap Date" + +class SpecialityUseVehicleIncentiveProgramColumns(Enum): + APPROVALS = "Approvals" + DATE = "Date" + FLEET = "Fleet" + APPLICANT_NAME = "Applicant Name" + MAX_INCENTIVE_AMOUNT_REQUESTED = "Max Incentive Amount 
Requested" + CATEGORY = "Category" + INDIVIDUAL = "Individual" + INCENTIVE_PAID = "Incentive Paid" + TOTAL_PURCHASE_PRICE_PRE_TAX = "Total Purchase Price (pre-tax)" + MANUFACTURER = "Manufacturer" + MODEL = "Model" + +class SpecialityUseVehicleIncentivesColumnMapping(Enum): + approvals = "Approvals" + date = "Date" + applicant_name = "Applicant Name" + max_incentive_amount_requested = "Max Incentive Amount Requested" + category = "Category" + applicant_type = "Applicant Type" + incentive_paid = "Incentive Paid" + total_purchase_price = "Total Purchase Price (pre-tax)" + manufacturer = "Manufacturer" + model = "Model" + +FIELD_TYPES = { + 'ARC Project Tracking': { + "funding_call": str, + "proponent": str, + "reference_number": str, + "project_title": str, + "primary_location": str, + "status": str, + "arc_funding": int, + "funds_issued": int, + "start_date": str, + "completion_date": str, + "total_project_value": int, + "zev_sub_sector": str, + "on_road_off_road": str, + "fuel_type": str, + "publicly_announced": bool, + }, + 'EV Charging Rebates': { + "organization": str, + "region": str, + "city": str, + "address": str, + "number_of_fast_charging_stations": int, + "in_service_date": str, + "expected_in_service_date": str, + "announced": str, + "rebate_paid": float, + "notes": str, + }, + 'Data Fleets': { + "current_stage": str, + "rebate_value": str, + "legal_name_of_organization_fleet": str, + "business_category": str, + "city": str, + "postal_code": str, + "applicant_first_name": str, + "applicant_last_name": str, + "email_address": str, + "fleet_size_all": int, + "fleet_size_light_duty": int, + "total_number_of_evs": int, + "total_number_of_light_duty_evs": int, + "phev": int, + "evse": int, + "average_daily_travel_distance": str, + "component_being_applied_for": str, + "estimated_cost": str, + "type_of_charger_being_installed": str, + "number_of_level_2_charging_stations_being_applied_for": int, + "number_of_level_3_dc_fast_charging_stations_being_applied_for": int, + "application_form_fleets_completion_date_time": str, + "pre_approval_date": str, + "deadline": str, + "application_number": str, + "potential_rebate": str, + }, + 'Hydrogen Fleets': { + "application_number": int, + "fleet_number": int, + "application_date": str, + "organization_name": str, + "fleet_name": str, + "street_address": str, + "city": str, + "postal_code": str, + "vin": str, + "make": str, + "model": str, + "year": str, + "purchase_date": str, + "dealer_name": str, + "rebate_amount": str + }, + 'Hydrogen Fueling': { + "station_number": int, + "rfp_close_date": datetime.date, + "station_name": str, + "street_address": str, + "city": str, + "postal_code": str, + "proponent": str, + "location_partner": str, + "capital_funding_awarded": Decimal, + "om_funding_potential": Decimal, + "daily_capacity": int, + "bar_700": bool, + "bar_350": bool, + "status": str, + "number_of_fueling_positions": int, + "operational_date": datetime.date, + "opening_date": datetime.date, + "total_capital_cost": Decimal + }, + 'LDV Rebates': { + "casl_consent": bool, + "date_approved": str, + "submission_id": int, + "submission_date": str, + "company_name": str, + "city": str, + "applicant_name": str, + "applicant_address_1": str, + "applicant_address_2": str, + "applicant_city": str, + "applicant_postal_code": str, + "applicant_phone": str, + "applicant_email": str, + "applicant_use": str, + "applicant_type": str, + "business_name": str, + "business_number": str, + "drivers_license": str, + "province": str, + "msrp": Decimal, + 
"other_incentives": str, + "document_type": str, + "vehicle": str, + "incentive_amount": Decimal, + "vin": str, + "delivered": bool, + "consent_to_contact": bool, + }, + 'Public Charging': { + "applicant_name": str, + "address": str, + "charging_station_info": str, + "between_25kw_and_50kw": int, + "between_50kw_and_100kw": int, + "over_100kw": int, + "level_2_units": int, + "level_2_ports": int, + "estimated_budget": float, + "adjusted_rebate": float, + "rebate_percent_maximum": float, + "pilot_project": bool, + "region": str, + "organization_type": str, + "project_status": str, + "review_number": int, + "rebate_paid": float, + }, + 'Scrap It': { + "approval_number": int, + "application_received_date": str, + "completion_date": str, + "postal_code": str, + "vin": str, + "application_city_fuel": Decimal, + "incentive_type": str, + "incentive_cost": Decimal, + "cheque_number": str, + "budget_code": str, + "scrap_date": str, + }, + 'Specialty Use Vehicle Incentive Program': { + "approvals": str, + "date": str, + "applicant_name": str, + "max_incentive_amount_requested": int, + "category": str, + "applicant_type": str, + "incentive_paid": int, + "total_purchase_price": int, + "manufacturer": str, + "model": str, + } +} + +DATASET_CONFIG = { + 'ARC Project Tracking': { + 'model': ARCProjectTracking, + 'columns': ARCProjectTrackingColumns, + 'column_mapping': ArcProjectTrackingColumnMapping, + 'sheet_name': 'Project_Tracking', + 'preparation_functions': [prepare_arc_project_tracking] + }, + 'EV Charging Rebates': { + 'model': ChargerRebates, + 'columns': EVChargingRebatesColumns, + 'column_mapping': EVChargingRebatesColumnMapping, + 'sheet_name': 'Updated', + 'header_row': 2 + }, + 'Data Fleets': { + 'model': DataFleets, + 'columns': DataFleetsColumns, + 'column_mapping': DataFleetsColumnMapping, + 'sheet_name': 'Data Fleets' + }, + 'Hydrogen Fleets': { + 'model': HydrogenFleets, + 'columns': HydrogenFleetsColumnMapping, + 'column_mapping': HydrogenFleetsColumnMapping, + 'sheet_name': 'Fleets', + 'preparation_functions': [prepare_hydrogen_fleets] + }, + 'Hydrogen Fueling': { + 'model': HydrogrenFueling, + 'columns': HydrogenFuelingColumnMapping, + 'column_mapping': HydrogenFuelingColumnMapping, + 'sheet_name': 'Station_Tracking', + 'preparation_functions': [prepare_hydrogen_fueling] + }, + 'LDV Rebates': { + 'model': LdvRebates, + 'columns': LdvRebatesColumnMapping, + 'sheet_name': 'Raw Data', + 'preparation_functions': [prepare_ldv_rebates] + }, + 'Public Charging': { + 'model': PublicCharging, + 'columns': PublicChargingColumns, + 'column_mapping': PublicChargingColumnMapping, + 'sheet_name': 'Project_applications', + 'header_row': 2, + 'preparation_functions': [prepare_public_charging] + }, + 'Scrap It': { + 'model': ScrapIt, + 'columns': ScrapItColumns, + 'column_mapping': ScrapItColumnMapping, + 'sheet_name': 'TOP OTHER TRANSACTIONS', + 'header_row': 5, + 'preparation_functions': [prepare_scrap_it] + }, + 'Specialty Use Vehicle Incentive Program': { + 'model': SpecialityUseVehicleIncentives, + 'columns': SpecialityUseVehicleIncentiveProgramColumns, + 'column_mapping': SpecialityUseVehicleIncentivesColumnMapping, + 'sheet_name': 'Sheet1', + 'preparation_functions': [prepare_speciality_use_vehicle_incentives] + }, + } \ No newline at end of file diff --git a/django/api/management/commands/import_arc_project_tracking.py b/django/api/management/commands/import_arc_project_tracking.py index e6cd574c..5c937cb5 100644 --- a/django/api/management/commands/import_arc_project_tracking.py +++ 
b/django/api/management/commands/import_arc_project_tracking.py @@ -2,7 +2,7 @@ from os import path from django.core.management import BaseCommand -from api.services.arc_project_tracking import import_from_xls +#from django.api.services.spreadsheet_uploader_prep import import_from_xls class Command(BaseCommand): @@ -34,7 +34,7 @@ def handle(self, *args, **options): ) )) return False - import_from_xls(xls_file) + #import_from_xls(xls_file) self.stdout.write(self.style.SUCCESS( 'Import complete' )) diff --git a/django/api/migrations/0020_auto_20240311_2136.py b/django/api/migrations/0020_auto_20240311_2136.py new file mode 100644 index 00000000..4a943f82 --- /dev/null +++ b/django/api/migrations/0020_auto_20240311_2136.py @@ -0,0 +1,33 @@ +# Generated by Django 3.1.6 on 2024-03-11 21:36 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0019_auto_20240223_1820'), + ] + + operations = [ + migrations.RenameField( + model_name='datafleets', + old_name='component_being_applyied_for', + new_name='component_being_applied_for', + ), + migrations.RenameField( + model_name='datafleets', + old_name='number_of_Level_2_Charging_Stations_being_applying_for', + new_name='number_of_level_2_charging_stations_being_applied_for', + ), + migrations.RenameField( + model_name='datafleets', + old_name='number_of_level_3_dc_fast_charging_stations_being_applying_for', + new_name='number_of_level_3_dc_fast_charging_stations_being_applied_for', + ), + migrations.RenameField( + model_name='datafleets', + old_name='type_of_charger_being_installing', + new_name='type_of_charger_being_installed', + ), + ] diff --git a/django/api/migrations/0021_auto_20240326_2152.py b/django/api/migrations/0021_auto_20240326_2152.py new file mode 100644 index 00000000..7419b057 --- /dev/null +++ b/django/api/migrations/0021_auto_20240326_2152.py @@ -0,0 +1,18 @@ +# Generated by Django 3.1.6 on 2024-03-26 21:52 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0020_auto_20240311_2136'), + ] + + operations = [ + migrations.AlterField( + model_name='specialityusevehicleincentives', + name='date', + field=models.DateField(blank=True, max_length=20, null=True), + ), + ] diff --git a/django/api/models/data_fleets.py b/django/api/models/data_fleets.py index 586fcdc1..c72b8fb5 100644 --- a/django/api/models/data_fleets.py +++ b/django/api/models/data_fleets.py @@ -103,7 +103,7 @@ class DataFleets(Auditable): unique=False ) - component_being_applyied_for = models.CharField( + component_being_applied_for = models.CharField( blank=True, null=True, max_length=250, @@ -117,19 +117,19 @@ class DataFleets(Auditable): unique=False ) - type_of_charger_being_installing = models.CharField( + type_of_charger_being_installed = models.CharField( blank=True, null=True, max_length=250, unique=False ) - number_of_Level_2_Charging_Stations_being_applying_for = models.IntegerField( + number_of_level_2_charging_stations_being_applied_for = models.IntegerField( blank=True, null=True ) - number_of_level_3_dc_fast_charging_stations_being_applying_for = models.IntegerField( + number_of_level_3_dc_fast_charging_stations_being_applied_for = models.IntegerField( blank=True, null=True ) diff --git a/django/api/models/speciality_use_vehicle_incentives.py b/django/api/models/speciality_use_vehicle_incentives.py index b2c5c916..175e052e 100644 --- a/django/api/models/speciality_use_vehicle_incentives.py +++ 
b/django/api/models/speciality_use_vehicle_incentives.py @@ -9,7 +9,7 @@ class SpecialityUseVehicleIncentives(Auditable): null=True, max_length=20 ) - date = models.CharField( + date = models.DateField( max_length=20, null=True, blank=True diff --git a/django/api/services/arc_project_tracking.py b/django/api/services/arc_project_tracking.py deleted file mode 100644 index a45a221b..00000000 --- a/django/api/services/arc_project_tracking.py +++ /dev/null @@ -1,75 +0,0 @@ -import pandas as pd -from api.models.arc_project_tracking import ARCProjectTracking - - -def trim_all_columns(df): - """ - Trim whitespace from ends of each value across all series in dataframe - """ - trim_strings = lambda x: x.strip() if isinstance(x, str) else x - return df.applymap(trim_strings) - - -def import_from_xls(excel_file): - row_count = 1 #starting count, ie headers - df = pd.read_excel(excel_file, 'Project_Tracking') - - df.drop(df.columns.difference([ - "Funding Call", - "Proponent", - "Ref #", - "Project Title", - "Primary Location", - "Status", - "ARC Funding", - "Funds Issued", - "Start Date", - "Completion Date", - "Total Project Value", - "ZEV Sub-Sector", - "On-Road/Off-Road", - "Fuel Type", - "Publicly Announced" - ]), axis=1, inplace=True) - - df['Publicly Announced'].replace( - to_replace=['No', 'N'], - value=False, - inplace=True - ) - - df['Publicly Announced'].replace( - to_replace=['Yes', 'Y'], - value=False, - inplace=True - ) - - df = trim_all_columns(df) - df = df.applymap(lambda s: s.upper() if type(s) == str else s) - - df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) - - try: - for _, row in df.iterrows(): - row_count += 1 - if row["Publicly Announced"] == '': continue # Skip rows without this field - ARCProjectTracking.objects.create( - funding_call=row["Funding Call"], - proponent=row["Proponent"], - reference_number=row["Ref #"], - project_title=row["Project Title"], - primary_location=row["Primary Location"], - status=row["Status"], - arc_funding=row["ARC Funding"], - funds_issued=row["Funds Issued"], - start_date=row["Start Date"], - completion_date=row["Completion Date"], - total_project_value=row["Total Project Value"], - zev_sub_sector=row["ZEV Sub-Sector"], - on_road_off_road=row["On-Road/Off-Road"], - fuel_type=row["Fuel Type"], - publicly_announced=row["Publicly Announced"] - ) - except Exception as error: - return (error, 'data', row_count) - return True diff --git a/django/api/services/charger_rebates.py b/django/api/services/charger_rebates.py deleted file mode 100644 index 189ddcf7..00000000 --- a/django/api/services/charger_rebates.py +++ /dev/null @@ -1,51 +0,0 @@ -import pandas as pd -from api.models.charger_rebates import ChargerRebates - - -def trim_all_columns(df): - """ - Trim whitespace from ends of each value across all series in dataframe - """ - trim_strings = lambda x: x.strip() if isinstance(x, str) else x - return df.applymap(trim_strings) - - -def import_from_xls(excel_file): - row_count = 3 ##starts at 3 because of the headers! - df = pd.read_excel(excel_file, 'Updated', header=2) - df.drop(df.columns.difference([ - "Organization", - "MLA", - "Region", - "City", - "Address", - "Number of Fast Charging Stations", - "In service date", - "Expected in service date", - "Announced?", - "B.C. 
(EMPR) Funding Anticipated (Max $25,000 per station, excludes MOTI stations) (Not all funding paid out yet as depends on station completion)", - "Notes", - ]), axis=1, inplace=True) - df = trim_all_columns(df) - df = df.applymap(lambda s: s.upper() if type(s) == str else s) - df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) - try: - for _, row in df.iterrows(): - row_count += 1 - ChargerRebates.objects.create( - organization=row["Organization"], - region=row["Region"], - city=row["City"], - address=row["Address"], - number_of_fast_charging_stations=row["Number of Fast Charging Stations"], - in_service_date=row["In service date"], - expected_in_service_date=row["Expected in service date"], - announced=row["Announced?"], - rebate_paid=row["B.C. (EMPR) Funding Anticipated (Max $25,000 per station, excludes MOTI stations) (Not all funding paid out yet as depends on station completion)"], - notes=row["Notes"] - ) - except Exception as error: - if isinstance(error, list): - error = error[0] - return (error,'data', row_count) - return True diff --git a/django/api/services/data_fleets.py b/django/api/services/data_fleets.py deleted file mode 100644 index d416c6c3..00000000 --- a/django/api/services/data_fleets.py +++ /dev/null @@ -1,83 +0,0 @@ -import pandas as pd -from api.models.data_fleets import DataFleets - - -def trim_all_columns(df): - """ - Trim whitespace from ends of each value across all series in dataframe - """ - trim_strings = lambda x: x.strip() if isinstance(x, str) else x - return df.applymap(trim_strings) - - -def import_from_xls(excel_file): - row_count = 1 - df = pd.read_excel(excel_file, 'Data Fleets') - df.drop(df.columns.difference([ - "Current Stage", - "Rebate Value", - "Legal Name of your Organization/Fleet: ", - "Your business Category", - "City:*", - "Postal Code:*", - "Applicant First Name", - "Applicant Last Name", - "Email Address:*", - "Fleet Size All", - "Fleet Size Light-duty", - "Total number of EVs?", - "Total number of light-duty EVs?", - "PHEV's", - "EVSE's?", - "Average daily travel distance?", - "Which component are you applying for?*", - "Estimated cost", - "Which type of charger are you installing?", - "How many Level 2 Charging Stations are you applying for", - "How many Level 3/DC Fast Charging Stations are you applying for", - '"Application Form Fleets" completion date/time', - "Pre-Approval Date", - "Deadline", - "Application Number", - "Potential Rebate" - ]), axis=1, inplace=True) - df = trim_all_columns(df) - df = df.applymap(lambda s: s.upper() if type(s) == str else s) - - # df.fillna('') - df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) - - for _, row in df.iterrows(): - row_count += 1 - try: - DataFleets.objects.create( - current_stage=row["Current Stage"], - rebate_value=row["Rebate Value"], - legal_name_of_organization_fleet =row["Legal Name of your Organization/Fleet: "], - business_category=row["Your business Category"], - city=row["City:*"], - postal_code=row["Postal Code:*"], - applicant_first_name=row["Applicant First Name"], - applicant_last_name =row["Applicant Last Name"], - email_address=row["Email Address:*"], - fleet_size_all=row["Fleet Size All"], - fleet_size_light_duty=row["Fleet Size Light-duty"], - total_number_of_evs=row["Total number of EVs?"], - total_number_of_light_duty_evs=row["Total number of light-duty EVs?"], - phev=row["PHEV's"], - evse=row["EVSE's?"], - average_daily_travel_distance=row["Average daily travel distance?"], - 
component_being_applyied_for=row["Which component are you applying for?*"], - estimated_cost=row["Estimated cost"], - type_of_charger_being_installing=row["Which type of charger are you installing?"], - number_of_Level_2_Charging_Stations_being_applying_for=row["How many Level 2 Charging Stations are you applying for"], - number_of_level_3_dc_fast_charging_stations_being_applying_for=row["How many Level 3/DC Fast Charging Stations are you applying for"], - application_form_fleets_completion_date_time=row['"Application Form Fleets" completion date/time'], - pre_approval_date=row["Pre-Approval Date"], - deadline=row["Deadline"], - application_number=row["Application Number"], - potential_rebate=row["Potential Rebate"] - ) - except Exception as error: - return (error,'data',row_count) - return True diff --git a/django/api/services/datasheet_template_generator.py b/django/api/services/datasheet_template_generator.py index 8e9495f8..ea8f3402 100644 --- a/django/api/services/datasheet_template_generator.py +++ b/django/api/services/datasheet_template_generator.py @@ -1,196 +1,55 @@ import pandas as pd from io import BytesIO - -DATASET_COLUMNS = { - 'ARC Project Tracking': [ - "Funding Call", "Proponent", "Ref #", "Project Title", "Primary Location", - "Status", "ARC Funding", "Funds Issued", "Start Date", "Completion Date", - "Total Project Value", "ZEV Sub-Sector", "On-Road/Off-Road", "Fuel Type", - "Publicly Announced", - ], - # Charger Rebates - 'EV Charging Rebates': [ - "Organization", "MLA", "Region", "City", - "Address", "Number of Fast Charging Stations", "In service date", - "Expected in service date", "Announced?", - "B.C. (EMPR) Funding Anticipated (Max $25,000 per station, excludes MOTI stations) (Not all funding paid out yet as depends on station completion)", - "Notes", - ], - 'Data Fleets': [ - "Current Stage", "Rebate Value", "Legal Name of your Organization/Fleet: ", - "Your business Category", "City:*", "Postal Code:*", "Applicant First Name", - "Applicant Last Name", "Email Address:*", "Fleet Size All", - "Fleet Size Light-duty", "Total number of EVs?", "Total number of light-duty EVs?", - "PHEV's", "EVSE's?", "Average daily travel distance?", "Which component are you applying for?*", - "Estimated cost", "Which type of charger are you installing?", - "How many Level 2 Charging Stations are you applying for", - "How many Level 3/DC Fast Charging Stations are you applying for", - '"Application Form Fleets" completion date/time', - "Pre-Approval Date", "Deadline", "Application Number", "Potential Rebate" - ], - # Hydrogen Fleets - 'Hydrogen Fleets': [ - "Application #", - "Fleet #", - "Application Date", - "Organization Name", - "Fleet Name", - "Street Address", - "City", - "Postal Code", - "VIN", - "Make", - "Model", - "Year", - "Purchase Date", - "Dealer Name", - "Rebate Amount" - ], - # Hydrogen Fueling - 'Hydrogen Fueling': [ - "Station Number", - "RFP Close Date", - "Station Name", - "Street Address", - "City", - "Postal Code", - "Proponent", - "Location Partner (Shell/7-11/etc.)", - "Capital Funding Awarded", - "O&M Funding Potential", - "Daily Capacity (kg/day)", - "700 Bar", - "350 Bar", - "Status", - "# of Fuelling Positions", - "Operational Date", - "Opening Date", - "Total Capital Cost", - ], - # LDV Rebates - 'LDV Rebates': [ - "CASL Consent", - "DATE APPROVED", - "Submission ID", - "Submission Date", - "Company Name", - "City", - "Applicant Name", - "Applicant Address 1", - "Applicant Address 2", - "Applicant City", - "Applicant Postal Code", - "Applicant Phone", - 
"Applicant Email", - "Applicant Use", - "Applicant Type", - "Business Name", - "Business Number", - "Drivers License", - "Province", - "MSRP", - "Other Incentives", - "Document Type", - "Vehicle", - "Incentive Amount", - "VIN#", - "Delivered", - "Consent to Contact" - ], - # Public Charging - 'Public Charging': [ - "Applicant Name", - "Address", - "Charging Station Info", - ">25kW; <50kW", - ">50kW; <100kW", - ">100kW", - "Level 2 (# of units/stations)", - "Level 2 (# of ports)", - "Estimated Budget", - "Adjusted Rebate", - "Rebate % Maximum", - "Pilot Project (Y/N)", - "Region", - "Organization Type", - "Project Status", - "Review Number", - "Paid out rebate amount" - ], - # Scrap It - 'Scrap It': [ - "Approval Num", - "App Recv'd Date", - "Completion Date", - "Postal Code", - "VIN", - "App City Fuel", - "Incentive Type", - "Incentive Cost", - "Cheque #", - "Budget Code", - "Scrap Date" - ], - # Specialty Use Vehicle Incetives - 'Specialty Use Vehicle Incentive Program': [ - "Approvals", - "Date", - "Applicant Name", - "Max Incentive Amount Requested", - "Category", - "Fleet", - "Individual", - "Incentive Paid", - "Total Purchase Price (pre-tax)", - "Manufacturer", - "Model", - ], - - -} +from api.constants import * def generate_template(dataset_name): """ Generates an Excel spreadsheet template for a specified dataset. """ - if dataset_name not in DATASET_COLUMNS: + dataset_column_enum_map = { + 'ARC Project Tracking': ARCProjectTrackingColumns, + 'EV Charging Rebates': EVChargingRebatesColumns, + 'Data Fleets': DataFleetsColumns, + 'Hydrogen Fleets': HydrogenFleetsColumns, + 'Hydrogen Fueling': HydrogenFuelingColumns, + 'LDV Rebates': LDVRebatesColumns, + 'Public Charging': PublicChargingColumns, + 'Scrap It': ScrapItColumns, + 'Specialty Use Vehicle Incentive Program': SpecialityUseVehicleIncentiveProgramColumns, + } + + if dataset_name not in dataset_column_enum_map: raise ValueError(f"Dataset '{dataset_name}' is not supported.") - columns = DATASET_COLUMNS[dataset_name] + columns = [column.value for column in dataset_column_enum_map[dataset_name]] + df = pd.DataFrame(columns=columns) excel_buffer = BytesIO() with pd.ExcelWriter(excel_buffer, engine='xlsxwriter') as writer: - sheet_name = dataset_name + sheet_name = dataset_name.replace(" ", "_") start_row = 0 - if dataset_name == 'ARC Project Tracking': - sheet_name = 'Project_Tracking' - - if dataset_name == 'Specialty Use Vehicle Incentive Program': - sheet_name= 'Sheet1' - - if dataset_name == 'Public Charging': - sheet_name= 'Project_applications' - start_row = 2 - - if dataset_name == 'LDV Rebates': - sheet_name='Raw Data' - - if dataset_name == 'EV Charging Rebates': - sheet_name = 'Updated' - start_row = 2 - - if dataset_name == 'Hydrogen Fueling': - sheet_name = 'Station_Tracking' - - if dataset_name == 'Hydrogen Fleets': - sheet_name = 'Fleets' - if dataset_name == 'Scrap It': - sheet_name = 'TOP OTHER TRANSACTIONS' - start_row = 5 + custom_sheet_names = { + 'ARC Project Tracking': 'Project_Tracking', + 'Specialty Use Vehicle Incentive Program': 'Sheet1', + 'Public Charging': 'Project_applications', + 'LDV Rebates': 'Raw Data', + 'EV Charging Rebates': 'Updated', + 'Hydrogen Fueling': 'Station_Tracking', + 'Hydrogen Fleets': 'Fleets', + 'Scrap It': 'TOP OTHER TRANSACTIONS', + } + custom_start_rows = { + 'Public Charging': 2, + 'EV Charging Rebates': 2, + 'Scrap It': 5, + } + + sheet_name = custom_sheet_names.get(dataset_name, sheet_name) + start_row = custom_start_rows.get(dataset_name, start_row) df.to_excel(writer, 
sheet_name=sheet_name, startrow=start_row, index=False) excel_buffer.seek(0) - return excel_buffer + return excel_buffer \ No newline at end of file diff --git a/django/api/services/hydrogen_fleets.py b/django/api/services/hydrogen_fleets.py deleted file mode 100644 index 874060b6..00000000 --- a/django/api/services/hydrogen_fleets.py +++ /dev/null @@ -1,58 +0,0 @@ -import pandas as pd -from api.models.hydrogen_fleets import HydrogenFleets - - -def trim_all_columns(df): - """ - Trim whitespace from ends of each value across all series in dataframe - """ - trim_strings = lambda x: x.strip() if isinstance(x, str) else x - return df.applymap(trim_strings) - - -def import_from_xls(excel_file): - row_count = 1 - df = pd.read_excel(excel_file, 'Fleets') - df.drop(df.columns.difference([ - "Application #", - "Fleet #", - "Application Date", - "Organization Name", - "Fleet Name", - "Street Address", - "City", - "Postal Code", - "VIN", - "Make", - "Model", - "Year", - "Purchase Date", - "Dealer Name", - "Rebate Amount" - ]), axis=1, inplace=True) - df = trim_all_columns(df) - df = df.applymap(lambda s: s.upper() if type(s) == str else s) - df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) - try: - for _, row in df.iterrows(): - row_count += 1 - HydrogenFleets.objects.create( - application_number=row["Application #"], - fleet_number=row["Fleet #"], - application_date=row["Application Date"], - organization_name=row["Organization Name"], - fleet_name=row["Fleet Name"], - street_address=row["Street Address"], - city=row["City"], - postal_code=row["Postal Code"], - vin=row["VIN"], - make=row["Make"], - model=row["Model"], - year=row["Year"], - purchase_date=row["Purchase Date"], - dealer_name=row["Dealer Name"], - rebate_amount=row["Rebate Amount"] - ) - except Exception as error: - return (error,'data',row_count) - return True diff --git a/django/api/services/hydrogen_fueling.py b/django/api/services/hydrogen_fueling.py deleted file mode 100644 index b53deb5d..00000000 --- a/django/api/services/hydrogen_fueling.py +++ /dev/null @@ -1,64 +0,0 @@ -import pandas as pd -from api.models.hydrogen_fueling import HydrogrenFueling - - -def trim_all_columns(df): - """ - Trim whitespace from ends of each value across all series in dataframe - """ - trim_strings = lambda x: x.strip() if isinstance(x, str) else x - return df.applymap(trim_strings) - - -def import_from_xls(excel_file): - row_count = 1 - df = pd.read_excel(excel_file, 'Station_Tracking') - df = trim_all_columns(df) - df = df.applymap(lambda s: s.upper() if type(s) == str else s) - df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) - df['700 Bar'].replace( - to_replace=['NO', 'N'], - value=False, - inplace=True - ) - df['700 Bar'].replace( - to_replace=['YES', 'Y'], - value=True, - inplace=True - ) - df['350 Bar'].replace( - to_replace=['NO', 'N'], - value=False, - inplace=True - ) - df['350 Bar'].replace( - to_replace=['YES', 'Y'], - value=True, - inplace=True - ) - try: - for _, row in df.iterrows(): - row_count +=1 - HydrogrenFueling.objects.create( - station_number=row["Station Number"], - rfp_close_date=row["RFP Close Date"], - station_name=row["Station Name"], - street_address=row["Street Address"], - city=row["City"], - postal_code=row["Postal Code"], - proponent=row["Proponent"], - location_partner=row["Location Partner (Shell/7-11/etc.)"], - capital_funding_awarded=row["Capital Funding Awarded"], - om_funding_potential=row["O&M Funding Potential"], - daily_capacity=row["Daily 
Capacity (kg/day)"], - bar_700=row["700 Bar"], - bar_350=row["350 Bar"], - status=["Status"], - number_of_fueling_positions=row["# of Fuelling Positions"], - operational_date=row["Operational Date "], - opening_date=row["Opening Date"], - total_capital_cost=row["Total Capital Cost"] - ) - except Exception as error: - return (error,'data',row_count) - return True diff --git a/django/api/services/public_charging.py b/django/api/services/public_charging.py deleted file mode 100644 index 79be8c33..00000000 --- a/django/api/services/public_charging.py +++ /dev/null @@ -1,53 +0,0 @@ -import pandas as pd -from api.models.public_charging import PublicCharging - - -def trim_all_columns(df): - """ - Trim whitespace from ends of each value across all series in dataframe - """ - trim_strings = lambda x: x.strip() if isinstance(x, str) else x - return df.applymap(trim_strings) - - -def import_from_xls(excel_file): - row_count = 3 - df = pd.read_excel(excel_file, 'Project_applications', header=2) - df = trim_all_columns(df) - df = df.applymap(lambda s: s.upper() if type(s) == str else s) - df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) - df['Pilot Project (Y/N)'].replace( - to_replace=['NO', 'N'], - value=False, - inplace=True - ) - df['Pilot Project (Y/N)'].replace( - to_replace=['YES', 'Y'], - value=True, - inplace=True - ) - try: - for _, row in df.iterrows(): - row_count += 1 - PublicCharging.objects.create( - applicant_name=row["Applicant Name"], - address=row["Address"], - charging_station_info=row["Charging Station Info"], - between_25kw_and_50kw=row[">25kW; <50kW"], - between_50kw_and_100kw=row[">50kW; <100kW"], - over_100kw=row[">100kW "], - level_2_units=row["Level 2 (# of units/stations)"], - level_2_ports=row["Level 2 (# of ports)"], - estimated_budget=row["Estimated Budget"], - adjusted_rebate=row["Adjusted Rebate "], - rebate_percent_maximum=row["Rebate % Maximum "], - pilot_project=row["Pilot Project (Y/N)"], - region=row["Region"], - organization_type=row["Organization Type"], - project_status=row["Project Status"], - review_number=row["Review Number"], - rebate_paid=row["Paid out rebate amount"], - ) - except Exception as error: - return (error,'data',row_count) - return True diff --git a/django/api/services/scrap_it.py b/django/api/services/scrap_it.py deleted file mode 100644 index 4f9bc669..00000000 --- a/django/api/services/scrap_it.py +++ /dev/null @@ -1,41 +0,0 @@ -import pandas as pd -from api.models.scrap_it import ScrapIt - - -def trim_all_columns(df): - """ - Trim whitespace from ends of each value across all series in dataframe - """ - trim_strings = lambda x: x.strip() if isinstance(x, str) else x - return df.applymap(trim_strings) - - -def import_from_xls(excel_file): - row_count = 6 - df = pd.read_excel(excel_file, 'TOP OTHER TRANSACTIONS', header=5) - - df = trim_all_columns(df) - df = df.applymap(lambda s: s.upper() if type(s) == str else s) - - df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) - - try: - for _, row in df.iterrows(): - row_count += 1 - if row["VIN"] == '': continue # Skip rows without this field - ScrapIt.objects.create( - approval_number=row["Approval Num"], - application_received_date=row["App Recv'd Date"], - completion_date=row["Completion Date"], - postal_code=row["Postal Code"], - vin=row["VIN"], - application_city_fuel=row["App City Fuel"], - incentive_type=row["Incentive Type"], - incentive_cost=row["Incentive Cost"], - cheque_number=row["Cheque #"], - budget_code=row["Budget 
Code"], - scrap_date=row["Scrap Date"] - ) - except Exception as error: - return (error,'data',row_count) - return True diff --git a/django/api/services/speciality_use_vehicle_incentives.py b/django/api/services/speciality_use_vehicle_incentives.py deleted file mode 100644 index fa645eef..00000000 --- a/django/api/services/speciality_use_vehicle_incentives.py +++ /dev/null @@ -1,67 +0,0 @@ -import pandas as pd -import numpy as np -from api.models.speciality_use_vehicle_incentives import \ - SpecialityUseVehicleIncentives - - -def trim_all_columns(df): - """ - Trim whitespace from ends of each value across all series in dataframe - """ - trim_strings = lambda x: x.strip() if isinstance(x, str) else x - return df.applymap(trim_strings) - - -def applicant_type(row): - if isinstance((row["Fleet"]), str): - return 'Fleet' - elif isinstance((row["Individual"]), str): - return 'Individual' - else: - return '' - - -def import_from_xls(excel_file): - row_count = 1 - df = pd.read_excel(excel_file, 'Sheet1') - df.drop(df.columns.difference([ - "Approvals", - "Date", - "Applicant Name", - "Max Incentive Amount Requested", - "Category", - "Fleet", - "Individual", - "Incentive Paid", - "Total Purchase Price (pre-tax)", - "Manufacturer", - "Model", - ]), axis=1, inplace=True) - df = trim_all_columns(df) - ## find the columns that contain numbers and replace blank balues with 0 - num_columns = df.select_dtypes(include=np.number).columns.tolist() - num_columns.remove('Applicant Name') - df[num_columns] = df[num_columns].fillna(0) - ## all other columns get a null value - df = df.fillna('') - df = df.applymap(lambda s: s.upper() if type(s) == str else s) - df['Applicant Type'] = df.apply(lambda row: applicant_type(row), axis=1) - - try: - for _, row in df.iterrows(): - row_count += 1 - SpecialityUseVehicleIncentives.objects.create( - approvals=row["Approvals"], - date=row["Date"], - applicant_name=row["Applicant Name"], - max_incentive_amount_requested=row["Max Incentive Amount Requested"], - category=row["Category"], - applicant_type=row["Applicant Type"], - incentive_paid=row["Incentive Paid"], - total_purchase_price=row["Total Purchase Price (pre-tax)"], - manufacturer=row["Manufacturer"], - model=row["Model"], - ) - except Exception as error: - return (error,'data',row_count) - return True diff --git a/django/api/services/spreadsheet_uploader.py b/django/api/services/spreadsheet_uploader.py new file mode 100644 index 00000000..f350c7b5 --- /dev/null +++ b/django/api/services/spreadsheet_uploader.py @@ -0,0 +1,170 @@ +from decimal import Decimal, ROUND_HALF_UP +import pandas as pd +import traceback +from django.db import transaction + +def get_field_default(model, field): + field = model._meta.get_field(field) + + if callable(field.default): + return field.default() + return field.default + +def get_nullable_fields(model): + nullable_fields = {} + + for field in model._meta.get_fields(): + if hasattr(field, 'null') and field.null: + nullable_fields[field.name] = True + return nullable_fields + +def trim_all_columns(df): + trim_strings = lambda x: x.strip() if isinstance(x, str) else x + return df.applymap(trim_strings) + +def extract_data(excel_file, sheet_name, header_row): + try: + df = pd.read_excel(excel_file, sheet_name, header=header_row) + df = trim_all_columns(df) + return df + except Exception as e: + traceback.print_exc() + raise + + +def transform_data(df, dataset_columns, column_mapping_enum, preparation_functions=[], validation_functions=[]): + required_columns = [col.value for col in 
dataset_columns]
+
+    df = df[[col for col in df.columns if col in required_columns]]
+
+    missing_columns = [col for col in required_columns if col not in df.columns]
+    if missing_columns:
+        raise ValueError(f"Missing columns: {', '.join(missing_columns)}")
+
+    for prep_func in preparation_functions:
+        df = prep_func(df)
+
+    for validate in validation_functions:
+        df = validate(df)
+
+    column_mapping = {col.name: col.value for col in column_mapping_enum}
+    # Need to use the inverse (keys) for mapping the columns to what the database expects in order to use enums
+    inverse_column_mapping = {v: k for k, v in column_mapping.items()}
+    df.rename(columns=inverse_column_mapping, inplace=True)
+
+    return df
+
+@transaction.atomic
+def load_data(df, model, field_types, replace_data, user):
+    row_count = 0
+    records_inserted = 0
+    errors = []
+    nullable_fields = get_nullable_fields(model)
+
+    if replace_data:
+        model.objects.all().delete()
+
+    for index, row in df.iterrows():
+        row_dict = row.to_dict()
+        valid_row = True
+
+        for column, value in row_dict.items():
+
+            expected_type = field_types.get(column)
+            is_nullable = column in nullable_fields
+
+            if pd.isna(value) or value == '':
+                if is_nullable:
+                    row_dict[column] = None
+                else:
+                    row_dict[column] = get_field_default(model, column)
+            elif expected_type == float:
+                if isinstance(value, int):
+                    row_dict[column] = float(value)
+                elif isinstance(value, float):
+                    row_dict[column] = round(value, 2)
+                elif isinstance(value, str) and value.strip() != '':
+                    try:
+                        float_value = float(value)
+                        row_dict[column] = round(float_value, 2)
+                    except ValueError:
+                        errors.append(f"Row {index + 1}: Unable to convert value to float for '{column}'. Value was '{value}'.")
+                        valid_row = False
+                        continue
+            elif expected_type == int and ((isinstance(value, str) and value.strip() != '') or isinstance(value, float)):
+                try:
+                    row_dict[column] = int(value)
+                except ValueError:
+                    errors.append(f"Row {index + 1}: Unable to convert value to int for '{column}'. Value was '{value}'.")
+                    valid_row = False
+                    continue
+            elif expected_type == Decimal and ((isinstance(value, int) or isinstance(value, float))):
+                try:
+                    decimal_value = Decimal(value).quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)
+                    row_dict[column] = decimal_value
+                except ValueError:
+                    errors.append(f"Row {index + 1}: Unable to convert value to Decimal for '{column}'. Value was '{value}'.")
+                    valid_row = False
+                    continue
+            elif not isinstance(value, expected_type) and value != '':
+                errors.append(f"Row {index + 1}: Incorrect type for '{column}'. Expected {expected_type.__name__}, got {type(value).__name__}.")
+                valid_row = False
+                continue
+
+        if valid_row:
+            try:
+                row_dict['update_user'] = user
+                model_instance = model(**row_dict)
+                model_instance.full_clean()
+                model_instance.save()
+                records_inserted += 1
+            except Exception as e:
+                errors.append(f"Row {index + 1}: {e}")
+
+        row_count += 1
+
+    return {
+        "row_count": row_count,
+        "records_inserted": records_inserted,
+        "errors": errors
+    }
+
+
+def import_from_xls(excel_file, sheet_name, model, dataset_columns, header_row, column_mapping_enum, field_types, replace_data, user, preparation_functions=[], validation_functions=[]):
+    try:
+        df = extract_data(excel_file, sheet_name, header_row)
+        df = transform_data(df, dataset_columns, column_mapping_enum, preparation_functions, validation_functions)
+        result = load_data(df, model, field_types, replace_data, user)
+
+        total_rows = result['row_count']
+        inserted_rows = result['records_inserted']
+
+        if result['errors'] and result['records_inserted'] > 0:
+            return {
+                "success": True,
+                "message": f"{inserted_rows} out of {total_rows} rows successfully inserted with some errors encountered.",
+                "errors": result['errors'],
+                "rows_processed": result['row_count']
+            }
+        elif len(result['errors']) > 0:
+            return {
+                "success": False,
+                "message": "Errors encountered with no successful insertions.",
+                "errors": result['errors'],
+                "rows_processed": result['row_count']
+            }
+        else:
+            return {
+                "success": True,
+                "message": f"All {inserted_rows} records successfully inserted out of {total_rows}.",
+                "rows_processed": result['row_count']
+            }
+    except Exception as error:
+        traceback.print_exc()
+        return {
+            "success": False,
+            "message": f"Unexpected error: {str(error)}",
+            "errors": [str(error)],
+            "rows_processed": 0
+        }
+
diff --git a/django/api/services/spreadsheet_uploader_prep.py b/django/api/services/spreadsheet_uploader_prep.py
new file mode 100644
index 00000000..5395fa9c
--- /dev/null
+++ b/django/api/services/spreadsheet_uploader_prep.py
@@ -0,0 +1,89 @@
+from decimal import Decimal
+import numpy as np
+import pandas as pd
+
+def prepare_arc_project_tracking(df):
+    df['Publicly Announced'] = df['Publicly Announced'].replace({'No': False, 'N': False, 'Yes': True, 'Y': True})
+    return df
+
+def prepare_hydrogen_fleets(df):
+    df = df.applymap(lambda s: s.upper() if type(s) == str else s)
+    df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna(''))
+    return df
+
+def prepare_hydrogen_fueling(df):
+
+    decimal_columns = ["Capital Funding Awarded", "O&M Funding Potential"]
+
+    for column in ['700 Bar', '350 Bar']:
+        df[column].replace(to_replace=['NO', 'N'], value=False, inplace=True)
+        df[column].replace(to_replace=['YES', 'Y'], value=True, inplace=True)
+
+    for field in decimal_columns:
+        try:
+            df[field] = df[field].apply(lambda x: round(Decimal(x), 2) if pd.notnull(x) else None)
+        except Exception:
+            print(f'{field} Should be a header row')
+    return df
+
+def prepare_ldv_rebates(df):
+    replacements = {
+        "CASL Consent": {'YES': True, 'Y': True, 'NO': False, 'N': False},
+        "Delivered": {'YES': True, 'Y': True, 'NO': False, 'N': False, 'OEM': False, 'INCENTIVE_FUNDS_AVAILABLE': False},
+        "Consent to Contact": {'YES': True, 'Y': True, 'NO': False, 'N': False}
+    }
+
+    for column, replacement_dict in replacements.items():
+        df[column].replace(replacement_dict, inplace=True)
+
+    df = df.fillna('')
+
+    return df
+
+def prepare_public_charging(df):
+
+    df = df.applymap(lambda s: s.upper() if type(s) == str else s)
+
+    df = df.apply(lambda x: 
x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) + + df['Pilot Project (Y/N)'].replace(to_replace=['NO', 'N'], value=False, inplace=True) + df['Pilot Project (Y/N)'].replace(to_replace=['YES', 'Y'], value=True, inplace=True) + + return df + +def prepare_scrap_it(df): + + df = df.applymap(lambda s: s.upper() if type(s) == str else s) + df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) + + return df + +def applicant_type(row): + if isinstance((row["Fleet"]), str): + return 'Fleet' + elif isinstance((row["Individual"]), str): + return 'Individual' + else: + return '' + +def prepare_speciality_use_vehicle_incentives(df): + + df = df.applymap(lambda s: s.upper() if type(s) == str else s) + + num_columns = df.select_dtypes(include=['number']).columns.tolist() + df[num_columns] = df[num_columns].fillna(0) + + non_num_columns = df.columns.difference(num_columns) + df[non_num_columns] = df[non_num_columns].fillna('') + + df['Applicant Type'] = df.apply(lambda row: applicant_type(row), axis=1) + + if 'Fleet' in df.columns: + df = df.drop(columns=['Fleet']) + + if 'Individual' in df.columns: + df = df.drop(columns=['Individual']) + + return df + + diff --git a/django/api/viewsets/upload.py b/django/api/viewsets/upload.py index a73815ec..e983d7d1 100644 --- a/django/api/viewsets/upload.py +++ b/django/api/viewsets/upload.py @@ -21,25 +21,11 @@ from api.models.data_fleets import DataFleets from api.models.hydrogen_fleets import HydrogenFleets from api.serializers.datasets import DatasetsSerializer -from api.services.ldv_rebates import import_from_xls as import_ldv -from api.services.hydrogen_fueling import import_from_xls as \ - import_hydrogen_fueling -from api.services.charger_rebates import import_from_xls as \ - import_charger_rebates -from api.services.scrap_it import import_from_xls as \ - import_scrap_it -from api.services.arc_project_tracking import import_from_xls as \ - import_arc_project_tracking -from api.services.data_fleets import import_from_xls as \ - import_data_fleets -from api.services.hydrogen_fleets import import_from_xls as \ - import_hydrogen_fleets from api.services.minio import minio_get_object, minio_remove_object -from api.services.public_charging import import_from_xls as \ - import_public_charging -from api.services.speciality_use_vehicle_incentives import \ - import_from_xls as import_suvi from api.services.datasheet_template_generator import generate_template +from api.services.spreadsheet_uploader import import_from_xls +import api.constants as constants +from api.services.spreadsheet_uploader_prep import * class UploadViewset(GenericViewSet): permission_classes = (AllowAny,) @@ -47,99 +33,61 @@ class UploadViewset(GenericViewSet): @action(detail=False, methods=['get']) def datasets_list(self, request): - datasets = Datasets.objects.all() + + incomplete_datasets = ['LDV Rebates', 'Specialty Use Vehicle Incentive Program', 'Public Charging -Remove Later', 'EV Charging Rebates', 'Hydrogen Fueling', 'Hydrogen Fleets', 'ARC Project Tracking', 'Data Fleets', 'Scrap It'] + + datasets = Datasets.objects.all().exclude(name__in=incomplete_datasets) serializer = DatasetsSerializer(datasets, many=True, read_only=True) return Response(serializer.data) @action(detail=False, methods=['post']) @method_decorator(check_upload_permission()) def import_data(self, request): + filename = request.data.get('filename') dataset_selected = request.data.get('datasetSelected') replace_data = request.data.get('replace', False) - error = '' - done = '' - 
records_inserted = 0 - starting_count = 0 + try: url = minio_get_object(filename) urllib.request.urlretrieve(url, filename) - if dataset_selected: - model = '' - done = '' - import_func = '' - if dataset_selected == 'EV Charging Rebates': - import_func = import_charger_rebates - model = ChargerRebates - if dataset_selected == 'LDV Rebates': - import_func = import_ldv - model = LdvRebates - if dataset_selected == 'Hydrogen Fueling': - import_func = import_hydrogen_fueling - model = HydrogrenFueling - if dataset_selected == \ - 'Specialty Use Vehicle Incentive Program': - import_func = import_suvi - model = SpecialityUseVehicleIncentives - if dataset_selected == 'Public Charging': - import_func = import_public_charging - model = PublicCharging - if dataset_selected == 'Scrap It': - import_func = import_scrap_it - model = ScrapIt - if dataset_selected == 'ARC Project Tracking': - import_func = import_arc_project_tracking - model = ARCProjectTracking - if dataset_selected == 'Data Fleets': - import_func = import_data_fleets - model = DataFleets - if dataset_selected == 'Hydrogen Fleets': - import_func = import_hydrogen_fleets - model = HydrogenFleets - if replace_data: - starting_count = 0 - model.objects.all().delete() - else: - starting_count = model.objects.all().count() - done = import_func(filename) - if done: - os.remove(filename) - minio_remove_object(filename) - except Exception as error: - done = (error, 'file') - final_count = model.objects.all().count() - records_inserted = final_count - starting_count - records_inserted_msg = "{} records inserted. This table currently contains {} records.".format(records_inserted, final_count) - if done != True: - try: - error_location = done[1] - error = done[0] - error_row = 0 - error_msg = "There was an error. Please check your file and ensure you have the correctly named worksheets, column names, and data types in cells and reupload. Error: {}".format(error) - if len(done) > 2: - error_row = done[2] - error_type = type(error).__name__ - field_names = [f.name for f in model._meta.fields] - if error_location == 'data': - if error_type in (type(LookupError), type(KeyError), 'KeyError') : - error_msg = "Please make sure you've uploaded a file with the correct data including the correctly named columns. There was an error finding: {}. This dataset requires the following columns: {}".format(error, field_names) - elif error_type == 'ValueError' or type(ValueError): - ## note for next batch of scripts, possibly add str(type(ValueError)) - ## to this but check for impacts to other exceptions - error_msg = "{} on row {}. Please make sure you've uploaded a file with the correct data.".format(error, error_row) - elif isinstance(error, ValidationError): - error_msg ="Issue with cell value on row {}. {}".format(error_row, str(error)[2:-2]) - elif error_location == 'file': - error_msg = "{}. Please make sure you've uploaded a file with the correct data including the correctly named worksheets.".format(error) - if error_msg[-1] != '.': - error_msg+='.' 
- error_msg += records_inserted_msg - return Response(error_msg, status=status.HTTP_400_BAD_REQUEST) - except Exception as error: - print(error) - return Response('There was an issue!', status=status.HTTP_400_BAD_REQUEST) - else: - return Response(records_inserted_msg, status=status.HTTP_201_CREATED) + + config = constants.DATASET_CONFIG.get(dataset_selected) + if not config: + return Response(f"Dataset '{dataset_selected}' is not supported.", status=status.HTTP_400_BAD_REQUEST) + model = config['model'] + columns = config.get('columns') + mapping = config.get('column_mapping') + sheet_name = config.get('sheet_name', 'Sheet1') # Default to 'Sheet1' if not specified + preparation_functions = config.get('preparation_functions', []) + validation_functions = config.get('validation_functions', []) + header_row = config.get('header_row', 0) + + + result = import_from_xls( + excel_file=filename, + sheet_name=sheet_name, + model=model, + header_row = header_row, + preparation_functions=preparation_functions, + validation_functions=validation_functions, + dataset_columns=columns, + column_mapping_enum=mapping, + field_types=constants.FIELD_TYPES.get(dataset_selected), + replace_data=replace_data, + user = request.user + ) + + if not result['success']: + return Response(result, status=status.HTTP_400_BAD_REQUEST) + return Response(result, status=status.HTTP_201_CREATED) + + except Exception as e: + return Response(f"An error occurred: {str(e)}", status=status.HTTP_400_BAD_REQUEST) + + finally: + os.remove(filename) + minio_remove_object(filename) @action(detail=False, methods=['get']) diff --git a/frontend/src/uploads/UploadContainer.js b/frontend/src/uploads/UploadContainer.js index 32588bf0..4e73b0db 100644 --- a/frontend/src/uploads/UploadContainer.js +++ b/frontend/src/uploads/UploadContainer.js @@ -42,7 +42,7 @@ const UploadContainer = () => { const showError = (error) => { const { response: errorResponse } = error; - setAlertContent(errorResponse.data); + setAlertContent(`${errorResponse.data.message}\n${errorResponse.data.errors ? 'Errors: ' + errorResponse.data.errors.join('\n') : ''}`); setAlertSeverity('error'); setAlert(true); }; @@ -59,10 +59,10 @@ const UploadContainer = () => { filename, datasetSelected, replace, - }).then((postResponse) => { - setAlertContent(`Data has been successfully uploaded. ${postResponse.data}`); - setAlertSeverity('success'); + }).then((response) => { setAlert(true); + setAlertSeverity(response.data.success ? 'success' : 'error') + setAlertContent(`${response.data.message}${response.data.errors ? '\nErrors: ' + response.data.errors.join('\n') : ''}`); }).catch((error) => { showError(error); }); @@ -122,46 +122,53 @@ const UploadContainer = () => { return ; } - const alertElement = alert && alertContent && alertSeverity ? {alertContent} : null + const alertElement = alert && alertContent && alertSeverity ? + {alertContent.split('\n').map((line, index) => ( + + {line} +
<br />
+ </React.Fragment>
+ ))}
+ </Alert> : null

  return (
    [JSX element tags in the rest of this hunk were stripped during extraction; the change rewraps the returned layout in a fragment (<>) and renders the admin-only section when adminUser is truthy]
); -}; +} export default withRouter(UploadContainer); From ae5f367447005ce71bc52b50aa44bfb7ac4f884e Mon Sep 17 00:00:00 2001 From: JulianForeman <71847719+JulianForeman@users.noreply.github.com> Date: Thu, 28 Mar 2024 15:03:49 -0700 Subject: [PATCH 102/152] Removing dataset from dropdown to be added back later (#265) --- django/api/viewsets/upload.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/django/api/viewsets/upload.py b/django/api/viewsets/upload.py index e983d7d1..c25e6965 100644 --- a/django/api/viewsets/upload.py +++ b/django/api/viewsets/upload.py @@ -34,7 +34,7 @@ class UploadViewset(GenericViewSet): @action(detail=False, methods=['get']) def datasets_list(self, request): - incomplete_datasets = ['LDV Rebates', 'Specialty Use Vehicle Incentive Program', 'Public Charging -Remove Later', 'EV Charging Rebates', 'Hydrogen Fueling', 'Hydrogen Fleets', 'ARC Project Tracking', 'Data Fleets', 'Scrap It'] + incomplete_datasets = ['LDV Rebates', 'Specialty Use Vehicle Incentive Program', 'Public Charging', 'EV Charging Rebates', 'Hydrogen Fueling', 'Hydrogen Fleets', 'ARC Project Tracking', 'Data Fleets', 'Scrap It'] datasets = Datasets.objects.all().exclude(name__in=incomplete_datasets) serializer = DatasetsSerializer(datasets, many=True, read_only=True) From 6edc4582eec5cb23e4f3e1be8d0d59df771fc2c4 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 28 Mar 2024 16:29:35 -0700 Subject: [PATCH 103/152] test new pip --- .github/workflows/test-ci-nobuild.yaml | 106 +++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 .github/workflows/test-ci-nobuild.yaml diff --git a/.github/workflows/test-ci-nobuild.yaml b/.github/workflows/test-ci-nobuild.yaml new file mode 100644 index 00000000..6c93d6d6 --- /dev/null +++ b/.github/workflows/test-ci-nobuild.yaml @@ -0,0 +1,106 @@ +## For each release, the value of workflow name, branches and VERSION need to be adjusted accordingly + +name: CTHUB 0.2.0 Test CI Promote from Dev + +on: + workflow_dispatch: + +env: + VERSION: 0.2.0 + GIT_URL: https://github.com/bcgov/cthub.git + DEV_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-DEV + TEST_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-test + + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + + set-pre-release: + name: Find Dev deployment pre-release number + runs-on: ubuntu-latest + + outputs: + output1: ${{ steps.set-pre-release.outputs.PRE_RELEASE }} + + steps: + + - name: Log in to Openshift + uses: redhat-actions/oc-login@v1.3 + with: + openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} + openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} + insecure_skip_tls_verify: true + namespace: ${{ env.TOOLS_NAMESPACE }} + + - id: set-pre-release + run: | + echo "PRE_RELEASE=$(oc -n ${{ env.DEV_NAMESPACE }} describe deployment cthub-dev-frontend | grep Image | awk -F '-' '{print $NF}')" >> $GITHUB_OUTPUT + + tryee: + + name: Deploy CTHUB on Test + runs-on: ubuntu-latest + needs: set-pre-release + + env: + PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} + + steps: + - name: show + run: | + echo "find ${{ env.PRE_RELEASE }} " + + # deploy: + + # name: Deploy CTHUB on Test + # runs-on: ubuntu-latest + # timeout-minutes: 60 + # needs: set-pre-release + + # env: + # PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} + + # steps: + # - name: Ask for approval for CTHUB release-${{ env.VERSION }} Test deployment + # uses: trstringer/manual-approval@v1.6.0 + # with: + # secret: ${{ github.TOKEN }} 
+ # approvers: emi-hi,kuanfandevops,tim738745,JulianForeman + # minimum-approvals: 1 + # issue-title: "CTHUB release-${{ env.VERSION }} Test Deployment" + + # - name: Tag CTHUB images to Test + # run: | + # oc tag ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-Dev ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-$PRE_RELEASE + # oc tag ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-Dev ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-$PRE_RELEASE + # oc tag ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-Dev ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-Test + # oc tag ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-Dev ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-Test + + # - name: Checkout Manifest repository + # uses: actions/checkout@v4.1.1 + # with: + # repository: bcgov-c/tenant-gitops-30b186 + # ref: main + # ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }} + + # - name: Update frontend tag + # uses: mikefarah/yq@v4.40.5 + # with: + # cmd: yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml + + # - name: Update backend tag + # uses: mikefarah/yq@v4.40.5 + # with: + # cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml + + # - name: GitHub Commit & Push + # run: | + # git config --global user.email "actions@github.com" + # git config --global user.name "GitHub Actions" + # git add cthub/values-test.yaml + # git commit -m "Update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }} on Test" + # git push + \ No newline at end of file From 912eeb22b796b6e3901849b91817555eead125cc Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 28 Mar 2024 16:32:38 -0700 Subject: [PATCH 104/152] login to test --- .github/workflows/test-ci-nobuild.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-ci-nobuild.yaml b/.github/workflows/test-ci-nobuild.yaml index 6c93d6d6..429b0e28 100644 --- a/.github/workflows/test-ci-nobuild.yaml +++ b/.github/workflows/test-ci-nobuild.yaml @@ -33,7 +33,7 @@ jobs: openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} insecure_skip_tls_verify: true - namespace: ${{ env.TOOLS_NAMESPACE }} + namespace: ${{ env.DEV_NAMESPACE }} - id: set-pre-release run: | From f077a30ee775b3eab93219133dbecbfb498d124d Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 28 Mar 2024 16:52:22 -0700 Subject: [PATCH 105/152] read pre-release number from backend --- .github/workflows/test-ci-nobuild.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-ci-nobuild.yaml b/.github/workflows/test-ci-nobuild.yaml index 429b0e28..117103df 100644 --- a/.github/workflows/test-ci-nobuild.yaml +++ b/.github/workflows/test-ci-nobuild.yaml @@ -37,7 +37,7 @@ jobs: - id: set-pre-release run: | - echo "PRE_RELEASE=$(oc -n ${{ env.DEV_NAMESPACE }} describe deployment cthub-dev-frontend | grep Image | awk -F '-' '{print $NF}')" >> $GITHUB_OUTPUT + echo "PRE_RELEASE=$(oc -n ${{ env.DEV_NAMESPACE }} describe deploymentconfig/cthub-dev-backend | grep Image | awk -F '-' '{print $NF}')" >> $GITHUB_OUTPUT tryee: From 0b8fd0f203fcd5055b528b83f2fa8c52917a0fd5 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 28 Mar 2024 16:56:33 -0700 Subject: [PATCH 106/152] update dev namespace --- .github/workflows/test-ci-nobuild.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/.github/workflows/test-ci-nobuild.yaml b/.github/workflows/test-ci-nobuild.yaml index 117103df..0131c804 100644 --- a/.github/workflows/test-ci-nobuild.yaml +++ b/.github/workflows/test-ci-nobuild.yaml @@ -8,7 +8,7 @@ on: env: VERSION: 0.2.0 GIT_URL: https://github.com/bcgov/cthub.git - DEV_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-DEV + DEV_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev TEST_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-test @@ -37,7 +37,7 @@ jobs: - id: set-pre-release run: | - echo "PRE_RELEASE=$(oc -n ${{ env.DEV_NAMESPACE }} describe deploymentconfig/cthub-dev-backend | grep Image | awk -F '-' '{print $NF}')" >> $GITHUB_OUTPUT + echo "PRE_RELEASE=$(oc -n ${{ env.DEV_NAMESPACE }} describe deployment/cthub-dev-frontend | grep Image | awk -F '-' '{print $NF}')" >> $GITHUB_OUTPUT tryee: From 3cb241aa0e5eeaf9931406f023298c28243b2bec Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 28 Mar 2024 16:59:11 -0700 Subject: [PATCH 107/152] open deployment on Test --- .github/workflows/test-ci-nobuild.yaml | 90 +++++++++++--------------- 1 file changed, 37 insertions(+), 53 deletions(-) diff --git a/.github/workflows/test-ci-nobuild.yaml b/.github/workflows/test-ci-nobuild.yaml index 0131c804..f5ec064f 100644 --- a/.github/workflows/test-ci-nobuild.yaml +++ b/.github/workflows/test-ci-nobuild.yaml @@ -39,68 +39,52 @@ jobs: run: | echo "PRE_RELEASE=$(oc -n ${{ env.DEV_NAMESPACE }} describe deployment/cthub-dev-frontend | grep Image | awk -F '-' '{print $NF}')" >> $GITHUB_OUTPUT - tryee: + deploy: name: Deploy CTHUB on Test runs-on: ubuntu-latest + timeout-minutes: 60 needs: set-pre-release env: PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} steps: - - name: show - run: | - echo "find ${{ env.PRE_RELEASE }} " - - # deploy: - - # name: Deploy CTHUB on Test - # runs-on: ubuntu-latest - # timeout-minutes: 60 - # needs: set-pre-release - - # env: - # PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} - - # steps: - # - name: Ask for approval for CTHUB release-${{ env.VERSION }} Test deployment - # uses: trstringer/manual-approval@v1.6.0 - # with: - # secret: ${{ github.TOKEN }} - # approvers: emi-hi,kuanfandevops,tim738745,JulianForeman - # minimum-approvals: 1 - # issue-title: "CTHUB release-${{ env.VERSION }} Test Deployment" - - # - name: Tag CTHUB images to Test - # run: | - # oc tag ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-Dev ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-$PRE_RELEASE - # oc tag ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-Dev ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-$PRE_RELEASE - # oc tag ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-Dev ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-Test - # oc tag ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-Dev ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-Test + - name: Ask for approval for CTHUB release-${{ env.VERSION }} Test deployment + uses: trstringer/manual-approval@v1.6.0 + with: + secret: ${{ github.TOKEN }} + approvers: emi-hi,kuanfandevops,tim738745,JulianForeman + minimum-approvals: 1 + issue-title: "CTHUB release-${{ env.VERSION }} Test Deployment" + + - name: Tag CTHUB images to Test + run: | + oc tag ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-Dev ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-$PRE_RELEASE + oc tag ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-Dev ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ 
env.VERSION }}-$PRE_RELEASE - # - name: Checkout Manifest repository - # uses: actions/checkout@v4.1.1 - # with: - # repository: bcgov-c/tenant-gitops-30b186 - # ref: main - # ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }} + - name: Checkout Manifest repository + uses: actions/checkout@v4.1.1 + with: + repository: bcgov-c/tenant-gitops-30b186 + ref: main + ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }} - # - name: Update frontend tag - # uses: mikefarah/yq@v4.40.5 - # with: - # cmd: yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml - - # - name: Update backend tag - # uses: mikefarah/yq@v4.40.5 - # with: - # cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml - - # - name: GitHub Commit & Push - # run: | - # git config --global user.email "actions@github.com" - # git config --global user.name "GitHub Actions" - # git add cthub/values-test.yaml - # git commit -m "Update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }} on Test" - # git push + - name: Update frontend tag + uses: mikefarah/yq@v4.40.5 + with: + cmd: yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml + + - name: Update backend tag + uses: mikefarah/yq@v4.40.5 + with: + cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml + + - name: GitHub Commit & Push + run: | + git config --global user.email "actions@github.com" + git config --global user.name "GitHub Actions" + git add cthub/values-test.yaml + git commit -m "Update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }} on Test" + git push \ No newline at end of file From 73a0a7795e3b8e7b0e9036db9606066afca7f10c Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 28 Mar 2024 17:04:49 -0700 Subject: [PATCH 108/152] correct tagging --- .github/workflows/test-ci-nobuild.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test-ci-nobuild.yaml b/.github/workflows/test-ci-nobuild.yaml index f5ec064f..d7002c30 100644 --- a/.github/workflows/test-ci-nobuild.yaml +++ b/.github/workflows/test-ci-nobuild.yaml @@ -60,8 +60,8 @@ jobs: - name: Tag CTHUB images to Test run: | - oc tag ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-Dev ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-$PRE_RELEASE - oc tag ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-Dev ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-$PRE_RELEASE + oc tag ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} + oc tag ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} - name: Checkout Manifest repository uses: actions/checkout@v4.1.1 From 41c0eb587a90052d8bbfa52324d6650a2ff39e5b Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 28 Mar 2024 17:07:11 -0700 Subject: [PATCH 109/152] login before deploying on test --- .github/workflows/test-ci-nobuild.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/test-ci-nobuild.yaml b/.github/workflows/test-ci-nobuild.yaml index d7002c30..bcb46b1c 100644 --- a/.github/workflows/test-ci-nobuild.yaml +++ b/.github/workflows/test-ci-nobuild.yaml @@ -58,6 +58,14 @@ jobs: minimum-approvals: 1 issue-title: "CTHUB release-${{ env.VERSION }} Test Deployment" + 
- name: Log in to Openshift
+        uses: redhat-actions/oc-login@v1.3
+        with:
+          openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }}
+          openshift_token: ${{ secrets.OPENSHIFT_TOKEN }}
+          insecure_skip_tls_verify: true
+          namespace: ${{ env.DEV_NAMESPACE }}
+
       - name: Tag CTHUB images to Test
         run: |
           oc tag ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}

From 47c92598950be64110f738b9cf9f13893e2fe3f7 Mon Sep 17 00:00:00 2001
From: tim738745 <98717409+tim738745@users.noreply.github.com>
Date: Tue, 2 Apr 2024 09:19:47 -0700
Subject: [PATCH 110/152] task: 257 - add guidance regarding package updates
 (#260)

---
 README.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/README.md b/README.md
index 666927c4..a1be2b22 100644
--- a/README.md
+++ b/README.md
@@ -52,6 +52,13 @@ The Clean Transportation Data Hub provides an evidence base for the Clean Transp
 - To make your `RunPython` "script" cleaner, consider putting the actual queries themselves in separate sql files and reading from those in `RunPython`
 - To uncouple metabase from django, simply remove metabase from `settings.INSTALLED_APPS`.
 
+# Updating packages
+- From time to time, we may become aware of package updates (mainly the packages in package.json (frontend) and requirements.txt (backend)).
+- Tools like Dependabot (https://github.com/dependabot) may raise PRs that update these packages.
+- If the package that can be updated is an npm package and is a transitive dependency (a dependency of an immediate dependency), we can implement the update using `overrides` (https://docs.npmjs.com/cli/v10/configuring-npm/package-json#overrides); a sketch follows after this patch.
+- When packages get updated, we'll have to confirm that things are still working; ideally, we would have test suites with good coverage that we can run. Otherwise, or in addition to that, some user testing may be needed.
+- When an entire image is scanned by some tool, there may be deeper, OS-level dependencies that show as being critically out of date/vulnerable; in cases like this, if an updated image is not yet available, there are usually `alpine` versions of images that simply don't include many of these dependencies; whether they will work for our purposes is another question.
+
 # License
 
 The code is a fork from Richard's personal project. Please do not clone, copy or replicate this project unless you're authorized to do so.
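An illustrative aside on the `RunPython` guidance in the README hunk above: the sketch below shows one way a Django data migration can read its SQL from a separate file. It is a minimal example under stated assumptions, not code from this repository; the file `sql/0042_backfill.sql` and the migration names are hypothetical placeholders.

    import os

    from django.db import migrations


    def run_sql_file(apps, schema_editor):
        # Keep the RunPython "script" clean: load the raw SQL from a sibling
        # file (hypothetical path) instead of embedding long query strings here.
        path = os.path.join(os.path.dirname(__file__), "sql", "0042_backfill.sql")
        with open(path) as f:
            schema_editor.execute(f.read())


    class Migration(migrations.Migration):

        dependencies = [("api", "0001_initial")]

        operations = [
            # Pair the forward function with noop so the migration can be unapplied.
            migrations.RunPython(run_sql_file, migrations.RunPython.noop),
        ]

One design note: resolving the path relative to `__file__` keeps the migration self-contained no matter which directory `manage.py migrate` is invoked from.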
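And the `overrides` sketch promised in the patch above: a hedged example of pinning a transitive npm dependency from package.json (supported by npm 8.3+). The package names and versions are hypothetical, not entries from this project's manifest.

    {
      "name": "app",
      "dependencies": {
        "some-direct-dependency": "^1.0.0"
      },
      "overrides": {
        "some-vulnerable-transitive-dep": "^2.3.4"
      }
    }

npm applies the override wherever the named package appears in the dependency tree; running `npm install` afterwards regenerates package-lock.json so the pinned version is actually resolved.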
From f72ea0c6bfd598ca3813d0cffba4eb23b94f6176 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Tue, 2 Apr 2024 11:31:05 -0700 Subject: [PATCH 111/152] update workflow name --- .github/workflows/test-ci-nobuild.yaml | 98 -------------------------- .github/workflows/test-ci.yaml | 52 +++++--------- 2 files changed, 16 insertions(+), 134 deletions(-) delete mode 100644 .github/workflows/test-ci-nobuild.yaml diff --git a/.github/workflows/test-ci-nobuild.yaml b/.github/workflows/test-ci-nobuild.yaml deleted file mode 100644 index bcb46b1c..00000000 --- a/.github/workflows/test-ci-nobuild.yaml +++ /dev/null @@ -1,98 +0,0 @@ -## For each release, the value of workflow name, branches and VERSION need to be adjusted accordingly - -name: CTHUB 0.2.0 Test CI Promote from Dev - -on: - workflow_dispatch: - -env: - VERSION: 0.2.0 - GIT_URL: https://github.com/bcgov/cthub.git - DEV_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev - TEST_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-test - - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - - set-pre-release: - name: Find Dev deployment pre-release number - runs-on: ubuntu-latest - - outputs: - output1: ${{ steps.set-pre-release.outputs.PRE_RELEASE }} - - steps: - - - name: Log in to Openshift - uses: redhat-actions/oc-login@v1.3 - with: - openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} - openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} - insecure_skip_tls_verify: true - namespace: ${{ env.DEV_NAMESPACE }} - - - id: set-pre-release - run: | - echo "PRE_RELEASE=$(oc -n ${{ env.DEV_NAMESPACE }} describe deployment/cthub-dev-frontend | grep Image | awk -F '-' '{print $NF}')" >> $GITHUB_OUTPUT - - deploy: - - name: Deploy CTHUB on Test - runs-on: ubuntu-latest - timeout-minutes: 60 - needs: set-pre-release - - env: - PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} - - steps: - - name: Ask for approval for CTHUB release-${{ env.VERSION }} Test deployment - uses: trstringer/manual-approval@v1.6.0 - with: - secret: ${{ github.TOKEN }} - approvers: emi-hi,kuanfandevops,tim738745,JulianForeman - minimum-approvals: 1 - issue-title: "CTHUB release-${{ env.VERSION }} Test Deployment" - - - name: Log in to Openshift - uses: redhat-actions/oc-login@v1.3 - with: - openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} - openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} - insecure_skip_tls_verify: true - namespace: ${{ env.DEV_NAMESPACE }} - - - name: Tag CTHUB images to Test - run: | - oc tag ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} - oc tag ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} - - - name: Checkout Manifest repository - uses: actions/checkout@v4.1.1 - with: - repository: bcgov-c/tenant-gitops-30b186 - ref: main - ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }} - - - name: Update frontend tag - uses: mikefarah/yq@v4.40.5 - with: - cmd: yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml - - - name: Update backend tag - uses: mikefarah/yq@v4.40.5 - with: - cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml - - - name: GitHub Commit & Push - run: | - git config --global user.email "actions@github.com" - git config --global user.name "GitHub Actions" - git add 
cthub/values-test.yaml - git commit -m "Update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }} on Test" - git push - \ No newline at end of file diff --git a/.github/workflows/test-ci.yaml b/.github/workflows/test-ci.yaml index ddf55013..0acf17ba 100644 --- a/.github/workflows/test-ci.yaml +++ b/.github/workflows/test-ci.yaml @@ -1,14 +1,13 @@ ## For each release, the value of workflow name, branches and VERSION need to be adjusted accordingly name: CTHUB 0.2.0 Test CI - on: workflow_dispatch: env: VERSION: 0.2.0 GIT_URL: https://github.com/bcgov/cthub.git - TOOLS_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools + DEV_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev TEST_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-test @@ -19,59 +18,32 @@ concurrency: jobs: set-pre-release: - name: Calculate pre-release number + name: Find Dev deployment pre-release number runs-on: ubuntu-latest outputs: output1: ${{ steps.set-pre-release.outputs.PRE_RELEASE }} - - steps: - - id: set-pre-release - run: echo "PRE_RELEASE=$(date +'%Y%m%d%H%M%S')" >> $GITHUB_OUTPUT - - build: - - name: Build CTHUB - runs-on: ubuntu-latest - needs: set-pre-release - timeout-minutes: 60 - - env: - PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} steps: - - name: Check out repository - uses: actions/checkout@v4.1.1 - - name: Log in to Openshift uses: redhat-actions/oc-login@v1.3 with: openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} insecure_skip_tls_verify: true - namespace: ${{ env.TOOLS_NAMESPACE }} + namespace: ${{ env.DEV_NAMESPACE }} - - name: Build CTHUB Backend - run: | - cd openshift/templates/backend - oc process -f ./backend-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} - sleep 5s - oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-backend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 - - - name: Build CTHUB Frontend + - id: set-pre-release run: | - cd openshift/templates/frontend - oc process -f ./frontend-bc-docker.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} - sleep 5s - oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-frontend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 + echo "PRE_RELEASE=$(oc -n ${{ env.DEV_NAMESPACE }} describe deployment/cthub-dev-frontend | grep Image | awk -F '-' '{print $NF}')" >> $GITHUB_OUTPUT deploy: name: Deploy CTHUB on Test runs-on: ubuntu-latest timeout-minutes: 60 - needs: [set-pre-release, build] + needs: set-pre-release env: PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} @@ -85,10 +57,18 @@ jobs: minimum-approvals: 1 issue-title: "CTHUB release-${{ env.VERSION }} Test Deployment" + - name: Log in to Openshift + uses: redhat-actions/oc-login@v1.3 + with: + openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} + openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} + insecure_skip_tls_verify: true + namespace: ${{ env.DEV_NAMESPACE }} + - name: Tag CTHUB images to Test run: | - oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-$PRE_RELEASE ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-$PRE_RELEASE - 
oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-$PRE_RELEASE ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-$PRE_RELEASE
+          oc tag ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
+          oc tag ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
 
       - name: Checkout Manifest repository
         uses: actions/checkout@v4.1.1

From 31ffce64ead4259a8e1f13b00a153cd197cd2d2e Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Tue, 2 Apr 2024 11:34:51 -0700
Subject: [PATCH 112/152] show PRE_RELEASE

---
 .github/workflows/test-ci.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/test-ci.yaml b/.github/workflows/test-ci.yaml
index 0acf17ba..c07dd08d 100644
--- a/.github/workflows/test-ci.yaml
+++ b/.github/workflows/test-ci.yaml
@@ -37,6 +37,7 @@ jobs:
       - id: set-pre-release
         run: |
           echo "PRE_RELEASE=$(oc -n ${{ env.DEV_NAMESPACE }} describe deployment/cthub-dev-frontend | grep Image | awk -F '-' '{print $NF}')" >> $GITHUB_OUTPUT
+          echo "PRE_RELEASE is $PRE_RELEASE "
 
   deploy:

From 362d4b786a6e0fe1cdc59c7fccfab9223eacee9c Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Tue, 2 Apr 2024 11:38:46 -0700
Subject: [PATCH 113/152] add pre-release to issue title for approval

---
 .github/workflows/test-ci.yaml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/.github/workflows/test-ci.yaml b/.github/workflows/test-ci.yaml
index c07dd08d..35ae5bad 100644
--- a/.github/workflows/test-ci.yaml
+++ b/.github/workflows/test-ci.yaml
@@ -37,7 +37,6 @@ jobs:
       - id: set-pre-release
         run: |
           echo "PRE_RELEASE=$(oc -n ${{ env.DEV_NAMESPACE }} describe deployment/cthub-dev-frontend | grep Image | awk -F '-' '{print $NF}')" >> $GITHUB_OUTPUT
-          echo "PRE_RELEASE is $PRE_RELEASE "
 
   deploy:
@@ -56,7 +55,7 @@ jobs:
           secret: ${{ github.TOKEN }}
           approvers: emi-hi,kuanfandevops,tim738745,JulianForeman
           minimum-approvals: 1
-          issue-title: "CTHUB release-${{ env.VERSION }} Test Deployment"
+          issue-title: "CTHUB release-${{ env.VERSION }}-${{ env.PRE_RELEASE }} Test Deployment"

From 5fe4d46217f244d519237c2d54569f5614036637 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Tue, 2 Apr 2024 11:41:35 -0700
Subject: [PATCH 114/152] add pre-release to issue title for approval

---
 .github/workflows/test-ci.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/test-ci.yaml b/.github/workflows/test-ci.yaml
index 35ae5bad..302f6347 100644
--- a/.github/workflows/test-ci.yaml
+++ b/.github/workflows/test-ci.yaml
@@ -49,7 +49,7 @@ jobs:
       PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }}
 
     steps:
-      - name: Ask for approval for CTHUB release-${{ env.VERSION }} Test deployment
+      - name: Ask for approval for CTHUB release-${{ env.VERSION }}-${{ env.PRE_RELEASE }} Test deployment
         uses: trstringer/manual-approval@v1.6.0
         with:
           secret: ${{ github.TOKEN }}

From e22290771c513f3d0acdeaae976ec850591b5cac Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Tue, 2 Apr 2024 11:58:04 -0700
Subject: [PATCH 115/152] add prod pipeline

---
 .github/workflows/prod-ci.yaml | 97 ++++++++++++++++++++++++++++
 1 file changed, 97 insertions(+)
 create mode 100644 .github/workflows/prod-ci.yaml

diff --git a/.github/workflows/prod-ci.yaml b/.github/workflows/prod-ci.yaml
new file mode 100644
index 00000000..047bffed
--- /dev/null
+++ b/.github/workflows/prod-ci.yaml
@@ -0,0 +1,97 @@
+## For each release, the value of workflow name, branches and VERSION need to be adjusted accordingly
+
+name: CTHUB 0.2.0 Prod CI
+on:
+  workflow_dispatch:
+
+env:
+  VERSION: 0.2.0
+  GIT_URL: https://github.com/bcgov/cthub.git
+  TEST_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-test
+  PROD_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-prod
+
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+jobs:
+
+  set-pre-release:
+    name: Find Test deployment pre-release number
+    runs-on: ubuntu-latest
+
+    outputs:
+      output1: ${{ steps.set-pre-release.outputs.PRE_RELEASE }}
+
+    steps:
+
+      - name: Log in to Openshift
+        uses: redhat-actions/oc-login@v1.3
+        with:
+          openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }}
+          openshift_token: ${{ secrets.OPENSHIFT_TOKEN }}
+          insecure_skip_tls_verify: true
+          namespace: ${{ env.TEST_NAMESPACE }}
+
+      - id: set-pre-release
+        run: |
+          echo "PRE_RELEASE=$(oc -n ${{ env.TEST_NAMESPACE }} describe deployment/cthub-test-frontend | grep Image | awk -F '-' '{print $NF}')" >> $GITHUB_OUTPUT
+
+  deploy:
+
+    name: Deploy CTHUB on Prod
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+    needs: set-pre-release
+
+    env:
+      PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }}
+
+    steps:
+      - name: Ask for approval for CTHUB release-${{ env.VERSION }}-${{ env.PRE_RELEASE }} PRODUCTION deployment
+        uses: trstringer/manual-approval@v1.6.0
+        with:
+          secret: ${{ github.TOKEN }}
+          approvers: emi-hi,kuanfandevops,tim738745,JulianForeman
+          minimum-approvals: 2
+          issue-title: "CTHUB release-${{ env.VERSION }}-${{ env.PRE_RELEASE }} PRODUCTION Deployment"
+
+      - name: Log in to Openshift
+        uses: redhat-actions/oc-login@v1.3
+        with:
+          openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }}
+          openshift_token: ${{ secrets.OPENSHIFT_TOKEN }}
+          insecure_skip_tls_verify: true
+          namespace: ${{ env.TEST_NAMESPACE }}
+
+      - name: Tag CTHUB images to Prod
+        run: |
+          oc tag ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.PROD_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
+          oc tag ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.PROD_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
+
+      - name: Checkout Manifest repository
+        uses: actions/checkout@v4.1.1
+        with:
+          repository: bcgov-c/tenant-gitops-30b186
+          ref: main
+          ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }}
+
+      - name: Update frontend tag
+        uses: mikefarah/yq@v4.40.5
+        with:
+          cmd: yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml
+
+      - name: Update backend tag
+        uses: mikefarah/yq@v4.40.5
+        with:
+          cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml
+
+      - name: GitHub Commit & Push
+        run: |
+          git config --global user.email "actions@github.com"
+          git config --global user.name "GitHub Actions"
+          git add cthub/values-prod.yaml
+          git commit -m "Update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }} on PRODUCTION"
+          git push
+
\ No newline at end of file

From beee697170f180266b4c2163b94d6fef78a339eb Mon Sep 17 00:00:00 2001
From: JulianForeman <71847719+JulianForeman@users.noreply.github.com>
Date: Tue, 2 Apr 2024 14:12:39 -0700
Subject: [PATCH 116/152] Added more styling options to loading component,
 loading component now displays instead of the upload button on click until
 the upload is
finished. Added state variable for page refreshes instead of using loading. (#264) --- django/api/viewsets/upload.py | 10 ---- frontend/src/app/components/Loading.js | 16 +++-- frontend/src/uploads/UploadContainer.js | 60 ++++++++++++------- frontend/src/uploads/components/UploadPage.js | 26 ++++---- 4 files changed, 60 insertions(+), 52 deletions(-) diff --git a/django/api/viewsets/upload.py b/django/api/viewsets/upload.py index c25e6965..5a70faa8 100644 --- a/django/api/viewsets/upload.py +++ b/django/api/viewsets/upload.py @@ -10,16 +10,6 @@ from django.utils.decorators import method_decorator from api.decorators.permission import check_upload_permission from api.models.datasets import Datasets -from api.models.ldv_rebates import LdvRebates -from api.models.public_charging import PublicCharging -from api.models.charger_rebates import ChargerRebates -from api.models.speciality_use_vehicle_incentives import \ - SpecialityUseVehicleIncentives -from api.models.hydrogen_fueling import HydrogrenFueling -from api.models.scrap_it import ScrapIt -from api.models.arc_project_tracking import ARCProjectTracking -from api.models.data_fleets import DataFleets -from api.models.hydrogen_fleets import HydrogenFleets from api.serializers.datasets import DatasetsSerializer from api.services.minio import minio_get_object, minio_remove_object from api.services.datasheet_template_generator import generate_template diff --git a/frontend/src/app/components/Loading.js b/frontend/src/app/components/Loading.js index 02b9b5a3..a6d7fe57 100644 --- a/frontend/src/app/components/Loading.js +++ b/frontend/src/app/components/Loading.js @@ -1,12 +1,10 @@ -import React from 'react' -import { CircularProgress } from '@mui/material' +import React from 'react'; +import { CircularProgress } from '@mui/material'; -const Loading = () => { +const Loading = ({ color = 'inherit' }) => { return ( -
- -
- ) -} + + ); +}; -export default Loading \ No newline at end of file +export default Loading; diff --git a/frontend/src/uploads/UploadContainer.js b/frontend/src/uploads/UploadContainer.js index 4e73b0db..f6b7b75d 100644 --- a/frontend/src/uploads/UploadContainer.js +++ b/frontend/src/uploads/UploadContainer.js @@ -15,6 +15,7 @@ const UploadContainer = () => { const [uploadFiles, setUploadFiles] = useState([]); // array of objects for files to be uploaded const [datasetList, setDatasetList] = useState([{}]); // holds the array of names of datasets const [loading, setLoading] = useState(false); + const [refresh, setRefresh] = useState(false); // Used for page refresh instead of loading progress const [datasetSelected, setDatasetSelected] = useState(''); // string identifying which dataset is being uploaded const [replaceData, setReplaceData] = useState(false); // if true, we will replace all const [alertContent, setAlertContent] = useState(); @@ -27,10 +28,10 @@ const UploadContainer = () => { const axiosDefault = useAxios(true); const refreshList = () => { - setLoading(true); + setRefresh(true); axios.get(ROUTES_UPLOAD.LIST).then((response) => { setDatasetList(response.data); - setLoading(false); + setRefresh(false); axios.get(ROUTES_USERS.CURRENT).then((currentUserResp) => { if (currentUserResp.data.user_permissions.admin === true) { setAdminUser(true); @@ -48,32 +49,44 @@ const UploadContainer = () => { }; const doUpload = () => uploadFiles.forEach((file) => { - axios.get(ROUTES_UPLOAD.MINIO_URL).then((response) => { - const { url: uploadUrl, minio_object_name: filename } = response.data; - axiosDefault.put(uploadUrl, file).then(() => { - let replace = false; - if (replaceData === true) { - replace = true; - } - axios.post(ROUTES_UPLOAD.UPLOAD, { - filename, - datasetSelected, - replace, - }).then((response) => { - setAlert(true); - setAlertSeverity(response.data.success ? 'success' : 'error') - setAlertContent(`${response.data.message}${response.data.errors ? '\nErrors: ' + response.data.errors.join('\n') : ''}`); - }).catch((error) => { - showError(error); + setLoading(true) + const uploadPromises = uploadFiles.map((file) => { + return axios.get(ROUTES_UPLOAD.MINIO_URL).then((response) => { + const { url: uploadUrl, minio_object_name: filename } = response.data; + return axiosDefault.put(uploadUrl, file).then(() => { + let replace = false; + if (replaceData === true) { + replace = true; + } + return axios.post(ROUTES_UPLOAD.UPLOAD, { + filename, + datasetSelected, + replace, + }); }); - }).finally(() => { - setUploadFiles([]); }); + }); + + Promise.all(uploadPromises).then((responses) => { + + const errorCheck = responses.some(response => response.data.success) + + setAlertSeverity(errorCheck ? 'success' : 'error') + + const message = responses.map(response => + `${response.data.message}${response.data.errors ? '\nErrors: ' + response.data.errors.join('\n') : ''}` + ).join('\n'); + + setAlertContent(message); + setAlert(true); + setUploadFiles([]); }).catch((error) => { - const { response: errorResponse } = error; showError(error); + }).finally(() => { + setLoading(false); }); }); + const downloadSpreadsheet = () => { axios.get(ROUTES_UPLOAD.DOWNLOAD_SPREADSHEET, { params: { @@ -118,7 +131,7 @@ const UploadContainer = () => { refreshList(true); }, []); - if (loading) { + if (refresh) { return ; } @@ -158,6 +171,7 @@ const UploadContainer = () => { handleRadioChange={handleRadioChange} downloadSpreadsheet={downloadSpreadsheet} setAlert={setAlert} + loading={loading} />
{adminUser && ( diff --git a/frontend/src/uploads/components/UploadPage.js b/frontend/src/uploads/components/UploadPage.js index 972f35ea..d64b70f8 100644 --- a/frontend/src/uploads/components/UploadPage.js +++ b/frontend/src/uploads/components/UploadPage.js @@ -5,6 +5,7 @@ import { } from '@mui/material'; import UploadIcon from '@mui/icons-material/Upload'; import FileDropArea from './FileDropArea'; +import Loading from '../../app/components/Loading'; const UploadPage = (props) => { const { @@ -19,6 +20,7 @@ const UploadPage = (props) => { handleRadioChange, downloadSpreadsheet, setAlert, + loading, } = props; const selectionList = datasetList.map((obj, index) => ( @@ -78,16 +80,20 @@ const UploadPage = (props) => { />
-
+          {loading ? (
+
+          ) : (
+
+          )}
 

From dc677c411c9a6298ef61daca1efe3b922e287e34 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Thu, 4 Apr 2024 11:53:37 -0700
Subject: [PATCH 117/152] add image cleanup workflow

---
 .github/workflows/cleanup-imagetags.yaml | 68 ++++++++++++++++++++++++
 1 file changed, 68 insertions(+)
 create mode 100644 .github/workflows/cleanup-imagetags.yaml

diff --git a/.github/workflows/cleanup-imagetags.yaml b/.github/workflows/cleanup-imagetags.yaml
new file mode 100644
index 00000000..de5a16c3
--- /dev/null
+++ b/.github/workflows/cleanup-imagetags.yaml
@@ -0,0 +1,68 @@
+name: Scheduled cleanup unused images
+on:
+  workflow_dispatch:
+  schedule:
+    - cron: '0 0 * * 0'
+# At 00:00 on Sunday.
+
+jobs:
+
+  cleanup-images:
+    runs-on: ubuntu-latest
+    steps:
+      - name: cleanup-images
+        run: |
+
+          #!/bin/bash
+
+          # This script will delete all image tags for both frontend and backend except the one being referenced
+
+          # The sample of search_string is cthub-backend:0.2.0-20240403221450
+          # The sample of oc_output could include
+          # cthub-backend:0.2.0-20240403210040
+          # cthub-backend:0.2.0-20240403211844
+          # cthub-backend:0.2.0-20240403221450
+          # The script will remove the first two image tags
+
+          delete_resources() {
+            local search_string="$1"
+            local oc_output="$2"
+            local namespace="$3"
+
+            # Check if the oc_output is empty
+            if [ -z "$oc_output" ]; then
+              echo "Error: No output provided."
+              return 1
+            fi
+
+            # Loop through each line in the oc output
+            while IFS= read -r line; do
+              # Check if the line contains the search string
+              if [[ "$line" != *"$search_string"* ]]; then
+                # Extract the name of the resource from the line
+                resource_name=$(echo "$line" | awk '{print $1}')
+                # Delete the resource
+                oc -n "$namespace" delete imagetag/"$resource_name"
+                echo "deleted $line"
+              fi
+            done <<< "$oc_output"
+          }
+
+          # Define the search string
+          search_string=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev describe deploymentconfig/cthub-dev-backend | grep Image | awk -F '/' '{print $NF}')
+          # Run the oc command and store the output in a variable
+          oc_output=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev get imagetags | grep cthub-backend | awk '{print $1}')
+          namespace="${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev"
+          delete_resources "$search_string" "$oc_output" "$namespace"
+
+
+          # Define the search string
+          search_string=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev describe deployment/cthub-dev-frontend | grep Image | awk -F '/' '{print $NF}')
+          # Run the oc command and store the output in a variable
+          oc_output=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev get imagetags | grep cthub-frontend | awk '{print $1}')
+          namespace="${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev"
+          delete_resources "$search_string" "$oc_output" "$namespace"
+
+
+          oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-frontend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag
+          oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-backend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag
\ No newline at end of file

From 198372b9fbbca6d16454e55c668c22d54a19f9d6 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Thu, 4 Apr 2024 11:59:16 -0700
Subject: [PATCH 118/152] login first

---
 .github/workflows/cleanup-imagetags.yaml | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/.github/workflows/cleanup-imagetags.yaml
b/.github/workflows/cleanup-imagetags.yaml
index de5a16c3..b1217f79 100644
--- a/.github/workflows/cleanup-imagetags.yaml
+++ b/.github/workflows/cleanup-imagetags.yaml
@@ -10,6 +10,15 @@ jobs:
   cleanup-images:
     runs-on: ubuntu-latest
     steps:
+
+      - name: Log in to Openshift
+        uses: redhat-actions/oc-login@v1.3
+        with:
+          openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }}
+          openshift_token: ${{ secrets.OPENSHIFT_TOKEN }}
+          insecure_skip_tls_verify: true
+          namespace: ${{ env.TOOLS_NAMESPACE }}
+
       - name: cleanup-images
         run: |

From c160949034e037acb28fd7d32b77b01e5f042018 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Thu, 4 Apr 2024 12:05:49 -0700
Subject: [PATCH 119/152] update image cleanup messages

---
 .github/workflows/cleanup-imagetags.yaml | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/cleanup-imagetags.yaml b/.github/workflows/cleanup-imagetags.yaml
index b1217f79..d88ab44c 100644
--- a/.github/workflows/cleanup-imagetags.yaml
+++ b/.github/workflows/cleanup-imagetags.yaml
@@ -10,7 +10,7 @@ jobs:
   cleanup-images:
     runs-on: ubuntu-latest
    steps:
-
+
       - name: Log in to Openshift
         uses: redhat-actions/oc-login@v1.3
         with:
@@ -52,7 +52,6 @@ jobs:
                 resource_name=$(echo "$line" | awk '{print $1}')
                 # Delete the resource
                 oc -n "$namespace" delete imagetag/"$resource_name"
-                echo "deleted $line"
               fi
             done <<< "$oc_output"
           }
@@ -62,16 +61,16 @@ jobs:
           # Define the search string
           search_string=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev describe deploymentconfig/cthub-dev-backend | grep Image | awk -F '/' '{print $NF}')
           # Run the oc command and store the output in a variable
           oc_output=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev get imagetags | grep cthub-backend | awk '{print $1}')
           namespace="${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev"
+          echo "Will delete all cthub-backend image tags in ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev except search_string"
           delete_resources "$search_string" "$oc_output" "$namespace"
 
-
           # Define the search string
           search_string=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev describe deployment/cthub-dev-frontend | grep Image | awk -F '/' '{print $NF}')
           # Run the oc command and store the output in a variable
           oc_output=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev get imagetags | grep cthub-frontend | awk '{print $1}')
+          echo "Will delete all cthub-frontend image tags in ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev except search_string"
           namespace="${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev"
           delete_resources "$search_string" "$oc_output" "$namespace"

From 0c0c3c142f0dab69520adad70fedf0ec2ff93a0c Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Thu, 4 Apr 2024 12:07:11 -0700
Subject: [PATCH 120/152] correct var usage

---
 .github/workflows/cleanup-imagetags.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/cleanup-imagetags.yaml b/.github/workflows/cleanup-imagetags.yaml
index d88ab44c..a7bf9463 100644
--- a/.github/workflows/cleanup-imagetags.yaml
+++ b/.github/workflows/cleanup-imagetags.yaml
@@ -61,14 +61,14 @@ jobs:
           # Run the oc command and store the output in a variable
           oc_output=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev get imagetags | grep cthub-backend | awk '{print $1}')
           namespace="${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev"
-          echo
"Will delete all cthub-bakcend image tags in ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev except search_string" + echo "Will delete all cthub-bakcend image tags in ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev except $search_string" delete_resources "$search_string" "$oc_output" "$namespace" # Define the search string search_string=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev describe deployment/cthub-dev-frontend | grep Image | awk -F '/' '{print $NF}') # Run the oc command and store the output in a variable oc_output=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev get imagetags | grep cthub-frontend | awk '{print $1}') - echo "Will delete all cthub-frontend image tags in ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev except search_string" + echo "Will delete all cthub-frontend image tags in ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev except $search_string" namespace="${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev" delete_resources "$search_string" "$oc_output" "$namespace" From 3fd442a540afa81d4dbfa83dc6ce27943f326882 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 4 Apr 2024 12:12:42 -0700 Subject: [PATCH 121/152] add continue-on-error: true --- .github/workflows/cleanup-imagetags.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/cleanup-imagetags.yaml b/.github/workflows/cleanup-imagetags.yaml index a7bf9463..94d19522 100644 --- a/.github/workflows/cleanup-imagetags.yaml +++ b/.github/workflows/cleanup-imagetags.yaml @@ -20,6 +20,7 @@ jobs: namespace: ${{ env.TOOLS_NAMESPACE }} - name: cleanup-images + continue-on-error: true run: | #!/bin/bash From 247f60a35c6dd8b0d2bd688fdd56ec809111ab8b Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 4 Apr 2024 12:16:36 -0700 Subject: [PATCH 122/152] echo message before delete tool images --- .github/workflows/cleanup-imagetags.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/cleanup-imagetags.yaml b/.github/workflows/cleanup-imagetags.yaml index 94d19522..4f776cd9 100644 --- a/.github/workflows/cleanup-imagetags.yaml +++ b/.github/workflows/cleanup-imagetags.yaml @@ -72,6 +72,7 @@ jobs: echo "Will delete all cthub-frontend image tags in ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev except $search_string" namespace="${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev" delete_resources "$search_string" "$oc_output" "$namespace" - + + echo "will delete images in tools env" oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-frontend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-backend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag \ No newline at end of file From 493f4fe6fae2927f624ccdcda6a72e2e6b3505d6 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 4 Apr 2024 12:17:45 -0700 Subject: [PATCH 123/152] comment tools env image cleanup --- .github/workflows/cleanup-imagetags.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cleanup-imagetags.yaml b/.github/workflows/cleanup-imagetags.yaml index 4f776cd9..e9088775 100644 --- a/.github/workflows/cleanup-imagetags.yaml +++ b/.github/workflows/cleanup-imagetags.yaml @@ -74,5 +74,5 @@ jobs: delete_resources "$search_string" "$oc_output" "$namespace" echo "will delete images in tools env" - oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-frontend | awk '{print 
$1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag
           oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-backend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag
\ No newline at end of file

From 493f4fe6fae2927f624ccdcda6a72e2e6b3505d6 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Thu, 4 Apr 2024 12:17:45 -0700
Subject: [PATCH 123/152] comment tools env image cleanup

---
 .github/workflows/cleanup-imagetags.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/cleanup-imagetags.yaml b/.github/workflows/cleanup-imagetags.yaml
index 4f776cd9..e9088775 100644
--- a/.github/workflows/cleanup-imagetags.yaml
+++ b/.github/workflows/cleanup-imagetags.yaml
@@ -74,5 +74,5 @@ jobs:
           delete_resources "$search_string" "$oc_output" "$namespace"
 
           echo "will delete images in tools env"
-          oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-frontend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag
-          oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-backend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag
\ No newline at end of file
+          # oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-frontend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag
+          # oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-backend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag
\ No newline at end of file

From 897ddbd80dfa42e6850f8c29a14a92044f7e228c Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Thu, 4 Apr 2024 12:28:40 -0700
Subject: [PATCH 124/152] update cleanup script

---
 .github/workflows/cleanup-imagetags.yaml | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/cleanup-imagetags.yaml b/.github/workflows/cleanup-imagetags.yaml
index e9088775..e88305a2 100644
--- a/.github/workflows/cleanup-imagetags.yaml
+++ b/.github/workflows/cleanup-imagetags.yaml
@@ -74,5 +74,13 @@ jobs:
           delete_resources "$search_string" "$oc_output" "$namespace"
 
           echo "will delete images in tools env"
-          # oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-frontend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag
-          # oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-backend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag
\ No newline at end of file
+          oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-frontend | awk '{print $1}'
+          if [[ $? != 0 ]]; then
+            oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-frontend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag
+          fi
+
+          oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-backend | awk '{print $1}'
+          if [[ $?
!= 0 ]]; then + frontendimages=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-frontend | awk '{print $1}') + if [ -z "$frontendimages" ]; then oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-frontend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag fi - oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-backend | awk '{print $1}' - if [[ $? != 0 ]]; then + backendimages=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-backend | awk '{print $1}') + if [ -z "$backendimages" ]; then oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-backend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag fi \ No newline at end of file From ae02852bfe47f68f67299c0893bf5a23e60ce391 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Thu, 4 Apr 2024 12:47:20 -0700 Subject: [PATCH 126/152] update test --- .github/workflows/cleanup-imagetags.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cleanup-imagetags.yaml b/.github/workflows/cleanup-imagetags.yaml index 41f6ba12..575cad39 100644 --- a/.github/workflows/cleanup-imagetags.yaml +++ b/.github/workflows/cleanup-imagetags.yaml @@ -75,12 +75,12 @@ jobs: echo "will delete images in tools env" frontendimages=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-frontend | awk '{print $1}') - if [ -z "$frontendimages" ]; then + if [ ! -z "$frontendimages" ]; then oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-frontend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag fi backendimages=$(oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-backend | awk '{print $1}') - if [ -z "$backendimages" ]; then + if [ ! -z "$backendimages" ]; then oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-backend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag fi \ No newline at end of file From 7819e28aec0fa4ecdbfada787c6063594f9a6772 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Fri, 5 Apr 2024 10:50:41 -0700 Subject: [PATCH 127/152] further clean up pods, builds and buildconfigs --- .github/workflows/cleanup-imagetags.yaml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/workflows/cleanup-imagetags.yaml b/.github/workflows/cleanup-imagetags.yaml index 575cad39..63c10c50 100644 --- a/.github/workflows/cleanup-imagetags.yaml +++ b/.github/workflows/cleanup-imagetags.yaml @@ -83,4 +83,13 @@ jobs: if [ ! 
-z "$backendimages" ]; then oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-backend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag fi - \ No newline at end of file + + echo "Cleaning up Completed pods on Dev" + oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev get pods | grep Completed | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev delete pod + + echo "Cleaning up Complete and Failed builds on Tools" + oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get builds | grep Complete | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete build + oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get builds | grep Failed | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete build + + echo "Cleaning up buildconfigs on Tools" + oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get buildconfig | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete buildconfig From e6f69a6153df6f265e88913d406a86c305665df4 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Fri, 5 Apr 2024 10:55:51 -0700 Subject: [PATCH 128/152] except crunchy pods --- .github/workflows/cleanup-imagetags.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cleanup-imagetags.yaml b/.github/workflows/cleanup-imagetags.yaml index 63c10c50..9eb6bec8 100644 --- a/.github/workflows/cleanup-imagetags.yaml +++ b/.github/workflows/cleanup-imagetags.yaml @@ -84,8 +84,8 @@ jobs: oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get imagetags | grep cthub-backend | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete imagetag fi - echo "Cleaning up Completed pods on Dev" - oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev get pods | grep Completed | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev delete pod + echo "Cleaning up Completed pods on Dev except CrunchyDB pods" + oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev get pods | grep Completed | grep -v crunchy | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev delete pod echo "Cleaning up Complete and Failed builds on Tools" oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get builds | grep Complete | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete build From 1d4deffcd2c65a706078e1398fd1cf69ac95dc09 Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Fri, 5 Apr 2024 11:03:58 -0700 Subject: [PATCH 129/152] ignore cleanup errors --- .github/workflows/cleanup-imagetags.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/cleanup-imagetags.yaml b/.github/workflows/cleanup-imagetags.yaml index 9eb6bec8..b8a5c64d 100644 --- a/.github/workflows/cleanup-imagetags.yaml +++ b/.github/workflows/cleanup-imagetags.yaml @@ -85,11 +85,11 @@ jobs: fi echo "Cleaning up Completed pods on Dev except CrunchyDB pods" - oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev get pods | grep Completed | grep -v crunchy | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev delete pod + oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev get pods | grep Completed | grep -v crunchy | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev delete pod || true echo "Cleaning up Complete and Failed builds on Tools" - oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools 
get builds | grep Complete | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete build - oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get builds | grep Failed | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete build + oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get builds | grep Complete | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete build || true + oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get builds | grep Failed | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete build || true echo "Cleaning up buildconfigs on Tools" - oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get buildconfig | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete buildconfig + oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools get buildconfig | awk '{print $1}' | xargs oc -n ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools delete buildconfig || true From fe184010c832f421fd06f4952621d913056a5b8e Mon Sep 17 00:00:00 2001 From: Emily <44536222+emi-hi@users.noreply.github.com> Date: Mon, 8 Apr 2024 10:08:17 -0700 Subject: [PATCH 130/152] feat: updates radio text and order (#274) --- frontend/src/uploads/UploadContainer.js | 2 +- frontend/src/uploads/components/UploadPage.js | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/frontend/src/uploads/UploadContainer.js b/frontend/src/uploads/UploadContainer.js index f6b7b75d..44532f6a 100644 --- a/frontend/src/uploads/UploadContainer.js +++ b/frontend/src/uploads/UploadContainer.js @@ -33,7 +33,7 @@ const UploadContainer = () => { setDatasetList(response.data); setRefresh(false); axios.get(ROUTES_USERS.CURRENT).then((currentUserResp) => { - if (currentUserResp.data.user_permissions.admin === true) { + if (currentUserResp.data && currentUserResp.data.user_permissions && currentUserResp.data.user_permissions.admin === true) { setAdminUser(true); setCurrentUser(currentUserResp.data.idir); } diff --git a/frontend/src/uploads/components/UploadPage.js b/frontend/src/uploads/components/UploadPage.js index d64b70f8..330ff4a4 100644 --- a/frontend/src/uploads/components/UploadPage.js +++ b/frontend/src/uploads/components/UploadPage.js @@ -55,18 +55,19 @@ const UploadPage = (props) => { value={replaceData ? "replace" : "add"} name="radio-buttons-group" onChange={handleRadioChange} + defaultValue="add" > } - label="Replace existing data" + label="Add to existing data (default)" /> } - label="Add to existing data" + label="Replace existing data (data cannot be restored, proceed only if you are certain that the new file contains all required data)." 
/> From 3215248ac4934b0259c59b4c2e4763fe60fec31e Mon Sep 17 00:00:00 2001 From: JulianForeman <71847719+JulianForeman@users.noreply.github.com> Date: Mon, 8 Apr 2024 13:40:22 -0700 Subject: [PATCH 131/152] Clearing text box on button click & sorting list when adding users (#275) --- frontend/src/users/UsersContainer.js | 2 ++ frontend/src/users/components/UsersPage.js | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/frontend/src/users/UsersContainer.js b/frontend/src/users/UsersContainer.js index b9614b3c..5f0381bc 100644 --- a/frontend/src/users/UsersContainer.js +++ b/frontend/src/users/UsersContainer.js @@ -33,8 +33,10 @@ const UsersContainer = (props) => { setUsers( produce((draft) => { draft.push(userObject); + draft.sort((a, b) => a.idir.localeCompare(b.idir)); }), ); + setNewUser('') }) .catch((error) => { setMessageSeverity('error'); diff --git a/frontend/src/users/components/UsersPage.js b/frontend/src/users/components/UsersPage.js index f35f7e3d..c6dfea5d 100644 --- a/frontend/src/users/components/UsersPage.js +++ b/frontend/src/users/components/UsersPage.js @@ -66,7 +66,7 @@ const UsersPage = (props) => { - { setNewUser(event.target.value); setMessage(''); }} /> + { setNewUser(event.target.value); setMessage(''); }} /> From d7c859276f6bf8298496e4c46797b38eaf1646de Mon Sep 17 00:00:00 2001 From: tim738745 <98717409+tim738745@users.noreply.github.com> Date: Wed, 10 Apr 2024 16:13:47 -0700 Subject: [PATCH 132/152] chore: 217 - install formatters and do initial format (#277) * add formatters * format python code with black * format js code with prettier --- django/api/apps.py | 2 +- django/api/asgi.py | 2 +- django/api/constants.py | 207 ++++++---- django/api/decorators/permission.py | 16 +- django/api/filters/icbc_data.py | 11 +- django/api/filters/order_by.py | 51 ++- django/api/keycloak_authentication.py | 44 +- django/api/management/commands/decode_vin.py | 8 +- .../commands/import_arc_project_tracking.py | 27 +- .../commands/import_charger_rebates.py | 23 +- .../management/commands/import_data_fleets.py | 23 +- .../commands/import_hydrogen_fleets.py | 23 +- .../commands/import_hydrogen_fueling.py | 23 +- .../management/commands/import_ldv_rebates.py | 23 +- .../commands/import_public_charging.py | 23 +- .../management/commands/import_scrap_it.py | 23 +- django/api/management/commands/import_suvi.py | 23 +- django/api/migrations/0001_initial.py | 383 +++++++++++++----- django/api/migrations/0002_ldvrebates.py | 132 ++++-- .../migrations/0003_vindecodedinformation.py | 43 +- .../api/migrations/0004_auto_20211116_1813.py | 42 +- .../0005_vindecodedinformation_vin.py | 6 +- .../0006_specialityusevehicleincentives.py | 59 ++- django/api/migrations/0007_datasets.py | 29 +- django/api/migrations/0008_chargerrebates.py | 66 ++- django/api/migrations/0009_publiccharging.py | 90 ++-- .../0010_chargerrebates_fix_columns.py | 14 +- .../api/migrations/0011_auto_20211216_1944.py | 6 +- .../api/migrations/0012_hydrogrenfueling.py | 92 +++-- django/api/migrations/0013_hydrogenfleets.py | 75 ++-- django/api/migrations/0014_datafleets.py | 136 +++++-- .../api/migrations/0015_arcprojecttracking.py | 78 ++-- django/api/migrations/0016_scrapit.py | 77 +++- .../api/migrations/0017_whitelistedusers.py | 29 +- .../api/migrations/0018_auto_20231201_2301.py | 8 +- .../api/migrations/0019_auto_20240223_1820.py | 101 +++-- .../api/migrations/0020_auto_20240311_2136.py | 26 +- .../api/migrations/0021_auto_20240326_2152.py | 6 +- django/api/models/__init__.py | 2 +- 
django/api/models/arc_project_tracking.py | 86 +--- django/api/models/charger_rebates.py | 69 +--- django/api/models/credit_class.py | 9 +- django/api/models/data_fleets.py | 135 ++---- django/api/models/datasets.py | 2 +- django/api/models/hydrogen_fleets.py | 91 +---- django/api/models/hydrogen_fueling.py | 101 +---- django/api/models/icbc_registration_data.py | 14 +- django/api/models/icbc_upload_date.py | 8 +- django/api/models/icbc_vehicle.py | 29 +- django/api/models/ldv_rebates.py | 123 ++---- django/api/models/mixins/effective_dates.py | 10 +- django/api/models/mixins/named.py | 16 +- django/api/models/organization.py | 20 +- django/api/models/permission.py | 10 +- django/api/models/public_charging.py | 87 +--- django/api/models/scrap_it.py | 67 +-- .../speciality_use_vehicle_incentives.py | 44 +- django/api/models/user.py | 6 +- django/api/models/user_permission.py | 16 +- django/api/models/vehicle.py | 68 +--- django/api/models/vehicle_class.py | 7 +- django/api/models/vehicle_zev_type.py | 7 +- django/api/models/vin_decoded_information.py | 32 +- django/api/pagination.py | 4 +- django/api/serializers/datasets.py | 3 +- .../api/serializers/icbc_registration_data.py | 4 +- django/api/serializers/icbc_vehicle.py | 9 +- django/api/serializers/permission.py | 6 +- django/api/serializers/user.py | 22 +- .../services/datasheet_template_generator.py | 45 +- django/api/services/ldv_rebates.py | 44 +- django/api/services/minio.py | 8 +- django/api/services/permissions.py | 7 +- django/api/services/spreadsheet_uploader.py | 107 +++-- .../api/services/spreadsheet_uploader_prep.py | 79 ++-- django/api/services/user.py | 4 +- django/api/services/vin_decoder.py | 69 ++-- django/api/services/vin_decoder_old.py | 28 +- django/api/settings.py | 196 ++++----- django/api/tests/base_test.py | 5 +- django/api/urls.py | 21 +- django/api/viewsets/icbc_data.py | 17 +- django/api/viewsets/minio.py | 10 +- django/api/viewsets/upload.py | 80 ++-- django/api/viewsets/user.py | 33 +- django/api/wsgi.py | 2 +- django/requirements.txt | 1 + frontend/package.json | 1 + frontend/src/app/components/AlertDialog.js | 44 +- frontend/src/app/components/App.js | 30 +- frontend/src/app/components/Footer.js | 80 ++-- frontend/src/app/components/Header.js | 40 +- .../src/app/components/KeycloakProvider.js | 33 +- frontend/src/app/components/Layout.js | 14 +- frontend/src/app/components/Loading.js | 10 +- frontend/src/app/components/Login.js | 23 +- frontend/src/app/components/Logout.js | 20 +- frontend/src/app/components/ReactTable.js | 214 +++++----- .../app/components/ReactTablePagination.js | 33 +- frontend/src/app/styles/App.scss | 24 +- frontend/src/app/styles/FileUpload.scss | 9 +- frontend/src/app/styles/Footer.scss | 115 +++--- frontend/src/app/styles/Header.scss | 166 ++++---- frontend/src/app/styles/Login.scss | 23 +- frontend/src/app/styles/ReactTable.scss | 6 +- frontend/src/app/styles/Roboto.scss | 14 +- frontend/src/app/styles/Users.scss | 18 +- frontend/src/app/styles/index.scss | 18 +- frontend/src/app/styles/variables.scss | 2 +- frontend/src/app/utilities/getFileSize.js | 7 +- frontend/src/app/utilities/props.js | 2 +- frontend/src/app/utilities/reactTable.js | 12 +- frontend/src/app/utilities/useAxios.js | 32 +- frontend/src/app/utilities/useKeycloak.js | 12 +- frontend/src/contexts.js | 4 +- frontend/src/dashboard/DashboardContainer.js | 4 +- frontend/src/dashboard/router.js | 11 +- frontend/src/icbc_data/IcbcDataContainer.js | 14 +- .../src/icbc_data/components/IcbcDataTable.js | 77 ++-- 
frontend/src/icbc_data/router.js | 16 +- frontend/src/icbc_data/routes.js | 2 +- frontend/src/index.js | 30 +- frontend/src/uploads/UploadContainer.js | 197 +++++---- frontend/src/uploads/components/FileDrop.js | 36 +- .../src/uploads/components/FileDropArea.js | 47 +-- frontend/src/uploads/components/UploadPage.js | 60 ++- frontend/src/uploads/router.js | 16 +- frontend/src/uploads/routes.js | 6 +- frontend/src/users/UsersContainer.js | 102 ++--- frontend/src/users/components/UsersPage.js | 132 ++++-- frontend/src/users/routes.js | 2 +- 131 files changed, 2924 insertions(+), 2747 deletions(-) diff --git a/django/api/apps.py b/django/api/apps.py index d87006dd..14b89a82 100644 --- a/django/api/apps.py +++ b/django/api/apps.py @@ -2,4 +2,4 @@ class ApiConfig(AppConfig): - name = 'api' + name = "api" diff --git a/django/api/asgi.py b/django/api/asgi.py index 82cb1c14..1bc5b37e 100644 --- a/django/api/asgi.py +++ b/django/api/asgi.py @@ -11,6 +11,6 @@ from django.core.asgi import get_asgi_application -os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'api.settings') +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.settings") application = get_asgi_application() diff --git a/django/api/constants.py b/django/api/constants.py index d461598c..ce0ddef6 100644 --- a/django/api/constants.py +++ b/django/api/constants.py @@ -13,8 +13,15 @@ from api.models.public_charging import PublicCharging from api.models.scrap_it import ScrapIt from api.models.speciality_use_vehicle_incentives import SpecialityUseVehicleIncentives -from api.services.spreadsheet_uploader_prep import prepare_arc_project_tracking, prepare_hydrogen_fleets, prepare_hydrogen_fueling, prepare_ldv_rebates, prepare_public_charging, prepare_scrap_it, prepare_speciality_use_vehicle_incentives - +from api.services.spreadsheet_uploader_prep import ( + prepare_arc_project_tracking, + prepare_hydrogen_fleets, + prepare_hydrogen_fueling, + prepare_ldv_rebates, + prepare_public_charging, + prepare_scrap_it, + prepare_speciality_use_vehicle_incentives, +) class ARCProjectTrackingColumns(Enum): @@ -34,6 +41,7 @@ class ARCProjectTrackingColumns(Enum): FUEL_TYPE = "Fuel Type" PUBLICLY_ANNOUNCED = "Publicly Announced" + class ArcProjectTrackingColumnMapping(Enum): funding_call = "Funding Call" proponent = "Proponent" @@ -51,6 +59,7 @@ class ArcProjectTrackingColumnMapping(Enum): fuel_type = "Fuel Type" publicly_announced = "Publicly Announced" + class EVChargingRebatesColumns(Enum): ORGANIZATION = "Organization" REGION = "Region" @@ -63,6 +72,7 @@ class EVChargingRebatesColumns(Enum): BC_EMPR_FUNDING_ANTICIPATED = "B.C. (EMPR) Funding Anticipated (Max $25,000 per station, excludes MOTI stations) (Not all funding paid out yet as depends on station completion)" NOTES = "Notes" + class EVChargingRebatesColumnMapping(Enum): organization = "Organization" region = "Region" @@ -95,15 +105,24 @@ class DataFleetsColumns(Enum): AVERAGE_DAILY_TRAVEL_DISTANCE = "Average daily travel distance?" WHICH_COMPONENT_ARE_YOU_APPLYING_FOR = "Which component are you applying for?*" ESTIMATED_COST = "Estimated cost" - WHICH_TYPE_OF_CHARGER_ARE_YOU_INSTALLING = "Which type of charger are you installing?" 
- HOW_MANY_LEVEL_2_CHARGING_STATIONS = "How many Level 2 Charging Stations are you applying for" - HOW_MANY_LEVEL_3_DC_FAST_CHARGING_STATIONS = "How many Level 3/DC Fast Charging Stations are you applying for" - APPLICATION_FORM_FLEETS_COMPLETION_DATE_TIME = '"Application Form Fleets" completion date/time' + WHICH_TYPE_OF_CHARGER_ARE_YOU_INSTALLING = ( + "Which type of charger are you installing?" + ) + HOW_MANY_LEVEL_2_CHARGING_STATIONS = ( + "How many Level 2 Charging Stations are you applying for" + ) + HOW_MANY_LEVEL_3_DC_FAST_CHARGING_STATIONS = ( + "How many Level 3/DC Fast Charging Stations are you applying for" + ) + APPLICATION_FORM_FLEETS_COMPLETION_DATE_TIME = ( + '"Application Form Fleets" completion date/time' + ) PRE_APPROVAL_DATE = "Pre-Approval Date" DEADLINE = "Deadline" APPLICATION_NUMBER = "Application Number" POTENTIAL_REBATE = "Potential Rebate" + class DataFleetsColumnMapping(Enum): current_stage = "Current Stage" rebate_value = "Rebate Value" @@ -124,14 +143,21 @@ class DataFleetsColumnMapping(Enum): component_being_applied_for = "Which component are you applying for?*" estimated_cost = "Estimated cost" type_of_charger_being_installed = "Which type of charger are you installing?" - number_of_level_2_charging_stations_being_applied_for = "How many Level 2 Charging Stations are you applying for" - number_of_level_3_dc_fast_charging_stations_being_applied_for = "How many Level 3/DC Fast Charging Stations are you applying for" - application_form_fleets_completion_date_time = '"Application Form Fleets" completion date/time' + number_of_level_2_charging_stations_being_applied_for = ( + "How many Level 2 Charging Stations are you applying for" + ) + number_of_level_3_dc_fast_charging_stations_being_applied_for = ( + "How many Level 3/DC Fast Charging Stations are you applying for" + ) + application_form_fleets_completion_date_time = ( + '"Application Form Fleets" completion date/time' + ) pre_approval_date = "Pre-Approval Date" deadline = "Deadline" application_number = "Application Number" potential_rebate = "Potential Rebate" + class HydrogenFleetsColumns(Enum): APPLICATION_NUMBER = "Application #" FLEET_NUMBER = "Fleet #" @@ -149,6 +175,7 @@ class HydrogenFleetsColumns(Enum): DEALER_NAME = "Dealer Name" REBATE_AMOUNT = "Rebate Amount" + class HydrogenFleetsColumnMapping(Enum): application_number = "Application #" fleet_number = "Fleet #" @@ -166,6 +193,7 @@ class HydrogenFleetsColumnMapping(Enum): dealer_name = "Dealer Name" rebate_amount = "Rebate Amount" + class HydrogenFuelingColumns(Enum): STATION_NUMBER = "Station Number" RFP_CLOSE_DATE = "RFP Close Date" @@ -186,6 +214,7 @@ class HydrogenFuelingColumns(Enum): OPENING_DATE = "Opening Date" TOTAL_CAPITAL_COST = "Total Capital Cost" + class HydrogenFuelingColumnMapping(Enum): station_number = "Station Number" rfp_close_date = "RFP Close Date" @@ -206,6 +235,7 @@ class HydrogenFuelingColumnMapping(Enum): opening_date = "Opening Date" total_capital_cost = "Total Capital Cost" + class LDVRebatesColumns(Enum): CASL_CONSENT = "CASL Consent" DATE_APPROVED = "DATE APPROVED" @@ -235,6 +265,7 @@ class LDVRebatesColumns(Enum): DELIVERED = "Delivered" CONSENT_TO_CONTACT = "Consent to Contact" + class LdvRebatesColumnMapping(Enum): casl_consent = "CASL Consent" date_approved = "DATE APPROVED" @@ -284,6 +315,7 @@ class PublicChargingColumns(Enum): REVIEW_NUMBER = "Review Number" PAID_OUT_REBATE_AMOUNT = "Paid out rebate amount" + class PublicChargingColumnMapping(Enum): applicant_name = "Applicant Name" address = "Address" @@ 
-303,6 +335,7 @@ class PublicChargingColumnMapping(Enum): review_number = "Review Number" rebate_paid = "Paid out rebate amount" + class ScrapItColumns(Enum): APPROVAL_NUM = "Approval Num" APP_RECVD_DATE = "App Recv'd Date" @@ -316,6 +349,7 @@ class ScrapItColumns(Enum): BUDGET_CODE = "Budget Code" SCRAP_DATE = "Scrap Date" + class ScrapItColumnMapping(Enum): approval_number = "Approval Num" application_received_date = "App Recv'd Date" @@ -329,6 +363,7 @@ class ScrapItColumnMapping(Enum): budget_code = "Budget Code" scrap_date = "Scrap Date" + class SpecialityUseVehicleIncentiveProgramColumns(Enum): APPROVALS = "Approvals" DATE = "Date" @@ -342,6 +377,7 @@ class SpecialityUseVehicleIncentiveProgramColumns(Enum): MANUFACTURER = "Manufacturer" MODEL = "Model" + class SpecialityUseVehicleIncentivesColumnMapping(Enum): approvals = "Approvals" date = "Date" @@ -354,8 +390,9 @@ class SpecialityUseVehicleIncentivesColumnMapping(Enum): manufacturer = "Manufacturer" model = "Model" + FIELD_TYPES = { - 'ARC Project Tracking': { + "ARC Project Tracking": { "funding_call": str, "proponent": str, "reference_number": str, @@ -372,7 +409,7 @@ class SpecialityUseVehicleIncentivesColumnMapping(Enum): "fuel_type": str, "publicly_announced": bool, }, - 'EV Charging Rebates': { + "EV Charging Rebates": { "organization": str, "region": str, "city": str, @@ -384,7 +421,7 @@ class SpecialityUseVehicleIncentivesColumnMapping(Enum): "rebate_paid": float, "notes": str, }, - 'Data Fleets': { + "Data Fleets": { "current_stage": str, "rebate_value": str, "legal_name_of_organization_fleet": str, @@ -412,7 +449,7 @@ class SpecialityUseVehicleIncentivesColumnMapping(Enum): "application_number": str, "potential_rebate": str, }, - 'Hydrogen Fleets': { + "Hydrogen Fleets": { "application_number": int, "fleet_number": int, "application_date": str, @@ -427,9 +464,9 @@ class SpecialityUseVehicleIncentivesColumnMapping(Enum): "year": str, "purchase_date": str, "dealer_name": str, - "rebate_amount": str + "rebate_amount": str, }, - 'Hydrogen Fueling': { + "Hydrogen Fueling": { "station_number": int, "rfp_close_date": datetime.date, "station_name": str, @@ -447,9 +484,9 @@ class SpecialityUseVehicleIncentivesColumnMapping(Enum): "number_of_fueling_positions": int, "operational_date": datetime.date, "opening_date": datetime.date, - "total_capital_cost": Decimal + "total_capital_cost": Decimal, }, - 'LDV Rebates': { + "LDV Rebates": { "casl_consent": bool, "date_approved": str, "submission_id": int, @@ -478,7 +515,7 @@ class SpecialityUseVehicleIncentivesColumnMapping(Enum): "delivered": bool, "consent_to_contact": bool, }, - 'Public Charging': { + "Public Charging": { "applicant_name": str, "address": str, "charging_station_info": str, @@ -497,7 +534,7 @@ class SpecialityUseVehicleIncentivesColumnMapping(Enum): "review_number": int, "rebate_paid": float, }, - 'Scrap It': { + "Scrap It": { "approval_number": int, "application_received_date": str, "completion_date": str, @@ -510,7 +547,7 @@ class SpecialityUseVehicleIncentivesColumnMapping(Enum): "budget_code": str, "scrap_date": str, }, - 'Specialty Use Vehicle Incentive Program': { + "Specialty Use Vehicle Incentive Program": { "approvals": str, "date": str, "applicant_name": str, @@ -521,71 +558,71 @@ class SpecialityUseVehicleIncentivesColumnMapping(Enum): "total_purchase_price": int, "manufacturer": str, "model": str, - } + }, } DATASET_CONFIG = { - 'ARC Project Tracking': { - 'model': ARCProjectTracking, - 'columns': ARCProjectTrackingColumns, - 'column_mapping': 
ArcProjectTrackingColumnMapping, - 'sheet_name': 'Project_Tracking', - 'preparation_functions': [prepare_arc_project_tracking] - }, - 'EV Charging Rebates': { - 'model': ChargerRebates, - 'columns': EVChargingRebatesColumns, - 'column_mapping': EVChargingRebatesColumnMapping, - 'sheet_name': 'Updated', - 'header_row': 2 - }, - 'Data Fleets': { - 'model': DataFleets, - 'columns': DataFleetsColumns, - 'column_mapping': DataFleetsColumnMapping, - 'sheet_name': 'Data Fleets' - }, - 'Hydrogen Fleets': { - 'model': HydrogenFleets, - 'columns': HydrogenFleetsColumnMapping, - 'column_mapping': HydrogenFleetsColumnMapping, - 'sheet_name': 'Fleets', - 'preparation_functions': [prepare_hydrogen_fleets] - }, - 'Hydrogen Fueling': { - 'model': HydrogrenFueling, - 'columns': HydrogenFuelingColumnMapping, - 'column_mapping': HydrogenFuelingColumnMapping, - 'sheet_name': 'Station_Tracking', - 'preparation_functions': [prepare_hydrogen_fueling] - }, - 'LDV Rebates': { - 'model': LdvRebates, - 'columns': LdvRebatesColumnMapping, - 'sheet_name': 'Raw Data', - 'preparation_functions': [prepare_ldv_rebates] - }, - 'Public Charging': { - 'model': PublicCharging, - 'columns': PublicChargingColumns, - 'column_mapping': PublicChargingColumnMapping, - 'sheet_name': 'Project_applications', - 'header_row': 2, - 'preparation_functions': [prepare_public_charging] - }, - 'Scrap It': { - 'model': ScrapIt, - 'columns': ScrapItColumns, - 'column_mapping': ScrapItColumnMapping, - 'sheet_name': 'TOP OTHER TRANSACTIONS', - 'header_row': 5, - 'preparation_functions': [prepare_scrap_it] - }, - 'Specialty Use Vehicle Incentive Program': { - 'model': SpecialityUseVehicleIncentives, - 'columns': SpecialityUseVehicleIncentiveProgramColumns, - 'column_mapping': SpecialityUseVehicleIncentivesColumnMapping, - 'sheet_name': 'Sheet1', - 'preparation_functions': [prepare_speciality_use_vehicle_incentives] - }, - } \ No newline at end of file + "ARC Project Tracking": { + "model": ARCProjectTracking, + "columns": ARCProjectTrackingColumns, + "column_mapping": ArcProjectTrackingColumnMapping, + "sheet_name": "Project_Tracking", + "preparation_functions": [prepare_arc_project_tracking], + }, + "EV Charging Rebates": { + "model": ChargerRebates, + "columns": EVChargingRebatesColumns, + "column_mapping": EVChargingRebatesColumnMapping, + "sheet_name": "Updated", + "header_row": 2, + }, + "Data Fleets": { + "model": DataFleets, + "columns": DataFleetsColumns, + "column_mapping": DataFleetsColumnMapping, + "sheet_name": "Data Fleets", + }, + "Hydrogen Fleets": { + "model": HydrogenFleets, + "columns": HydrogenFleetsColumnMapping, + "column_mapping": HydrogenFleetsColumnMapping, + "sheet_name": "Fleets", + "preparation_functions": [prepare_hydrogen_fleets], + }, + "Hydrogen Fueling": { + "model": HydrogrenFueling, + "columns": HydrogenFuelingColumnMapping, + "column_mapping": HydrogenFuelingColumnMapping, + "sheet_name": "Station_Tracking", + "preparation_functions": [prepare_hydrogen_fueling], + }, + "LDV Rebates": { + "model": LdvRebates, + "columns": LdvRebatesColumnMapping, + "sheet_name": "Raw Data", + "preparation_functions": [prepare_ldv_rebates], + }, + "Public Charging": { + "model": PublicCharging, + "columns": PublicChargingColumns, + "column_mapping": PublicChargingColumnMapping, + "sheet_name": "Project_applications", + "header_row": 2, + "preparation_functions": [prepare_public_charging], + }, + "Scrap It": { + "model": ScrapIt, + "columns": ScrapItColumns, + "column_mapping": ScrapItColumnMapping, + "sheet_name": "TOP OTHER 
TRANSACTIONS", + "header_row": 5, + "preparation_functions": [prepare_scrap_it], + }, + "Specialty Use Vehicle Incentive Program": { + "model": SpecialityUseVehicleIncentives, + "columns": SpecialityUseVehicleIncentiveProgramColumns, + "column_mapping": SpecialityUseVehicleIncentivesColumnMapping, + "sheet_name": "Sheet1", + "preparation_functions": [prepare_speciality_use_vehicle_incentives], + }, +} diff --git a/django/api/decorators/permission.py b/django/api/decorators/permission.py index beb030bc..a2836c97 100644 --- a/django/api/decorators/permission.py +++ b/django/api/decorators/permission.py @@ -5,6 +5,7 @@ from api.models.user import User from api.models.user_permission import UserPermission + def check_upload_permission(): def wrapper(func): def wrapped(request, *args, **kwargs): @@ -15,22 +16,29 @@ def wrapped(request, *args, **kwargs): for each in user_permission: permission = Permission.objects.get(id=each.permission_id) permissions.append(permission.description) - if 'uploader' not in permissions: + if "uploader" not in permissions: return Response( - 'You do not have permission to upload data.', status=status.HTTP_403_FORBIDDEN + "You do not have permission to upload data.", + status=status.HTTP_403_FORBIDDEN, ) return func(request, *args, **kwargs) + return wrapped + return wrapper + def check_admin_permission(): def wrapper(func): def wrapped(request, *args, **kwargs): permissions = create_permission_list(request.user) - if 'admin' not in permissions: + if "admin" not in permissions: return Response( - "You do not have permission to make changes to other users' permissions.", status=status.HTTP_403_FORBIDDEN + "You do not have permission to make changes to other users' permissions.", + status=status.HTTP_403_FORBIDDEN, ) return func(request, *args, **kwargs) + return wrapped + return wrapper diff --git a/django/api/filters/icbc_data.py b/django/api/filters/icbc_data.py index 87db8c36..722f9b43 100644 --- a/django/api/filters/icbc_data.py +++ b/django/api/filters/icbc_data.py @@ -7,6 +7,7 @@ Further reading: https://django-filter.readthedocs.io/en/master/ref/filters.html """ + from django.db.models import Q from django_filters import FilterSet, CharFilter @@ -14,13 +15,13 @@ class IcbcDataFilter(FilterSet): - icbc_vehicle__make = CharFilter(lookup_expr='icontains') - icbc_vehicle__model_name = CharFilter(lookup_expr='icontains') - icbc_vehicle__model_year__name = CharFilter(lookup_expr='icontains') - vin = CharFilter(lookup_expr='icontains') + icbc_vehicle__make = CharFilter(lookup_expr="icontains") + icbc_vehicle__model_name = CharFilter(lookup_expr="icontains") + icbc_vehicle__model_year__name = CharFilter(lookup_expr="icontains") + vin = CharFilter(lookup_expr="icontains") class Meta: model = IcbcRegistrationData fields = [ - 'vin', + "vin", ] diff --git a/django/api/filters/order_by.py b/django/api/filters/order_by.py index c84e33de..1f4e8a07 100644 --- a/django/api/filters/order_by.py +++ b/django/api/filters/order_by.py @@ -3,6 +3,7 @@ This is to support ordering by nested fields without needing to add custom coding per Model """ + from typing import List, Tuple from rest_framework.filters import OrderingFilter from django.db.models import Field, Model, QuerySet @@ -13,15 +14,14 @@ class RelatedOrderingFilter(OrderingFilter): @staticmethod def _get_verbose_name(field: Field, non_verbose_name: str) -> str: - return field.verbose_name \ - if hasattr(field, 'verbose_name') \ - else non_verbose_name.replace('_', ' ') + return ( + field.verbose_name + if hasattr(field, 
"verbose_name") + else non_verbose_name.replace("_", " ") + ) def _retrieve_all_related_fields( - self, - fields: Tuple[Field], - model: Model, - depth: int = 0 + self, fields: Tuple[Field], model: Model, depth: int = 0 ) -> List[tuple]: valid_fields = [] if depth > self._max_related_depth: @@ -31,38 +31,37 @@ def _retrieve_all_related_fields( rel_fields = self._retrieve_all_related_fields( field.related_model._meta.get_fields(), field.related_model, - depth + 1 + depth + 1, ) for rel_field in rel_fields: - valid_fields.append(( - f'{field.name}__{rel_field[0]}', - self._get_verbose_name(field, rel_field[1]) - )) + valid_fields.append( + ( + f"{field.name}__{rel_field[0]}", + self._get_verbose_name(field, rel_field[1]), + ) + ) else: - valid_fields.append(( - field.name, - self._get_verbose_name(field, field.name), - )) + valid_fields.append( + ( + field.name, + self._get_verbose_name(field, field.name), + ) + ) return valid_fields def get_valid_fields( - self, - queryset: QuerySet, - view, - context: dict = None + self, queryset: QuerySet, view, context: dict = None ) -> List[tuple]: - valid_fields = getattr(view, 'ordering_fields', self.ordering_fields) - if not valid_fields == '__all_related__': + valid_fields = getattr(view, "ordering_fields", self.ordering_fields) + if not valid_fields == "__all_related__": if not context: context = {} valid_fields = super().get_valid_fields(queryset, view, context) else: valid_fields = [ *self._retrieve_all_related_fields( - queryset.model._meta.get_fields(), - queryset.model + queryset.model._meta.get_fields(), queryset.model ), - *[(key, key.title().split('__')) - for key in queryset.query.annotations] + *[(key, key.title().split("__")) for key in queryset.query.annotations], ] return valid_fields diff --git a/django/api/keycloak_authentication.py b/django/api/keycloak_authentication.py index d44f7a03..7abd1d86 100644 --- a/django/api/keycloak_authentication.py +++ b/django/api/keycloak_authentication.py @@ -6,57 +6,45 @@ class KeycloakAuthentication(authentication.BaseAuthentication): def authenticate(self, request): - auth = request.headers.get('Authorization', None) + auth = request.headers.get("Authorization", None) if not auth: - raise exceptions.AuthenticationFailed( - 'Authorization token required' - ) + raise exceptions.AuthenticationFailed("Authorization token required") try: scheme, token = auth.split() except ValueError: - raise exceptions.AuthenticationFailed( - 'Authorization token required' - ) + raise exceptions.AuthenticationFailed("Authorization token required") if not token: - raise exceptions.AuthenticationFailed( - 'Authorization token required' - ) + raise exceptions.AuthenticationFailed("Authorization token required") keycloak_openid = KeycloakOpenID( server_url=settings.KEYCLOAK_URL, client_id=settings.KEYCLOAK_CLIENT_ID, - realm_name=settings.KEYCLOAK_REALM + realm_name=settings.KEYCLOAK_REALM, ) # Decode the token from the front-end - KEYCLOAK_PUBLIC_KEY = \ - "-----BEGIN PUBLIC KEY-----\n" + \ - keycloak_openid.public_key() + \ - "\n-----END PUBLIC KEY-----" + KEYCLOAK_PUBLIC_KEY = ( + "-----BEGIN PUBLIC KEY-----\n" + + keycloak_openid.public_key() + + "\n-----END PUBLIC KEY-----" + ) - options = { - 'verify_signature': True, - 'verify_aud': True, - 'verify_exp': True - } + options = {"verify_signature": True, "verify_aud": True, "verify_exp": True} token_info = keycloak_openid.decode_token( - token, - key=KEYCLOAK_PUBLIC_KEY, - options=options + token, key=KEYCLOAK_PUBLIC_KEY, options=options ) user_info = 
keycloak_openid.userinfo(token) - if user_info.get('user_id') != token_info.get('user_id'): - raise exceptions.AuthenticationFailed( - 'Invalid Token' - ) - return user_info.get('idir_username'), None + if user_info.get("user_id") != token_info.get("user_id"): + raise exceptions.AuthenticationFailed("Invalid Token") + return user_info.get("idir_username"), None # user = None + # if 'user_id' not in user_info: # # try email # if 'email' in user_info: diff --git a/django/api/management/commands/decode_vin.py b/django/api/management/commands/decode_vin.py index 7b2b224e..327a2a41 100644 --- a/django/api/management/commands/decode_vin.py +++ b/django/api/management/commands/decode_vin.py @@ -3,12 +3,8 @@ class Command(BaseCommand): - help = 'Loads operational data' + help = "Loads operational data" def handle(self, *args, **options): decoder() - self.stdout.write( - self.style.SUCCESS( - 'Decoding Completed!' - ) - ) + self.stdout.write(self.style.SUCCESS("Decoding Completed!")) diff --git a/django/api/management/commands/import_arc_project_tracking.py b/django/api/management/commands/import_arc_project_tracking.py index 5c937cb5..e5159b53 100644 --- a/django/api/management/commands/import_arc_project_tracking.py +++ b/django/api/management/commands/import_arc_project_tracking.py @@ -2,39 +2,36 @@ from os import path from django.core.management import BaseCommand -#from django.api.services.spreadsheet_uploader_prep import import_from_xls +# from django.api.services.spreadsheet_uploader_prep import import_from_xls class Command(BaseCommand): """ This command takes in an excel file and will parse and create records """ - help = 'Loads file into the data fleets table' + + help = "Loads file into the data fleets table" def add_arguments(self, parser): """ Currently only takes in an excel file as a required argument """ - parser.add_argument( - 'xls_file', help='Filename of the xls being imported' - ) + parser.add_argument("xls_file", help="Filename of the xls being imported") def handle(self, *args, **options): """ Function to parse the file and pass it to the import service """ - xls_file = options.get('xls_file') + xls_file = options.get("xls_file") if not path.exists(xls_file): - self.stdout.write(self.style.ERROR( - 'Cannot find {file}. ' - 'Please make sure the filename is correct.'.format( - file=xls_file + self.stdout.write( + self.style.ERROR( + "Cannot find {file}. 
" + "Please make sure the filename is correct.".format(file=xls_file) ) - )) + ) return False - #import_from_xls(xls_file) - self.stdout.write(self.style.SUCCESS( - 'Import complete' - )) + # import_from_xls(xls_file) + self.stdout.write(self.style.SUCCESS("Import complete")) diff --git a/django/api/management/commands/import_charger_rebates.py b/django/api/management/commands/import_charger_rebates.py index 395e6ab6..366c1685 100644 --- a/django/api/management/commands/import_charger_rebates.py +++ b/django/api/management/commands/import_charger_rebates.py @@ -11,32 +11,29 @@ class Command(BaseCommand): TODO: Allow users to put in a directory as an argument so that the function can parse multiple files """ - help = 'Loads file into the ldv rebates table' + + help = "Loads file into the ldv rebates table" def add_arguments(self, parser): """ Currently only takes in an excel file as a required argument """ - parser.add_argument( - 'xls_file', help='Filename of the xls being imported' - ) + parser.add_argument("xls_file", help="Filename of the xls being imported") def handle(self, *args, **options): """ Function to parse the file and pass it to the import service """ - xls_file = options.get('xls_file') + xls_file = options.get("xls_file") if not path.exists(xls_file): - self.stdout.write(self.style.ERROR( - 'Cannot find {file}. ' - 'Please make sure the filename is correct.'.format( - file=xls_file + self.stdout.write( + self.style.ERROR( + "Cannot find {file}. " + "Please make sure the filename is correct.".format(file=xls_file) ) - )) + ) return False import_from_xls(xls_file) - self.stdout.write(self.style.SUCCESS( - 'Import complete' - )) + self.stdout.write(self.style.SUCCESS("Import complete")) diff --git a/django/api/management/commands/import_data_fleets.py b/django/api/management/commands/import_data_fleets.py index 33306804..3ce9fe09 100644 --- a/django/api/management/commands/import_data_fleets.py +++ b/django/api/management/commands/import_data_fleets.py @@ -9,32 +9,29 @@ class Command(BaseCommand): """ This command takes in an excel file and will parse and create records """ - help = 'Loads file into the data fleets table' + + help = "Loads file into the data fleets table" def add_arguments(self, parser): """ Currently only takes in an excel file as a required argument """ - parser.add_argument( - 'xls_file', help='Filename of the xls being imported' - ) + parser.add_argument("xls_file", help="Filename of the xls being imported") def handle(self, *args, **options): """ Function to parse the file and pass it to the import service """ - xls_file = options.get('xls_file') + xls_file = options.get("xls_file") if not path.exists(xls_file): - self.stdout.write(self.style.ERROR( - 'Cannot find {file}. ' - 'Please make sure the filename is correct.'.format( - file=xls_file + self.stdout.write( + self.style.ERROR( + "Cannot find {file}. 
" + "Please make sure the filename is correct.".format(file=xls_file) ) - )) + ) return False import_from_xls(xls_file) - self.stdout.write(self.style.SUCCESS( - 'Import complete' - )) + self.stdout.write(self.style.SUCCESS("Import complete")) diff --git a/django/api/management/commands/import_hydrogen_fleets.py b/django/api/management/commands/import_hydrogen_fleets.py index 67a8df28..c715a69f 100644 --- a/django/api/management/commands/import_hydrogen_fleets.py +++ b/django/api/management/commands/import_hydrogen_fleets.py @@ -9,32 +9,29 @@ class Command(BaseCommand): """ This command takes in an excel file and will parse and create records """ - help = 'Loads file into the hydrogen fleets table' + + help = "Loads file into the hydrogen fleets table" def add_arguments(self, parser): """ Currently only takes in an excel file as a required argument """ - parser.add_argument( - 'xls_file', help='Filename of the xls being imported' - ) + parser.add_argument("xls_file", help="Filename of the xls being imported") def handle(self, *args, **options): """ Function to parse the file and pass it to the import service """ - xls_file = options.get('xls_file') + xls_file = options.get("xls_file") if not path.exists(xls_file): - self.stdout.write(self.style.ERROR( - 'Cannot find {file}. ' - 'Please make sure the filename is correct.'.format( - file=xls_file + self.stdout.write( + self.style.ERROR( + "Cannot find {file}. " + "Please make sure the filename is correct.".format(file=xls_file) ) - )) + ) return False import_from_xls(xls_file) - self.stdout.write(self.style.SUCCESS( - 'Import complete' - )) + self.stdout.write(self.style.SUCCESS("Import complete")) diff --git a/django/api/management/commands/import_hydrogen_fueling.py b/django/api/management/commands/import_hydrogen_fueling.py index 38c84b1f..3db1e30a 100644 --- a/django/api/management/commands/import_hydrogen_fueling.py +++ b/django/api/management/commands/import_hydrogen_fueling.py @@ -10,32 +10,29 @@ class Command(BaseCommand): TODO: Allow users to put in a directory as an argument so that the function can parse multiple files """ - help = 'Loads file into the hydrogen fueling table' + + help = "Loads file into the hydrogen fueling table" def add_arguments(self, parser): """ Currently only takes in an excel file as a required argument """ - parser.add_argument( - 'xls_file', help='Filename of the xls being imported' - ) + parser.add_argument("xls_file", help="Filename of the xls being imported") def handle(self, *args, **options): """ Function to parse the file and pass it to the import service """ - xls_file = options.get('xls_file') + xls_file = options.get("xls_file") if not path.exists(xls_file): - self.stdout.write(self.style.ERROR( - 'Cannot find {file}. ' - 'Please make sure the filename is correct.'.format( - file=xls_file + self.stdout.write( + self.style.ERROR( + "Cannot find {file}. 
" + "Please make sure the filename is correct.".format(file=xls_file) ) - )) + ) return False import_from_xls(xls_file) - self.stdout.write(self.style.SUCCESS( - 'Import complete' - )) + self.stdout.write(self.style.SUCCESS("Import complete")) diff --git a/django/api/management/commands/import_ldv_rebates.py b/django/api/management/commands/import_ldv_rebates.py index 35101d95..27a34f12 100644 --- a/django/api/management/commands/import_ldv_rebates.py +++ b/django/api/management/commands/import_ldv_rebates.py @@ -11,32 +11,29 @@ class Command(BaseCommand): TODO: Allow users to put in a directory as an argument so that the function can parse multiple files """ - help = 'Loads file into the ldv rebates table' + + help = "Loads file into the ldv rebates table" def add_arguments(self, parser): """ Currently only takes in an excel file as a required argument """ - parser.add_argument( - 'xls_file', help='Filename of the xls being imported' - ) + parser.add_argument("xls_file", help="Filename of the xls being imported") def handle(self, *args, **options): """ Function to parse the file and pass it to the import service """ - xls_file = options.get('xls_file') + xls_file = options.get("xls_file") if not path.exists(xls_file): - self.stdout.write(self.style.ERROR( - 'Cannot find {file}. ' - 'Please make sure the filename is correct.'.format( - file=xls_file + self.stdout.write( + self.style.ERROR( + "Cannot find {file}. " + "Please make sure the filename is correct.".format(file=xls_file) ) - )) + ) return False import_from_xls(xls_file) - self.stdout.write(self.style.SUCCESS( - 'Import complete' - )) + self.stdout.write(self.style.SUCCESS("Import complete")) diff --git a/django/api/management/commands/import_public_charging.py b/django/api/management/commands/import_public_charging.py index 721d4ce5..88a8b9b3 100644 --- a/django/api/management/commands/import_public_charging.py +++ b/django/api/management/commands/import_public_charging.py @@ -10,32 +10,29 @@ class Command(BaseCommand): TODO: Allow users to put in a directory as an argument so that the function can parse multiple files """ - help = 'Loads file into the public charging table' + + help = "Loads file into the public charging table" def add_arguments(self, parser): """ Currently only takes in an excel file as a required argument """ - parser.add_argument( - 'xls_file', help='Filename of the xls being imported' - ) + parser.add_argument("xls_file", help="Filename of the xls being imported") def handle(self, *args, **options): """ Function to parse the file and pass it to the import service """ - xls_file = options.get('xls_file') + xls_file = options.get("xls_file") if not path.exists(xls_file): - self.stdout.write(self.style.ERROR( - 'Cannot find {file}. ' - 'Please make sure the filename is correct.'.format( - file=xls_file + self.stdout.write( + self.style.ERROR( + "Cannot find {file}. 
" + "Please make sure the filename is correct.".format(file=xls_file) ) - )) + ) return False import_from_xls(xls_file) - self.stdout.write(self.style.SUCCESS( - 'Import complete' - )) + self.stdout.write(self.style.SUCCESS("Import complete")) diff --git a/django/api/management/commands/import_scrap_it.py b/django/api/management/commands/import_scrap_it.py index 56e424dc..b16a537c 100644 --- a/django/api/management/commands/import_scrap_it.py +++ b/django/api/management/commands/import_scrap_it.py @@ -9,32 +9,29 @@ class Command(BaseCommand): """ This command takes in an excel file and will parse and create records """ - help = 'Loads file into the data fleets table' + + help = "Loads file into the data fleets table" def add_arguments(self, parser): """ Currently only takes in an excel file as a required argument """ - parser.add_argument( - 'xls_file', help='Filename of the xls being imported' - ) + parser.add_argument("xls_file", help="Filename of the xls being imported") def handle(self, *args, **options): """ Function to parse the file and pass it to the import service """ - xls_file = options.get('xls_file') + xls_file = options.get("xls_file") if not path.exists(xls_file): - self.stdout.write(self.style.ERROR( - 'Cannot find {file}. ' - 'Please make sure the filename is correct.'.format( - file=xls_file + self.stdout.write( + self.style.ERROR( + "Cannot find {file}. " + "Please make sure the filename is correct.".format(file=xls_file) ) - )) + ) return False import_from_xls(xls_file) - self.stdout.write(self.style.SUCCESS( - 'Import complete' - )) + self.stdout.write(self.style.SUCCESS("Import complete")) diff --git a/django/api/management/commands/import_suvi.py b/django/api/management/commands/import_suvi.py index a7b76636..897c0b8c 100644 --- a/django/api/management/commands/import_suvi.py +++ b/django/api/management/commands/import_suvi.py @@ -10,32 +10,29 @@ class Command(BaseCommand): TODO: Allow users to put in a directory as an argument so that the function can parse multiple files """ - help = 'Loads file into the speciality use vehicle incentives table' + + help = "Loads file into the speciality use vehicle incentives table" def add_arguments(self, parser): """ Currently only takes in an excel file as a required argument """ - parser.add_argument( - 'xls_file', help='Filename of the xls being imported' - ) + parser.add_argument("xls_file", help="Filename of the xls being imported") def handle(self, *args, **options): """ Function to parse the file and pass it to the import service """ - xls_file = options.get('xls_file') + xls_file = options.get("xls_file") if not path.exists(xls_file): - self.stdout.write(self.style.ERROR( - 'Cannot find {file}. ' - 'Please make sure the filename is correct.'.format( - file=xls_file + self.stdout.write( + self.style.ERROR( + "Cannot find {file}. 
" + "Please make sure the filename is correct.".format(file=xls_file) ) - )) + ) return False import_from_xls(xls_file) - self.stdout.write(self.style.SUCCESS( - 'Import complete' - )) + self.stdout.write(self.style.SUCCESS("Import complete")) diff --git a/django/api/migrations/0001_initial.py b/django/api/migrations/0001_initial.py index 52e319a0..3f96b76d 100644 --- a/django/api/migrations/0001_initial.py +++ b/django/api/migrations/0001_initial.py @@ -8,166 +8,337 @@ class Migration(migrations.Migration): initial = True - dependencies = [ - ] + dependencies = [] operations = [ migrations.CreateModel( - name='CreditClass', + name="CreditClass", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('effective_date', models.DateField(blank=True, null=True)), - ('expiration_date', models.DateField(blank=True, null=True)), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('credit_class', models.CharField(max_length=3, unique=True)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("effective_date", models.DateField(blank=True, null=True)), + ("expiration_date", models.DateField(blank=True, null=True)), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ("credit_class", models.CharField(max_length=3, unique=True)), ], options={ - 'db_table': 'credit_class_code', + "db_table": "credit_class_code", }, ), migrations.CreateModel( - name='IcbcUploadDate', + name="IcbcUploadDate", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('upload_date', models.DateField()), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ("upload_date", models.DateField()), ], options={ - 'db_table': 'icbc_upload_date', + "db_table": "icbc_upload_date", }, ), migrations.CreateModel( - name='ModelYear', + name="ModelYear", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('effective_date', models.DateField(blank=True, null=True)), - ('expiration_date', models.DateField(blank=True, null=True)), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, 
null=True)), - ('name', models.CharField(db_column='description', max_length=250, unique=True)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("effective_date", models.DateField(blank=True, null=True)), + ("expiration_date", models.DateField(blank=True, null=True)), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ( + "name", + models.CharField( + db_column="description", max_length=250, unique=True + ), + ), ], options={ - 'db_table': 'model_year', + "db_table": "model_year", }, ), migrations.CreateModel( - name='Organization', + name="Organization", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('name', models.CharField(db_column='organization_name', max_length=500, unique=True)), - ('short_name', models.CharField(db_column='short_name', max_length=64, null=True, unique=True)), - ('is_active', models.BooleanField(default=False)), - ('is_government', models.BooleanField(default=False)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ( + "name", + models.CharField( + db_column="organization_name", max_length=500, unique=True + ), + ), + ( + "short_name", + models.CharField( + db_column="short_name", max_length=64, null=True, unique=True + ), + ), + ("is_active", models.BooleanField(default=False)), + ("is_government", models.BooleanField(default=False)), ], options={ - 'db_table': 'organization', + "db_table": "organization", }, ), migrations.CreateModel( - name='VehicleClass', + name="VehicleClass", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('effective_date', models.DateField(blank=True, null=True)), - ('expiration_date', models.DateField(blank=True, null=True)), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('description', models.CharField(db_column='description', max_length=250)), - ('vehicle_class_code', models.CharField(max_length=3, unique=True)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("effective_date", models.DateField(blank=True, null=True)), + ("expiration_date", models.DateField(blank=True, null=True)), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + 
("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ( + "description", + models.CharField(db_column="description", max_length=250), + ), + ("vehicle_class_code", models.CharField(max_length=3, unique=True)), ], options={ - 'db_table': 'vehicle_class_code', + "db_table": "vehicle_class_code", }, ), migrations.CreateModel( - name='ZevType', + name="ZevType", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('effective_date', models.DateField(blank=True, null=True)), - ('expiration_date', models.DateField(blank=True, null=True)), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('description', models.CharField(db_column='description', max_length=250)), - ('vehicle_zev_code', models.CharField(max_length=4, unique=True)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("effective_date", models.DateField(blank=True, null=True)), + ("expiration_date", models.DateField(blank=True, null=True)), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ( + "description", + models.CharField(db_column="description", max_length=250), + ), + ("vehicle_zev_code", models.CharField(max_length=4, unique=True)), ], options={ - 'db_table': 'vehicle_zev_type', + "db_table": "vehicle_zev_type", }, ), migrations.CreateModel( - name='IcbcVehicle', + name="IcbcVehicle", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('make', models.CharField(db_index=True, max_length=250)), - ('model_name', models.CharField(db_index=True, max_length=250)), - ('model_year', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.modelyear')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ("make", models.CharField(db_index=True, max_length=250)), + ("model_name", models.CharField(db_index=True, max_length=250)), + ( + "model_year", + models.ForeignKey( + on_delete=django.db.models.deletion.PROTECT, to="api.modelyear" + ), + ), ], options={ - 'db_table': 'icbc_vehicle', - 'unique_together': {('make', 'model_name', 'model_year')}, - 'index_together': {('make', 'model_name', 'model_year')}, + "db_table": "icbc_vehicle", + "unique_together": {("make", "model_name", "model_year")}, + "index_together": {("make", "model_name", 
"model_year")}, }, ), migrations.CreateModel( - name='IcbcRegistrationData', + name="IcbcRegistrationData", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('vin', models.CharField(db_index=True, max_length=20, unique=True)), - ('icbc_upload_date', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.icbcuploaddate')), - ('icbc_vehicle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.icbcvehicle')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ("vin", models.CharField(db_index=True, max_length=20, unique=True)), + ( + "icbc_upload_date", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + to="api.icbcuploaddate", + ), + ), + ( + "icbc_vehicle", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + to="api.icbcvehicle", + ), + ), ], options={ - 'db_table': 'icbc_registration_data', + "db_table": "icbc_registration_data", }, ), migrations.CreateModel( - name='Vehicle', + name="Vehicle", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('make', models.CharField(max_length=250)), - ('range', models.IntegerField()), - ('model_name', models.CharField(max_length=250)), - ('validation_status', models.CharField(default='DRAFT', max_length=20)), - ('weight_kg', models.DecimalField(decimal_places=0, max_digits=6)), - ('has_passed_us_06_test', models.BooleanField(default=False)), - ('credit_value', models.DecimalField(decimal_places=2, max_digits=20, null=True)), - ('is_active', models.BooleanField(default=True)), - ('credit_class', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='+', to='api.creditclass')), - ('model_year', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.modelyear')), - ('organization', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.organization')), - ('vehicle_class_code', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.vehicleclass')), - ('vehicle_zev_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.zevtype')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + 
("make", models.CharField(max_length=250)), + ("range", models.IntegerField()), + ("model_name", models.CharField(max_length=250)), + ("validation_status", models.CharField(default="DRAFT", max_length=20)), + ("weight_kg", models.DecimalField(decimal_places=0, max_digits=6)), + ("has_passed_us_06_test", models.BooleanField(default=False)), + ( + "credit_value", + models.DecimalField(decimal_places=2, max_digits=20, null=True), + ), + ("is_active", models.BooleanField(default=True)), + ( + "credit_class", + models.ForeignKey( + null=True, + on_delete=django.db.models.deletion.PROTECT, + related_name="+", + to="api.creditclass", + ), + ), + ( + "model_year", + models.ForeignKey( + on_delete=django.db.models.deletion.PROTECT, to="api.modelyear" + ), + ), + ( + "organization", + models.ForeignKey( + on_delete=django.db.models.deletion.PROTECT, + to="api.organization", + ), + ), + ( + "vehicle_class_code", + models.ForeignKey( + on_delete=django.db.models.deletion.PROTECT, + to="api.vehicleclass", + ), + ), + ( + "vehicle_zev_type", + models.ForeignKey( + on_delete=django.db.models.deletion.PROTECT, to="api.zevtype" + ), + ), ], options={ - 'db_table': 'vehicle', - 'unique_together': {('make', 'model_name', 'vehicle_zev_type', 'model_year')}, + "db_table": "vehicle", + "unique_together": { + ("make", "model_name", "vehicle_zev_type", "model_year") + }, }, ), ] diff --git a/django/api/migrations/0002_ldvrebates.py b/django/api/migrations/0002_ldvrebates.py index 8471b9ac..cab9f1e5 100644 --- a/django/api/migrations/0002_ldvrebates.py +++ b/django/api/migrations/0002_ldvrebates.py @@ -6,48 +6,110 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0001_initial'), + ("api", "0001_initial"), ] operations = [ migrations.CreateModel( - name='LdvRebates', + name="LdvRebates", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('submission_id', models.IntegerField(unique=True)), - ('casl_consent', models.BooleanField(default=False)), - ('date_approved', models.DateField(blank=True, null=True)), - ('submission_date', models.DateField(blank=True, null=True)), - ('company_name', models.CharField(blank=True, max_length=200, null=True)), - ('company_city', models.CharField(blank=True, max_length=200, null=True)), - ('applicant_name', models.CharField(blank=True, max_length=200, null=True)), - ('applicant_address_1', models.CharField(blank=True, max_length=200, null=True)), - ('applicant_address_2', models.CharField(blank=True, max_length=200, null=True)), - ('applicant_city', models.CharField(blank=True, max_length=100, null=True)), - ('applicant_postal_code', models.CharField(blank=True, max_length=10, null=True)), - ('applicant_phone', models.CharField(blank=True, max_length=15, null=True)), - ('applicant_email', models.CharField(blank=True, max_length=50, null=True)), - ('applicant_use', models.CharField(blank=True, max_length=20, null=True)), - ('applicant_type', models.CharField(blank=True, max_length=100, null=True)), - ('business_name', models.CharField(blank=True, max_length=100, null=True)), - ('business_number', models.CharField(blank=True, max_length=30, null=True)), - ('drivers_license', models.CharField(blank=True, max_length=30, 
null=True)), - ('province', models.CharField(blank=True, max_length=50, null=True)), - ('msrp', models.DecimalField(decimal_places=2, max_digits=20)), - ('other_incentives', models.DecimalField(decimal_places=2, max_digits=20)), - ('document_type', models.CharField(blank=True, max_length=40, null=True)), - ('vehicle', models.CharField(blank=True, max_length=200, null=True)), - ('incentive_amount', models.DecimalField(decimal_places=2, max_digits=20)), - ('vin', models.CharField(blank=True, max_length=255, null=True)), - ('delivered', models.BooleanField(default=False)), - ('consent_to_contact', models.BooleanField(default=False)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ("submission_id", models.IntegerField(unique=True)), + ("casl_consent", models.BooleanField(default=False)), + ("date_approved", models.DateField(blank=True, null=True)), + ("submission_date", models.DateField(blank=True, null=True)), + ( + "company_name", + models.CharField(blank=True, max_length=200, null=True), + ), + ( + "company_city", + models.CharField(blank=True, max_length=200, null=True), + ), + ( + "applicant_name", + models.CharField(blank=True, max_length=200, null=True), + ), + ( + "applicant_address_1", + models.CharField(blank=True, max_length=200, null=True), + ), + ( + "applicant_address_2", + models.CharField(blank=True, max_length=200, null=True), + ), + ( + "applicant_city", + models.CharField(blank=True, max_length=100, null=True), + ), + ( + "applicant_postal_code", + models.CharField(blank=True, max_length=10, null=True), + ), + ( + "applicant_phone", + models.CharField(blank=True, max_length=15, null=True), + ), + ( + "applicant_email", + models.CharField(blank=True, max_length=50, null=True), + ), + ( + "applicant_use", + models.CharField(blank=True, max_length=20, null=True), + ), + ( + "applicant_type", + models.CharField(blank=True, max_length=100, null=True), + ), + ( + "business_name", + models.CharField(blank=True, max_length=100, null=True), + ), + ( + "business_number", + models.CharField(blank=True, max_length=30, null=True), + ), + ( + "drivers_license", + models.CharField(blank=True, max_length=30, null=True), + ), + ("province", models.CharField(blank=True, max_length=50, null=True)), + ("msrp", models.DecimalField(decimal_places=2, max_digits=20)), + ( + "other_incentives", + models.DecimalField(decimal_places=2, max_digits=20), + ), + ( + "document_type", + models.CharField(blank=True, max_length=40, null=True), + ), + ("vehicle", models.CharField(blank=True, max_length=200, null=True)), + ( + "incentive_amount", + models.DecimalField(decimal_places=2, max_digits=20), + ), + ("vin", models.CharField(blank=True, max_length=255, null=True)), + ("delivered", models.BooleanField(default=False)), + ("consent_to_contact", models.BooleanField(default=False)), ], options={ - 'db_table': 'ldv_rebates', + "db_table": "ldv_rebates", }, ), ] diff --git a/django/api/migrations/0003_vindecodedinformation.py b/django/api/migrations/0003_vindecodedinformation.py index 300aab92..46fff5ad 100644 --- a/django/api/migrations/0003_vindecodedinformation.py +++ b/django/api/migrations/0003_vindecodedinformation.py @@ -6,26 +6,43 @@ class 
Migration(migrations.Migration): dependencies = [ - ('api', '0002_ldvrebates'), + ("api", "0002_ldvrebates"), ] operations = [ migrations.CreateModel( - name='VINDecodedInformation', + name="VINDecodedInformation", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('manufacturer', models.CharField(blank=True, max_length=500, null=True)), - ('make', models.CharField(blank=True, max_length=250, null=True)), - ('model', models.CharField(blank=True, max_length=250, null=True)), - ('model_year', models.IntegerField(blank=True, null=True)), - ('fuel_type_primary', models.CharField(blank=True, max_length=250, null=True)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ( + "manufacturer", + models.CharField(blank=True, max_length=500, null=True), + ), + ("make", models.CharField(blank=True, max_length=250, null=True)), + ("model", models.CharField(blank=True, max_length=250, null=True)), + ("model_year", models.IntegerField(blank=True, null=True)), + ( + "fuel_type_primary", + models.CharField(blank=True, max_length=250, null=True), + ), ], options={ - 'db_table': 'vin_decoded_information', + "db_table": "vin_decoded_information", }, ), ] diff --git a/django/api/migrations/0004_auto_20211116_1813.py b/django/api/migrations/0004_auto_20211116_1813.py index bd46ee19..84812e6f 100644 --- a/django/api/migrations/0004_auto_20211116_1813.py +++ b/django/api/migrations/0004_auto_20211116_1813.py @@ -6,58 +6,58 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0003_vindecodedinformation'), + ("api", "0003_vindecodedinformation"), ] operations = [ migrations.AlterField( - model_name='ldvrebates', - name='applicant_email', + model_name="ldvrebates", + name="applicant_email", field=models.CharField(blank=True, max_length=200, null=True), ), migrations.AlterField( - model_name='ldvrebates', - name='applicant_phone', + model_name="ldvrebates", + name="applicant_phone", field=models.CharField(blank=True, max_length=25, null=True), ), migrations.AlterField( - model_name='ldvrebates', - name='applicant_postal_code', + model_name="ldvrebates", + name="applicant_postal_code", field=models.CharField(blank=True, max_length=50, null=True), ), migrations.AlterField( - model_name='ldvrebates', - name='applicant_use', + model_name="ldvrebates", + name="applicant_use", field=models.CharField(blank=True, max_length=50, null=True), ), migrations.AlterField( - model_name='ldvrebates', - name='business_number', + model_name="ldvrebates", + name="business_number", field=models.CharField(blank=True, max_length=50, null=True), ), migrations.AlterField( - model_name='ldvrebates', - name='date_approved', + model_name="ldvrebates", + name="date_approved", field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AlterField( - model_name='ldvrebates', - name='document_type', + 
model_name="ldvrebates", + name="document_type", field=models.CharField(blank=True, max_length=50, null=True), ), migrations.AlterField( - model_name='ldvrebates', - name='drivers_license', + model_name="ldvrebates", + name="drivers_license", field=models.CharField(blank=True, max_length=50, null=True), ), migrations.AlterField( - model_name='ldvrebates', - name='submission_date', + model_name="ldvrebates", + name="submission_date", field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AlterField( - model_name='ldvrebates', - name='submission_id', + model_name="ldvrebates", + name="submission_id", field=models.IntegerField(), ), ] diff --git a/django/api/migrations/0005_vindecodedinformation_vin.py b/django/api/migrations/0005_vindecodedinformation_vin.py index 6e828dc6..ed268227 100644 --- a/django/api/migrations/0005_vindecodedinformation_vin.py +++ b/django/api/migrations/0005_vindecodedinformation_vin.py @@ -6,13 +6,13 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0004_auto_20211116_1813'), + ("api", "0004_auto_20211116_1813"), ] operations = [ migrations.AddField( - model_name='vindecodedinformation', - name='vin', + model_name="vindecodedinformation", + name="vin", field=models.CharField(default=0, max_length=20), preserve_default=False, ), diff --git a/django/api/migrations/0006_specialityusevehicleincentives.py b/django/api/migrations/0006_specialityusevehicleincentives.py index 937764bd..12e0cc60 100644 --- a/django/api/migrations/0006_specialityusevehicleincentives.py +++ b/django/api/migrations/0006_specialityusevehicleincentives.py @@ -6,31 +6,54 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0005_vindecodedinformation_vin'), + ("api", "0005_vindecodedinformation_vin"), ] operations = [ migrations.CreateModel( - name='SpecialityUseVehicleIncentives', + name="SpecialityUseVehicleIncentives", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('approvals', models.CharField(blank=True, max_length=20, null=True)), - ('date', models.CharField(blank=True, max_length=20, null=True)), - ('applicant_name', models.CharField(blank=True, max_length=250, null=True)), - ('max_incentive_amount_requested', models.IntegerField(blank=True, null=True)), - ('category', models.CharField(blank=True, max_length=250, null=True)), - ('applicant_type', models.CharField(blank=True, max_length=50, null=True)), - ('incentive_paid', models.IntegerField(blank=True, null=True)), - ('total_purchase_price', models.IntegerField(blank=True, null=True)), - ('manufacturer', models.CharField(blank=True, max_length=250, null=True)), - ('model', models.CharField(blank=True, max_length=250, null=True)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ("approvals", models.CharField(blank=True, max_length=20, null=True)), + ("date", 
models.CharField(blank=True, max_length=20, null=True)), + ( + "applicant_name", + models.CharField(blank=True, max_length=250, null=True), + ), + ( + "max_incentive_amount_requested", + models.IntegerField(blank=True, null=True), + ), + ("category", models.CharField(blank=True, max_length=250, null=True)), + ( + "applicant_type", + models.CharField(blank=True, max_length=50, null=True), + ), + ("incentive_paid", models.IntegerField(blank=True, null=True)), + ("total_purchase_price", models.IntegerField(blank=True, null=True)), + ( + "manufacturer", + models.CharField(blank=True, max_length=250, null=True), + ), + ("model", models.CharField(blank=True, max_length=250, null=True)), ], options={ - 'db_table': 'speciality_use_vehicle_incentives', + "db_table": "speciality_use_vehicle_incentives", }, ), ] diff --git a/django/api/migrations/0007_datasets.py b/django/api/migrations/0007_datasets.py index 2ef4100f..810af89a 100644 --- a/django/api/migrations/0007_datasets.py +++ b/django/api/migrations/0007_datasets.py @@ -6,22 +6,33 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0006_specialityusevehicleincentives'), + ("api", "0006_specialityusevehicleincentives"), ] operations = [ migrations.CreateModel( - name='Datasets', + name="Datasets", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('name', models.CharField(max_length=50, unique=True)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ("name", models.CharField(max_length=50, unique=True)), ], options={ - 'db_table': 'datasets', + "db_table": "datasets", }, ), ] diff --git a/django/api/migrations/0008_chargerrebates.py b/django/api/migrations/0008_chargerrebates.py index 2f92a16e..6a996256 100644 --- a/django/api/migrations/0008_chargerrebates.py +++ b/django/api/migrations/0008_chargerrebates.py @@ -6,32 +6,60 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0007_datasets'), + ("api", "0007_datasets"), ] operations = [ migrations.CreateModel( - name='ChargerRebates', + name="ChargerRebates", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('organization', models.CharField(blank=True, max_length=250, null=True)), - ('region', models.CharField(blank=True, max_length=200, null=True)), - ('city', models.CharField(blank=True, max_length=200, null=True)), - ('address', models.CharField(blank=True, max_length=200, null=True)), - ('postal_code', models.CharField(blank=True, max_length=200, null=True)), - ('number_of_fast_charging_stations', 
models.IntegerField(blank=True, null=True)), - ('in_service_date', models.CharField(blank=True, max_length=100, null=True)), - ('expected_in_service_date', models.DateField(blank=True, null=True)), - ('announced', models.CharField(blank=True, max_length=200, null=True)), - ('rebate_paid', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True)), - ('notes', models.CharField(blank=True, max_length=250, null=True)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ( + "organization", + models.CharField(blank=True, max_length=250, null=True), + ), + ("region", models.CharField(blank=True, max_length=200, null=True)), + ("city", models.CharField(blank=True, max_length=200, null=True)), + ("address", models.CharField(blank=True, max_length=200, null=True)), + ( + "postal_code", + models.CharField(blank=True, max_length=200, null=True), + ), + ( + "number_of_fast_charging_stations", + models.IntegerField(blank=True, null=True), + ), + ( + "in_service_date", + models.CharField(blank=True, max_length=100, null=True), + ), + ("expected_in_service_date", models.DateField(blank=True, null=True)), + ("announced", models.CharField(blank=True, max_length=200, null=True)), + ( + "rebate_paid", + models.DecimalField( + blank=True, decimal_places=2, max_digits=20, null=True + ), + ), + ("notes", models.CharField(blank=True, max_length=250, null=True)), ], options={ - 'db_table': 'charger_rebates', + "db_table": "charger_rebates", }, ), ] diff --git a/django/api/migrations/0009_publiccharging.py b/django/api/migrations/0009_publiccharging.py index 6532743a..7ba704ba 100644 --- a/django/api/migrations/0009_publiccharging.py +++ b/django/api/migrations/0009_publiccharging.py @@ -6,42 +6,74 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0008_chargerrebates'), + ("api", "0008_chargerrebates"), ] operations = [ migrations.CreateModel( - name='PublicCharging', + name="PublicCharging", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('effective_date', models.DateField(blank=True, null=True)), - ('expiration_date', models.DateField(blank=True, null=True)), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('applicant_name', models.CharField(blank=True, max_length=200, null=True)), - ('city', models.CharField(blank=True, max_length=200, null=True)), - ('postal_code', models.CharField(blank=True, max_length=50, null=True)), - ('address', models.CharField(blank=True, max_length=200, null=True)), - ('charging_station_info', models.CharField(blank=True, max_length=500, null=True)), - ('between_25kw_and_50kw', models.IntegerField(blank=True, null=True)), - ('between_50kw_and_100kw', models.IntegerField(blank=True, null=True)), - ('over_100kw', models.IntegerField(blank=True, null=True)), - ('level_2_units', models.IntegerField(blank=True, null=True)), - ('level_2_ports', models.IntegerField(blank=True, null=True)), - 
('estimated_budget', models.DecimalField(decimal_places=2, max_digits=20)), - ('adjusted_rebate', models.DecimalField(decimal_places=2, max_digits=20)), - ('rebate_percent_maximum', models.DecimalField(decimal_places=2, max_digits=3)), - ('pilot_project', models.BooleanField()), - ('region', models.CharField(blank=True, max_length=200, null=True)), - ('organization_type', models.CharField(blank=True, max_length=100, null=True)), - ('project_status', models.CharField(blank=True, max_length=200, null=True)), - ('review_number', models.IntegerField(blank=True, null=True)), - ('rebate_paid', models.DecimalField(decimal_places=2, max_digits=20)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("effective_date", models.DateField(blank=True, null=True)), + ("expiration_date", models.DateField(blank=True, null=True)), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ( + "applicant_name", + models.CharField(blank=True, max_length=200, null=True), + ), + ("city", models.CharField(blank=True, max_length=200, null=True)), + ("postal_code", models.CharField(blank=True, max_length=50, null=True)), + ("address", models.CharField(blank=True, max_length=200, null=True)), + ( + "charging_station_info", + models.CharField(blank=True, max_length=500, null=True), + ), + ("between_25kw_and_50kw", models.IntegerField(blank=True, null=True)), + ("between_50kw_and_100kw", models.IntegerField(blank=True, null=True)), + ("over_100kw", models.IntegerField(blank=True, null=True)), + ("level_2_units", models.IntegerField(blank=True, null=True)), + ("level_2_ports", models.IntegerField(blank=True, null=True)), + ( + "estimated_budget", + models.DecimalField(decimal_places=2, max_digits=20), + ), + ( + "adjusted_rebate", + models.DecimalField(decimal_places=2, max_digits=20), + ), + ( + "rebate_percent_maximum", + models.DecimalField(decimal_places=2, max_digits=3), + ), + ("pilot_project", models.BooleanField()), + ("region", models.CharField(blank=True, max_length=200, null=True)), + ( + "organization_type", + models.CharField(blank=True, max_length=100, null=True), + ), + ( + "project_status", + models.CharField(blank=True, max_length=200, null=True), + ), + ("review_number", models.IntegerField(blank=True, null=True)), + ("rebate_paid", models.DecimalField(decimal_places=2, max_digits=20)), ], options={ - 'db_table': 'public_charging', + "db_table": "public_charging", }, ), ] diff --git a/django/api/migrations/0010_chargerrebates_fix_columns.py b/django/api/migrations/0010_chargerrebates_fix_columns.py index 8eaaad69..a889d8a6 100644 --- a/django/api/migrations/0010_chargerrebates_fix_columns.py +++ b/django/api/migrations/0010_chargerrebates_fix_columns.py @@ -6,11 +6,17 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0009_publiccharging'), + ("api", "0009_publiccharging"), ] operations = [ - migrations.RunSQL('alter table public.charger_rebates alter column number_of_fast_charging_stations type varchar(100)'), - migrations.RunSQL('alter table public.charger_rebates alter column expected_in_service_date type varchar(200)'), - migrations.RunSQL('alter table public.charger_rebates alter column rebate_paid type varchar(200)'), + migrations.RunSQL( + "alter table 
public.charger_rebates alter column number_of_fast_charging_stations type varchar(100)" + ), + migrations.RunSQL( + "alter table public.charger_rebates alter column expected_in_service_date type varchar(200)" + ), + migrations.RunSQL( + "alter table public.charger_rebates alter column rebate_paid type varchar(200)" + ), ] diff --git a/django/api/migrations/0011_auto_20211216_1944.py b/django/api/migrations/0011_auto_20211216_1944.py index 8d2e7155..c26c14c2 100644 --- a/django/api/migrations/0011_auto_20211216_1944.py +++ b/django/api/migrations/0011_auto_20211216_1944.py @@ -6,13 +6,13 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0010_chargerrebates_fix_columns'), + ("api", "0010_chargerrebates_fix_columns"), ] operations = [ migrations.AlterField( - model_name='chargerrebates', - name='expected_in_service_date', + model_name="chargerrebates", + name="expected_in_service_date", field=models.CharField(blank=True, max_length=200, null=True), ), ] diff --git a/django/api/migrations/0012_hydrogrenfueling.py b/django/api/migrations/0012_hydrogrenfueling.py index a677e436..b105f33c 100644 --- a/django/api/migrations/0012_hydrogrenfueling.py +++ b/django/api/migrations/0012_hydrogrenfueling.py @@ -6,41 +6,77 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0011_auto_20211216_1944'), + ("api", "0011_auto_20211216_1944"), ] operations = [ migrations.CreateModel( - name='HydrogrenFueling', + name="HydrogrenFueling", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('effective_date', models.DateField(blank=True, null=True)), - ('expiration_date', models.DateField(blank=True, null=True)), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('station_number', models.IntegerField(blank=True, null=True)), - ('rfp_close_date', models.DateField(blank=True, null=True)), - ('station_name', models.CharField(blank=True, max_length=200, null=True)), - ('street_address', models.CharField(blank=True, max_length=200, null=True)), - ('city', models.CharField(blank=True, max_length=200, null=True)), - ('postal_code', models.CharField(blank=True, max_length=50, null=True)), - ('proponent', models.CharField(blank=True, max_length=100, null=True)), - ('location_partner', models.CharField(blank=True, max_length=100, null=True)), - ('capital_funding_awarded', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True)), - ('om_funding_potential', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True)), - ('daily_capacity', models.IntegerField(blank=True, null=True)), - ('bar_700', models.BooleanField(default=False)), - ('bar_350', models.BooleanField(default=False)), - ('status', models.CharField(blank=True, max_length=200, null=True)), - ('number_of_fueling_positions', models.IntegerField(blank=True, null=True)), - ('operational_date', models.DateField(blank=True, null=True)), - ('opening_date', models.DateField(blank=True, null=True)), - ('total_capital_cost', models.DecimalField(decimal_places=2, max_digits=20)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("effective_date", models.DateField(blank=True, null=True)), + ("expiration_date", models.DateField(blank=True, 
null=True)), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ("station_number", models.IntegerField(blank=True, null=True)), + ("rfp_close_date", models.DateField(blank=True, null=True)), + ( + "station_name", + models.CharField(blank=True, max_length=200, null=True), + ), + ( + "street_address", + models.CharField(blank=True, max_length=200, null=True), + ), + ("city", models.CharField(blank=True, max_length=200, null=True)), + ("postal_code", models.CharField(blank=True, max_length=50, null=True)), + ("proponent", models.CharField(blank=True, max_length=100, null=True)), + ( + "location_partner", + models.CharField(blank=True, max_length=100, null=True), + ), + ( + "capital_funding_awarded", + models.DecimalField( + blank=True, decimal_places=2, max_digits=20, null=True + ), + ), + ( + "om_funding_potential", + models.DecimalField( + blank=True, decimal_places=2, max_digits=20, null=True + ), + ), + ("daily_capacity", models.IntegerField(blank=True, null=True)), + ("bar_700", models.BooleanField(default=False)), + ("bar_350", models.BooleanField(default=False)), + ("status", models.CharField(blank=True, max_length=200, null=True)), + ( + "number_of_fueling_positions", + models.IntegerField(blank=True, null=True), + ), + ("operational_date", models.DateField(blank=True, null=True)), + ("opening_date", models.DateField(blank=True, null=True)), + ( + "total_capital_cost", + models.DecimalField(decimal_places=2, max_digits=20), + ), ], options={ - 'db_table': 'hydrogen_fueling', + "db_table": "hydrogen_fueling", }, ), ] diff --git a/django/api/migrations/0013_hydrogenfleets.py b/django/api/migrations/0013_hydrogenfleets.py index b6fc7f9c..322359ec 100644 --- a/django/api/migrations/0013_hydrogenfleets.py +++ b/django/api/migrations/0013_hydrogenfleets.py @@ -6,36 +6,65 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0012_hydrogrenfueling'), + ("api", "0012_hydrogrenfueling"), ] operations = [ migrations.CreateModel( - name='HydrogenFleets', + name="HydrogenFleets", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('application_number', models.IntegerField(blank=True, null=True)), - ('fleet_number', models.IntegerField(blank=True, null=True)), - ('application_date', models.CharField(blank=True, max_length=100, null=True)), - ('organization_name', models.CharField(blank=True, max_length=250, null=True)), - ('fleet_name', models.CharField(blank=True, max_length=250, null=True)), - ('steet_address', models.CharField(blank=True, max_length=250, null=True)), - ('city', models.CharField(blank=True, max_length=100, null=True)), - ('postal_code', models.CharField(blank=True, max_length=10, null=True)), - ('vin', models.CharField(blank=True, max_length=20, null=True)), - ('make', models.CharField(blank=True, max_length=100, null=True)), - ('model', models.CharField(blank=True, max_length=100, null=True)), - ('year', models.CharField(blank=True, max_length=100, null=True)), - ('purchase_date', 
models.CharField(blank=True, max_length=100, null=True)), - ('dealer_name', models.CharField(blank=True, max_length=250, null=True)), - ('rebate_amount', models.CharField(blank=True, max_length=250, null=True)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ("application_number", models.IntegerField(blank=True, null=True)), + ("fleet_number", models.IntegerField(blank=True, null=True)), + ( + "application_date", + models.CharField(blank=True, max_length=100, null=True), + ), + ( + "organization_name", + models.CharField(blank=True, max_length=250, null=True), + ), + ("fleet_name", models.CharField(blank=True, max_length=250, null=True)), + ( + "steet_address", + models.CharField(blank=True, max_length=250, null=True), + ), + ("city", models.CharField(blank=True, max_length=100, null=True)), + ("postal_code", models.CharField(blank=True, max_length=10, null=True)), + ("vin", models.CharField(blank=True, max_length=20, null=True)), + ("make", models.CharField(blank=True, max_length=100, null=True)), + ("model", models.CharField(blank=True, max_length=100, null=True)), + ("year", models.CharField(blank=True, max_length=100, null=True)), + ( + "purchase_date", + models.CharField(blank=True, max_length=100, null=True), + ), + ( + "dealer_name", + models.CharField(blank=True, max_length=250, null=True), + ), + ( + "rebate_amount", + models.CharField(blank=True, max_length=250, null=True), + ), ], options={ - 'db_table': 'hydrogen_fleets', + "db_table": "hydrogen_fleets", }, ), ] diff --git a/django/api/migrations/0014_datafleets.py b/django/api/migrations/0014_datafleets.py index 7afa757c..07ea9ff8 100644 --- a/django/api/migrations/0014_datafleets.py +++ b/django/api/migrations/0014_datafleets.py @@ -6,47 +6,115 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0013_hydrogenfleets'), + ("api", "0013_hydrogenfleets"), ] operations = [ migrations.CreateModel( - name='DataFleets', + name="DataFleets", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('current_stage', models.CharField(blank=True, max_length=250, null=True)), - ('rebate_value', models.CharField(blank=True, max_length=100, null=True)), - ('legal_name_of_organization_fleet', models.CharField(blank=True, max_length=500, null=True)), - ('business_category', models.CharField(blank=True, max_length=250, null=True)), - ('city', models.CharField(blank=True, max_length=250, null=True)), - ('postal_code', models.CharField(blank=True, max_length=100, null=True)), - ('applicant_first_name', models.CharField(blank=True, max_length=100, null=True)), - ('applicant_last_name', models.CharField(blank=True, max_length=100, null=True)), - ('email_address', models.CharField(blank=True, max_length=100, null=True)), - ('fleet_size_all', models.IntegerField(blank=True, null=True)), - ('fleet_size_light_duty', 
models.IntegerField(blank=True, null=True)), - ('total_number_of_evs', models.IntegerField(blank=True, null=True)), - ('total_number_of_light_duty_evs', models.IntegerField(blank=True, null=True)), - ('phev', models.IntegerField(blank=True, null=True)), - ('evse', models.IntegerField(blank=True, null=True)), - ('average_daily_travel_distance', models.CharField(blank=True, max_length=100, null=True)), - ('component_being_applyied_for', models.CharField(blank=True, max_length=250, null=True)), - ('estimated_cost', models.CharField(blank=True, max_length=100, null=True)), - ('type_of_charger_being_installing', models.CharField(blank=True, max_length=250, null=True)), - ('number_of_Level_2_Charging_Stations_being_applying_for', models.IntegerField(blank=True, null=True)), - ('number_of_level_3_dc_fast_charging_stations_being_applying_for', models.IntegerField(blank=True, null=True)), - ('application_form_fleets_completion_date_time', models.CharField(blank=True, max_length=100, null=True)), - ('pre_approval_date', models.CharField(blank=True, max_length=100, null=True)), - ('deadline', models.CharField(blank=True, max_length=250, null=True)), - ('application_number', models.CharField(blank=True, max_length=250, null=True)), - ('potential_rebate', models.CharField(blank=True, max_length=100, null=True)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ( + "current_stage", + models.CharField(blank=True, max_length=250, null=True), + ), + ( + "rebate_value", + models.CharField(blank=True, max_length=100, null=True), + ), + ( + "legal_name_of_organization_fleet", + models.CharField(blank=True, max_length=500, null=True), + ), + ( + "business_category", + models.CharField(blank=True, max_length=250, null=True), + ), + ("city", models.CharField(blank=True, max_length=250, null=True)), + ( + "postal_code", + models.CharField(blank=True, max_length=100, null=True), + ), + ( + "applicant_first_name", + models.CharField(blank=True, max_length=100, null=True), + ), + ( + "applicant_last_name", + models.CharField(blank=True, max_length=100, null=True), + ), + ( + "email_address", + models.CharField(blank=True, max_length=100, null=True), + ), + ("fleet_size_all", models.IntegerField(blank=True, null=True)), + ("fleet_size_light_duty", models.IntegerField(blank=True, null=True)), + ("total_number_of_evs", models.IntegerField(blank=True, null=True)), + ( + "total_number_of_light_duty_evs", + models.IntegerField(blank=True, null=True), + ), + ("phev", models.IntegerField(blank=True, null=True)), + ("evse", models.IntegerField(blank=True, null=True)), + ( + "average_daily_travel_distance", + models.CharField(blank=True, max_length=100, null=True), + ), + ( + "component_being_applyied_for", + models.CharField(blank=True, max_length=250, null=True), + ), + ( + "estimated_cost", + models.CharField(blank=True, max_length=100, null=True), + ), + ( + "type_of_charger_being_installing", + models.CharField(blank=True, max_length=250, null=True), + ), + ( + "number_of_Level_2_Charging_Stations_being_applying_for", + models.IntegerField(blank=True, null=True), + ), + ( + "number_of_level_3_dc_fast_charging_stations_being_applying_for", + 
models.IntegerField(blank=True, null=True), + ), + ( + "application_form_fleets_completion_date_time", + models.CharField(blank=True, max_length=100, null=True), + ), + ( + "pre_approval_date", + models.CharField(blank=True, max_length=100, null=True), + ), + ("deadline", models.CharField(blank=True, max_length=250, null=True)), + ( + "application_number", + models.CharField(blank=True, max_length=250, null=True), + ), + ( + "potential_rebate", + models.CharField(blank=True, max_length=100, null=True), + ), ], options={ - 'db_table': 'data_fleets', + "db_table": "data_fleets", }, ), ] diff --git a/django/api/migrations/0015_arcprojecttracking.py b/django/api/migrations/0015_arcprojecttracking.py index 490e02f5..8b5e0c22 100644 --- a/django/api/migrations/0015_arcprojecttracking.py +++ b/django/api/migrations/0015_arcprojecttracking.py @@ -6,36 +6,68 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0014_datafleets'), + ("api", "0014_datafleets"), ] operations = [ migrations.CreateModel( - name='ARCProjectTracking', + name="ARCProjectTracking", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('funding_call', models.CharField(blank=True, max_length=50, null=True)), - ('proponent', models.CharField(blank=True, max_length=500, null=True)), - ('reference_number', models.CharField(blank=True, max_length=50, null=True)), - ('project_title', models.CharField(blank=True, max_length=500, null=True)), - ('primary_location', models.CharField(blank=True, max_length=250, null=True)), - ('status', models.CharField(blank=True, max_length=250, null=True)), - ('arc_funding', models.IntegerField(blank=True, null=True)), - ('funds_issued', models.IntegerField(blank=True, null=True)), - ('start_date', models.CharField(blank=True, max_length=250, null=True)), - ('completion_date', models.CharField(blank=True, max_length=250, null=True)), - ('total_project_value', models.IntegerField(blank=True, null=True)), - ('zev_sub_sector', models.CharField(blank=True, max_length=250, null=True)), - ('on_road_off_road', models.CharField(blank=True, max_length=250, null=True)), - ('fuel_type', models.CharField(blank=True, max_length=250, null=True)), - ('publicly_announced', models.BooleanField(default=False)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ( + "funding_call", + models.CharField(blank=True, max_length=50, null=True), + ), + ("proponent", models.CharField(blank=True, max_length=500, null=True)), + ( + "reference_number", + models.CharField(blank=True, max_length=50, null=True), + ), + ( + "project_title", + models.CharField(blank=True, max_length=500, null=True), + ), + ( + "primary_location", + models.CharField(blank=True, max_length=250, null=True), + ), + ("status", models.CharField(blank=True, max_length=250, null=True)), + ("arc_funding", models.IntegerField(blank=True, 
null=True)), + ("funds_issued", models.IntegerField(blank=True, null=True)), + ("start_date", models.CharField(blank=True, max_length=250, null=True)), + ( + "completion_date", + models.CharField(blank=True, max_length=250, null=True), + ), + ("total_project_value", models.IntegerField(blank=True, null=True)), + ( + "zev_sub_sector", + models.CharField(blank=True, max_length=250, null=True), + ), + ( + "on_road_off_road", + models.CharField(blank=True, max_length=250, null=True), + ), + ("fuel_type", models.CharField(blank=True, max_length=250, null=True)), + ("publicly_announced", models.BooleanField(default=False)), ], options={ - 'db_table': 'arc_project_tracking', + "db_table": "arc_project_tracking", }, ), ] diff --git a/django/api/migrations/0016_scrapit.py b/django/api/migrations/0016_scrapit.py index 2cfaa3fc..1a4bcacb 100644 --- a/django/api/migrations/0016_scrapit.py +++ b/django/api/migrations/0016_scrapit.py @@ -6,32 +6,71 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0015_arcprojecttracking'), + ("api", "0015_arcprojecttracking"), ] operations = [ migrations.CreateModel( - name='ScrapIt', + name="ScrapIt", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('approval_number', models.IntegerField(blank=True, null=True)), - ('application_received_date', models.CharField(blank=True, max_length=250, null=True)), - ('completion_date', models.CharField(blank=True, max_length=250, null=True)), - ('postal_code', models.CharField(blank=True, max_length=250, null=True)), - ('vin', models.CharField(blank=True, max_length=250, null=True)), - ('application_city_fuel', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)), - ('incentive_type', models.CharField(blank=True, max_length=250, null=True)), - ('incentive_cost', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)), - ('cheque_number', models.CharField(blank=True, max_length=250, null=True)), - ('budget_code', models.CharField(blank=True, max_length=250, null=True)), - ('scrap_date', models.CharField(blank=True, max_length=250, null=True)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ("approval_number", models.IntegerField(blank=True, null=True)), + ( + "application_received_date", + models.CharField(blank=True, max_length=250, null=True), + ), + ( + "completion_date", + models.CharField(blank=True, max_length=250, null=True), + ), + ( + "postal_code", + models.CharField(blank=True, max_length=250, null=True), + ), + ("vin", models.CharField(blank=True, max_length=250, null=True)), + ( + "application_city_fuel", + models.DecimalField( + blank=True, decimal_places=2, max_digits=10, null=True + ), + ), + ( + "incentive_type", + models.CharField(blank=True, max_length=250, null=True), + ), + ( + "incentive_cost", + models.DecimalField( + blank=True, 
decimal_places=2, max_digits=10, null=True + ), + ), + ( + "cheque_number", + models.CharField(blank=True, max_length=250, null=True), + ), + ( + "budget_code", + models.CharField(blank=True, max_length=250, null=True), + ), + ("scrap_date", models.CharField(blank=True, max_length=250, null=True)), ], options={ - 'db_table': 'scrap_it', + "db_table": "scrap_it", }, ), ] diff --git a/django/api/migrations/0017_whitelistedusers.py b/django/api/migrations/0017_whitelistedusers.py index e58314d5..d06d72c1 100644 --- a/django/api/migrations/0017_whitelistedusers.py +++ b/django/api/migrations/0017_whitelistedusers.py @@ -6,22 +6,33 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0016_scrapit'), + ("api", "0016_scrapit"), ] operations = [ migrations.CreateModel( - name='WhitelistedUsers', + name="WhitelistedUsers", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('user', models.CharField(max_length=100, unique=True)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ("user", models.CharField(max_length=100, unique=True)), ], options={ - 'db_table': 'whitelisted_users', + "db_table": "whitelisted_users", }, ), ] diff --git a/django/api/migrations/0018_auto_20231201_2301.py b/django/api/migrations/0018_auto_20231201_2301.py index b6addc17..58d1ec08 100644 --- a/django/api/migrations/0018_auto_20231201_2301.py +++ b/django/api/migrations/0018_auto_20231201_2301.py @@ -6,13 +6,13 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0017_whitelistedusers'), + ("api", "0017_whitelistedusers"), ] operations = [ migrations.RenameField( - model_name='hydrogenfleets', - old_name='steet_address', - new_name='street_address', + model_name="hydrogenfleets", + old_name="steet_address", + new_name="street_address", ), ] diff --git a/django/api/migrations/0019_auto_20240223_1820.py b/django/api/migrations/0019_auto_20240223_1820.py index c4c78315..34384558 100644 --- a/django/api/migrations/0019_auto_20240223_1820.py +++ b/django/api/migrations/0019_auto_20240223_1820.py @@ -7,54 +7,101 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0018_auto_20231201_2301'), + ("api", "0018_auto_20231201_2301"), ] operations = [ migrations.CreateModel( - name='Permission', + name="Permission", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('description', models.CharField(max_length=100, unique=True)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), 
+ ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ("description", models.CharField(max_length=100, unique=True)), ], options={ - 'db_table': 'permission', + "db_table": "permission", }, ), migrations.CreateModel( - name='User', + name="User", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('idir', models.CharField(max_length=100, unique=True)), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ("idir", models.CharField(max_length=100, unique=True)), ], options={ - 'db_table': 'user', + "db_table": "user", }, ), migrations.CreateModel( - name='UserPermission', + name="UserPermission", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), - ('create_user', models.CharField(default='SYSTEM', max_length=130)), - ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), - ('update_user', models.CharField(max_length=130, null=True)), - ('permission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='permission', to='api.permission')), - ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to='api.user')), + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ( + "create_timestamp", + models.DateTimeField(auto_now_add=True, null=True), + ), + ("create_user", models.CharField(default="SYSTEM", max_length=130)), + ("update_timestamp", models.DateTimeField(auto_now=True, null=True)), + ("update_user", models.CharField(max_length=130, null=True)), + ( + "permission", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="permission", + to="api.permission", + ), + ), + ( + "user", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="user", + to="api.user", + ), + ), ], options={ - 'db_table': 'user_permission', + "db_table": "user_permission", }, ), migrations.DeleteModel( - name='WhitelistedUsers', + name="WhitelistedUsers", ), ] diff --git a/django/api/migrations/0020_auto_20240311_2136.py b/django/api/migrations/0020_auto_20240311_2136.py index 4a943f82..0475c7d1 100644 --- a/django/api/migrations/0020_auto_20240311_2136.py +++ b/django/api/migrations/0020_auto_20240311_2136.py @@ -6,28 +6,28 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0019_auto_20240223_1820'), + ("api", "0019_auto_20240223_1820"), ] operations = [ migrations.RenameField( - model_name='datafleets', - 
old_name='component_being_applyied_for', - new_name='component_being_applied_for', + model_name="datafleets", + old_name="component_being_applyied_for", + new_name="component_being_applied_for", ), migrations.RenameField( - model_name='datafleets', - old_name='number_of_Level_2_Charging_Stations_being_applying_for', - new_name='number_of_level_2_charging_stations_being_applied_for', + model_name="datafleets", + old_name="number_of_Level_2_Charging_Stations_being_applying_for", + new_name="number_of_level_2_charging_stations_being_applied_for", ), migrations.RenameField( - model_name='datafleets', - old_name='number_of_level_3_dc_fast_charging_stations_being_applying_for', - new_name='number_of_level_3_dc_fast_charging_stations_being_applied_for', + model_name="datafleets", + old_name="number_of_level_3_dc_fast_charging_stations_being_applying_for", + new_name="number_of_level_3_dc_fast_charging_stations_being_applied_for", ), migrations.RenameField( - model_name='datafleets', - old_name='type_of_charger_being_installing', - new_name='type_of_charger_being_installed', + model_name="datafleets", + old_name="type_of_charger_being_installing", + new_name="type_of_charger_being_installed", ), ] diff --git a/django/api/migrations/0021_auto_20240326_2152.py b/django/api/migrations/0021_auto_20240326_2152.py index 7419b057..6ba7c96b 100644 --- a/django/api/migrations/0021_auto_20240326_2152.py +++ b/django/api/migrations/0021_auto_20240326_2152.py @@ -6,13 +6,13 @@ class Migration(migrations.Migration): dependencies = [ - ('api', '0020_auto_20240311_2136'), + ("api", "0020_auto_20240311_2136"), ] operations = [ migrations.AlterField( - model_name='specialityusevehicleincentives', - name='date', + model_name="specialityusevehicleincentives", + name="date", field=models.DateField(blank=True, max_length=20, null=True), ), ] diff --git a/django/api/models/__init__.py b/django/api/models/__init__.py index c9882bfa..2aebd7a9 100644 --- a/django/api/models/__init__.py +++ b/django/api/models/__init__.py @@ -20,4 +20,4 @@ from . import scrap_it from . import user from . import permission -from . import user_permission \ No newline at end of file +from . 
import user_permission diff --git a/django/api/models/arc_project_tracking.py b/django/api/models/arc_project_tracking.py index 5047f1e7..8c66bc92 100644 --- a/django/api/models/arc_project_tracking.py +++ b/django/api/models/arc_project_tracking.py @@ -4,101 +4,47 @@ class ARCProjectTracking(Auditable): - funding_call = models.CharField( - blank=True, - null=True, - max_length=50, - unique=False - ) + funding_call = models.CharField(blank=True, null=True, max_length=50, unique=False) - proponent = models.CharField( - blank=True, - null=True, - max_length=500, - unique=False - ) + proponent = models.CharField(blank=True, null=True, max_length=500, unique=False) reference_number = models.CharField( - blank=True, - null=True, - max_length=50, - unique=False + blank=True, null=True, max_length=50, unique=False ) project_title = models.CharField( - blank=True, - null=True, - max_length=500, - unique=False + blank=True, null=True, max_length=500, unique=False ) primary_location = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False + blank=True, null=True, max_length=250, unique=False ) - status = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False - ) + status = models.CharField(blank=True, null=True, max_length=250, unique=False) - arc_funding = models.IntegerField( - blank=True, - null=True - ) + arc_funding = models.IntegerField(blank=True, null=True) - funds_issued = models.IntegerField( - blank=True, - null=True - ) + funds_issued = models.IntegerField(blank=True, null=True) - start_date = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False - ) + start_date = models.CharField(blank=True, null=True, max_length=250, unique=False) completion_date = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False + blank=True, null=True, max_length=250, unique=False ) - total_project_value = models.IntegerField( - blank=True, - null=True - ) + total_project_value = models.IntegerField(blank=True, null=True) zev_sub_sector = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False + blank=True, null=True, max_length=250, unique=False ) on_road_off_road = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False + blank=True, null=True, max_length=250, unique=False ) - fuel_type = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False - ) + fuel_type = models.CharField(blank=True, null=True, max_length=250, unique=False) - publicly_announced = models.BooleanField( - default=False - ) + publicly_announced = models.BooleanField(default=False) class Meta: - db_table = "arc_project_tracking" \ No newline at end of file + db_table = "arc_project_tracking" diff --git a/django/api/models/charger_rebates.py b/django/api/models/charger_rebates.py index 6bf23c7f..baea6aee 100644 --- a/django/api/models/charger_rebates.py +++ b/django/api/models/charger_rebates.py @@ -4,80 +4,33 @@ class ChargerRebates(Auditable): - organization = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False - ) + organization = models.CharField(blank=True, null=True, max_length=250, unique=False) - region = models.CharField( - blank=True, - null=True, - max_length=200, - unique=False - ) + region = models.CharField(blank=True, null=True, max_length=200, unique=False) - city = models.CharField( - blank=True, - null=True, - max_length=200, - unique=False - ) + city = models.CharField(blank=True, null=True, max_length=200, unique=False) - address = 
models.CharField( - blank=True, - null=True, - max_length=200, - unique=False - ) + address = models.CharField(blank=True, null=True, max_length=200, unique=False) - postal_code = models.CharField( - blank=True, - null=True, - max_length=200, - unique=False - ) + postal_code = models.CharField(blank=True, null=True, max_length=200, unique=False) - number_of_fast_charging_stations = models.IntegerField( - blank=True, - null=True - ) + number_of_fast_charging_stations = models.IntegerField(blank=True, null=True) in_service_date = models.CharField( - blank=True, - null=True, - max_length=100, - unique=False + blank=True, null=True, max_length=100, unique=False ) expected_in_service_date = models.CharField( - blank=True, - null=True, - max_length=200, - unique=False + blank=True, null=True, max_length=200, unique=False ) - announced = models.CharField( - blank=True, - null=True, - max_length=200, - unique=False - ) + announced = models.CharField(blank=True, null=True, max_length=200, unique=False) rebate_paid = models.DecimalField( - blank=True, - null=True, - max_digits=20, - decimal_places=2 + blank=True, null=True, max_digits=20, decimal_places=2 ) - notes = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False - ) + notes = models.CharField(blank=True, null=True, max_length=250, unique=False) class Meta: db_table = "charger_rebates" diff --git a/django/api/models/credit_class.py b/django/api/models/credit_class.py index 78f31e4a..4e005f1c 100644 --- a/django/api/models/credit_class.py +++ b/django/api/models/credit_class.py @@ -1,6 +1,7 @@ """ Credit Class model """ + from django.db import models from api.models.mixins.effective_dates import EffectiveDates @@ -12,12 +13,8 @@ class CreditClass(EffectiveDates, Auditable): A lookup table for credit classes. Initially, A or B, but with room to expand later. 
""" + class Meta: db_table = "credit_class_code" - credit_class = models.CharField( - blank=False, - max_length=3, - null=False, - unique=True - ) + credit_class = models.CharField(blank=False, max_length=3, null=False, unique=True) diff --git a/django/api/models/data_fleets.py b/django/api/models/data_fleets.py index c72b8fb5..045582ec 100644 --- a/django/api/models/data_fleets.py +++ b/django/api/models/data_fleets.py @@ -4,171 +4,88 @@ class DataFleets(Auditable): current_stage = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False + blank=True, null=True, max_length=250, unique=False ) - rebate_value = models.CharField( - blank=True, - null=True, - max_length=100, - unique=False - ) + rebate_value = models.CharField(blank=True, null=True, max_length=100, unique=False) legal_name_of_organization_fleet = models.CharField( - blank=True, - null=True, - max_length=500, - unique=False + blank=True, null=True, max_length=500, unique=False ) business_category = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False + blank=True, null=True, max_length=250, unique=False ) - city = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False - ) + city = models.CharField(blank=True, null=True, max_length=250, unique=False) - postal_code = models.CharField( - blank=True, - null=True, - max_length=100, - unique=False - ) + postal_code = models.CharField(blank=True, null=True, max_length=100, unique=False) applicant_first_name = models.CharField( - blank=True, - null=True, - max_length=100, - unique=False + blank=True, null=True, max_length=100, unique=False ) applicant_last_name = models.CharField( - blank=True, - null=True, - max_length=100, - unique=False + blank=True, null=True, max_length=100, unique=False ) email_address = models.CharField( - blank=True, - null=True, - max_length=100, - unique=False + blank=True, null=True, max_length=100, unique=False ) - fleet_size_all = models.IntegerField( - blank=True, - null=True - ) + fleet_size_all = models.IntegerField(blank=True, null=True) - fleet_size_light_duty = models.IntegerField( - blank=True, - null=True - ) + fleet_size_light_duty = models.IntegerField(blank=True, null=True) - total_number_of_evs = models.IntegerField( - blank=True, - null=True - ) + total_number_of_evs = models.IntegerField(blank=True, null=True) - total_number_of_light_duty_evs = models.IntegerField( - blank=True, - null=True - ) + total_number_of_light_duty_evs = models.IntegerField(blank=True, null=True) - phev = models.IntegerField( - blank=True, - null=True - ) + phev = models.IntegerField(blank=True, null=True) - evse = models.IntegerField( - blank=True, - null=True - ) + evse = models.IntegerField(blank=True, null=True) average_daily_travel_distance = models.CharField( - blank=True, - null=True, - max_length=100, - unique=False + blank=True, null=True, max_length=100, unique=False ) component_being_applied_for = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False + blank=True, null=True, max_length=250, unique=False ) estimated_cost = models.CharField( - blank=True, - null=True, - max_length=100, - unique=False + blank=True, null=True, max_length=100, unique=False ) type_of_charger_being_installed = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False + blank=True, null=True, max_length=250, unique=False ) number_of_level_2_charging_stations_being_applied_for = models.IntegerField( - blank=True, - null=True + blank=True, null=True ) 
number_of_level_3_dc_fast_charging_stations_being_applied_for = models.IntegerField( - blank=True, - null=True + blank=True, null=True ) application_form_fleets_completion_date_time = models.CharField( - blank=True, - null=True, - max_length=100, - unique=False + blank=True, null=True, max_length=100, unique=False ) pre_approval_date = models.CharField( - blank=True, - null=True, - max_length=100, - unique=False + blank=True, null=True, max_length=100, unique=False ) - deadline = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False - ) + deadline = models.CharField(blank=True, null=True, max_length=250, unique=False) application_number = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False + blank=True, null=True, max_length=250, unique=False ) potential_rebate = models.CharField( - blank=True, - null=True, - max_length=100, - unique=False + blank=True, null=True, max_length=100, unique=False ) - class Meta: db_table = "data_fleets" diff --git a/django/api/models/datasets.py b/django/api/models/datasets.py index f5242bb3..514e7a2a 100644 --- a/django/api/models/datasets.py +++ b/django/api/models/datasets.py @@ -11,4 +11,4 @@ class Datasets(Auditable): ) class Meta: - db_table = 'datasets' + db_table = "datasets" diff --git a/django/api/models/hydrogen_fleets.py b/django/api/models/hydrogen_fleets.py index 31651a37..9420eeb4 100644 --- a/django/api/models/hydrogen_fleets.py +++ b/django/api/models/hydrogen_fleets.py @@ -4,105 +4,44 @@ class HydrogenFleets(Auditable): - application_number = models.IntegerField( - blank=True, - null=True - ) + application_number = models.IntegerField(blank=True, null=True) - fleet_number = models.IntegerField( - blank=True, - null=True - ) + fleet_number = models.IntegerField(blank=True, null=True) application_date = models.CharField( - blank=True, - null=True, - max_length=100, - unique=False + blank=True, null=True, max_length=100, unique=False ) organization_name = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False + blank=True, null=True, max_length=250, unique=False ) - fleet_name = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False - ) + fleet_name = models.CharField(blank=True, null=True, max_length=250, unique=False) street_address = models.CharField( - blank=True, - null=True, - max_length=250, - unique=False + blank=True, null=True, max_length=250, unique=False ) - city = models.CharField( - blank=True, - null=True, - max_length=100, - unique=False - ) + city = models.CharField(blank=True, null=True, max_length=100, unique=False) - postal_code = models.CharField( - blank=True, - null=True, - max_length=10, - unique=False - ) + postal_code = models.CharField(blank=True, null=True, max_length=10, unique=False) - vin = models.CharField( - blank=True, - null=True, - max_length=20, - unique=False - ) + vin = models.CharField(blank=True, null=True, max_length=20, unique=False) - make = models.CharField( - blank=True, - null=True, - max_length=100, - unique=False - ) + make = models.CharField(blank=True, null=True, max_length=100, unique=False) - model = models.CharField( - blank=True, - null=True, - max_length=100, - unique=False - ) + model = models.CharField(blank=True, null=True, max_length=100, unique=False) - year = models.CharField( - blank=True, - null=True, - max_length=100, - unique=False - ) + year = models.CharField(blank=True, null=True, max_length=100, unique=False) purchase_date = models.CharField( - blank=True, - null=True, - 
-        max_length=100,
-        unique=False
+        blank=True, null=True, max_length=100, unique=False
     )
-    dealer_name = models.CharField(
-        blank=True,
-        null=True,
-        max_length=250,
-        unique=False
-    )
+    dealer_name = models.CharField(blank=True, null=True, max_length=250, unique=False)
     rebate_amount = models.CharField(
-        blank=True,
-        null=True,
-        max_length=250,
-        unique=False
+        blank=True, null=True, max_length=250, unique=False
     )

     class Meta:
diff --git a/django/api/models/hydrogen_fueling.py b/django/api/models/hydrogen_fueling.py
index 08d40845..50eb6e6a 100644
--- a/django/api/models/hydrogen_fueling.py
+++ b/django/api/models/hydrogen_fueling.py
@@ -1,6 +1,7 @@
 """
 Hydrogen Fueling
 """
+
 from django.db import models

 from api.models.mixins.effective_dates import EffectiveDates
@@ -12,87 +13,25 @@ class HydrogrenFueling(EffectiveDates, Auditable):
     class Meta:
         db_table = "hydrogen_fueling"

-    station_number = models.IntegerField(
-        null=True,
-        blank=True
-    )
-    rfp_close_date = models.DateField(
-        blank=True,
-        null=True
-    )
-    station_name = models.CharField(
-        blank=True,
-        max_length=200,
-        null=True
-    )
-    street_address = models.CharField(
-        blank=True,
-        max_length=200,
-        null=True
-    )
-    city = models.CharField(
-        blank=True,
-        max_length=200,
-        null=True,
-        unique=False
-    )
-    postal_code = models.CharField(
-        blank=True,
-        max_length=50,
-        null=True,
-        unique=False
-    )
-    proponent = models.CharField(
-        blank=True,
-        max_length=100,
-        null=True
-    )
-    location_partner = models.CharField(
-        max_length=100,
-        null=True,
-        blank=True
-    )
+    station_number = models.IntegerField(null=True, blank=True)
+    rfp_close_date = models.DateField(blank=True, null=True)
+    station_name = models.CharField(blank=True, max_length=200, null=True)
+    street_address = models.CharField(blank=True, max_length=200, null=True)
+    city = models.CharField(blank=True, max_length=200, null=True, unique=False)
+    postal_code = models.CharField(blank=True, max_length=50, null=True, unique=False)
+    proponent = models.CharField(blank=True, max_length=100, null=True)
+    location_partner = models.CharField(max_length=100, null=True, blank=True)
     capital_funding_awarded = models.DecimalField(
-        null=True,
-        blank=True,
-        max_digits=20,
-        decimal_places=2
+        null=True, blank=True, max_digits=20, decimal_places=2
     )
     om_funding_potential = models.DecimalField(
-        null=True,
-        blank=True,
-        max_digits=20,
-        decimal_places=2
-    )
-    daily_capacity = models.IntegerField(
-        null=True,
-        blank=True
-    )
-    bar_700 = models.BooleanField(
-        default=False
-    )
-    bar_350 = models.BooleanField(
-        default=False
-    )
-    status = models.CharField(
-        blank=True,
-        max_length=200,
-        null=True,
-        unique=False
-    )
-    number_of_fueling_positions = models.IntegerField(
-        null=True,
-        blank=True
-    )
-    operational_date = models.DateField(
-        blank=True,
-        null=True
-    )
-    opening_date = models.DateField(
-        blank=True,
-        null=True
-    )
-    total_capital_cost = models.DecimalField(
-        max_digits=20,
-        decimal_places=2
-    )
+        null=True, blank=True, max_digits=20, decimal_places=2
+    )
+    daily_capacity = models.IntegerField(null=True, blank=True)
+    bar_700 = models.BooleanField(default=False)
+    bar_350 = models.BooleanField(default=False)
+    status = models.CharField(blank=True, max_length=200, null=True, unique=False)
+    number_of_fueling_positions = models.IntegerField(null=True, blank=True)
+    operational_date = models.DateField(blank=True, null=True)
+    opening_date = models.DateField(blank=True, null=True)
+    total_capital_cost = models.DecimalField(max_digits=20, decimal_places=2)
diff --git a/django/api/models/icbc_registration_data.py b/django/api/models/icbc_registration_data.py
index 05df30eb..6f6902b4 100644
--- a/django/api/models/icbc_registration_data.py
+++ b/django/api/models/icbc_registration_data.py
@@ -4,23 +4,15 @@ class IcbcRegistrationData(Auditable):
     icbc_vehicle = models.ForeignKey(
-        'IcbcVehicle',
-        related_name=None,
-        on_delete=models.CASCADE
+        "IcbcVehicle", related_name=None, on_delete=models.CASCADE
     )
     vin = models.CharField(
-        blank=False,
-        null=False,
-        unique=True,
-        max_length=20,
-        db_index=True
+        blank=False, null=False, unique=True, max_length=20, db_index=True
     )
     icbc_upload_date = models.ForeignKey(
-        'IcbcUploadDate',
-        related_name=None,
-        on_delete=models.CASCADE
+        "IcbcUploadDate", related_name=None, on_delete=models.CASCADE
     )

     class Meta:
diff --git a/django/api/models/icbc_upload_date.py b/django/api/models/icbc_upload_date.py
index de51753d..8390c87a 100644
--- a/django/api/models/icbc_upload_date.py
+++ b/django/api/models/icbc_upload_date.py
@@ -3,11 +3,7 @@ class IcbcUploadDate(Auditable):
-    upload_date = models.DateField(
-        blank=False,
-        null=False,
-        auto_now=False
-    )
+    upload_date = models.DateField(blank=False, null=False, auto_now=False)

     class Meta:
-        db_table = 'icbc_upload_date'
+        db_table = "icbc_upload_date"
diff --git a/django/api/models/icbc_vehicle.py b/django/api/models/icbc_vehicle.py
index 3c1a53e8..63275d6f 100644
--- a/django/api/models/icbc_vehicle.py
+++ b/django/api/models/icbc_vehicle.py
@@ -1,6 +1,7 @@
 """
 ICBC Vehicle Model
 """
+
 from django.db import models

 from auditable.models import Auditable
@@ -9,33 +10,19 @@ class IcbcVehicle(Auditable):
     "All vehicle models that have been added from icbc registration"
     "spreadsheet."

-    make = models.CharField(
-        blank=False,
-        null=False,
-        max_length=250,
-        db_index=True
-    )
+    make = models.CharField(blank=False, null=False, max_length=250, db_index=True)
     model_name = models.CharField(
-        blank=False,
-        max_length=250,
-        null=False,
-        db_index=True
+        blank=False, max_length=250, null=False, db_index=True
     )
     model_year = models.ForeignKey(
-        'ModelYear',
+        "ModelYear",
         related_name=None,
         on_delete=models.PROTECT,
         null=False,
-        db_index=True
+        db_index=True,
     )

     class Meta:
-        db_table = 'icbc_vehicle'
-        unique_together = [[
-            'make', 'model_name',
-            'model_year'
-        ]]
-        index_together = [[
-            'make', 'model_name',
-            'model_year'
-        ]]
+        db_table = "icbc_vehicle"
+        unique_together = [["make", "model_name", "model_year"]]
+        index_together = [["make", "model_name", "model_year"]]
diff --git a/django/api/models/ldv_rebates.py b/django/api/models/ldv_rebates.py
index c73f7511..10bc2541 100644
--- a/django/api/models/ldv_rebates.py
+++ b/django/api/models/ldv_rebates.py
@@ -3,108 +3,50 @@ class LdvRebates(Auditable):
-    submission_id = models.IntegerField(
-        unique=False
-    )
-    casl_consent = models.BooleanField(
-        default=False
-    )
+    submission_id = models.IntegerField(unique=False)
+    casl_consent = models.BooleanField(default=False)
     date_approved = models.CharField(
-        blank=True,
-        null=True,
-        max_length=100,
-        unique=False
+        blank=True, null=True, max_length=100, unique=False
     )
     submission_date = models.CharField(
-        blank=True,
-        null=True,
-        max_length=100,
-        unique=False
-    )
-    company_name = models.CharField(
-        blank=True,
-        max_length=200,
-        null=True,
-        unique=False
-    )
-    company_city = models.CharField(
-        blank=True,
-        max_length=200,
-        null=True,
-        unique=False
+        blank=True, null=True, max_length=100, unique=False
     )
+    company_name = models.CharField(blank=True, max_length=200, null=True, unique=False)
+    company_city = models.CharField(blank=True, max_length=200, null=True, unique=False)
     applicant_name = models.CharField(
-        blank=True,
-        max_length=200,
-        null=True,
-        unique=False
+        blank=True, max_length=200, null=True, unique=False
     )
     applicant_address_1 = models.CharField(
-        blank=True,
-        max_length=200,
-        null=True,
-        unique=False
+        blank=True, max_length=200, null=True, unique=False
     )
     applicant_address_2 = models.CharField(
-        blank=True,
-        max_length=200,
-        null=True,
-        unique=False
+        blank=True, max_length=200, null=True, unique=False
     )
     applicant_city = models.CharField(
-        blank=True,
-        max_length=100,
-        null=True,
-        unique=False
+        blank=True, max_length=100, null=True, unique=False
     )
     applicant_postal_code = models.CharField(
-        blank=True,
-        max_length=50,
-        null=True,
-        unique=False
+        blank=True, max_length=50, null=True, unique=False
     )
     applicant_phone = models.CharField(
-        blank=True,
-        max_length=25,
-        null=True,
-        unique=False
+        blank=True, max_length=25, null=True, unique=False
     )
     applicant_email = models.CharField(
-        blank=True,
-        max_length=200,
-        null=True,
-        unique=False
-    )
-    applicant_use = models.CharField(
-        blank=True,
-        max_length=50,
-        null=True,
-        unique=False
+        blank=True, max_length=200, null=True, unique=False
     )
+    applicant_use = models.CharField(blank=True, max_length=50, null=True, unique=False)
     applicant_type = models.CharField(
-        blank=True,
-        max_length=100,
-        null=True,
-        unique=False
+        blank=True, max_length=100, null=True, unique=False
     )
     business_name = models.CharField(
-        blank=True,
-        max_length=100,
-        null=True,
-        unique=False
+        blank=True, max_length=100, null=True, unique=False
     )
     business_number = models.CharField(
-        blank=True,
-        max_length=50,
-        null=True,
-        unique=False
+        blank=True, max_length=50, null=True, unique=False
     )
     drivers_license = models.CharField(
-        blank=True,
-        max_length=50,
-        null=True,
-        unique=False
+        blank=True, max_length=50, null=True, unique=False
     )
     province = models.CharField(
         blank=True,
@@ -120,34 +62,15 @@ class LdvRebates(Auditable):
         max_digits=20,
         decimal_places=2,
     )
-    document_type = models.CharField(
-        blank=True,
-        max_length=50,
-        null=True,
-        unique=False
-    )
-    vehicle = models.CharField(
-        blank=True,
-        max_length=200,
-        null=True,
-        unique=False
-    )
+    document_type = models.CharField(blank=True, max_length=50, null=True, unique=False)
+    vehicle = models.CharField(blank=True, max_length=200, null=True, unique=False)
     incentive_amount = models.DecimalField(
         max_digits=20,
         decimal_places=2,
     )
-    vin = models.CharField(
-        blank=True,
-        max_length=255,
-        null=True,
-        unique=False
-    )
-    delivered = models.BooleanField(
-        default=False
-    )
-    consent_to_contact = models.BooleanField(
-        default=False
-    )
+    vin = models.CharField(blank=True, max_length=255, null=True, unique=False)
+    delivered = models.BooleanField(default=False)
+    consent_to_contact = models.BooleanField(default=False)

     class Meta:
         db_table = "ldv_rebates"
diff --git a/django/api/models/mixins/effective_dates.py b/django/api/models/mixins/effective_dates.py
index 7ccc05ca..693f7b64 100644
--- a/django/api/models/mixins/effective_dates.py
+++ b/django/api/models/mixins/effective_dates.py
@@ -2,15 +2,9 @@ class EffectiveDates(models.Model):
-    effective_date = models.DateField(
-        blank=True,
-        null=True
-    )
+    effective_date = models.DateField(blank=True, null=True)

-    expiration_date = models.DateField(
-        blank=True,
-        null=True
-    )
+    expiration_date = models.DateField(blank=True, null=True)

     class Meta:
         abstract = True
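Context for the hunk just above: EffectiveDates is an abstract model, so Django folds effective_date and expiration_date into each concrete subclass's table (HydrogrenFueling above and PublicCharging below inherit it) rather than creating a table for the mixin itself. A minimal sketch of the pattern; ExampleStation is a hypothetical model, not part of this patch:

from django.db import models

from api.models.mixins.effective_dates import EffectiveDates
from auditable.models import Auditable


class ExampleStation(EffectiveDates, Auditable):
    # The generated "example_station" table carries name plus the
    # effective_date/expiration_date columns inherited from the mixin.
    name = models.CharField(blank=True, max_length=200, null=True)

    class Meta:
        db_table = "example_station"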
diff --git a/django/api/models/mixins/named.py b/django/api/models/mixins/named.py
index fdba1a61..0cfb06b6 100644
--- a/django/api/models/mixins/named.py
+++ b/django/api/models/mixins/named.py
@@ -3,10 +3,7 @@ class Description(models.Model):
     description = models.CharField(
-        blank=False,
-        db_column="description",
-        max_length=250,
-        null=False
+        blank=False, db_column="description", max_length=250, null=False
     )

     class Meta:
@@ -15,10 +12,7 @@ class Meta:

 class Named(models.Model):
     name = models.CharField(
-        blank=False,
-        db_column="description",
-        max_length=250,
-        null=False
+        blank=False, db_column="description", max_length=250, null=False
     )

     class Meta:
@@ -27,11 +21,7 @@ class Meta:

 class UniquelyNamed(models.Model):
     name = models.CharField(
-        blank=False,
-        db_column="description",
-        unique=True,
-        null=False,
-        max_length=250
+        blank=False, db_column="description", unique=True, null=False, max_length=250
     )

     class Meta:
diff --git a/django/api/models/organization.py b/django/api/models/organization.py
index b8bf1650..4582f0e9 100644
--- a/django/api/models/organization.py
+++ b/django/api/models/organization.py
@@ -5,25 +5,15 @@ class Organization(Auditable):
     name = models.CharField(
-        db_column="organization_name",
-        max_length=500,
-        null=False,
-        unique=True
+        db_column="organization_name", max_length=500, null=False, unique=True
     )
     short_name = models.CharField(
-        db_column='short_name',
-        unique=True,
-        null=True,
-        max_length=64
+        db_column="short_name", unique=True, null=True, max_length=64
     )
-    is_active = models.BooleanField(
-        default=False
-    )
-    is_government = models.BooleanField(
-        default=False
-    )
+    is_active = models.BooleanField(default=False)
+    is_government = models.BooleanField(default=False)

     class Meta:
-        db_table = 'organization'
+        db_table = "organization"
diff --git a/django/api/models/permission.py b/django/api/models/permission.py
index a9483873..78975b04 100644
--- a/django/api/models/permission.py
+++ b/django/api/models/permission.py
@@ -1,6 +1,7 @@
 from django.db import models
 from auditable.models import Auditable

+
 class Permission(Auditable):
     description = models.CharField(
         blank=False,
@@ -10,6 +11,9 @@ class Permission(Auditable):
     )

     class Meta:
-        db_table = 'permission'
-        db_table_comment = "Contains the list of permissions to grant access to " \
-                           "certain actions of areas for the system."
\ No newline at end of file
+        db_table = "permission"
+
+        db_table_comment = (
+            "Contains the list of permissions to grant access to "
+            "certain actions of areas for the system."
+        )
diff --git a/django/api/models/public_charging.py b/django/api/models/public_charging.py
index df957671..c4ee262a 100644
--- a/django/api/models/public_charging.py
+++ b/django/api/models/public_charging.py
@@ -1,6 +1,7 @@
 """
 Public Charging Model
 """
+
 from django.db import models

 from api.models.mixins.effective_dates import EffectiveDates
@@ -13,23 +14,10 @@ class Meta:
         db_table = "public_charging"

     applicant_name = models.CharField(
-        blank=True,
-        max_length=200,
-        null=True,
-        unique=False
-    )
-    city = models.CharField(
-        blank=True,
-        max_length=200,
-        null=True,
-        unique=False
-    )
-    postal_code = models.CharField(
-        blank=True,
-        max_length=50,
-        null=True,
-        unique=False
+        blank=True, max_length=200, null=True, unique=False
     )
+    city = models.CharField(blank=True, max_length=200, null=True, unique=False)
+    postal_code = models.CharField(blank=True, max_length=50, null=True, unique=False)
     address = models.CharField(
         blank=True,
         max_length=200,
@@ -40,62 +28,21 @@ class Meta:
         max_length=500,
         null=True,
     )
-    between_25kw_and_50kw = models.IntegerField(
-        null=True,
-        blank=True
-    )
-    between_50kw_and_100kw = models.IntegerField(
-        null=True,
-        blank=True
-    )
-    over_100kw = models.IntegerField(
-        null=True,
-        blank=True
-    )
-    level_2_units = models.IntegerField(
-        null=True,
-        blank=True
-    )
-    level_2_ports = models.IntegerField(
-        null=True,
-        blank=True
-    )
-    estimated_budget = models.DecimalField(
-        max_digits=20,
-        decimal_places=2
-    )
-    adjusted_rebate = models.DecimalField(
-        max_digits=20,
-        decimal_places=2
-    )
-    rebate_percent_maximum = models.DecimalField(
-        max_digits=3,
-        decimal_places=2
-    )
+    between_25kw_and_50kw = models.IntegerField(null=True, blank=True)
+    between_50kw_and_100kw = models.IntegerField(null=True, blank=True)
+    over_100kw = models.IntegerField(null=True, blank=True)
+    level_2_units = models.IntegerField(null=True, blank=True)
+    level_2_ports = models.IntegerField(null=True, blank=True)
+    estimated_budget = models.DecimalField(max_digits=20, decimal_places=2)
+    adjusted_rebate = models.DecimalField(max_digits=20, decimal_places=2)
+    rebate_percent_maximum = models.DecimalField(max_digits=3, decimal_places=2)
     pilot_project = models.BooleanField()
-    region = models.CharField(
-        blank=True,
-        max_length=200,
-        null=True,
-        unique=False
-    )
+    region = models.CharField(blank=True, max_length=200, null=True, unique=False)
     organization_type = models.CharField(
-        blank=True,
-        max_length=100,
-        null=True,
-        unique=False
+        blank=True, max_length=100, null=True, unique=False
     )
     project_status = models.CharField(
-        blank=True,
-        max_length=200,
-        null=True,
-        unique=False
-    )
-    review_number = models.IntegerField(
-        null=True,
-        blank=True
-    )
-    rebate_paid = models.DecimalField(
-        max_digits=20,
-        decimal_places=2
+        blank=True, max_length=200, null=True, unique=False
     )
+    review_number = models.IntegerField(null=True, blank=True)
+    rebate_paid = models.DecimalField(max_digits=20, decimal_places=2)
diff --git a/django/api/models/scrap_it.py b/django/api/models/scrap_it.py
index 409f3753..1f8c9fd9 100644
--- a/django/api/models/scrap_it.py
+++ b/django/api/models/scrap_it.py
@@ -4,82 +4,39 @@ class ScrapIt(Auditable):
-    approval_number = models.IntegerField(
-        blank=True,
-        null=True
-    )
+    approval_number = models.IntegerField(blank=True, null=True)
     application_received_date = models.CharField(
-        blank=True,
-        null=True,
-        max_length=250,
-        unique=False
+        blank=True, null=True, max_length=250, unique=False
     )
     completion_date = models.CharField(
-        blank=True,
-        null=True,
-        max_length=250,
-        unique=False
+        blank=True, null=True, max_length=250, unique=False
     )
-    postal_code = models.CharField(
-        blank=True,
-        null=True,
-        max_length=250,
-        unique=False
-    )
+    postal_code = models.CharField(blank=True, null=True, max_length=250, unique=False)

-    vin = models.CharField(
-        blank=True,
-        null=True,
-        max_length=250,
-        unique=False
-    )
+    vin = models.CharField(blank=True, null=True, max_length=250, unique=False)
     application_city_fuel = models.DecimalField(
-        blank=True,
-        null=True,
-        max_digits=10,
-        decimal_places=2,
-        unique=False
+        blank=True, null=True, max_digits=10, decimal_places=2, unique=False
     )
     incentive_type = models.CharField(
-        blank=True,
-        null=True,
-        max_length=250,
-        unique=False
+        blank=True, null=True, max_length=250, unique=False
     )
     incentive_cost = models.DecimalField(
-        blank=True,
-        null=True,
-        max_digits=10,
-        decimal_places=2,
-        unique=False
+        blank=True, null=True, max_digits=10, decimal_places=2, unique=False
     )
     cheque_number = models.CharField(
-        blank=True,
-        null=True,
-        max_length=250,
-        unique=False
+        blank=True, null=True, max_length=250, unique=False
     )
-    budget_code = models.CharField(
-        blank=True,
-        null=True,
-        max_length=250,
-        unique=False
-    )
+    budget_code = models.CharField(blank=True, null=True, max_length=250, unique=False)

-    scrap_date = models.CharField(
-        blank=True,
-        null=True,
-        max_length=250,
-        unique=False
-    )
+    scrap_date = models.CharField(blank=True, null=True, max_length=250, unique=False)

     class Meta:
-        db_table = "scrap_it"
\ No newline at end of file
+        db_table = "scrap_it"
diff --git a/django/api/models/speciality_use_vehicle_incentives.py b/django/api/models/speciality_use_vehicle_incentives.py
index 175e052e..13b74877 100644
--- a/django/api/models/speciality_use_vehicle_incentives.py
+++ b/django/api/models/speciality_use_vehicle_incentives.py
@@ -4,35 +4,15 @@ class SpecialityUseVehicleIncentives(Auditable):
-    approvals = models.CharField(
-        blank=True,
-        null=True,
-        max_length=20
-    )
-    date = models.DateField(
-        max_length=20,
-        null=True,
-        blank=True
-    )
-    applicant_name = models.CharField(
-        blank=True,
-        null=True,
-        max_length=250
-    )
+    approvals = models.CharField(blank=True, null=True, max_length=20)
+    date = models.DateField(max_length=20, null=True, blank=True)
+    applicant_name = models.CharField(blank=True, null=True, max_length=250)
     max_incentive_amount_requested = models.IntegerField(
         null=True,
         blank=True,
     )
-    category = models.CharField(
-        blank=True,
-        max_length=250,
-        null=True
-    )
-    applicant_type = models.CharField(
-        blank=True,
-        max_length=50,
-        null=True
-    )
+    category = models.CharField(blank=True, max_length=250, null=True)
+    applicant_type = models.CharField(blank=True, max_length=50, null=True)
     incentive_paid = models.IntegerField(
         null=True,
         blank=True,
@@ -41,16 +21,8 @@ class SpecialityUseVehicleIncentives(Auditable):
         null=True,
         blank=True,
     )
-    manufacturer = models.CharField(
-        blank=True,
-        max_length=250,
-        null=True
-    )
-    model = models.CharField(
-        blank=True,
-        max_length=250,
-        null=True
-    )
+    manufacturer = models.CharField(blank=True, max_length=250, null=True)
+    model = models.CharField(blank=True, max_length=250, null=True)

     class Meta:
-        db_table = 'speciality_use_vehicle_incentives'
+        db_table = "speciality_use_vehicle_incentives"
diff --git a/django/api/models/user.py b/django/api/models/user.py
index e62538b5..e9850b0b 100644
--- a/django/api/models/user.py
+++ b/django/api/models/user.py
@@ -1,6 +1,7 @@
 from django.db import models
 from auditable.models import Auditable

+
 class User(Auditable):
     idir = models.CharField(
         blank=False,
@@ -10,5 +11,6 @@ class User(Auditable):
     )

     class Meta:
-        db_table = 'user'
-        db_table_comment = "Contains the list of users in the system "
\ No newline at end of file
+        db_table = "user"
+
+        db_table_comment = "Contains the list of users in the system "
diff --git a/django/api/models/user_permission.py b/django/api/models/user_permission.py
index 57cfc706..6ccc0c75 100644
--- a/django/api/models/user_permission.py
+++ b/django/api/models/user_permission.py
@@ -1,18 +1,14 @@
 from django.db import models
 from auditable.models import Auditable

+
 class UserPermission(Auditable):
-    user = models.ForeignKey(
-        'User',
-        related_name='user',
-        on_delete=models.CASCADE
-    )
+    user = models.ForeignKey("User", related_name="user", on_delete=models.CASCADE)
     permission = models.ForeignKey(
-        'Permission',
-        related_name='permission',
-        on_delete=models.CASCADE
+        "Permission", related_name="permission", on_delete=models.CASCADE
     )

     class Meta:
-        db_table = 'user_permission'
-        db_table_comment = "Contains the relationship between user and permission tables "
\ No newline at end of file
+        db_table = "user_permission"
+
+        db_table_comment = "Contains the relationship between user and permission tables "
diff --git a/django/api/models/vehicle.py b/django/api/models/vehicle.py
index 045b27ff..a69d23b9 100644
--- a/django/api/models/vehicle.py
+++ b/django/api/models/vehicle.py
@@ -4,70 +4,30 @@ class Vehicle(Auditable):
-    make = models.CharField(
-        blank=False,
-        null=False,
-        max_length=250
-    )
+    make = models.CharField(blank=False, null=False, max_length=250)
     vehicle_zev_type = models.ForeignKey(
-        'ZevType',
-        related_name=None,
-        on_delete=models.PROTECT
+        "ZevType", related_name=None, on_delete=models.PROTECT
     )
     vehicle_class_code = models.ForeignKey(
-        'VehicleClass',
-        related_name=None,
-        on_delete=models.PROTECT
+        "VehicleClass", related_name=None, on_delete=models.PROTECT
     )
     range = models.IntegerField()
-    model_name = models.CharField(
-        blank=False,
-        max_length=250,
-        null=False
-    )
+    model_name = models.CharField(blank=False, max_length=250, null=False)
     model_year = models.ForeignKey(
-        'ModelYear',
-        related_name=None,
-        on_delete=models.PROTECT,
-        null=False
-    )
-    validation_status = models.CharField(
-        max_length=20,
-        null=False,
-        default="DRAFT"
+        "ModelYear", related_name=None, on_delete=models.PROTECT, null=False
     )
+    validation_status = models.CharField(max_length=20, null=False, default="DRAFT")
     organization = models.ForeignKey(
-        'Organization',
-        related_name=None,
-        on_delete=models.PROTECT,
-        null=False
-    )
-    weight_kg = models.DecimalField(
-        blank=False,
-        max_digits=6,
-        decimal_places=0
+        "Organization", related_name=None, on_delete=models.PROTECT, null=False
     )
+    weight_kg = models.DecimalField(blank=False, max_digits=6, decimal_places=0)
     credit_class = models.ForeignKey(
-        'CreditClass',
-        related_name='+',
-        on_delete=models.PROTECT,
-        null=True
-    )
-    has_passed_us_06_test = models.BooleanField(
-        default=False
-    )
-    credit_value = models.DecimalField(
-        null=True,
-        decimal_places=2,
-        max_digits=20
-    )
-    is_active = models.BooleanField(
-        default=True
+        "CreditClass", related_name="+", on_delete=models.PROTECT, null=True
     )
+    has_passed_us_06_test = models.BooleanField(default=False)
+    credit_value = models.DecimalField(null=True, decimal_places=2, max_digits=20)
+    is_active = models.BooleanField(default=True)

     class Meta:
-        db_table = 'vehicle'
-        unique_together = [[
-            'make', 'model_name', 'vehicle_zev_type',
-            'model_year'
-        ]]
+        db_table = "vehicle"
+        unique_together = [["make", "model_name", "vehicle_zev_type", "model_year"]]
diff --git a/django/api/models/vehicle_class.py b/django/api/models/vehicle_class.py
index 456232fb..fec2f26b 100644
--- a/django/api/models/vehicle_class.py
+++ b/django/api/models/vehicle_class.py
@@ -7,11 +7,8 @@ class VehicleClass(Auditable, Description, EffectiveDates):
     vehicle_class_code = models.CharField(
-        blank=False,
-        max_length=3,
-        null=False,
-        unique=True
+        blank=False, max_length=3, null=False, unique=True
     )

     class Meta:
-        db_table = 'vehicle_class_code'
+        db_table = "vehicle_class_code"
diff --git a/django/api/models/vehicle_zev_type.py b/django/api/models/vehicle_zev_type.py
index f496459f..102444cd 100644
--- a/django/api/models/vehicle_zev_type.py
+++ b/django/api/models/vehicle_zev_type.py
@@ -7,11 +7,8 @@ class ZevType(Auditable, Description, EffectiveDates):
     vehicle_zev_code = models.CharField(
-        blank=False,
-        max_length=4,
-        null=False,
-        unique=True
+        blank=False, max_length=4, null=False, unique=True
     )

     class Meta:
-        db_table = 'vehicle_zev_type'
+        db_table = "vehicle_zev_type"
diff --git a/django/api/models/vin_decoded_information.py b/django/api/models/vin_decoded_information.py
index d0a6d669..dba4edb5 100644
--- a/django/api/models/vin_decoded_information.py
+++ b/django/api/models/vin_decoded_information.py
@@ -4,35 +4,15 @@ class VINDecodedInformation(Auditable):
-    vin = models.CharField(
-        blank=False,
-        null=False,
-        max_length=20
-    )
-    manufacturer = models.CharField(
-        max_length=500,
-        null=True,
-        blank=True
-    )
-    make = models.CharField(
-        blank=True,
-        null=True,
-        max_length=250
-    )
-    model = models.CharField(
-        blank=True,
-        max_length=250,
-        null=True
-    )
+    vin = models.CharField(blank=False, null=False, max_length=20)
+    manufacturer = models.CharField(max_length=500, null=True, blank=True)
+    make = models.CharField(blank=True, null=True, max_length=250)
+    model = models.CharField(blank=True, max_length=250, null=True)
     model_year = models.IntegerField(
         null=True,
         blank=True,
     )
-    fuel_type_primary = models.CharField(
-        blank=True,
-        max_length=250,
-        null=True
-    )
+    fuel_type_primary = models.CharField(blank=True, max_length=250, null=True)

     class Meta:
-        db_table = 'vin_decoded_information'
+        db_table = "vin_decoded_information"
diff --git a/django/api/pagination.py b/django/api/pagination.py
index 8b1b28d0..31895298 100644
--- a/django/api/pagination.py
+++ b/django/api/pagination.py
@@ -3,6 +3,7 @@
 Further reading:
 https://www.django-rest-framework.org/api-guide/pagination/
 """
+
 from rest_framework.pagination import PageNumberPagination
@@ -10,6 +11,7 @@ class StandardResultsSetPagination(PageNumberPagination):
     """
     Default page settings
     """
+
     page_size = 100
-    page_size_query_param = 'page_size'
+    page_size_query_param = "page_size"
     max_page_size = 1000
diff --git a/django/api/serializers/datasets.py b/django/api/serializers/datasets.py
index 3292f0cd..2e8152bb 100644
--- a/django/api/serializers/datasets.py
+++ b/django/api/serializers/datasets.py
@@ -1,4 +1,3 @@
-
 from rest_framework.serializers import ModelSerializer

 from api.models.datasets import Datasets
@@ -8,4 +7,4 @@ class DatasetsSerializer(ModelSerializer):

     class Meta:
         model = Datasets
-        fields = ('name', 'id')
+        fields = ("name", "id")
diff --git a/django/api/serializers/icbc_registration_data.py b/django/api/serializers/icbc_registration_data.py
index 9775bcc0..b1264c64 100644
--- a/django/api/serializers/icbc_registration_data.py
+++ b/django/api/serializers/icbc_registration_data.py
@@ -2,6 +2,7 @@
 Further reading:
 https://www.django-rest-framework.org/api-guide/serializers/
 """
+
 from rest_framework.serializers import ModelSerializer

 from api.models.icbc_registration_data import IcbcRegistrationData
@@ -12,8 +13,9 @@ class IcbcRegistrationDataSerializer(ModelSerializer):
     """
     Default Serializer for ICBC Vehicle
     """
+
     icbc_vehicle = IcbcVehicleSerializer()

     class Meta:
         model = IcbcRegistrationData
-        fields = '__all__'
+        fields = "__all__"
diff --git a/django/api/serializers/icbc_vehicle.py b/django/api/serializers/icbc_vehicle.py
index d3943b09..ca50a1bf 100644
--- a/django/api/serializers/icbc_vehicle.py
+++ b/django/api/serializers/icbc_vehicle.py
@@ -2,6 +2,7 @@
 Further reading:
 https://www.django-rest-framework.org/api-guide/serializers/
 """
+
 from rest_framework.serializers import ModelSerializer, SlugRelatedField

 from api.models.icbc_vehicle import IcbcVehicle
@@ -11,11 +12,9 @@ class IcbcVehicleSerializer(ModelSerializer):
     """
     Default Serializer for ICBC Vehicle
     """
-    model_year = SlugRelatedField(
-        read_only=True,
-        slug_field='name'
-    )
+
+    model_year = SlugRelatedField(read_only=True, slug_field="name")

     class Meta:
         model = IcbcVehicle
-        fields = '__all__'
+        fields = "__all__"
diff --git a/django/api/serializers/permission.py b/django/api/serializers/permission.py
index 73efc23e..ceeb9500 100644
--- a/django/api/serializers/permission.py
+++ b/django/api/serializers/permission.py
@@ -3,14 +3,18 @@
 from api.models.permission import Permission
 from api.models.user_permission import UserPermission

+
 class PermissionSerializer(ModelSerializer):
     description = SerializerMethodField()
+
     def get_description(self, obj):
         permission = Permission.objects.filter(id=obj.permission_id).first()
         if permission:
             return permission.description
+
     class Meta:
         model = Permission
         fields = (
-            'id', 'description',
+            "id",
+            "description",
         )
diff --git a/django/api/serializers/user.py b/django/api/serializers/user.py
index 93946f77..3324cef3 100644
--- a/django/api/serializers/user.py
+++ b/django/api/serializers/user.py
@@ -2,36 +2,46 @@
 Further reading:
 https://www.django-rest-framework.org/api-guide/serializers/
 """
-from rest_framework.serializers import ModelSerializer, SerializerMethodField, ValidationError
+
+from rest_framework.serializers import (
+    ModelSerializer,
+    SerializerMethodField,
+    ValidationError,
+)

 from api.models.user import User
 from api.models.user_permission import UserPermission
 from api.services.permissions import get_permissions_representation

+
 class UserSerializer(ModelSerializer):
     """
     Default Serializer for User
     """
+
     user_permissions = SerializerMethodField()

     def get_user_permissions(self, obj):
-        user_permission = UserPermission.objects.select_related("permission").filter(user_id=obj.id)
+        user_permission = UserPermission.objects.select_related("permission").filter(
+            user_id=obj.id
+        )
         permissions = []
         for each in user_permission:
             permissions.append(each.permission)
         return get_permissions_representation(permissions)
-    
+
     def validate_idir(self, value):
         if isinstance(value, str) and value.strip():
             return value.strip().upper()
         raise ValidationError("IDIR error!")
-    
+
     def create(self, validated_data):
         return User.objects.create(**validated_data)

     class Meta:
         model = User
-        fields = ('idir', 'user_permissions')
+        fields = ("idir", "user_permissions")
+

 # requires permissions_map object
 class UserListSerializer(ModelSerializer):
@@ -44,4 +54,4 @@ def get_user_permissions(self, obj):

     class Meta:
         model = User
-        fields = ('idir', 'user_permissions')
+        fields = ("idir", "user_permissions")
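The validate_idir hook in UserSerializer above normalizes input before create() runs. A minimal usage sketch, run inside this Django project; the payload value is made up:

from api.serializers.user import UserSerializer

serializer = UserSerializer(data={"idir": "  jdoe  "})
serializer.is_valid(raise_exception=True)  # validate_idir strips and uppercases
user = serializer.save()                   # create() -> User.objects.create(...)
assert user.idir == "JDOE"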
diff --git a/django/api/services/datasheet_template_generator.py b/django/api/services/datasheet_template_generator.py
index ea8f3402..5a5bdc3a 100644
--- a/django/api/services/datasheet_template_generator.py
+++ b/django/api/services/datasheet_template_generator.py
@@ -2,20 +2,21 @@
 from io import BytesIO
 from api.constants import *

+
 def generate_template(dataset_name):
     """
     Generates an Excel spreadsheet template for a specified dataset.
     """
     dataset_column_enum_map = {
-        'ARC Project Tracking': ARCProjectTrackingColumns,
-        'EV Charging Rebates': EVChargingRebatesColumns,
-        'Data Fleets': DataFleetsColumns,
-        'Hydrogen Fleets': HydrogenFleetsColumns,
-        'Hydrogen Fueling': HydrogenFuelingColumns,
-        'LDV Rebates': LDVRebatesColumns,
-        'Public Charging': PublicChargingColumns,
-        'Scrap It': ScrapItColumns,
-        'Specialty Use Vehicle Incentive Program': SpecialityUseVehicleIncentiveProgramColumns,
+        "ARC Project Tracking": ARCProjectTrackingColumns,
+        "EV Charging Rebates": EVChargingRebatesColumns,
+        "Data Fleets": DataFleetsColumns,
+        "Hydrogen Fleets": HydrogenFleetsColumns,
+        "Hydrogen Fueling": HydrogenFuelingColumns,
+        "LDV Rebates": LDVRebatesColumns,
+        "Public Charging": PublicChargingColumns,
+        "Scrap It": ScrapItColumns,
+        "Specialty Use Vehicle Incentive Program": SpecialityUseVehicleIncentiveProgramColumns,
     }

     if dataset_name not in dataset_column_enum_map:
@@ -26,24 +27,24 @@ def generate_template(dataset_name):
     df = pd.DataFrame(columns=columns)

     excel_buffer = BytesIO()
-    with pd.ExcelWriter(excel_buffer, engine='xlsxwriter') as writer:
+    with pd.ExcelWriter(excel_buffer, engine="xlsxwriter") as writer:
         sheet_name = dataset_name.replace(" ", "_")
         start_row = 0

         custom_sheet_names = {
-            'ARC Project Tracking': 'Project_Tracking',
-            'Specialty Use Vehicle Incentive Program': 'Sheet1',
-            'Public Charging': 'Project_applications',
-            'LDV Rebates': 'Raw Data',
-            'EV Charging Rebates': 'Updated',
-            'Hydrogen Fueling': 'Station_Tracking',
-            'Hydrogen Fleets': 'Fleets',
-            'Scrap It': 'TOP OTHER TRANSACTIONS',
+            "ARC Project Tracking": "Project_Tracking",
+            "Specialty Use Vehicle Incentive Program": "Sheet1",
+            "Public Charging": "Project_applications",
+            "LDV Rebates": "Raw Data",
+            "EV Charging Rebates": "Updated",
+            "Hydrogen Fueling": "Station_Tracking",
+            "Hydrogen Fleets": "Fleets",
+            "Scrap It": "TOP OTHER TRANSACTIONS",
         }

         custom_start_rows = {
-            'Public Charging': 2,
-            'EV Charging Rebates': 2,
-            'Scrap It': 5,
+            "Public Charging": 2,
+            "EV Charging Rebates": 2,
+            "Scrap It": 5,
         }

         sheet_name = custom_sheet_names.get(dataset_name, sheet_name)
@@ -52,4 +53,4 @@ def generate_template(dataset_name):
         df.to_excel(writer, sheet_name=sheet_name, startrow=start_row, index=False)

     excel_buffer.seek(0)
-    return excel_buffer
\ No newline at end of file
+    return excel_buffer
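generate_template above returns the workbook as an in-memory BytesIO, so a caller can write it to disk or stream it in an HTTP response. A sketch; the output filename is arbitrary:

from api.services.datasheet_template_generator import generate_template

buffer = generate_template("Hydrogen Fueling")  # any key of dataset_column_enum_map
with open("hydrogen_fueling_template.xlsx", "wb") as f:
    # Produces an empty "Station_Tracking" sheet carrying that dataset's headers.
    f.write(buffer.getvalue())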
diff --git a/django/api/services/ldv_rebates.py b/django/api/services/ldv_rebates.py
index e7e9bcd9..8fb211ee 100644
--- a/django/api/services/ldv_rebates.py
+++ b/django/api/services/ldv_rebates.py
@@ -12,41 +12,21 @@ def trim_all_columns(df):
 def import_from_xls(excel_file):
     row_count = 1
-    df = pd.read_excel(excel_file, 'Raw Data')
+    df = pd.read_excel(excel_file, "Raw Data")
     df = trim_all_columns(df)
     df = df.applymap(lambda s: s.upper() if type(s) == str else s)
-    df['CASL Consent'].replace(
-        to_replace=['YES', 'Y'],
-        value=True,
-        inplace=True
-    )
-    df['CASL Consent'].replace(
-        to_replace=['NO', 'N'],
-        value=False,
-        inplace=True
-    )
-    df['Delivered'].replace(
-        to_replace=['YES', 'Y'],
-        value=True,
-        inplace=True
-    )
-    df['Delivered'].replace(
-        to_replace=['NO', 'N', 'OEM', 'INCENTIVE_FUNDS_AVAILABLE'],
+    df["CASL Consent"].replace(to_replace=["YES", "Y"], value=True, inplace=True)
+    df["CASL Consent"].replace(to_replace=["NO", "N"], value=False, inplace=True)
+    df["Delivered"].replace(to_replace=["YES", "Y"], value=True, inplace=True)
+    df["Delivered"].replace(
+        to_replace=["NO", "N", "OEM", "INCENTIVE_FUNDS_AVAILABLE"],
         value=False,
-        inplace=True
+        inplace=True,
     )
-    df['Consent to Contact'].replace(
-        to_replace=['YES', 'Y'],
-        value=True,
-        inplace=True
-    )
-    df['Consent to Contact'].replace(
-        to_replace=['NO', 'N'],
-        value=False,
-        inplace=True
-    )
-    df.fillna('')
+    df["Consent to Contact"].replace(to_replace=["YES", "Y"], value=True, inplace=True)
+    df["Consent to Contact"].replace(to_replace=["NO", "N"], value=False, inplace=True)
+    df.fillna("")

     try:
         for _, row in df.iterrows():
@@ -77,8 +57,8 @@ def import_from_xls(excel_file):
                 incentive_amount=row["Incentive Amount"],
                 vin=row["VIN#"],
                 delivered=row["Delivered"],
-                consent_to_contact=row["Consent to Contact"]
+                consent_to_contact=row["Consent to Contact"],
             )
     except Exception as error:
-        return (error,'data',row_count)
+        return (error, "data", row_count)
     return True
diff --git a/django/api/services/minio.py b/django/api/services/minio.py
index 48c75d7f..de1b0fd5 100644
--- a/django/api/services/minio.py
+++ b/django/api/services/minio.py
@@ -7,14 +7,14 @@
     settings.MINIO_ENDPOINT,
     access_key=settings.MINIO_ACCESS_KEY,
     secret_key=settings.MINIO_SECRET_KEY,
-    secure=settings.MINIO_USE_SSL
+    secure=settings.MINIO_USE_SSL,
 )


 def get_refined_object_name(object_name):
     prefix = settings.MINIO_PREFIX
     if prefix:
-        return prefix + '/' + object_name
+        return prefix + "/" + object_name
     return object_name


@@ -22,7 +22,7 @@ def minio_get_object(object_name):
     return MINIO.presigned_get_object(
         bucket_name=settings.MINIO_BUCKET_NAME,
         object_name=get_refined_object_name(object_name),
-        expires=timedelta(seconds=3600)
+        expires=timedelta(seconds=3600),
     )


@@ -30,7 +30,7 @@ def minio_put_object(object_name):
     return MINIO.presigned_put_object(
         bucket_name=settings.MINIO_BUCKET_NAME,
         object_name=get_refined_object_name(object_name),
-        expires=timedelta(seconds=7200)
+        expires=timedelta(seconds=7200),
     )
diff --git a/django/api/services/permissions.py b/django/api/services/permissions.py
index fa597983..d4d52a49 100644
--- a/django/api/services/permissions.py
+++ b/django/api/services/permissions.py
@@ -2,6 +2,7 @@
 from api.models.user_permission import UserPermission
 from api.models.permission import Permission

+
 def create_permission_list(user):
     user = User.objects.filter(idir=user).first()
     user_permission = UserPermission.objects.filter(user_id=user.id)
@@ -15,7 +16,9 @@ def create_permission_list(user):

 def get_permissions_map(users):
     result = {}
-    user_permissions = UserPermission.objects.select_related("user", "permission").filter(user__in=users)
+    user_permissions = UserPermission.objects.select_related(
+        "user", "permission"
+    ).filter(user__in=users)
     for each in user_permissions:
         user = each.user
         permission = each.permission
@@ -30,4 +33,4 @@ def get_permissions_representation(permissions):
     if permissions is not None:
         for permission in permissions:
             result[permission.description] = True
-    return result
\ No newline at end of file
+    return result
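minio_put_object above hands out a presigned URL instead of proxying bytes through Django: whoever holds the URL can upload straight to the bucket with a plain HTTP PUT until the 7200-second expiry. A sketch assuming the requests library; the object and file names are made up:

import requests

from api.services.minio import minio_put_object

url = minio_put_object("example-object-name")
with open("upload.xlsx", "rb") as f:
    resp = requests.put(url, data=f)  # no MinIO credentials needed client-side
resp.raise_for_status()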
diff --git a/django/api/services/spreadsheet_uploader.py b/django/api/services/spreadsheet_uploader.py
index f350c7b5..6caa6452 100644
--- a/django/api/services/spreadsheet_uploader.py
+++ b/django/api/services/spreadsheet_uploader.py
@@ -3,25 +3,29 @@
 import traceback
 from django.db import transaction

+
 def get_field_default(model, field):
     field = model._meta.get_field(field)
-    
+
     if callable(field.default):
         return field.default()
     return field.default

+
 def get_nullable_fields(model):
     nullable_fields = {}
     for field in model._meta.get_fields():
-        if hasattr(field, 'null') and field.null:
+        if hasattr(field, "null") and field.null:
             nullable_fields[field.name] = True
     return nullable_fields

+
 def trim_all_columns(df):
     trim_strings = lambda x: x.strip() if isinstance(x, str) else x
     return df.applymap(trim_strings)

+
 def extract_data(excel_file, sheet_name, header_row):
     try:
         df = pd.read_excel(excel_file, sheet_name, header=header_row)
@@ -32,7 +36,13 @@ def extract_data(excel_file, sheet_name, header_row):
         raise


-def transform_data(df, dataset_columns, column_mapping_enum, preparation_functions=[], validation_functions=[]):
+def transform_data(
+    df,
+    dataset_columns,
+    column_mapping_enum,
+    preparation_functions=[],
+    validation_functions=[],
+):
     required_columns = [col.value for col in dataset_columns]

     df = df[[col for col in df.columns if col in required_columns]]
@@ -40,13 +50,13 @@ def transform_data(df, dataset_columns, column_mapping_enum, preparation_functio
     missing_columns = [col for col in required_columns if col not in df.columns]
     if missing_columns:
         raise ValueError(f"Missing columns: {', '.join(missing_columns)}")
-    
+
     for prep_func in preparation_functions:
         df = prep_func(df)

     for validate in validation_functions:
         df = validate(df)
-    
+
     column_mapping = {col.name: col.value for col in column_mapping_enum}
     # Need to use the inverse (keys) for mapping the columns to what the database expects in order to use enums
     inverse_column_mapping = {v: k for k, v in column_mapping.items()}
@@ -54,6 +64,7 @@ def transform_data(df, dataset_columns, column_mapping_enum, preparation_functio

     return df

+
 @transaction.atomic
 def load_data(df, model, field_types, replace_data, user):
     row_count = 0
@@ -73,7 +84,7 @@ def load_data(df, model, field_types, replace_data, user):
             expected_type = field_types.get(column)
             is_nullable = column in nullable_fields

-            if pd.isna(value) or value == '':
+            if pd.isna(value) or value == "":
                 if is_nullable:
                     row_dict[column] = None
                 else:
@@ -83,37 +94,52 @@ def load_data(df, model, field_types, replace_data, user):
                     row_dict[column] = float(value)
                 elif isinstance(value, float):
                     row_dict[column] = round(value, 2)
-                elif isinstance(value, str) and value.strip() != '':
+                elif isinstance(value, str) and value.strip() != "":
                     try:
                         float_value = float(value)
                         row_dict[column] = round(float_value, 2)
                     except ValueError:
-                        errors.append(f"Row {index + 1}: Unable to convert value to float for '{column}'. Value was '{value}'.")
+                        errors.append(
+                            f"Row {index + 1}: Unable to convert value to float for '{column}'. Value was '{value}'."
+                        )
                         valid_row = False
                         continue
-            elif expected_type == int and ((isinstance(value, str) and value.strip() != '') or isinstance(value, float)):
+            elif expected_type == int and (
+                (isinstance(value, str) and value.strip() != "")
+                or isinstance(value, float)
+            ):
                 try:
                     row_dict[column] = int(value)
                 except ValueError:
-                    errors.append(f"Row {index + 1}: Unable to convert value to int for '{column}'. Value was '{value}'.")
+                    errors.append(
+                        f"Row {index + 1}: Unable to convert value to int for '{column}'. Value was '{value}'."
+                    )
                     valid_row = False
                     continue
-            elif expected_type == Decimal and ((isinstance(value, int) or isinstance(value, float))):
+            elif expected_type == Decimal and (
+                (isinstance(value, int) or isinstance(value, float))
+            ):
                 try:
-                    decimal_value = Decimal(value).quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)
+                    decimal_value = Decimal(value).quantize(
+                        Decimal("0.01"), rounding=ROUND_HALF_UP
+                    )
                     row_dict[column] = decimal_value
                 except ValueError:
-                    errors.append(f"Row {index + 1}: Unable to convert value to int for '{column}'. Value was '{value}'.")
+                    errors.append(
+                        f"Row {index + 1}: Unable to convert value to int for '{column}'. Value was '{value}'."
+                    )
                     valid_row = False
                     continue
-            elif not isinstance(value, expected_type) and value != '':
-                errors.append(f"Row {index + 1}: Incorrect type for '{column}'. Expected {expected_type.__name__}, got {type(value).__name__}.")
+            elif not isinstance(value, expected_type) and value != "":
+                errors.append(
+                    f"Row {index + 1}: Incorrect type for '{column}'. Expected {expected_type.__name__}, got {type(value).__name__}."
+                )
                 valid_row = False
                 continue

         if valid_row:
             try:
-                row_dict['update_user'] = user
+                row_dict["update_user"] = user
                 model_instance = model(**row_dict)
                 model_instance.full_clean()
                 model_instance.save()
@@ -126,45 +152,58 @@ def load_data(df, model, field_types, replace_data, user):
     return {
         "row_count": row_count,
         "records_inserted": records_inserted,
-        "errors": errors
+        "errors": errors,
     }


-def import_from_xls(excel_file, sheet_name, model, dataset_columns, header_row, column_mapping_enum, field_types, replace_data, user, preparation_functions=[], validation_functions=[]):
+def import_from_xls(
+    excel_file,
+    sheet_name,
+    model,
+    dataset_columns,
+    header_row,
+    column_mapping_enum,
+    field_types,
+    replace_data,
+    user,
+    preparation_functions=[],
+    validation_functions=[],
+):
     try:
         df = extract_data(excel_file, sheet_name, header_row)
-        df = transform_data(df, dataset_columns, column_mapping_enum, preparation_functions, validation_functions)
+        df = transform_data(
+            df,
+            dataset_columns,
+            column_mapping_enum,
+            preparation_functions,
+            validation_functions,
+        )
         result = load_data(df, model, field_types, replace_data, user)

-        total_rows = result['row_count']
-        inserted_rows = result['records_inserted']
+        total_rows = result["row_count"]
+        inserted_rows = result["records_inserted"]

-        if result['errors'] and result['records_inserted'] > 0:
+        if result["errors"] and result["records_inserted"] > 0:
             return {
                 "success": True,
                 "message": f"{inserted_rows} out of {total_rows} rows successfully inserted with some errors encountered.",
-                "errors": result['errors'],
-                "rows_processed": result['row_count']
+                "errors": result["errors"],
+                "rows_processed": result["row_count"],
             }
-        elif len(result['errors']) > 0:
+        elif len(result["errors"]) > 0:
             return {
                 "success": False,
                 "message": "Errors encountered with no successful insertions.",
-                "errors": result['errors'],
-                "rows_processed": result['row_count']
+                "errors": result["errors"],
+                "rows_processed": result["row_count"],
             }
         else:
             return {
                 "success": True,
                 "message": f"All {inserted_rows} records successfully inserted out of {total_rows}.",
-                "rows_processed": result['row_count']
+                "rows_processed": result["row_count"],
             }
     except Exception as error:
         traceback.print_exc()
         error_msg = f"Unexpected error: {str(error)}"
-        return {
-            "success": False,
-            "errors": [str(error)],
-            "rows_processed": 0
-        }
-
+        return {"success": False, "errors": [str(error)], "rows_processed": 0}
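import_from_xls above is the whole extract / transform / load pipeline in one call; everything dataset-specific arrives as arguments. A sketch of a Hydrogen Fueling call — the file path, user string, and header_row are illustrative, and HydrogenFuelingColumnMapping is a hypothetical enum name (the real mapping enum lives in api.constants under whatever name DATASET_CONFIG wires up):

import api.constants as constants
from api.models.hydrogen_fueling import HydrogrenFueling  # class name as spelled in the model
from api.services.spreadsheet_uploader import import_from_xls
from api.services.spreadsheet_uploader_prep import prepare_hydrogen_fueling

result = import_from_xls(
    excel_file="/tmp/hydrogen_fueling.xlsx",
    sheet_name="Station_Tracking",
    model=HydrogrenFueling,
    dataset_columns=constants.HydrogenFuelingColumns,
    header_row=0,
    column_mapping_enum=constants.HydrogenFuelingColumnMapping,  # hypothetical name
    field_types=constants.FIELD_TYPES.get("Hydrogen Fueling"),
    replace_data=False,
    user="JDOE",  # stored into each row's update_user
    preparation_functions=[prepare_hydrogen_fueling],
)
print(result["success"], result.get("message"))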
"rows_processed": 0} diff --git a/django/api/services/spreadsheet_uploader_prep.py b/django/api/services/spreadsheet_uploader_prep.py index 5395fa9c..dfda4ed1 100644 --- a/django/api/services/spreadsheet_uploader_prep.py +++ b/django/api/services/spreadsheet_uploader_prep.py @@ -2,88 +2,105 @@ import numpy as np import pandas as pd + def prepare_arc_project_tracking(df): - df['Publicly Announced'] = df['Publicly Announced'].replace({'No': False, 'N': False, 'Yes': True, 'Y': True}) + df["Publicly Announced"] = df["Publicly Announced"].replace( + {"No": False, "N": False, "Yes": True, "Y": True} + ) return df + def prepare_hydrogen_fleets(df): df.applymap(lambda s: s.upper() if type(s) == str else s) - df.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) + df.apply(lambda x: x.fillna(0) if x.dtype.kind in "biufc" else x.fillna("")) return df + def prepare_hydrogen_fueling(df): decimal_columns = ["Capital Funding Awarded", "O&M Funding Potential"] - for column in ['700 Bar', '350 Bar']: - df[column].replace(to_replace=['NO', 'N'], value=False, inplace=True) - df[column].replace(to_replace=['YES', 'Y'], value=True, inplace=True) + for column in ["700 Bar", "350 Bar"]: + df[column].replace(to_replace=["NO", "N"], value=False, inplace=True) + df[column].replace(to_replace=["YES", "Y"], value=True, inplace=True) for field in decimal_columns: try: - df[field] = df[field].apply(lambda x: round(Decimal(x), 2) if pd.notnull(x) else None) + df[field] = df[field].apply( + lambda x: round(Decimal(x), 2) if pd.notnull(x) else None + ) except: - print({f'{field} Should be a header row'}) + print({f"{field} Should be a header row"}) return df + def prepare_ldv_rebates(df): replacements = { - "CASL Consent": {'YES': True, 'Y': True, 'NO': False, 'N': False}, - "Delivered": {'YES': True, 'Y': True, 'NO': False, 'N': False, 'OEM': False, 'INCENTIVE_FUNDS_AVAILABLE': False}, - "Consent to Contact": {'YES': True, 'Y': True, 'NO': False, 'N': False} + "CASL Consent": {"YES": True, "Y": True, "NO": False, "N": False}, + "Delivered": { + "YES": True, + "Y": True, + "NO": False, + "N": False, + "OEM": False, + "INCENTIVE_FUNDS_AVAILABLE": False, + }, + "Consent to Contact": {"YES": True, "Y": True, "NO": False, "N": False}, } for column, replacement_dict in replacements.items(): df[column].replace(replacement_dict, inplace=True) - df.fillna('') + df.fillna("") return df + def prepare_public_charging(df): - + df = df.applymap(lambda s: s.upper() if type(s) == str else s) - - df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) - - df['Pilot Project (Y/N)'].replace(to_replace=['NO', 'N'], value=False, inplace=True) - df['Pilot Project (Y/N)'].replace(to_replace=['YES', 'Y'], value=True, inplace=True) + + df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in "biufc" else x.fillna("")) + + df["Pilot Project (Y/N)"].replace(to_replace=["NO", "N"], value=False, inplace=True) + df["Pilot Project (Y/N)"].replace(to_replace=["YES", "Y"], value=True, inplace=True) return df + def prepare_scrap_it(df): df = df.applymap(lambda s: s.upper() if type(s) == str else s) - df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) + df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in "biufc" else x.fillna("")) return df + def applicant_type(row): if isinstance((row["Fleet"]), str): - return 'Fleet' + return "Fleet" elif isinstance((row["Individual"]), str): - return 'Individual' + return "Individual" else: - return '' + return "" + def 
diff --git a/django/api/services/user.py b/django/api/services/user.py
index f1c62f37..66a4584b 100644
--- a/django/api/services/user.py
+++ b/django/api/services/user.py
@@ -21,5 +21,7 @@ def update_permissions(user_permissions):
         if value == True:
             permission_objects.append(permissions_map.get(description))
     for permission_object in permission_objects:
-        user_permissions_to_add.append(UserPermission(user=user, permission=permission_object))
+        user_permissions_to_add.append(
+            UserPermission(user=user, permission=permission_object)
+        )
     UserPermission.objects.bulk_create(user_permissions_to_add)
diff --git a/django/api/services/vin_decoder.py b/django/api/services/vin_decoder.py
index b1fa8474..23e17c8b 100644
--- a/django/api/services/vin_decoder.py
+++ b/django/api/services/vin_decoder.py
@@ -12,41 +12,41 @@
 def parse_vin(vin, item):
-    us_market_data = item.get('us_market_data')
-    common_data = us_market_data.get('common_us_data')
+    us_market_data = item.get("us_market_data")
+    common_data = us_market_data.get("common_us_data")

     if common_data is None:
-        supplemental_data = item.get('supplemental_data')
-        common_data = supplemental_data.get('common_supplemental_data')
+        supplemental_data = item.get("supplemental_data")
+        common_data = supplemental_data.get("common_supplemental_data")

-    basic_data = common_data.get('basic_data')
-    engines = common_data.get('engines')
+    basic_data = common_data.get("basic_data")
+    engines = common_data.get("engines")

     model_year = None
-    if basic_data.get('year'):
-        model_year = int(basic_data.get('year'))
+    if basic_data.get("year"):
+        model_year = int(basic_data.get("year"))

-    query_error = item.get('query_error')
+    query_error = item.get("query_error")

-    if query_error.get('error_code') == '' and len(engines) > 0:
+    if query_error.get("error_code") == "" and len(engines) > 0:
         return VINDecodedInformation.objects.create(
-            fuel_type_primary=engines[0].get('fuel_type'),
-            make=basic_data.get('make'),
-            manufacturer=basic_data.get('country_of_manufacture'),
+            fuel_type_primary=engines[0].get("fuel_type"),
+            make=basic_data.get("make"),
+            manufacturer=basic_data.get("country_of_manufacture"),
             model_year=model_year,
-            model=basic_data.get('model'),
-            vin=vin
+            model=basic_data.get("model"),
+            vin=vin,
         )

     return None


 def decoder():
-    url = 'https://api.dataonesoftware.com/webservices/vindecoder/decode'
+    url = "https://api.dataonesoftware.com/webservices/vindecoder/decode"
     json_response = None

-    vin_queryset = IcbcRegistrationData.objects.values_list(
-        'vin', flat=True
-    ).order_by('vin')
+    vin_queryset = IcbcRegistrationData.objects.values_list("vin", flat=True).order_by(
+        "vin"
+    )

     pages = Paginator(vin_queryset, 50)

@@ -56,29 +56,22 @@ def decoder():
         index = 0
         for vin in page.object_list:
             index += 1
-            query_requests.update({
-                'query_request_' + str(index): {
-                    'vin': vin
-                }
-            })
+            query_requests.update({"query_request_" + str(index): {"vin": vin}})
         decoder_query_object = {
-            'decoder_settings': {
-                'display': 'full',
-                'version': '7.2.0',
-                'common_data': 'on',
-                'common_data_packs': {
-                    'basic_data': 'on',
-                    'engines': 'on'
-                }
+            "decoder_settings": {
+                "display": "full",
+                "version": "7.2.0",
+                "common_data": "on",
+                "common_data_packs": {"basic_data": "on", "engines": "on"},
             },
-            'query_requests': query_requests
+            "query_requests": query_requests,
         }
         post_data = {
-            'access_key_id': settings.DECODER_ACCESS_KEY,
-            'secret_access_key': settings.DECODER_SECRET_KEY,
-            'decoder_query': json.dumps(decoder_query_object)
+            "access_key_id": settings.DECODER_ACCESS_KEY,
+            "secret_access_key": settings.DECODER_SECRET_KEY,
+            "decoder_query": json.dumps(decoder_query_object),
         }
         try:
             response = requests.post(url, data=post_data)
@@ -88,13 +81,13 @@ def decoder():

             json_response = response.json()

-            results = json_response.get('query_responses')
+            results = json_response.get("query_responses")

             if results:
                 for request in results:
                     item = results.get(request)
                     query_request = query_requests.get(request)
-                    vin = query_request.get('vin') if query_request else None
+                    vin = query_request.get("vin") if query_request else None

                     parse_vin(vin, item)
diff --git a/django/api/services/vin_decoder_old.py b/django/api/services/vin_decoder_old.py
index 8e70114c..b201b904 100644
--- a/django/api/services/vin_decoder_old.py
+++ b/django/api/services/vin_decoder_old.py
@@ -10,20 +10,20 @@
 def decoder():
-    url = 'https://vpic.nhtsa.dot.gov/api/vehicles/DecodeVINValuesBatch/'
+    url = "https://vpic.nhtsa.dot.gov/api/vehicles/DecodeVINValuesBatch/"
     json_response = None
     model_year = None

-    vin_queryset = IcbcRegistrationData.objects.values_list(
-        'vin', flat=True
-    ).order_by('vin')
+    vin_queryset = IcbcRegistrationData.objects.values_list("vin", flat=True).order_by(
+        "vin"
+    )

     pages = Paginator(vin_queryset, 50)

     for each in pages:
         page = pages.page(each.number)
-        vin_batch = ';'.join(page.object_list)
-        post_fields = {'format': 'json', 'data': vin_batch}
+        vin_batch = ";".join(page.object_list)
+        post_fields = {"format": "json", "data": vin_batch}
         try:
             response = requests.post(url, data=post_fields)
             if not response.status_code == 200:
@@ -31,18 +31,18 @@ def decoder():
                 return

             json_response = response.json()
-            results = json_response['Results']
+            results = json_response["Results"]

             if results:
                 for item in results:
-                    if item.get('ModelYear'):
-                        model_year = int(item.get('ModelYear'))
+                    if item.get("ModelYear"):
+                        model_year = int(item.get("ModelYear"))

                     VINDecodedInformation.objects.create(
-                        fuel_type_primary=item.get('FuelTypePrimary'),
-                        make=item.get('Make'),
-                        manufacturer=item.get('Manufacturer'),
+                        fuel_type_primary=item.get("FuelTypePrimary"),
+                        make=item.get("Make"),
+                        manufacturer=item.get("Manufacturer"),
                         model_year=model_year,
-                        model=item.get('Model'),
-                        vin=item.get('VIN')
+                        model=item.get("Model"),
+                        vin=item.get("VIN"),
                     )
         except requests.exceptions.RequestException as error:
             LOGGER.error("Error: %s", error)
diff --git a/django/api/settings.py b/django/api/settings.py
index 31dfbe1e..1a6ddbb1 100644
--- a/django/api/settings.py
+++ b/django/api/settings.py
@@ -9,6 +9,7 @@
 For the full list of settings and their values, see
 https://docs.djangoproject.com/en/3.1/ref/settings/
 """
+
 import os
 import sys

@@ -22,111 +23,117 @@

 # SECURITY WARNING: keep the secret key used in production secret!
 SECRET_KEY = os.getenv(
-    'DJANGO_SECRET_KEY',
-    '#8+m(ba_(ra1=lo+-7jyp#x49l27guk*i4)w@xp7j9b9umkwh^'
+    "DJANGO_SECRET_KEY", "#8+m(ba_(ra1=lo+-7jyp#x49l27guk*i4)w@xp7j9b9umkwh^"
 )

 # SECURITY WARNING: don't run with debug turned on in production!
 DEBUG = True

-TESTING = 'test' in sys.argv
+TESTING = "test" in sys.argv

-ALLOWED_HOSTS = [os.getenv('ALLOWED_HOSTS', '*')]
+ALLOWED_HOSTS = [os.getenv("ALLOWED_HOSTS", "*")]

-CORS_ORIGIN_ALLOW_ALL = os.getenv('CORS_ORIGIN_ALLOW_ALL', 'False') == 'True'
-CORS_ORIGIN_WHITELIST = [
-    os.getenv('CORS_ORIGIN_WHITELIST', 'https://localhost:3000')
-]
+CORS_ORIGIN_ALLOW_ALL = os.getenv("CORS_ORIGIN_ALLOW_ALL", "False") == "True"
+CORS_ORIGIN_WHITELIST = [os.getenv("CORS_ORIGIN_WHITELIST", "https://localhost:3000")]

 # Application definition
 INSTALLED_APPS = [
-    'api.apps.ApiConfig',
-    'tfrs.apps.ApiConfig',
-    'metabase.apps.MetabaseConfig',
-    'corsheaders',
-    'django_filters',
-    'django.contrib.admin',
-    'django.contrib.auth',
-    'django.contrib.contenttypes',
-    'django.contrib.messages',
-    'django.contrib.sessions',
-    'django.contrib.staticfiles',
-    'rest_framework',
+    "api.apps.ApiConfig",
+    "tfrs.apps.ApiConfig",
+    "metabase.apps.MetabaseConfig",
+    "corsheaders",
+    "django_filters",
+    "django.contrib.admin",
+    "django.contrib.auth",
+    "django.contrib.contenttypes",
+    "django.contrib.messages",
+    "django.contrib.sessions",
+    "django.contrib.staticfiles",
+    "rest_framework",
 ]

 MIDDLEWARE = [
-    'corsheaders.middleware.CorsMiddleware',
-    'django.middleware.security.SecurityMiddleware',
-    'django.contrib.sessions.middleware.SessionMiddleware',
-    'django.middleware.common.CommonMiddleware',
-    'django.middleware.csrf.CsrfViewMiddleware',
-    'django.contrib.auth.middleware.AuthenticationMiddleware',
-    'django.contrib.messages.middleware.MessageMiddleware',
-    'django.middleware.clickjacking.XFrameOptionsMiddleware',
-    'whitenoise.middleware.WhiteNoiseMiddleware',
+    "corsheaders.middleware.CorsMiddleware",
+    "django.middleware.security.SecurityMiddleware",
+    "django.contrib.sessions.middleware.SessionMiddleware",
+    "django.middleware.common.CommonMiddleware",
+    "django.middleware.csrf.CsrfViewMiddleware",
+    "django.contrib.auth.middleware.AuthenticationMiddleware",
+    "django.contrib.messages.middleware.MessageMiddleware",
+    "django.middleware.clickjacking.XFrameOptionsMiddleware",
+    "whitenoise.middleware.WhiteNoiseMiddleware",
 ]

-ROOT_URLCONF = 'api.urls'
-
-TEMPLATES = [{
-    'BACKEND': 'django.template.backends.django.DjangoTemplates',
-    'DIRS': [os.path.join(BASE_DIR, "../", "frontend", "public")],
-    'APP_DIRS': True,
-    'OPTIONS': {
-        'context_processors': [
-            'django.template.context_processors.debug',
-            'django.template.context_processors.request',
-            'django.contrib.auth.context_processors.auth',
-            'django.contrib.messages.context_processors.messages',
-        ],
-    },
-}]
+ROOT_URLCONF = "api.urls"
+
+TEMPLATES = [
+    {
+        "BACKEND": "django.template.backends.django.DjangoTemplates",
+        "DIRS": [os.path.join(BASE_DIR, "../", "frontend", "public")],
+        "APP_DIRS": True,
+        "OPTIONS": {
+            "context_processors": [
+                "django.template.context_processors.debug",
+                "django.template.context_processors.request",
+                "django.contrib.auth.context_processors.auth",
+                "django.contrib.messages.context_processors.messages",
+            ],
+        },
+    }
+]

-WSGI_APPLICATION = 'api.wsgi.application'
+WSGI_APPLICATION = "api.wsgi.application"

 # Database
 # https://docs.djangoproject.com/en/3.1/ref/settings/#databases
 DATABASES = {
-    'default': {
-        'ENGINE': os.getenv('DB_ENGINE', 'django.db.backends.postgresql'),
-        'NAME': os.getenv('DB_NAME', 'postgres'),
-        'USER': os.getenv('DB_USER', 'postgres'),
-        'PASSWORD': os.getenv('DB_PASSWORD', 'postgres'),
-        'HOST': os.getenv('DB_HOST', 'db'),
-        'PORT': os.getenv('DB_PORT', '5432'),
+    "default": {
+        "ENGINE": os.getenv("DB_ENGINE", "django.db.backends.postgresql"),
+        "NAME": os.getenv("DB_NAME", "postgres"),
+        "USER": os.getenv("DB_USER", "postgres"),
+        "PASSWORD": os.getenv("DB_PASSWORD", "postgres"),
+        "HOST": os.getenv("DB_HOST", "db"),
+        "PORT": os.getenv("DB_PORT", "5432"),
     },
-    'metabase': {
-        'ENGINE': os.getenv('METABASE_DB_ENGINE', 'django.db.backends.postgresql'),
-        'NAME': os.getenv('METABASE_DB_NAME', 'metabase'),
-        'USER': os.getenv('METABASE_DB_USER', 'postgres'),
-        'PASSWORD': os.getenv('METABASE_DB_PASSWORD', 'postgres'),
-        'HOST': os.getenv('METABASE_DB_HOST', 'db'),
-        'PORT': os.getenv('METABASE_DB_PORT', '5432'),
+    "metabase": {
+        "ENGINE": os.getenv("METABASE_DB_ENGINE", "django.db.backends.postgresql"),
+        "NAME": os.getenv("METABASE_DB_NAME", "metabase"),
+        "USER": os.getenv("METABASE_DB_USER", "postgres"),
+        "PASSWORD": os.getenv("METABASE_DB_PASSWORD", "postgres"),
+        "HOST": os.getenv("METABASE_DB_HOST", "db"),
+        "PORT": os.getenv("METABASE_DB_PORT", "5432"),
     },
 }

-DATABASE_ROUTERS = ['metabase.db_router.MetabaseRouter',]
+DATABASE_ROUTERS = [
+    "metabase.db_router.MetabaseRouter",
+]

 # Password validation
 # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
-AUTH_PASSWORD_VALIDATORS = [{
-    'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
-}, {
-    'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
-}, {
-    'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
-}, {
-    'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
-}]
+AUTH_PASSWORD_VALIDATORS = [
+    {
+        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
+    },
+    {
+        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
+    },
+    {
+        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
+    },
+    {
+        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
+    },
+]

 # Internationalization
 # https://docs.djangoproject.com/en/3.1/topics/i18n/
-LANGUAGE_CODE = 'en-us'
-TIME_ZONE = 'UTC'
+LANGUAGE_CODE = "en-us"
+TIME_ZONE = "UTC"
 USE_I18N = True
 USE_L10N = True
 USE_TZ = True
@@ -135,43 +142,40 @@
 # Static files (CSS, JavaScript, Images)
 # https://docs.djangoproject.com/en/3.1/howto/static-files/
 STATICFILES_DIRS = [os.path.join(BASE_DIR, "../", "frontend", "public")]
-STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
-STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
+STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
+STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")

-STATIC_URL = '/static/'
+STATIC_URL = "/static/"

 WHITENOISE_ROOT = os.path.join(BASE_DIR, "../", "frontend", "public", "root")

 # Django Rest Framework Settings
 REST_FRAMEWORK = {
-    'DEFAULT_AUTHENTICATION_CLASSES': [
-        'api.keycloak_authentication.KeycloakAuthentication',
+    "DEFAULT_AUTHENTICATION_CLASSES": [
+        "api.keycloak_authentication.KeycloakAuthentication",
     ],
-    'DEFAULT_PERMISSION_CLASSES': (
-        'rest_framework.permissions.IsAuthenticated',),
-    'COERCE_DECIMAL_TO_STRING': False,
-    'DEFAULT_PAGINATION_CLASS': 'api.pagination.StandardResultsSetPagination',
-    'DEFAULT_FILTER_BACKENDS': [
-        'django_filters.rest_framework.DjangoFilterBackend',
-        'api.filters.order_by.RelatedOrderingFilter',
+    "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
+    "COERCE_DECIMAL_TO_STRING": False,
+    "DEFAULT_PAGINATION_CLASS": "api.pagination.StandardResultsSetPagination",
+    "DEFAULT_FILTER_BACKENDS": [
+        "django_filters.rest_framework.DjangoFilterBackend",
+        "api.filters.order_by.RelatedOrderingFilter",
     ],
 }

-KEYCLOAK_CLIENT_ID = os.getenv('KEYCLOAK_CLIENT_ID')
-KEYCLOAK_REALM = os.getenv('KEYCLOAK_REALM')
-KEYCLOAK_URL = os.getenv('KEYCLOAK_URL', 'http://localhost:8080')
+KEYCLOAK_CLIENT_ID = os.getenv("KEYCLOAK_CLIENT_ID")
+KEYCLOAK_REALM = os.getenv("KEYCLOAK_REALM")
+KEYCLOAK_URL = os.getenv("KEYCLOAK_URL", "http://localhost:8080")

-MINIO_ACCESS_KEY = os.getenv('MINIO_ROOT_USER')
-MINIO_SECRET_KEY = os.getenv('MINIO_ROOT_PASSWORD')
-MINIO_BUCKET_NAME = os.getenv('MINIO_BUCKET_NAME', 'cthub')
-MINIO_ENDPOINT = os.getenv('MINIO_ENDPOINT', None)
-MINIO_USE_SSL = bool(
-    os.getenv('MINIO_USE_SSL', 'False').lower() in ['true', 1]
-)
-MINIO_PREFIX = os.getenv('MINIO_PREFIX')
+MINIO_ACCESS_KEY = os.getenv("MINIO_ROOT_USER")
+MINIO_SECRET_KEY = os.getenv("MINIO_ROOT_PASSWORD")
+MINIO_BUCKET_NAME = os.getenv("MINIO_BUCKET_NAME", "cthub")
+MINIO_ENDPOINT = os.getenv("MINIO_ENDPOINT", None)
+MINIO_USE_SSL = bool(os.getenv("MINIO_USE_SSL", "False").lower() in ["true", 1])
+MINIO_PREFIX = os.getenv("MINIO_PREFIX")

-DECODER_ACCESS_KEY = os.getenv('DECODER_ACCESS_KEY')
-DECODER_SECRET_KEY = os.getenv('DECODER_SECRET_KEY')
+DECODER_ACCESS_KEY = os.getenv("DECODER_ACCESS_KEY")
+DECODER_SECRET_KEY = os.getenv("DECODER_SECRET_KEY")
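DATABASE_ROUTERS above points at metabase.db_router.MetabaseRouter, which this patch never shows. A hypothetical sketch of the conventional shape of such a router — pinning the metabase app to the second connection — not the project's actual implementation:

class MetabaseRouter:
    """Route the metabase app to the 'metabase' database connection."""

    route_app_labels = {"metabase"}

    def db_for_read(self, model, **hints):
        return "metabase" if model._meta.app_label in self.route_app_labels else None

    def db_for_write(self, model, **hints):
        return "metabase" if model._meta.app_label in self.route_app_labels else None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        if app_label in self.route_app_labels:
            return db == "metabase"
        return None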
diff --git a/django/api/tests/base_test.py b/django/api/tests/base_test.py
index 75c2678a..926e3ef2 100644
--- a/django/api/tests/base_test.py
+++ b/django/api/tests/base_test.py
@@ -1,6 +1,7 @@
 """
 Default instructions for the test cases
 """
+
 from django.test import TestCase


@@ -8,5 +9,5 @@ class BaseTestCase(TestCase):
     """
     Load the following fixtures for each test case
     """
-    fixtures = [
-    ]
+
+    fixtures = []
diff --git a/django/api/urls.py b/django/api/urls.py
index 1d2c48db..89abbbf9 100644
--- a/django/api/urls.py
+++ b/django/api/urls.py
@@ -13,6 +13,7 @@
     1. Import the include() function: from django.urls import include, path
     2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))
 """
+
 from django.contrib import admin
 from django.urls import path, include
 from rest_framework import routers
@@ -24,22 +25,14 @@
 ROUTER = routers.SimpleRouter(trailing_slash=False)

-ROUTER.register(
-    r'icbc-data', IcbcViewset, basename='icbc-data'
-)
+ROUTER.register(r"icbc-data", IcbcViewset, basename="icbc-data")

-ROUTER.register(
-    r'uploads', UploadViewset, basename='uploads'
-)
+ROUTER.register(r"uploads", UploadViewset, basename="uploads")

-ROUTER.register(
-    r'minio', MinioViewSet, basename='minio'
-)
-ROUTER.register(
-    r'users', UserViewSet
-)
+ROUTER.register(r"minio", MinioViewSet, basename="minio")
+ROUTER.register(r"users", UserViewSet)

 urlpatterns = [
-    path('admin/', admin.site.urls),
-    path('api/', include(ROUTER.urls)),
+    path("admin/", admin.site.urls),
+    path("api/", include(ROUTER.urls)),
 ]
diff --git a/django/api/viewsets/icbc_data.py b/django/api/viewsets/icbc_data.py
index e5000eb4..e6f10153 100644
--- a/django/api/viewsets/icbc_data.py
+++ b/django/api/viewsets/icbc_data.py
@@ -1,14 +1,14 @@
 """
 Viewset for ICBC Data
 """
+
 from rest_framework.mixins import ListModelMixin
 from rest_framework.permissions import AllowAny
 from rest_framework.viewsets import GenericViewSet

 from api.filters.icbc_data import IcbcDataFilter
 from api.models.icbc_registration_data import IcbcRegistrationData
-from api.serializers.icbc_registration_data import \
-    IcbcRegistrationDataSerializer
+from api.serializers.icbc_registration_data import IcbcRegistrationDataSerializer


 class IcbcViewset(GenericViewSet, ListModelMixin):
@@ -16,15 +16,14 @@ class IcbcViewset(GenericViewSet, ListModelMixin):
     This will build the list view and tie it with the
     serializer and permissions
     """
+
     permission_classes = (AllowAny,)
-    http_method_names = ['get']
+    http_method_names = ["get"]
     filterset_class = IcbcDataFilter

-    ordering_fields = '__all_related__'
-    ordering = ('icbc_vehicle__make',)
+    ordering_fields = "__all_related__"
+    ordering = ("icbc_vehicle__make",)

-    serializer_classes = {
-        'default': IcbcRegistrationDataSerializer
-    }
+    serializer_classes = {"default": IcbcRegistrationDataSerializer}

     def get_queryset(self):
         queryset = IcbcRegistrationData.objects.all()
@@ -35,4 +34,4 @@ def get_serializer_class(self):
         if self.action in list(self.serializer_classes.keys()):
             return self.serializer_classes.get(self.action)

-        return self.serializer_classes.get('default')
+        return self.serializer_classes.get("default")
diff --git a/django/api/viewsets/minio.py b/django/api/viewsets/minio.py
index 833f7b5c..dfa4ee53 100644
--- a/django/api/viewsets/minio.py
+++ b/django/api/viewsets/minio.py
@@ -8,17 +8,15 @@
 from api.decorators.permission import check_upload_permission
 from api.services.minio import minio_put_object

+
 class MinioViewSet(GenericViewSet):
     permission_classes = (AllowAny,)
-    http_method_names = ['get']
+    http_method_names = ["get"]

-    @action(detail=False, methods=['get'])
+    @action(detail=False, methods=["get"])
     @method_decorator(check_upload_permission())
     def put(self, request):
         object_name = uuid.uuid4().hex
         url = minio_put_object(object_name)

-        return Response({
-            'url': url,
-            'minio_object_name': object_name
-        })
+        return Response({"url": url, "minio_object_name": object_name})
api.services.spreadsheet_uploader_prep import * + class UploadViewset(GenericViewSet): permission_classes = (AllowAny,) - http_method_names = ['post', 'put', 'get'] + http_method_names = ["post", "put", "get"] - @action(detail=False, methods=['get']) + @action(detail=False, methods=["get"]) def datasets_list(self, request): - - incomplete_datasets = ['LDV Rebates', 'Specialty Use Vehicle Incentive Program', 'Public Charging', 'EV Charging Rebates', 'Hydrogen Fueling', 'Hydrogen Fleets', 'ARC Project Tracking', 'Data Fleets', 'Scrap It'] + + incomplete_datasets = [ + "LDV Rebates", + "Specialty Use Vehicle Incentive Program", + "Public Charging", + "EV Charging Rebates", + "Hydrogen Fueling", + "Hydrogen Fleets", + "ARC Project Tracking", + "Data Fleets", + "Scrap It", + ] datasets = Datasets.objects.all().exclude(name__in=incomplete_datasets) serializer = DatasetsSerializer(datasets, many=True, read_only=True) return Response(serializer.data) - @action(detail=False, methods=['post']) + @action(detail=False, methods=["post"]) @method_decorator(check_upload_permission()) def import_data(self, request): - filename = request.data.get('filename') - dataset_selected = request.data.get('datasetSelected') - replace_data = request.data.get('replace', False) + filename = request.data.get("filename") + dataset_selected = request.data.get("datasetSelected") + replace_data = request.data.get("replace", False) try: url = minio_get_object(filename) @@ -44,55 +55,62 @@ def import_data(self, request): config = constants.DATASET_CONFIG.get(dataset_selected) if not config: - return Response(f"Dataset '{dataset_selected}' is not supported.", status=status.HTTP_400_BAD_REQUEST) - model = config['model'] - columns = config.get('columns') - mapping = config.get('column_mapping') - sheet_name = config.get('sheet_name', 'Sheet1') # Default to 'Sheet1' if not specified - preparation_functions = config.get('preparation_functions', []) - validation_functions = config.get('validation_functions', []) - header_row = config.get('header_row', 0) - + return Response( + f"Dataset '{dataset_selected}' is not supported.", + status=status.HTTP_400_BAD_REQUEST, + ) + model = config["model"] + columns = config.get("columns") + mapping = config.get("column_mapping") + sheet_name = config.get( + "sheet_name", "Sheet1" + ) # Default to 'Sheet1' if not specified + preparation_functions = config.get("preparation_functions", []) + validation_functions = config.get("validation_functions", []) + header_row = config.get("header_row", 0) result = import_from_xls( excel_file=filename, sheet_name=sheet_name, model=model, - header_row = header_row, + header_row=header_row, preparation_functions=preparation_functions, validation_functions=validation_functions, dataset_columns=columns, column_mapping_enum=mapping, field_types=constants.FIELD_TYPES.get(dataset_selected), replace_data=replace_data, - user = request.user + user=request.user, ) - if not result['success']: + if not result["success"]: return Response(result, status=status.HTTP_400_BAD_REQUEST) return Response(result, status=status.HTTP_201_CREATED) except Exception as e: - return Response(f"An error occurred: {str(e)}", status=status.HTTP_400_BAD_REQUEST) - - finally: + return Response( + f"An error occurred: {str(e)}", status=status.HTTP_400_BAD_REQUEST + ) + + finally: os.remove(filename) minio_remove_object(filename) - - - @action(detail=False, methods=['get']) + + @action(detail=False, methods=["get"]) def download_dataset(self, request): - dataset_name = 
request.GET.get('datasetSelected') + dataset_name = request.GET.get("datasetSelected") if not dataset_name: return HttpResponse("Dataset name is required.", status=400) - + try: excel_file = generate_template(dataset_name) response = HttpResponse( excel_file.read(), - content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' + content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + ) + response["Content-Disposition"] = ( + f'attachment; filename="{dataset_name}.xlsx"' ) - response['Content-Disposition'] = f'attachment; filename="{dataset_name}.xlsx"' return response except ValueError as e: - return HttpResponse(str(e), status=400) \ No newline at end of file + return HttpResponse(str(e), status=400) diff --git a/django/api/viewsets/user.py b/django/api/viewsets/user.py index 326ff90e..f4e23aef 100644 --- a/django/api/viewsets/user.py +++ b/django/api/viewsets/user.py @@ -11,39 +11,40 @@ from api.services.user import update_permissions from api.services.permissions import get_permissions_map + class UserViewSet(GenericViewSet, CreateModelMixin, DestroyModelMixin): """ This viewset automatically provides `list`, `create`, `retrieve`, and `update` actions. """ + permission_classes = (AllowAny,) - http_method_names = ['get', 'post', 'put', 'patch', 'delete'] + http_method_names = ["get", "post", "put", "patch", "delete"] queryset = User.objects.all() - lookup_field = 'idir' + lookup_field = "idir" serializer_classes = { - 'default': UserSerializer, + "default": UserSerializer, } - def get_serializer_class(self): if self.action in list(self.serializer_classes.keys()): return self.serializer_classes[self.action] - return self.serializer_classes['default'] - + return self.serializer_classes["default"] @method_decorator(check_admin_permission()) def destroy(self, request, idir=None): if request.user == idir: - return Response('you cannot delete your own idir', status=status.HTTP_400_BAD_REQUEST ) + return Response( + "you cannot delete your own idir", status=status.HTTP_400_BAD_REQUEST + ) return super().destroy(self, request) - @method_decorator(check_admin_permission()) def create(self, request): return super().create(request) - @action(detail=False, methods=['put']) + @action(detail=False, methods=["put"]) @method_decorator(check_admin_permission()) def update_permissions(self, request): user_permissions = request.data @@ -51,8 +52,8 @@ def update_permissions(self, request): update_permissions(user_permissions) except Exception as e: return Response(str(e), status=status.HTTP_400_BAD_REQUEST) - return Response('User permissions were updated!', status=status.HTTP_200_OK) - + return Response("User permissions were updated!", status=status.HTTP_200_OK) + @action(detail=False) def current(self, request): """ @@ -61,9 +62,11 @@ def current(self, request): user = User.objects.filter(idir=request.user).first() serializer = self.get_serializer(user) return Response(serializer.data) - + @method_decorator(check_admin_permission()) def list(self, request): - users = User.objects.all().order_by('idir') - serializer = UserListSerializer(users, many=True, context={"permissions_map": get_permissions_map(users)}) - return Response(serializer.data) \ No newline at end of file + users = User.objects.all().order_by("idir") + serializer = UserListSerializer( + users, many=True, context={"permissions_map": get_permissions_map(users)} + ) + return Response(serializer.data) diff --git a/django/api/wsgi.py b/django/api/wsgi.py index 0d8bcb9d..2613f828 100644 --- 
a/django/api/wsgi.py +++ b/django/api/wsgi.py @@ -11,6 +11,6 @@ from django.core.wsgi import get_wsgi_application -os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'api.settings') +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.settings") application = get_wsgi_application() diff --git a/django/requirements.txt b/django/requirements.txt index b66bee95..f17c6e41 100644 --- a/django/requirements.txt +++ b/django/requirements.txt @@ -1,3 +1,4 @@ +black==24.3.0 Django==3.1.6 psycopg2-binary==2.8.6 djangorestframework==3.12.2 diff --git a/frontend/package.json b/frontend/package.json index c444fd99..d528a73f 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -32,6 +32,7 @@ }, "devDependencies": { "css-loader": "^5.2.7", + "prettier": "3.2.5", "sass": "^1.43.4", "sass-loader": "^11.1.1", "source-map-loader": "^2.0.2", diff --git a/frontend/src/app/components/AlertDialog.js b/frontend/src/app/components/AlertDialog.js index c0b052c4..fe9977b0 100644 --- a/frontend/src/app/components/AlertDialog.js +++ b/frontend/src/app/components/AlertDialog.js @@ -1,11 +1,11 @@ -import PropTypes from 'prop-types'; -import * as React from 'react'; -import Button from '@mui/material/Button'; -import Dialog from '@mui/material/Dialog'; -import DialogActions from '@mui/material/DialogActions'; -import DialogContent from '@mui/material/DialogContent'; -import DialogContentText from '@mui/material/DialogContentText'; -import DialogTitle from '@mui/material/DialogTitle'; +import PropTypes from "prop-types"; +import * as React from "react"; +import Button from "@mui/material/Button"; +import Dialog from "@mui/material/Dialog"; +import DialogActions from "@mui/material/DialogActions"; +import DialogContent from "@mui/material/DialogContent"; +import DialogContentText from "@mui/material/DialogContentText"; +import DialogTitle from "@mui/material/DialogTitle"; const AlertDialog = (props) => { const { @@ -15,37 +15,35 @@ const AlertDialog = (props) => { cancelText, handleCancel, confirmText, - handleConfirm + handleConfirm, } = props; if (!open) { - return null + return null; } return (
    <Dialog
      open={open}
      onClose={() => {
-        handleCancel()
+        handleCancel();
      }}
      aria-labelledby="alert-dialog-title"
      aria-describedby="alert-dialog-description"
    >
-      <DialogTitle id="alert-dialog-title">
-        {title}
-      </DialogTitle>
+      <DialogTitle id="alert-dialog-title">{title}</DialogTitle>
      <DialogContent>
        <DialogContentText id="alert-dialog-description">
          {dialogue}
        </DialogContentText>
      </DialogContent>
-
); -} +}; AlertDialog.defaultProps = { - dialogue: '', - title: '', + dialogue: "", + title: "", }; AlertDialog.propTypes = { open: PropTypes.bool.isRequired, @@ -72,7 +70,7 @@ AlertDialog.propTypes = { cancelText: PropTypes.string.isRequired, handleCancel: PropTypes.func.isRequired, confirmText: PropTypes.string.isRequired, - handleConfirm: PropTypes.func.isRequired + handleConfirm: PropTypes.func.isRequired, }; -export default AlertDialog +export default AlertDialog; diff --git a/frontend/src/app/components/App.js b/frontend/src/app/components/App.js index 3153bd5d..9c42460a 100644 --- a/frontend/src/app/components/App.js +++ b/frontend/src/app/components/App.js @@ -1,24 +1,20 @@ -import React from 'react'; -import { - BrowserRouter as Router, - Route, - Switch, -} from 'react-router-dom'; +import React from "react"; +import { BrowserRouter as Router, Route, Switch } from "react-router-dom"; -import IcbcDataRouter from '../../icbc_data/router'; -import UploadRouter from '../../uploads/router'; -import DashboardContainer from '../../dashboard/DashboardContainer'; -import useKeycloak from '../utilities/useKeycloak' -import Login from './Login' -import Layout from './Layout' -import { ENABLE_KEYCLOAK } from '../../config'; +import IcbcDataRouter from "../../icbc_data/router"; +import UploadRouter from "../../uploads/router"; +import DashboardContainer from "../../dashboard/DashboardContainer"; +import useKeycloak from "../utilities/useKeycloak"; +import Login from "./Login"; +import Layout from "./Layout"; +import { ENABLE_KEYCLOAK } from "../../config"; const App = () => { - const keycloak = useKeycloak() - + const keycloak = useKeycloak(); + if (ENABLE_KEYCLOAK && !keycloak.authenticated) { - const redirectUri = window.location.href - return + const redirectUri = window.location.href; + return ; } return ( diff --git a/frontend/src/app/components/Footer.js b/frontend/src/app/components/Footer.js index 677de896..cc6b28b1 100644 --- a/frontend/src/app/components/Footer.js +++ b/frontend/src/app/components/Footer.js @@ -1,42 +1,42 @@ -import React from 'react' +import React from "react"; const Footer = () => { - return ( -
- ); - }; - - export default Footer; \ No newline at end of file + return ( + + ); +}; + +export default Footer; diff --git a/frontend/src/app/components/Header.js b/frontend/src/app/components/Header.js index d4fb7cbd..c18cdbd7 100644 --- a/frontend/src/app/components/Header.js +++ b/frontend/src/app/components/Header.js @@ -1,25 +1,25 @@ -import React from 'react'; -import logo from '../styles/images/BCID_H_rgb_rev.png'; -import Logout from './Logout' +import React from "react"; +import logo from "../styles/images/BCID_H_rgb_rev.png"; +import Logout from "./Logout"; const Header = () => { - return ( -
-
-
-
- -
+ return ( + + ); +}; -export default Header; \ No newline at end of file +export default Header; diff --git a/frontend/src/app/components/KeycloakProvider.js b/frontend/src/app/components/KeycloakProvider.js index 269213e6..e0e5bd22 100644 --- a/frontend/src/app/components/KeycloakProvider.js +++ b/frontend/src/app/components/KeycloakProvider.js @@ -1,28 +1,33 @@ -import React, { useState, useEffect } from 'react' -import { KeycloakContext } from '../../contexts' -import { ENABLE_KEYCLOAK } from '../../config' +import React, { useState, useEffect } from "react"; +import { KeycloakContext } from "../../contexts"; +import { ENABLE_KEYCLOAK } from "../../config"; -const KeycloakProvider = ({authClient, initOptions, LoadingComponent, children}) => { - const [loading, setLoading] = useState(ENABLE_KEYCLOAK ? true : false) - const [keycloak, setKeycloak] = useState({}) +const KeycloakProvider = ({ + authClient, + initOptions, + LoadingComponent, + children, +}) => { + const [loading, setLoading] = useState(ENABLE_KEYCLOAK ? true : false); + const [keycloak, setKeycloak] = useState({}); useEffect(() => { if (ENABLE_KEYCLOAK) { authClient.init(initOptions).then(() => { - setKeycloak(authClient) - setLoading(false) - }) + setKeycloak(authClient); + setLoading(false); + }); } - }, [authClient, initOptions]) + }, [authClient, initOptions]); if (loading) { - return + return ; } return ( {children} - ) -} + ); +}; -export default KeycloakProvider \ No newline at end of file +export default KeycloakProvider; diff --git a/frontend/src/app/components/Layout.js b/frontend/src/app/components/Layout.js index b8c1aad8..4898eb4e 100644 --- a/frontend/src/app/components/Layout.js +++ b/frontend/src/app/components/Layout.js @@ -1,15 +1,15 @@ -import React from 'react' -import Header from './Header' -import Footer from './Footer' +import React from "react"; +import Header from "./Header"; +import Footer from "./Footer"; const Layout = ({ children }) => { return (
-
{children}
+
{children}
- ) -} + ); +}; -export default Layout \ No newline at end of file +export default Layout; diff --git a/frontend/src/app/components/Loading.js b/frontend/src/app/components/Loading.js index a6d7fe57..d59e9005 100644 --- a/frontend/src/app/components/Loading.js +++ b/frontend/src/app/components/Loading.js @@ -1,10 +1,8 @@ -import React from 'react'; -import { CircularProgress } from '@mui/material'; +import React from "react"; +import { CircularProgress } from "@mui/material"; -const Loading = ({ color = 'inherit' }) => { - return ( - - ); +const Loading = ({ color = "inherit" }) => { + return ; }; export default Loading; diff --git a/frontend/src/app/components/Login.js b/frontend/src/app/components/Login.js index 3e7f6f3f..f268add5 100644 --- a/frontend/src/app/components/Login.js +++ b/frontend/src/app/components/Login.js @@ -1,16 +1,16 @@ -import React from 'react'; -import PropTypes from 'prop-types'; -import useKeycloak from '../../app/utilities/useKeycloak'; +import React from "react"; +import PropTypes from "prop-types"; +import useKeycloak from "../../app/utilities/useKeycloak"; const Login = (props) => { const { redirectUri } = props; const loginOptions = { - idpHint: 'idir' - } + idpHint: "idir", + }; if (redirectUri) { - loginOptions.redirectUri = redirectUri + loginOptions.redirectUri = redirectUri; } - const keycloak = useKeycloak() + const keycloak = useKeycloak(); return (
@@ -21,7 +21,12 @@ const Login = (props) => {
- @@ -33,7 +38,7 @@ const Login = (props) => { }; Login.propTypes = { - redirectUri: PropTypes.string + redirectUri: PropTypes.string, }; export default Login; diff --git a/frontend/src/app/components/Logout.js b/frontend/src/app/components/Logout.js index 31a4031f..1cd54a80 100644 --- a/frontend/src/app/components/Logout.js +++ b/frontend/src/app/components/Logout.js @@ -1,25 +1,25 @@ -import React from 'react'; -import useKeycloak from '../utilities/useKeycloak' +import React from "react"; +import useKeycloak from "../utilities/useKeycloak"; const Logout = () => { const keycloak = useKeycloak(); if (keycloak.authenticated) { const kcToken = keycloak.tokenParsed; return ( -
- {'Logged in as: ' + kcToken.idir_username + ' |'} +
+ {"Logged in as: " + kcToken.idir_username + " |"}
- ) + ); } - return null -} + return null; +}; -export default Logout +export default Logout; diff --git a/frontend/src/app/components/ReactTable.js b/frontend/src/app/components/ReactTable.js index 4b01c049..dec5cf63 100644 --- a/frontend/src/app/components/ReactTable.js +++ b/frontend/src/app/components/ReactTable.js @@ -1,84 +1,84 @@ -import PropTypes from 'prop-types'; -import React, { useEffect, useState } from 'react'; +import PropTypes from "prop-types"; +import React, { useEffect, useState } from "react"; import { useAsyncDebounce, useFilters, usePagination, useSortBy, useTable, -} from 'react-table'; +} from "react-table"; // material-ui core components -import CircularProgress from '@mui/material/CircularProgress'; -import IconButton from '@mui/material/IconButton'; -import InputAdornment from '@mui/material/InputAdornment'; -import Popover from '@mui/material/Popover'; -import Table from '@mui/material/Table'; -import TableBody from '@mui/material/TableBody'; -import TableCell from '@mui/material/TableCell'; -import TableContainer from '@mui/material/TableContainer'; -import TableHead from '@mui/material/TableHead'; -import TablePagination from '@mui/material/TablePagination'; -import TableRow from '@mui/material/TableRow'; -import TableSortLabel from '@mui/material/TableSortLabel'; -import TextField from '@mui/material/TextField'; -import Tooltip from '@mui/material/Tooltip'; -import { makeStyles } from '@mui/styles'; +import CircularProgress from "@mui/material/CircularProgress"; +import IconButton from "@mui/material/IconButton"; +import InputAdornment from "@mui/material/InputAdornment"; +import Popover from "@mui/material/Popover"; +import Table from "@mui/material/Table"; +import TableBody from "@mui/material/TableBody"; +import TableCell from "@mui/material/TableCell"; +import TableContainer from "@mui/material/TableContainer"; +import TableHead from "@mui/material/TableHead"; +import TablePagination from "@mui/material/TablePagination"; +import TableRow from "@mui/material/TableRow"; +import TableSortLabel from "@mui/material/TableSortLabel"; +import TextField from "@mui/material/TextField"; +import Tooltip from "@mui/material/Tooltip"; +import { makeStyles } from "@mui/styles"; // material-ui icons -import FilterListIcon from '@mui/icons-material/FilterList'; -import MoreVert from '@mui/icons-material/MoreVert'; +import FilterListIcon from "@mui/icons-material/FilterList"; +import MoreVert from "@mui/icons-material/MoreVert"; // components -import ReactTablePagination from './ReactTablePagination'; +import ReactTablePagination from "./ReactTablePagination"; const useStyles = makeStyles(() => ({ moreOptions: { - alignItems: 'center', - display: 'inline-flex', - height: '100%', - position: 'absolute', + alignItems: "center", + display: "inline-flex", + height: "100%", + position: "absolute", right: 0, top: 0, - zIndex: '999', - '&.active': { - opacity: '1 !important', + zIndex: "999", + "&.active": { + opacity: "1 !important", }, }, cellMoreOptions: { - '& $moreOptions': { + "& $moreOptions": { opacity: 0, }, - '&:hover $moreOptions': { + "&:hover $moreOptions": { opacity: 0.5, }, }, popoverContainer: { - padding: '0.75rem', + padding: "0.75rem", }, popoverInputFilter: { - '& input': { - padding: '0.75rem', + "& input": { + padding: "0.75rem", }, }, reactTable: { - position: 'relative', - '& caption': { - backgroundColor: 'rgba(255, 255, 255, 0.7)', + position: "relative", + "& caption": { + backgroundColor: "rgba(255, 255, 255, 0.7)", bottom: 0, left: 0, padding: 0, - 
position: 'absolute', + position: "absolute", right: 0, top: 0, zIndex: 999, }, - '& caption > div': { - alignItems: 'center', - display: 'flex', - height: '100%', - justifyContent: 'center', - width: '100%', + "& caption > div": { + alignItems: "center", + display: "flex", + height: "100%", + justifyContent: "center", + width: "100%", }, }, })); @@ -111,26 +111,27 @@ const ReactTable = (props) => { page, prepareRow, setPageSize, - state: { - pageIndex, - pageSize, - sortBy, - }, - } = useTable({ - columns, - data, - disableSortBy: !sortable, - disableSortRemove: true, - initialState: { - filters: [], - pageIndex: 0, - sortBy: defaultSortBy, + state: { pageIndex, pageSize, sortBy }, + } = useTable( + { + columns, + data, + disableSortBy: !sortable, + disableSortRemove: true, + initialState: { + filters: [], + pageIndex: 0, + sortBy: defaultSortBy, + }, + manualFilters: true, + manualPagination: true, + manualSortBy: true, + pageCount: controlledPageCount, }, - manualFilters: true, - manualPagination: true, - manualSortBy: true, - pageCount: controlledPageCount, - }, useFilters, useSortBy, usePagination); + useFilters, + useSortBy, + usePagination, + ); const handleFilterColumn = useAsyncDebounce((event) => { const { value } = event.target; @@ -140,7 +141,7 @@ const ReactTable = (props) => { filterBy = filterColumn.id; } - const foundIndex = filters.findIndex((filter) => (filter.id === filterBy)); + const foundIndex = filters.findIndex((filter) => filter.id === filterBy); if (foundIndex >= 0) { filters[foundIndex].value = value; @@ -156,20 +157,24 @@ const ReactTable = (props) => { const handleOpenMoreOptions = (event) => { event.stopPropagation(); - event.currentTarget.parentNode.classList.add('active'); + event.currentTarget.parentNode.classList.add("active"); setAnchorEl(event.currentTarget); }; const handleClose = () => { - anchorEl.parentNode.classList.remove('active'); + anchorEl.parentNode.classList.remove("active"); setAnchorEl(null); }; useEffect(() => { onFetchData({ - columns, pageIndex, pageSize, sortBy, filters, + columns, + pageIndex, + pageSize, + sortBy, + filters, }); }, [onFetchData, pageIndex, pageSize, sortBy, filters]); @@ -178,7 +183,12 @@ const ReactTable = (props) => { return ( <> - +
{loading && (
@@ -192,15 +202,17 @@ const ReactTable = (props) => { {headerGroup.headers.map((column) => ( - {column.render('Header')} + {column.render("Header")} {filterable && column.filterable !== false && ( @@ -226,23 +238,24 @@ const ReactTable = (props) => { - {page && page.map((row) => { - prepareRow(row); + {page && + page.map((row) => { + prepareRow(row); - return ( - - {row.cells.map((cell) => ( - - {cell.render('Cell')} - - ))} - - ); - })} + return ( + + {row.cells.map((cell) => ( + + {cell.render("Cell")} + + ))} + + ); + })}
@@ -265,23 +278,32 @@ const ReactTable = (props) => {
filter.id === filterColumn.filterBy || filter.id === filterColumn.id, - ) ? filters.find((filter) => filter.id === filterColumn.filterBy || filter.id === filterColumn.id).value : '' + filterColumn && + filters.find( + (filter) => + filter.id === filterColumn.filterBy || + filter.id === filterColumn.id, + ) + ? filters.find( + (filter) => + filter.id === filterColumn.filterBy || + filter.id === filterColumn.id, + ).value + : "" } InputLabelProps={{ shrink: true, @@ -294,7 +316,7 @@ const ReactTable = (props) => { ), }} label={`Filter by ${filterColumn && filterColumn.Header}`} - onChange={(event) => (handleFilterColumn(event))} + onChange={(event) => handleFilterColumn(event)} placeholder={filterColumn && filterColumn.Header} type="text" variant="outlined" @@ -308,7 +330,7 @@ const ReactTable = (props) => { ReactTable.defaultProps = { defaultSortBy: [], filterable: true, - size: 'small', + size: "small", sortable: true, }; diff --git a/frontend/src/app/components/ReactTablePagination.js b/frontend/src/app/components/ReactTablePagination.js index 3a5246fb..665e372b 100644 --- a/frontend/src/app/components/ReactTablePagination.js +++ b/frontend/src/app/components/ReactTablePagination.js @@ -1,31 +1,26 @@ -import PropTypes from 'prop-types'; -import React from 'react'; -import IconButton from '@mui/material/IconButton'; -import MenuItem from '@mui/material/MenuItem'; -import Select from '@mui/material/Select'; -import { makeStyles } from '@mui/styles'; +import PropTypes from "prop-types"; +import React from "react"; +import IconButton from "@mui/material/IconButton"; +import MenuItem from "@mui/material/MenuItem"; +import Select from "@mui/material/Select"; +import { makeStyles } from "@mui/styles"; -import KeyboardArrowLeft from '@mui/icons-material/KeyboardArrowLeft'; -import KeyboardArrowRight from '@mui/icons-material/KeyboardArrowRight'; +import KeyboardArrowLeft from "@mui/icons-material/KeyboardArrowLeft"; +import KeyboardArrowRight from "@mui/icons-material/KeyboardArrowRight"; const useStyles = makeStyles(() => ({ pagination: { flexShrink: 0, - '& .MuiSelect-select': { - paddingBottom: '0.5rem', - paddingTop: '0.5rem', + "& .MuiSelect-select": { + paddingBottom: "0.5rem", + paddingTop: "0.5rem", }, }, })); const ReactTablePagination = (props) => { const classes = useStyles(); - const { - count, - onPageChange, - page, - rowsPerPage, - } = props; + const { count, onPageChange, page, rowsPerPage } = props; const pagesCount = Math.ceil(count / rowsPerPage); @@ -53,7 +48,9 @@ const ReactTablePagination = (props) => { value={page} > {Array.from(Array(pagesCount).keys()).map((value) => ( - {(value + 1)} + + {value + 1} + ))} diff --git a/frontend/src/app/styles/App.scss b/frontend/src/app/styles/App.scss index ed163a71..86664c1e 100644 --- a/frontend/src/app/styles/App.scss +++ b/frontend/src/app/styles/App.scss @@ -2,16 +2,16 @@ Base styling for "App" ie General Container */ -$bg-alt-black: #1E1F21; +$bg-alt-black: #1e1f21; $bg-primary: #244074; $bg-black: #000; -$bg-white: #FFF; +$bg-white: #fff; $default-text-color: #000; $default-table-border-color: rgba(249, 249, 249, 0.2); -$default-table-color: #F9F9F9; -$highlight: #0078FD; -$default-link-blue: #568DBA; -$default-background-grey: #F2F2F2; +$default-table-color: #f9f9f9; +$highlight: #0078fd; +$default-link-blue: #568dba; +$default-background-grey: #f2f2f2; $md: 991px; $button-background-blue: #003366; @@ -38,7 +38,7 @@ $button-background-blue: #003366; } .logo { - background: url('../static/logo.png') center center no-repeat; + 
background: url("../static/logo.png") center center no-repeat; background-size: 100% auto; height: 100px; width: 200px; @@ -50,18 +50,19 @@ $button-background-blue: #003366; } } - body { background-color: $bg-white; color: $default-text-color; - font-family: 'Roboto', 'Open Sans', sans-serif; + font-family: "Roboto", "Open Sans", sans-serif; font-weight: 400; height: 100%; margin: 0; } -h2, h3, h4 { - font-family: 'Roboto', 'Open Sans', sans-serif; +h2, +h3, +h4 { + font-family: "Roboto", "Open Sans", sans-serif; color: #003366; font-weight: 500; } @@ -87,7 +88,6 @@ h4 { &:disabled { background-color: white !important; - } } diff --git a/frontend/src/app/styles/FileUpload.scss b/frontend/src/app/styles/FileUpload.scss index 29c37000..ee6608c5 100644 --- a/frontend/src/app/styles/FileUpload.scss +++ b/frontend/src/app/styles/FileUpload.scss @@ -2,7 +2,7 @@ background-color: $default-background-grey; } .bordered { - border: 1px solid #E0E0E0; + border: 1px solid #e0e0e0; padding: 0.5rem; } #dataset-select { @@ -10,19 +10,17 @@ margin-top: 1rem; padding: 1rem; background-color: $default-background-grey; - } #trash-button { height: 15px; } .upload-list { font-weight: normal; - } .upload-row { background-color: $default-background-grey; - padding: .5rem; - margin: .5rem; + padding: 0.5rem; + margin: 0.5rem; } .file-upload { border: 1px dashed $button-background-blue; @@ -30,7 +28,6 @@ padding: 1rem; text-align: center; flex-direction: column; - } .upload-bar { diff --git a/frontend/src/app/styles/Footer.scss b/frontend/src/app/styles/Footer.scss index 80595d1a..9065df47 100644 --- a/frontend/src/app/styles/Footer.scss +++ b/frontend/src/app/styles/Footer.scss @@ -1,66 +1,71 @@ footer { - position: relative; - width: 100%; - bottom: 0; - background-color: #036; - border-top: 2px solid #fcba19; + position: relative; + width: 100%; + bottom: 0; + background-color: #036; + border-top: 2px solid #fcba19; + color: #fff; + font-family: + ‘BCSans’, + ‘Noto Sans’, + Verdana, + Arial, + sans-serif; + .container { + display: flex; + justify-content: center; + flex-direction: column; + text-align: center; + height: 46px; + } + ul { + display: flex; + flex-direction: row; + flex-wrap: wrap; + margin: 0; color: #fff; - font-family: ‘BCSans’, ‘Noto Sans’, Verdana, Arial, sans-serif; - .container { - display: flex; - justify-content: center; - flex-direction: column; - text-align: center; - height: 46px; + list-style: none; + align-items: center; + height: 100%; + li a { + font-size: 0.813em; + font-weight: normal; /* 400 */ + color: #fff; + border-right: 1px solid #4b5e7e; + padding-left: 5px; + padding-right: 5px; + text-decoration: none; } - ul { - display: flex; - flex-direction: row; - flex-wrap: wrap; - margin: 0; + a:hover { color: #fff; - list-style: none; - align-items: center; - height: 100%; - li a { - font-size: 0.813em; - font-weight: normal; /* 400 */ - color: #fff; - border-right: 1px solid #4b5e7e; - padding-left: 5px; - padding-right: 5px; - text-decoration: none; - } - a:hover { - color: #fff; - text-decoration: underline; - } - a:focus { - outline: 4px solid #3b99fc; - outline-offset: 1px; - } + text-decoration: underline; } - } - @media (max-width: 600px) { - .footer { - .container { - ul li a { - font-size: 0.8rem; - line-height: 0.9rem; - } - } + a:focus { + outline: 4px solid #3b99fc; + outline-offset: 1px; } } - @media (max-width: 485px) { - .footer { - height: 3.5rem; - .container { - justify-content: space-evenly; +} +@media (max-width: 600px) { + .footer { + .container { + ul li a { 
+ font-size: 0.8rem; + line-height: 0.9rem; } } } - @media (max-width: 282px) { - .footer { - height: 6rem; +} +@media (max-width: 485px) { + .footer { + height: 3.5rem; + .container { + justify-content: space-evenly; } - } \ No newline at end of file + } +} +@media (max-width: 282px) { + .footer { + height: 6rem; + } +} diff --git a/frontend/src/app/styles/Header.scss b/frontend/src/app/styles/Header.scss index d287abc6..64efbb0b 100644 --- a/frontend/src/app/styles/Header.scss +++ b/frontend/src/app/styles/Header.scss @@ -1,108 +1,108 @@ .cthub-banner { - background-color: $banner-blue; - display: flex; - border-bottom: 3px solid $border-orange; - flex-direction: row; - justify-content: space-between; - align-items: center; - padding-left: 6rem; - padding-right: 6rem; + background-color: $banner-blue; + display: flex; + border-bottom: 3px solid $border-orange; + flex-direction: row; + justify-content: space-between; + align-items: center; + padding-left: 6rem; + padding-right: 6rem; +} + +.cthub-banner .left, +.cthub-banner .right { + display: flex; + flex-direction: row; + align-items: center; + font-size: 1.25rem; + a { + color: $white; + text-decoration: none; + font-weight: bold; } - - .cthub-banner .left, - .cthub-banner .right { - display: flex; - flex-direction: row; - align-items: center; - font-size: 1.25rem; - a { + img { + height: 4.5rem; + width: auto; + } + .logout { + color: $white; + font-size: 1rem; + .logoutButton { + cursor: pointer; + border: none; + background: none; + font: inherit; + color: inherit; + } + } +} + +.page-header { + background-color: $banner-blue; + width: 100%; + //height: 191px; + + .title { + padding-left: 6rem; + h1 { color: $white; - text-decoration: none; - font-weight: bold; } + } +} + +@media (max-width: 992px) { + .cthub-banner { + font-weight: normal; + flex-direction: column; img { - height: 4.5rem; - width: auto; + margin-left: 1rem; + height: 3rem; } .logout { - color: $white; - font-size: 1rem; - .logoutButton { - cursor: pointer; - border: none; - background: none; - font: inherit; - color: inherit; - } + font-size: 0.75rem; } } - .page-header { - background-color: $banner-blue; width: 100%; - //height: 191px; - + position: relative; + margin: 0; + height: 180px; .title { - padding-left: 6rem; + margin: 5px 0 0 1rem; + padding-left: 0; h1 { - color: $white; + font-size: 2rem; } } } - - @media (max-width: 992px) { - .cthub-banner { - font-weight: normal; - flex-direction: column; - img { - margin-left: 1rem; - height: 3rem; - } - .logout { - font-size: 0.75rem; - } - } - .page-header { - width: 100%; - position: relative; - margin: 0; - height: 180px; - .title { - margin: 5px 0 0 1rem; - padding-left: 0; - h1 { - font-size: 2rem; - } +} +@media (max-width: 688px) { + .page-header { + .title { + h1 { + font-size: 1.75rem; } } } - @media (max-width: 688px) { - .page-header { - .title { - h1 { - font-size: 1.75rem; - } - } +} +@media (max-width: 380px) { + .page-header { + a { + font-size: 10pt; } - } - @media (max-width: 380px) { - .page-header { - a { - font-size: 10pt; - } - .title { - h1 { - font-size: 1.5rem; - } + .title { + h1 { + font-size: 1.5rem; } } } - @media (max-width: 330px) { - .page-header { - .title { - h1 { - font-size: 1.2rem; - } +} +@media (max-width: 330px) { + .page-header { + .title { + h1 { + font-size: 1.2rem; } } - } \ No newline at end of file + } +} diff --git a/frontend/src/app/styles/Login.scss b/frontend/src/app/styles/Login.scss index 42b414ba..aec718fa 100644 --- a/frontend/src/app/styles/Login.scss +++ 
b/frontend/src/app/styles/Login.scss @@ -1,5 +1,5 @@ #login-page { - background-image: url('./images/tof.jpg'); + background-image: url("./images/tof.jpg"); background-position-x: center; background-position-y: top; background-repeat: no-repeat; @@ -17,7 +17,7 @@ padding: 0 5rem; position: fixed; width: 100%; - + .text { color: $bg-primary; font-size: 2.5rem; @@ -50,7 +50,7 @@ } .brand-logo { - background-image: url('./images/BCID_H_rgb_pos.png'); + background-image: url("./images/BCID_H_rgb_pos.png"); background-position: left; background-repeat: no-repeat; background-size: contain; @@ -79,7 +79,6 @@ text-decoration: none; width: 100%; } - } .flex-container { @@ -100,9 +99,9 @@ #main-content { width: 30rem; - } + } } - + @media (max-width: $md) { #header { background-color: $bg-white; @@ -113,7 +112,7 @@ padding: 1rem; position: relative; z-index: 999; - + .text { align-self: center; font-size: 4vw; @@ -128,9 +127,9 @@ } .display-name { - height: 100%; - min-width: 5rem; - width: 10vw; + height: 100%; + min-width: 5rem; + width: 10vw; } #main-content { @@ -150,7 +149,7 @@ .brand-logo { align-self: center; - background-image: url('./images/BCID_V_rgb_pos.png'); + background-image: url("./images/BCID_V_rgb_pos.png"); background-position: center; background-size: contain; height: 20vh; @@ -160,4 +159,4 @@ } } } -} \ No newline at end of file +} diff --git a/frontend/src/app/styles/ReactTable.scss b/frontend/src/app/styles/ReactTable.scss index 1ff731ca..0c7e532d 100644 --- a/frontend/src/app/styles/ReactTable.scss +++ b/frontend/src/app/styles/ReactTable.scss @@ -16,7 +16,7 @@ Base styling for any React Table font-size: 1rem; font-weight: 700; text-transform: uppercase; - + &:not([disabled]):hover { opacity: 0.65; } @@ -49,7 +49,7 @@ Base styling for any React Table &.-sort-desc { box-shadow: inset 0 -3px 0 0 $highlight !important; } - } + } } .rt-tr-group { @@ -66,4 +66,4 @@ Base styling for any React Table padding: 1rem 0.5rem; } } -} \ No newline at end of file +} diff --git a/frontend/src/app/styles/Roboto.scss b/frontend/src/app/styles/Roboto.scss index c675214e..53159c92 100644 --- a/frontend/src/app/styles/Roboto.scss +++ b/frontend/src/app/styles/Roboto.scss @@ -3,19 +3,19 @@ Roboto Webfont */ @font-face { - font-family: 'Roboto'; - src: url('./fonts/Roboto-Regular.ttf') format('ttf'); + font-family: "Roboto"; + src: url("./fonts/Roboto-Regular.ttf") format("ttf"); } @font-face { - font-family: 'Roboto'; + font-family: "Roboto"; font-weight: bold; - src: url('./fonts/Roboto-Bold.ttf') format('ttf'); + src: url("./fonts/Roboto-Bold.ttf") format("ttf"); } @font-face { - font-family: 'Roboto'; + font-family: "Roboto"; font-style: italic; font-weight: bold; - src: url('./fonts/Roboto-BoldItalic.ttf') format('woff'); -} \ No newline at end of file + src: url("./fonts/Roboto-BoldItalic.ttf") format("woff"); +} diff --git a/frontend/src/app/styles/Users.scss b/frontend/src/app/styles/Users.scss index 20bd7092..c5c574b0 100644 --- a/frontend/src/app/styles/Users.scss +++ b/frontend/src/app/styles/Users.scss @@ -7,25 +7,23 @@ background-color: $default-background-grey; align-content: space-around; width: 10%; - .checkbox { + .checkbox { width: 40%; } - @media (max-width:991px) { + @media (max-width: 991px) { width: 20%; -} - @media (max-width:599px) { + } + @media (max-width: 599px) { width: 40%; -} - + } } .user-input { - margin-left: 10px; - margin-right: 10px; - background-color: white; + margin-left: 10px; + margin-right: 10px; + background-color: white; } .button-dark-blue { 
background-color: #003366 !important; } - diff --git a/frontend/src/app/styles/index.scss b/frontend/src/app/styles/index.scss index 5845cf87..a5b22410 100644 --- a/frontend/src/app/styles/index.scss +++ b/frontend/src/app/styles/index.scss @@ -1,9 +1,9 @@ -@import 'variables.scss'; -@import 'App.scss'; -@import 'Login.scss'; -@import 'ReactTable.scss'; -@import 'FileUpload.scss'; -@import 'Roboto.scss'; -@import 'Users.scss'; -@import 'Header.scss'; -@import 'Footer.scss'; +@import "variables.scss"; +@import "App.scss"; +@import "Login.scss"; +@import "ReactTable.scss"; +@import "FileUpload.scss"; +@import "Roboto.scss"; +@import "Users.scss"; +@import "Header.scss"; +@import "Footer.scss"; diff --git a/frontend/src/app/styles/variables.scss b/frontend/src/app/styles/variables.scss index 3d386ad6..af1bdba0 100644 --- a/frontend/src/app/styles/variables.scss +++ b/frontend/src/app/styles/variables.scss @@ -17,4 +17,4 @@ $default-background-grey: #f2f2f2; $background-light-blue: #e7f4f7; -$table-border: rgba(49, 49, 50, 0.33); \ No newline at end of file +$table-border: rgba(49, 49, 50, 0.33); diff --git a/frontend/src/app/utilities/getFileSize.js b/frontend/src/app/utilities/getFileSize.js index 5dcfefd2..9d899322 100644 --- a/frontend/src/app/utilities/getFileSize.js +++ b/frontend/src/app/utilities/getFileSize.js @@ -1,13 +1,14 @@ const getFileSize = (bytes) => { if (bytes === 0) { - return '0 bytes'; + return "0 bytes"; } const k = 1000; - const sizes = ['bytes', 'KB', 'MB', 'GB', 'TB']; + const sizes = ["bytes", "KB", "MB", "GB", "TB"]; let i = Math.floor(Math.log(bytes) / Math.log(k)); - if (i > 4) { // nothing bigger than a terrabyte + if (i > 4) { + // nothing bigger than a terrabyte i = 4; } diff --git a/frontend/src/app/utilities/props.js b/frontend/src/app/utilities/props.js index ab5157ea..5f87dffe 100644 --- a/frontend/src/app/utilities/props.js +++ b/frontend/src/app/utilities/props.js @@ -1,4 +1,4 @@ -import PropTypes from 'prop-types'; +import PropTypes from "prop-types"; const CustomPropTypes = { keycloak: PropTypes.shape({ diff --git a/frontend/src/app/utilities/reactTable.js b/frontend/src/app/utilities/reactTable.js index 074b8ad6..4569ab0c 100644 --- a/frontend/src/app/utilities/reactTable.js +++ b/frontend/src/app/utilities/reactTable.js @@ -4,13 +4,13 @@ const findSortBy = (columns, id) => { for (let i = 0; i < columns.length; i += 1) { const column = columns[i]; - if ('columns' in column) { + if ("columns" in column) { value = findSortBy(column.columns, id); if (value !== id) { return value; } - } else if (column.id === id && 'sortBy' in column) { + } else if (column.id === id && "sortBy" in column) { return column.sortBy; } } @@ -39,13 +39,11 @@ const getOrderBy = (state) => { } sortByFields.forEach((value) => { - orderBy.push(`${arr.desc ? '-' : ''}${value}`); + orderBy.push(`${arr.desc ? 
"-" : ""}${value}`); }); }); - return orderBy.join(','); + return orderBy.join(","); }; -export { - findSortBy, getFilters, getOrderBy, -}; +export { findSortBy, getFilters, getOrderBy }; diff --git a/frontend/src/app/utilities/useAxios.js b/frontend/src/app/utilities/useAxios.js index 1b932994..5de66a49 100644 --- a/frontend/src/app/utilities/useAxios.js +++ b/frontend/src/app/utilities/useAxios.js @@ -1,30 +1,30 @@ -import axios from 'axios' -import useKeycloak from './useKeycloak' -import { API_BASE } from '../../config'; +import axios from "axios"; +import useKeycloak from "./useKeycloak"; +import { API_BASE } from "../../config"; const useAxios = (useDefault = false, opts = {}) => { - const keycloak = useKeycloak() + const keycloak = useKeycloak(); if (useDefault) { - return axios.create(opts) + return axios.create(opts); } const instance = axios.create({ baseURL: API_BASE, ...opts, - }) + }); instance.interceptors.request.use(async (config) => { if (keycloak.authenticated) { try { - await keycloak.updateToken(30) - config.headers = { - 'Authorization': `Bearer ${keycloak.token}`, - } - } catch(error) { + await keycloak.updateToken(30); + config.headers = { + Authorization: `Bearer ${keycloak.token}`, + }; + } catch (error) { // do something here? } } - return config - }) - return instance -} + return config; + }); + return instance; +}; -export default useAxios \ No newline at end of file +export default useAxios; diff --git a/frontend/src/app/utilities/useKeycloak.js b/frontend/src/app/utilities/useKeycloak.js index f56bdf77..bb306cba 100644 --- a/frontend/src/app/utilities/useKeycloak.js +++ b/frontend/src/app/utilities/useKeycloak.js @@ -1,9 +1,9 @@ -import { useContext } from 'react' -import { KeycloakContext } from '../../contexts' +import { useContext } from "react"; +import { KeycloakContext } from "../../contexts"; const useKeycloak = () => { - const keycloak = useContext(KeycloakContext) - return keycloak -} + const keycloak = useContext(KeycloakContext); + return keycloak; +}; -export default useKeycloak \ No newline at end of file +export default useKeycloak; diff --git a/frontend/src/contexts.js b/frontend/src/contexts.js index 2b7751c7..42218924 100644 --- a/frontend/src/contexts.js +++ b/frontend/src/contexts.js @@ -1,3 +1,3 @@ -import { createContext } from 'react' +import { createContext } from "react"; -export const KeycloakContext = createContext({}) \ No newline at end of file +export const KeycloakContext = createContext({}); diff --git a/frontend/src/dashboard/DashboardContainer.js b/frontend/src/dashboard/DashboardContainer.js index f49ec102..3f7da555 100644 --- a/frontend/src/dashboard/DashboardContainer.js +++ b/frontend/src/dashboard/DashboardContainer.js @@ -1,9 +1,9 @@ -import { withRouter, useHistory } from 'react-router-dom'; +import { withRouter, useHistory } from "react-router-dom"; const DashboardContainer = () => { const history = useHistory(); - history.push('/upload'); + history.push("/upload"); return null; }; diff --git a/frontend/src/dashboard/router.js b/frontend/src/dashboard/router.js index 7d740664..01f50893 100644 --- a/frontend/src/dashboard/router.js +++ b/frontend/src/dashboard/router.js @@ -1,14 +1,11 @@ -import React from 'react'; -import { Route } from 'react-router-dom'; +import React from "react"; +import { Route } from "react-router-dom"; -import DashboardContainer from './DashboardContainer'; +import DashboardContainer from "./DashboardContainer"; const Router = () => ( <> - + ); diff --git 
a/frontend/src/icbc_data/IcbcDataContainer.js b/frontend/src/icbc_data/IcbcDataContainer.js index a7e26afc..8a666f5d 100644 --- a/frontend/src/icbc_data/IcbcDataContainer.js +++ b/frontend/src/icbc_data/IcbcDataContainer.js @@ -1,10 +1,10 @@ -import React, { useCallback, useRef, useState } from 'react'; -import { withRouter } from 'react-router-dom'; +import React, { useCallback, useRef, useState } from "react"; +import { withRouter } from "react-router-dom"; -import { getFilters, getOrderBy } from '../app/utilities/reactTable'; -import IcbcDataTable from './components/IcbcDataTable'; -import ROUTES from './routes'; -import useAxios from '../app/utilities/useAxios'; +import { getFilters, getOrderBy } from "../app/utilities/reactTable"; +import IcbcDataTable from "./components/IcbcDataTable"; +import ROUTES from "./routes"; +import useAxios from "../app/utilities/useAxios"; const IcbcDataContainer = () => { const [data, setData] = useState([]); @@ -12,7 +12,7 @@ const IcbcDataContainer = () => { const [pageCount, setPageCount] = useState(-1); const [totalRowsCount, setTotalRowsCount] = useState(0); const fetchIdRef = useRef(0); - const axios = useAxios() + const axios = useAxios(); const onFetchData = useCallback((state) => { setLoading(true); diff --git a/frontend/src/icbc_data/components/IcbcDataTable.js b/frontend/src/icbc_data/components/IcbcDataTable.js index a4bb67fa..c6e5e19e 100644 --- a/frontend/src/icbc_data/components/IcbcDataTable.js +++ b/frontend/src/icbc_data/components/IcbcDataTable.js @@ -1,49 +1,52 @@ -import PropTypes from 'prop-types'; -import React from 'react'; +import PropTypes from "prop-types"; +import React from "react"; -import ReactTable from '../../app/components/ReactTable'; +import ReactTable from "../../app/components/ReactTable"; const IcbcDataTable = (props) => { - const columns = [{ - accessor: 'icbc_vehicle.model_year', - align: 'center', - filterBy: 'icbc_vehicle__model_year__name', - Header: 'Year', - id: 'year', - sortBy: 'icbc_vehicle__model_year__name', - width: 50, - }, { - accessor: 'icbc_vehicle.make', - filterBy: 'icbc_vehicle__make', - Header: 'Make', - headerAlign: 'left', - id: 'make', - sortBy: 'icbc_vehicle__make', - width: 100, - }, { - accessor: 'icbc_vehicle.model_name', - filterBy: 'icbc_vehicle__model_name', - Header: 'Model', - headerAlign: 'left', - id: 'Model', - sortBy: 'icbc_vehicle__model_name', - width: 100, - }, { - accessor: 'vin', - Header: 'VIN', - headerAlign: 'left', - id: 'vin', - }]; + const columns = [ + { + accessor: "icbc_vehicle.model_year", + align: "center", + filterBy: "icbc_vehicle__model_year__name", + Header: "Year", + id: "year", + sortBy: "icbc_vehicle__model_year__name", + width: 50, + }, + { + accessor: "icbc_vehicle.make", + filterBy: "icbc_vehicle__make", + Header: "Make", + headerAlign: "left", + id: "make", + sortBy: "icbc_vehicle__make", + width: 100, + }, + { + accessor: "icbc_vehicle.model_name", + filterBy: "icbc_vehicle__model_name", + Header: "Model", + headerAlign: "left", + id: "Model", + sortBy: "icbc_vehicle__model_name", + width: 100, + }, + { + accessor: "vin", + Header: "VIN", + headerAlign: "left", + id: "vin", + }, + ]; - const { - data, loading, onFetchData, pageCount, totalRowsCount, - } = props; + const { data, loading, onFetchData, pageCount, totalRowsCount } = props; return ( ([ - +const Router = () => [ + , -]); +]; export default Router; diff --git a/frontend/src/icbc_data/routes.js b/frontend/src/icbc_data/routes.js index 5433c012..fdce9647 100644 --- 
a/frontend/src/icbc_data/routes.js +++ b/frontend/src/icbc_data/routes.js @@ -1,5 +1,5 @@ const ROUTES = { - LIST: '/api/icbc-data', + LIST: "/api/icbc-data", }; export default ROUTES; diff --git a/frontend/src/index.js b/frontend/src/index.js index 667ea39e..420c26eb 100644 --- a/frontend/src/index.js +++ b/frontend/src/index.js @@ -1,23 +1,23 @@ -import React from 'react'; -import ReactDOM from 'react-dom' -import Keycloak from 'keycloak-js'; +import React from "react"; +import ReactDOM from "react-dom"; +import Keycloak from "keycloak-js"; -import KeycloakProvider from './app/components/KeycloakProvider'; -import App from './app/components/App'; -import Loading from './app/components/Loading'; -import { KEYCLOAK_CLIENT_ID, KEYCLOAK_REALM, KEYCLOAK_URL } from './config'; +import KeycloakProvider from "./app/components/KeycloakProvider"; +import App from "./app/components/App"; +import Loading from "./app/components/Loading"; +import { KEYCLOAK_CLIENT_ID, KEYCLOAK_REALM, KEYCLOAK_URL } from "./config"; -import './app/styles/index.scss'; +import "./app/styles/index.scss"; const keycloak = new Keycloak({ clientId: KEYCLOAK_CLIENT_ID, realm: KEYCLOAK_REALM, - url: KEYCLOAK_URL -}) + url: KEYCLOAK_URL, +}); const keycloakInitOptions = { - onLoad: 'check-sso', - pkceMethod: 'S256' -} + onLoad: "check-sso", + pkceMethod: "S256", +}; ReactDOM.render( , - document.getElementById('root') -) + document.getElementById("root"), +); diff --git a/frontend/src/uploads/UploadContainer.js b/frontend/src/uploads/UploadContainer.js index 44532f6a..315400aa 100644 --- a/frontend/src/uploads/UploadContainer.js +++ b/frontend/src/uploads/UploadContainer.js @@ -1,27 +1,25 @@ -import { withRouter } from 'react-router-dom'; -import React, { useState, useEffect } from 'react'; -import { - Paper, Alert, Stack, -} from '@mui/material'; -import ROUTES_UPLOAD from './routes'; -import ROUTES_USERS from '../users/routes'; -import UploadPage from './components/UploadPage'; -import AlertDialog from '../app/components/AlertDialog'; -import UsersContainer from '../users/UsersContainer'; -import Loading from '../app/components/Loading'; -import useAxios from '../app/utilities/useAxios'; +import { withRouter } from "react-router-dom"; +import React, { useState, useEffect } from "react"; +import { Paper, Alert, Stack } from "@mui/material"; +import ROUTES_UPLOAD from "./routes"; +import ROUTES_USERS from "../users/routes"; +import UploadPage from "./components/UploadPage"; +import AlertDialog from "../app/components/AlertDialog"; +import UsersContainer from "../users/UsersContainer"; +import Loading from "../app/components/Loading"; +import useAxios from "../app/utilities/useAxios"; const UploadContainer = () => { const [uploadFiles, setUploadFiles] = useState([]); // array of objects for files to be uploaded const [datasetList, setDatasetList] = useState([{}]); // holds the array of names of datasets const [loading, setLoading] = useState(false); const [refresh, setRefresh] = useState(false); // Used for page refresh instead of loading progress - const [datasetSelected, setDatasetSelected] = useState(''); // string identifying which dataset is being uploaded + const [datasetSelected, setDatasetSelected] = useState(""); // string identifying which dataset is being uploaded const [replaceData, setReplaceData] = useState(false); // if true, we will replace all const [alertContent, setAlertContent] = useState(); const [alert, setAlert] = useState(false); - const [currentUser, setCurrentUser] = useState(''); - const 
[alertSeverity, setAlertSeverity] = useState(''); + const [currentUser, setCurrentUser] = useState(""); + const [alertSeverity, setAlertSeverity] = useState(""); const [openDialog, setOpenDialog] = useState(false); const [adminUser, setAdminUser] = useState(false); const axios = useAxios(); @@ -33,7 +31,11 @@ const UploadContainer = () => { setDatasetList(response.data); setRefresh(false); axios.get(ROUTES_USERS.CURRENT).then((currentUserResp) => { - if (currentUserResp.data && currentUserResp.data.user_permissions && currentUserResp.data.user_permissions.admin === true) { + if ( + currentUserResp.data && + currentUserResp.data.user_permissions && + currentUserResp.data.user_permissions.admin === true + ) { setAdminUser(true); setCurrentUser(currentUserResp.data.idir); } @@ -43,75 +45,88 @@ const UploadContainer = () => { const showError = (error) => { const { response: errorResponse } = error; - setAlertContent(`${errorResponse.data.message}\n${errorResponse.data.errors ? 'Errors: ' + errorResponse.data.errors.join('\n') : ''}`); - setAlertSeverity('error'); + setAlertContent( + `${errorResponse.data.message}\n${errorResponse.data.errors ? "Errors: " + errorResponse.data.errors.join("\n") : ""}`, + ); + setAlertSeverity("error"); setAlert(true); }; - const doUpload = () => uploadFiles.forEach((file) => { - setLoading(true) - const uploadPromises = uploadFiles.map((file) => { - return axios.get(ROUTES_UPLOAD.MINIO_URL).then((response) => { - const { url: uploadUrl, minio_object_name: filename } = response.data; - return axiosDefault.put(uploadUrl, file).then(() => { - let replace = false; - if (replaceData === true) { - replace = true; - } - return axios.post(ROUTES_UPLOAD.UPLOAD, { - filename, - datasetSelected, - replace, + const doUpload = () => + uploadFiles.forEach((file) => { + setLoading(true); + const uploadPromises = uploadFiles.map((file) => { + return axios.get(ROUTES_UPLOAD.MINIO_URL).then((response) => { + const { url: uploadUrl, minio_object_name: filename } = response.data; + return axiosDefault.put(uploadUrl, file).then(() => { + let replace = false; + if (replaceData === true) { + replace = true; + } + return axios.post(ROUTES_UPLOAD.UPLOAD, { + filename, + datasetSelected, + replace, + }); }); }); }); - }); - - Promise.all(uploadPromises).then((responses) => { - - const errorCheck = responses.some(response => response.data.success) - - setAlertSeverity(errorCheck ? 'success' : 'error') - const message = responses.map(response => - `${response.data.message}${response.data.errors ? '\nErrors: ' + response.data.errors.join('\n') : ''}` - ).join('\n'); - - setAlertContent(message); - setAlert(true); - setUploadFiles([]); - }).catch((error) => { - showError(error); - }).finally(() => { - setLoading(false); + Promise.all(uploadPromises) + .then((responses) => { + const errorCheck = responses.some( + (response) => response.data.success, + ); + + setAlertSeverity(errorCheck ? "success" : "error"); + + const message = responses + .map( + (response) => + `${response.data.message}${response.data.errors ? 
"\nErrors: " + response.data.errors.join("\n") : ""}`, + ) + .join("\n"); + + setAlertContent(message); + setAlert(true); + setUploadFiles([]); + }) + .catch((error) => { + showError(error); + }) + .finally(() => { + setLoading(false); + }); }); - }); const downloadSpreadsheet = () => { - axios.get(ROUTES_UPLOAD.DOWNLOAD_SPREADSHEET, { - params: { - datasetSelected, - }, - responseType: 'blob', - }).then((response) => { - const url = window.URL.createObjectURL(new Blob([response.data])); - const link = document.createElement('a'); - - link.href = url; - link.setAttribute('download', `${datasetSelected}.xlsx`); - document.body.appendChild(link); - link.click(); - - link.parentNode.removeChild(link); - window.URL.revokeObjectURL(url); - }).catch((error) => { - showError(error); - }); + axios + .get(ROUTES_UPLOAD.DOWNLOAD_SPREADSHEET, { + params: { + datasetSelected, + }, + responseType: "blob", + }) + .then((response) => { + const url = window.URL.createObjectURL(new Blob([response.data])); + const link = document.createElement("a"); + + link.href = url; + link.setAttribute("download", `${datasetSelected}.xlsx`); + document.body.appendChild(link); + link.click(); + + link.parentNode.removeChild(link); + window.URL.revokeObjectURL(url); + }) + .catch((error) => { + showError(error); + }); }; const handleRadioChange = (event) => { const choice = event.target.value; - if (choice === 'replace') { + if (choice === "replace") { setOpenDialog(true); } else { setReplaceData(false); @@ -119,13 +134,13 @@ const UploadContainer = () => { }; const handleReplaceDataConfirm = () => { - setReplaceData(true) - setOpenDialog(false) - } + setReplaceData(true); + setOpenDialog(false); + }; const handleReplaceDataCancel = () => { - setOpenDialog(false) - } + setOpenDialog(false); + }; useEffect(() => { refreshList(true); @@ -135,13 +150,17 @@ const UploadContainer = () => { return ; } - const alertElement = alert && alertContent && alertSeverity ? - {alertContent.split('\n').map((line, index) => ( - - {line} -
-
- ))}
: null + const alertElement = + alert && alertContent && alertSeverity ? ( + + {alertContent.split("\n").map((line, index) => ( + + {line} +
+
+ ))} +
+ ) : null; return (
@@ -149,11 +168,13 @@ const UploadContainer = () => { <> @@ -184,5 +205,5 @@ const UploadContainer = () => {
); -} +}; export default withRouter(UploadContainer); diff --git a/frontend/src/uploads/components/FileDrop.js b/frontend/src/uploads/components/FileDrop.js index 7780eba4..31650e1d 100644 --- a/frontend/src/uploads/components/FileDrop.js +++ b/frontend/src/uploads/components/FileDrop.js @@ -1,38 +1,32 @@ -import PropTypes from 'prop-types'; -import React, { useCallback, useState } from 'react'; -import { Box, Button } from '@mui/material'; -import UploadIcon from '@mui/icons-material/Upload'; -import { useDropzone } from 'react-dropzone'; +import PropTypes from "prop-types"; +import React, { useCallback, useState } from "react"; +import { Box, Button } from "@mui/material"; +import UploadIcon from "@mui/icons-material/Upload"; +import { useDropzone } from "react-dropzone"; const FileDrop = (props) => { - const { - disabled, - setFiles, - setAlert, - } = props; - const [dropMessage, setDropMessage] = useState(''); + const { disabled, setFiles, setAlert } = props; + const [dropMessage, setDropMessage] = useState(""); const onDrop = useCallback((files) => { - setAlert(false) - setDropMessage(''); + setAlert(false); + setDropMessage(""); setFiles(files); }, []); const { getRootProps, getInputProps } = useDropzone({ onDrop }); - const uploadBoxClassNames = disabled ? "file-upload disabled" : "file-upload" + const uploadBoxClassNames = disabled ? "file-upload disabled" : "file-upload"; return (

- Drag and Drop files here or - {' '} -
+ Drag and Drop files here or
- + - {dropMessage && ( -
{dropMessage}
- )} + {dropMessage &&
{dropMessage}
}
); diff --git a/frontend/src/uploads/components/FileDropArea.js b/frontend/src/uploads/components/FileDropArea.js index 680a088f..97dd05e0 100644 --- a/frontend/src/uploads/components/FileDropArea.js +++ b/frontend/src/uploads/components/FileDropArea.js @@ -1,20 +1,15 @@ -import React from 'react'; -import PropTypes from 'prop-types'; -import { Box, Button, Grid } from '@mui/material'; -import ClearIcon from '@mui/icons-material/Clear'; -import FileDrop from './FileDrop'; -import getFileSize from '../../app/utilities/getFileSize'; +import React from "react"; +import PropTypes from "prop-types"; +import { Box, Button, Grid } from "@mui/material"; +import ClearIcon from "@mui/icons-material/Clear"; +import FileDrop from "./FileDrop"; +import getFileSize from "../../app/utilities/getFileSize"; const FileDropArea = (props) => { - const { - disabled, - setUploadFiles, - uploadFiles, - setAlert, - } = props; + const { disabled, setUploadFiles, uploadFiles, setAlert } = props; const removeFile = (removedFile) => { - const found = uploadFiles.findIndex((file) => (file === removedFile)); + const found = uploadFiles.findIndex((file) => file === removedFile); uploadFiles.splice(found, 1); setUploadFiles([...uploadFiles]); }; @@ -38,7 +33,7 @@ const FileDropArea = (props) => { type="button" id="trash-button" > - + @@ -57,20 +52,18 @@ const FileDropArea = (props) => {
{uploadFiles.length > 0 && ( - - - -

Filename

+ + + +

Filename

+
+ +

Size

+
+ + {uploadFiles.map((file) => FormRow(file))} - -

Size

-
- - {uploadFiles.map((file) => ( - FormRow(file) - ))} - -
+
)}
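An aside on the frontend hunks above and below: the mechanical churn (single quotes to double quotes, added trailing commas and semicolons, one JSX attribute per line) is characteristic of running an opinionated formatter such as Prettier over frontend/src. The patch itself does not name the tool, so the exact command here is an assumption, not something stated in the series:

    # Hypothetical formatter pass; assumes a prettier devDependency with default config.
    cd frontend
    npx prettier --write "src/**/*.js"

Running the same pass locally before committing would keep later diffs free of this kind of formatting noise.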
diff --git a/frontend/src/uploads/components/UploadPage.js b/frontend/src/uploads/components/UploadPage.js index 330ff4a4..6228e42b 100644 --- a/frontend/src/uploads/components/UploadPage.js +++ b/frontend/src/uploads/components/UploadPage.js @@ -1,11 +1,18 @@ -import PropTypes from 'prop-types'; -import React from 'react'; +import PropTypes from "prop-types"; +import React from "react"; import { - Box, Button, MenuItem, Select, Radio, RadioGroup, FormControlLabel, FormControl, -} from '@mui/material'; -import UploadIcon from '@mui/icons-material/Upload'; -import FileDropArea from './FileDropArea'; -import Loading from '../../app/components/Loading'; + Box, + Button, + MenuItem, + Select, + Radio, + RadioGroup, + FormControlLabel, + FormControl, +} from "@mui/material"; +import UploadIcon from "@mui/icons-material/Upload"; +import FileDropArea from "./FileDropArea"; +import Loading from "../../app/components/Loading"; const UploadPage = (props) => { const { @@ -34,19 +41,23 @@ const UploadPage = (props) => { {alertElement}
-

- Select Program     -

+

Select Program    

- {datasetSelected && } - + {datasetSelected && ( + + )}
@@ -58,13 +69,13 @@ const UploadPage = (props) => { defaultValue="add" > } label="Add to existing data (default)" /> } label="Replace existing data (data cannot be restored, proceed only if you are certain that the new file contains all required data)." @@ -74,13 +85,20 @@ const UploadPage = (props) => {
- + {loading ? ( ) : ( @@ -112,10 +130,8 @@ UploadPage.propTypes = { setUploadFiles: PropTypes.func.isRequired, doUpload: PropTypes.func.isRequired, setDatasetSelected: PropTypes.func.isRequired, - replaceData: PropTypes.oneOfType([ - PropTypes.string, - PropTypes.bool, - ]).isRequired, + replaceData: PropTypes.oneOfType([PropTypes.string, PropTypes.bool]) + .isRequired, handleRadioChange: PropTypes.func.isRequired, downloadSpreadsheet: PropTypes.func.isRequired, setAlert: PropTypes.func.isRequired, diff --git a/frontend/src/uploads/router.js b/frontend/src/uploads/router.js index 3575a73d..19d0271d 100644 --- a/frontend/src/uploads/router.js +++ b/frontend/src/uploads/router.js @@ -1,16 +1,12 @@ -import React from 'react'; -import { Route } from 'react-router-dom'; +import React from "react"; +import { Route } from "react-router-dom"; -import UploadContainer from './UploadContainer'; +import UploadContainer from "./UploadContainer"; -const Router = () => ([ - +const Router = () => [ + , -]); +]; export default Router; diff --git a/frontend/src/uploads/routes.js b/frontend/src/uploads/routes.js index 6d3299f5..3a41f837 100644 --- a/frontend/src/uploads/routes.js +++ b/frontend/src/uploads/routes.js @@ -1,10 +1,10 @@ -const API_BASE_PATH = '/api/uploads'; +const API_BASE_PATH = "/api/uploads"; const UPLOAD = { - MINIO_URL: '/api/minio/put', + MINIO_URL: "/api/minio/put", UPLOAD: `${API_BASE_PATH}/import_data`, LIST: `${API_BASE_PATH}/datasets_list`, // backend route for retrieving list of datasets (eg ldv_rebates) - DOWNLOAD_SPREADSHEET: `${API_BASE_PATH}/download_dataset` + DOWNLOAD_SPREADSHEET: `${API_BASE_PATH}/download_dataset`, }; export default UPLOAD; diff --git a/frontend/src/users/UsersContainer.js b/frontend/src/users/UsersContainer.js index 5f0381bc..38711305 100644 --- a/frontend/src/users/UsersContainer.js +++ b/frontend/src/users/UsersContainer.js @@ -1,50 +1,52 @@ -import { withRouter } from 'react-router-dom'; -import PropTypes from 'prop-types'; -import { Alert } from '@mui/material'; -import React, { useState, useEffect, useCallback } from 'react'; -import { produce } from 'immer'; -import ROUTES_USERS from './routes'; -import UsersPage from './components/UsersPage'; -import useAxios from '../app/utilities/useAxios'; -import AlertDialog from '../app/components/AlertDialog'; -import Loading from '../app/components/Loading'; +import { withRouter } from "react-router-dom"; +import PropTypes from "prop-types"; +import { Alert } from "@mui/material"; +import React, { useState, useEffect, useCallback } from "react"; +import { produce } from "immer"; +import ROUTES_USERS from "./routes"; +import UsersPage from "./components/UsersPage"; +import useAxios from "../app/utilities/useAxios"; +import AlertDialog from "../app/components/AlertDialog"; +import Loading from "../app/components/Loading"; const UsersContainer = (props) => { - const { - currentUser, - } = props; + const { currentUser } = props; const [loading, setLoading] = useState(false); const [users, setUsers] = useState([]); - const [newUser, setNewUser] = useState(''); - const [message, setMessage] = useState(''); - const [messageSeverity, setMessageSeverity] = useState(''); - const [userToDelete, setUserToDelete] = useState(''); + const [newUser, setNewUser] = useState(""); + const [message, setMessage] = useState(""); + const [messageSeverity, setMessageSeverity] = useState(""); + const [userToDelete, setUserToDelete] = useState(""); const [openDialog, setOpenDialog] = useState(false); const axios = useAxios(); const 
handleAddNewUser = () => { - axios.post(ROUTES_USERS.CREATE, { idir: newUser }) + axios + .post(ROUTES_USERS.CREATE, { idir: newUser }) .then((response) => { const userAdded = response.data.idir; - setMessageSeverity('success'); + setMessageSeverity("success"); setMessage(`${userAdded} was added to the user list`); - const userObject = { idir: userAdded, user_permissions: { admin: false, uploader: false } }; + const userObject = { + idir: userAdded, + user_permissions: { admin: false, uploader: false }, + }; setUsers( produce((draft) => { draft.push(userObject); draft.sort((a, b) => a.idir.localeCompare(b.idir)); }), ); - setNewUser('') + setNewUser(""); }) .catch((error) => { - setMessageSeverity('error'); - setMessage('new user could not be added, sorry!'); + setMessageSeverity("error"); + setMessage("new user could not be added, sorry!"); }); }; const handleCheckboxChange = useCallback((event) => { - setMessage(''); + setMessage(""); const idir = event.target.name; const permissionType = event.target.id; const { checked } = event.target; @@ -59,43 +61,47 @@ const UsersContainer = (props) => { const handleDeleteUserClick = (idir) => { setUserToDelete(idir); setOpenDialog(true); - } + }; const handleDeleteUser = () => { - axios.delete(ROUTES_USERS.DETAILS.replace(/:id/g, userToDelete)) + axios + .delete(ROUTES_USERS.DETAILS.replace(/:id/g, userToDelete)) .then((response) => { - setMessageSeverity('success'); + setMessageSeverity("success"); setMessage(`${userToDelete} was deleted from the user table`); setUsers( produce((draft) => { - const indexOfUserToRemove = draft.findIndex((user) => user.idir === userToDelete); + const indexOfUserToRemove = draft.findIndex( + (user) => user.idir === userToDelete, + ); draft.splice(indexOfUserToRemove, 1); }), ); }) .catch((error) => { - setMessageSeverity('error'); - setMessage('something went wrong when deleting the user, sorry!'); + setMessageSeverity("error"); + setMessage("something went wrong when deleting the user, sorry!"); }) .finally(() => { - setUserToDelete(''); - setOpenDialog(false) + setUserToDelete(""); + setOpenDialog(false); }); - } + }; const handleDeleteUserCancel = () => { - setUserToDelete(''); + setUserToDelete(""); setOpenDialog(false); - } + }; const handleSubmitUserUpdates = () => { - axios.put(ROUTES_USERS.UPDATE, users) + axios + .put(ROUTES_USERS.UPDATE, users) .then((response) => { - setMessageSeverity('success'); + setMessageSeverity("success"); setMessage(response.data); }) .catch((error) => { - setMessageSeverity('error'); + setMessageSeverity("error"); setMessage(error.data); }); }; @@ -109,20 +115,20 @@ const UsersContainer = (props) => { }, []); if (loading) { - return + return ; } return (
{message && {message}} - + { const { @@ -24,10 +30,32 @@ const UsersPage = (props) => { return ( - { handleCheckboxChange(event); }} /> - + { + handleCheckboxChange(event); + }} + /> + - { handleCheckboxChange(event); }} /> + { + handleCheckboxChange(event); + }} + /> @@ -35,14 +63,22 @@ const UsersPage = (props) => { {user.idir} - + { handleXClick(user.idir); }} + onClick={() => { + handleXClick(user.idir); + }} > - + @@ -58,20 +94,47 @@ const UsersPage = (props) => {

Admin

- - + + -

- IDIR Username -

+

IDIR Username

- { setNewUser(event.target.value); setMessage(''); }} /> + { + setNewUser(event.target.value); + setMessage(""); + }} + /> - + - @@ -81,7 +144,13 @@ const UsersPage = (props) => {
- +

Upload

@@ -89,11 +158,20 @@ const UsersPage = (props) => {

Admin

- {users.map((user) => ( - userRow(user) - ))} - - @@ -103,7 +181,7 @@ const UsersPage = (props) => { ); }; UsersPage.defaultProps = { - newUser: '', + newUser: "", }; UsersPage.propTypes = { diff --git a/frontend/src/users/routes.js b/frontend/src/users/routes.js index 6cb6c026..8d6d7793 100644 --- a/frontend/src/users/routes.js +++ b/frontend/src/users/routes.js @@ -1,4 +1,4 @@ -const API_BASE_PATH = '/api/users'; +const API_BASE_PATH = "/api/users"; const USERS = { LIST: API_BASE_PATH, From f4910f3d26160519254783d8ef7cbc9b247b41b1 Mon Sep 17 00:00:00 2001 From: JulianForeman <71847719+JulianForeman@users.noreply.github.com> Date: Mon, 22 Apr 2024 09:21:48 -0700 Subject: [PATCH 133/152] Task: Backend Tests for Uploader #165 (#276) * Writing backend tests for the uploader service * Another test * Successful upload test --- django/api/tests/__init__.py | 0 django/api/tests/test_spreadsheet_uploader.py | 230 ++++++++++++++++++ 2 files changed, 230 insertions(+) create mode 100644 django/api/tests/__init__.py create mode 100644 django/api/tests/test_spreadsheet_uploader.py diff --git a/django/api/tests/__init__.py b/django/api/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/django/api/tests/test_spreadsheet_uploader.py b/django/api/tests/test_spreadsheet_uploader.py new file mode 100644 index 00000000..f151678f --- /dev/null +++ b/django/api/tests/test_spreadsheet_uploader.py @@ -0,0 +1,230 @@ +import io +from decimal import Decimal +from django.test import TestCase +import pandas as pd +from api.models.scrap_it import ScrapIt +from api.services.spreadsheet_uploader import import_from_xls +from api.constants import ScrapItColumnMapping, ScrapItColumns +from api.services.spreadsheet_uploader_prep import prepare_scrap_it + +class UploadTests(TestCase): + def setUp(self): + + self.field_types = { + 'approval_number': int, + 'application_received_date': str, + 'completion_date': str, + 'postal_code': str, + 'vin': str, + 'application_city_fuel': Decimal, + 'incentive_type': str, + 'incentive_cost': Decimal, + 'cheque_number': str, + 'budget_code': str, + 'scrap_date': str + } + + def test_wrong_cell_types(self): + data = { + 'approval_number': [1], + 'application_recieved_date': ['Monday'], + 'completion_date': ['Tuesday'], + 'postal_code': ['ABCDEFG'], + 'vin': [123], + 'application_city_fuel': ['Zero Point One'], + 'incentive_type': ['A'], + 'incentive_cost': [0.50], + 'cheque_number': ['string'], + 'budget_code': ['string'], + 'scrap_date': ['string'] + } + + rename_columns = { + 'approval_number': 'Approval Num', + 'application_recieved_date': "App Recv'd Date", + 'completion_date': 'Completion Date', + 'postal_code': 'Postal Code', + 'vin': 'VIN', + 'application_city_fuel': 'App City Fuel', + 'incentive_type': 'Incentive Type', + 'incentive_cost': 'Incentive Cost', + 'cheque_number': 'Cheque #', + 'budget_code': 'Budget Code', + 'scrap_date': 'Scrap Date' + } + + df = pd.DataFrame(data) + df.rename(columns=rename_columns, inplace=True) + excel_buffer = io.BytesIO() + df.to_excel(excel_writer=excel_buffer, sheet_name='TOP OTHER TRANSACTIONS', index=False) + excel_buffer.seek(0) + + response = import_from_xls( + excel_file=excel_buffer, + sheet_name='TOP OTHER TRANSACTIONS', + model=ScrapIt, + dataset_columns=ScrapItColumns, + header_row=0, + column_mapping_enum=ScrapItColumnMapping, + field_types=self.field_types, + replace_data=False, + user='Tester', + preparation_functions=[prepare_scrap_it] + ) + + self.assertFalse(response['success']) + self.assertIn("Row 1: 
Incorrect type for 'vin'", response['errors'][0]) + self.assertIn("Row 1: Incorrect type for 'application_city_fuel'", response['errors'][1]) + + def test_missing_columns(self): + + data = { + 'approval_number': [1], + 'application_recieved_date': ['Monday'], + 'completion_date': ['Tuesday'], + 'postal_code': ['ABCDEFG'], + 'incentive_type': ['A'], + 'incentive_cost': [0.50], + 'cheque_number': ['string'], + 'budget_code': ['string'], + 'scrap_date': ['string'] + } + + rename_columns = { + 'approval_number': 'Approval Num', + 'application_recieved_date': "App Recv'd Date", + 'completion_date': 'Completion Date', + 'postal_code': 'Postal Code', + 'incentive_type': 'Incentive Type', + 'incentive_cost': 'Incentive Cost', + 'cheque_number': 'Cheque #', + 'budget_code': 'Budget Code', + 'scrap_date': 'Scrap Date' + } + + df = pd.DataFrame(data) + df.rename(columns=rename_columns, inplace=True) + excel_buffer = io.BytesIO() + df.to_excel(excel_writer=excel_buffer, sheet_name='TOP OTHER TRANSACTIONS', index=False) + excel_buffer.seek(0) + + response = import_from_xls( + excel_file=excel_buffer, + sheet_name='TOP OTHER TRANSACTIONS', + model=ScrapIt, + dataset_columns=ScrapItColumns, + header_row=0, + column_mapping_enum=ScrapItColumnMapping, + field_types=self.field_types, + replace_data=False, + user='Tester', + preparation_functions=[prepare_scrap_it] + ) + + self.assertFalse(response["success"]) + + self.assertIn("Missing columns: VIN, App City Fuel", response["errors"][0]) + + def test_missing_worksheet(self): + + data = { + 'approval_number': [1], + 'application_recieved_date': ['Monday'], + 'completion_date': ['Tuesday'], + 'postal_code': ['ABCDEFG'], + 'vin': ['string'], + 'application_city_fuel': [0.50], + 'incentive_type': ['A'], + 'incentive_cost': [0.50], + 'cheque_number': ['string'], + 'budget_code': ['string'], + 'scrap_date': ['string'] + } + + rename_columns = { + 'approval_number': 'Approval Num', + 'application_recieved_date': "App Recv'd Date", + 'completion_date': 'Completion Date', + 'postal_code': 'Postal Code', + 'vin': 'VIN', + 'application_city_fuel': 'App City Fuel', + 'incentive_type': 'Incentive Type', + 'incentive_cost': 'Incentive Cost', + 'cheque_number': 'Cheque #', + 'budget_code': 'Budget Code', + 'scrap_date': 'Scrap Date' + } + + df = pd.DataFrame(data) + df.rename(columns=rename_columns, inplace=True) + excel_buffer = io.BytesIO() + df.to_excel(excel_writer=excel_buffer, sheet_name='Wrong Sheet Name', index=False) + excel_buffer.seek(0) + + response = import_from_xls( + excel_file=excel_buffer, + sheet_name='TOP OTHER TRANSACTIONS', + model=ScrapIt, + dataset_columns=ScrapItColumns, + header_row=0, + column_mapping_enum=ScrapItColumnMapping, + field_types=self.field_types, + replace_data=False, + user='Tester', + preparation_functions=[prepare_scrap_it] + ) + + self.assertFalse(response["success"]) + self.assertIn("Worksheet named 'TOP OTHER TRANSACTIONS' not found", response["errors"][0]) + + def test_successful_upload(self): + + data = { + 'approval_number': [1], + 'application_recieved_date': ['Monday'], + 'completion_date': ['Tuesday'], + 'postal_code': ['ABCDEFG'], + 'vin': ['string'], + 'application_city_fuel': [0.50], + 'incentive_type': ['A'], + 'incentive_cost': [0.50], + 'cheque_number': ['string'], + 'budget_code': ['string'], + 'scrap_date': ['string'] + } + + rename_columns = { + 'approval_number': 'Approval Num', + 'application_recieved_date': "App Recv'd Date", + 'completion_date': 'Completion Date', + 'postal_code': 'Postal Code', + 'vin': 
'VIN',
+            'application_city_fuel': 'App City Fuel',
+            'incentive_type': 'Incentive Type',
+            'incentive_cost': 'Incentive Cost',
+            'cheque_number': 'Cheque #',
+            'budget_code': 'Budget Code',
+            'scrap_date': 'Scrap Date'
+        }
+
+        df = pd.DataFrame(data)
+        df.rename(columns=rename_columns, inplace=True)
+        excel_buffer = io.BytesIO()
+        df.to_excel(excel_writer=excel_buffer, sheet_name='TOP OTHER TRANSACTIONS', index=False)
+        excel_buffer.seek(0)
+
+        response = import_from_xls(
+            excel_file=excel_buffer,
+            sheet_name='TOP OTHER TRANSACTIONS',
+            model=ScrapIt,
+            dataset_columns=ScrapItColumns,
+            header_row=0,
+            column_mapping_enum=ScrapItColumnMapping,
+            field_types=self.field_types,
+            replace_data=False,
+            user='Tester',
+            preparation_functions=[prepare_scrap_it]
+        )
+
+        self.assertTrue(response["success"])
+        self.assertIn('All 1 records successfully inserted out of 1.', response['message'])
\ No newline at end of file

From 61368a1f93ba21fa1ac957b28a2e8a0655030b86 Mon Sep 17 00:00:00 2001
From: Emily <44536222+emi-hi@users.noreply.github.com>
Date: Mon, 22 Apr 2024 09:26:40 -0700
Subject: [PATCH 134/152] task: backend tests for user permissions (#279)

* chore: adds tests for user permissions

* removes extra space
---
 README.md                             | 13 ++++
 django/api/keycloak_authentication.py | 10 ++-
 django/api/settings.py                |  2 +-
 django/api/tests/test_user.py         | 89 +++++++++++++++++++++++++++
 4 files changed, 111 insertions(+), 3 deletions(-)
 create mode 100644 django/api/tests/test_user.py

diff --git a/README.md b/README.md
index a1be2b22..7b1a67cf 100644
--- a/README.md
+++ b/README.md
@@ -32,6 +32,19 @@ The Clean Transportation Data Hub provides an evidence base for the Clean Transp
     - This is where you can make changes to your package.json
     - You can technically make changes to your packages without going into your container, but you'll need npm installed into your system
+
+  - To run in testing mode:
+    if you don't have docker-compose-local-dev.yml locally, create it by copying
+    the contents of docker-compose.yml and adding one line to the api service's
+    environment section:
+      - KEYCLOAK_TESTING=True
+
+    then start the stack with that compose file:
+    docker-compose -f docker-compose-local-dev.yml up
+
+    this makes authentication skip the real Keycloak check and read
+    permissions from the user table instead
+
 # Rebasing Guide
 - To rebase your branch onto the latest release branch:
   - ```git fetch upstream```

diff --git a/django/api/keycloak_authentication.py b/django/api/keycloak_authentication.py
index 7abd1d86..b532a5b3 100644
--- a/django/api/keycloak_authentication.py
+++ b/django/api/keycloak_authentication.py
@@ -2,15 +2,21 @@
 from django.conf import settings
 from rest_framework import authentication, exceptions

+from api.models.user import User

 class KeycloakAuthentication(authentication.BaseAuthentication):
     def authenticate(self, request):
         auth = request.headers.get("Authorization", None)
-
+        if settings.KEYCLOAK_TESTING:
+            try:
+                user = User.objects.get(idir=auth['idir'])
+                return user.idir, None
+            except User.DoesNotExist as exc:
+                # print("Testing User does not exist")
+                raise User.DoesNotExist(str(exc))
         if not auth:
             raise exceptions.AuthenticationFailed("Authorization token required")
-
         try:
             scheme, token = auth.split()
         except ValueError:

diff --git a/django/api/settings.py b/django/api/settings.py
index 1a6ddbb1..73856b9d 100644
--- a/django/api/settings.py
+++ b/django/api/settings.py
@@ -167,7 +167,7 @@
 KEYCLOAK_CLIENT_ID = os.getenv("KEYCLOAK_CLIENT_ID")
 KEYCLOAK_REALM = os.getenv("KEYCLOAK_REALM")
 KEYCLOAK_URL = 
os.getenv("KEYCLOAK_URL", "http://localhost:8080") - +KEYCLOAK_TESTING = os.getenv("KEYCLOAK_TESTING", False) MINIO_ACCESS_KEY = os.getenv("MINIO_ROOT_USER") MINIO_SECRET_KEY = os.getenv("MINIO_ROOT_PASSWORD") diff --git a/django/api/tests/test_user.py b/django/api/tests/test_user.py new file mode 100644 index 00000000..b4324f4a --- /dev/null +++ b/django/api/tests/test_user.py @@ -0,0 +1,89 @@ +from unittest.mock import MagicMock +from django.test import TestCase +from api.keycloak_authentication import KeycloakAuthentication +from rest_framework.test import APIRequestFactory +from rest_framework.response import Response +from rest_framework import status +from django.http import HttpResponse +from rest_framework import exceptions + +from api.models.permission import Permission +from api.models.user import User +from api.models.user_permission import UserPermission +from api.viewsets.user import UserViewSet +from api.viewsets.upload import UploadViewset +from api.decorators.permission import check_upload_permission +class TestUsers(TestCase): + def setUp(self): + self.factory = APIRequestFactory() + self.userauth = KeycloakAuthentication() + self.view = UserViewSet.as_view({'get': 'list'}) + #user with no permission + self.test_user = User.objects.create(idir='test_user') + + #user with admin permission + self.test_admin_permission = Permission.objects.create(description='admin') + self.test_admin_user = User.objects.create(idir='test_admin_user') + self.admin_user_permission = UserPermission.objects.create(user=self.test_admin_user, permission=self.test_admin_permission) + + #user with upload permission + self.test_upload_permission = Permission.objects.create(description='uploader') + self.test_upload_user = User.objects.create(idir='test_upload_user') + self.upload_user_permission = UserPermission.objects.create(user=self.test_upload_user, permission=self.test_upload_permission) + + + def test_get_user_list(self): + self.assertTrue(User.objects.filter(idir='test_user').exists()) + request = self.factory.get('/api/users/list') + request.META = { + 'HTTP_AUTHORIZATION': { + 'idir': 'test_user' + } + } + response = self.view(request) + self.assertEqual(response.status_code, 403) # Forbidden status code + + self.assertTrue(User.objects.filter(idir='test_upload_user').exists()) + request_uploader = self.factory.get('/api/users/list') + request_uploader.META = { + 'HTTP_AUTHORIZATION': { + 'idir': 'test_upload_user' + } + } + response = self.view(request_uploader) + self.assertEqual(response.status_code, 403) # Forbidden status code + + self.assertTrue(User.objects.filter(idir='test_admin_user').exists()) + request_admin = self.factory.get('/api/users/list') + request_admin.META = { + 'HTTP_AUTHORIZATION': { + 'idir': 'test_admin_user' + } + } + response = self.view(request_admin) + self.assertEqual(response.status_code, 200) # OK status code + def test_not_authenticated_user(self): + request = self.factory.get('/api/users/list') + request.META = { + 'HTTP_AUTHORIZATION': { + 'idir': 'test' + } + } + with self.assertRaises(User.DoesNotExist): + _user, _auth = self.userauth.authenticate(request) + + def test_upload_user_permissions(self): + # test decorator for user with upload permission + @check_upload_permission() + def mock_import_function(request): + return HttpResponse() + request = self.factory.post('/api/users/list') + request.user = 'test_upload_user' + response = mock_import_function(request) + self.assertEqual(response.status_code, 200) # OK status code + + request_admin = 
self.factory.post('/api/users/list') + request_admin.user = 'test_admin_user' + response = mock_import_function(request_admin) + self.assertEqual(response.status_code, 403) # Forbidden! + From 46111d148a37b6a4bdcf80472fe5feadfbf3047c Mon Sep 17 00:00:00 2001 From: Kuan Fan Date: Fri, 26 Apr 2024 13:35:25 -0700 Subject: [PATCH 135/152] cleanup knps: --- .../backup-container-2.6.1/cronjob.md | 22 ++ .../db-backup-cronjob-2.6.1.yaml | 253 ++++++++++++++++++ .../{knp-env-base.yaml => 1-knp-base.yaml} | 11 - .../templates/knp/2-allow-crunchy-accept.yaml | 72 +++++ openshift/templates/knp/README.md | 34 --- openshift/templates/knp/knp-diagram.drawio | 91 ++++++- openshift/templates/knp/knp-env-non-pr.yaml | 30 --- openshift/templates/knp/knp-env-pr.yaml | 192 ------------- openshift/templates/knp/knp-quick-start.yaml | 10 - 9 files changed, 437 insertions(+), 278 deletions(-) create mode 100644 openshift/templates/backup-container-2.6.1/cronjob.md create mode 100644 openshift/templates/backup-container-2.6.1/db-backup-cronjob-2.6.1.yaml rename openshift/templates/knp/{knp-env-base.yaml => 1-knp-base.yaml} (66%) create mode 100644 openshift/templates/knp/2-allow-crunchy-accept.yaml delete mode 100644 openshift/templates/knp/README.md delete mode 100644 openshift/templates/knp/knp-env-non-pr.yaml delete mode 100644 openshift/templates/knp/knp-env-pr.yaml diff --git a/openshift/templates/backup-container-2.6.1/cronjob.md b/openshift/templates/backup-container-2.6.1/cronjob.md new file mode 100644 index 00000000..25b81607 --- /dev/null +++ b/openshift/templates/backup-container-2.6.1/cronjob.md @@ -0,0 +1,22 @@ +# Cronjob prerequisites +Backup PVC: backup +KNP: allow CronJob to connect to Spilo + +# Create database backup cronjob +oc process -f ./db-backup-cronjob-2.6.1.yaml \ +JOB_NAME=cthub-db-backup \ +JOB_PERSISTENT_STORAGE_NAME=backup \ +SCHEDULE="00 07,21 * * *" \ +TAG_NAME=2.6.1 \ +DATABASE_SERVICE_NAME=cthub-test-crunchy-replicas \ +DATABASE_DEFAULT_PORT=5432 \ +DATABASE_NAME=cthub \ +DATABASE_DEPLOYMENT_NAME=cthub-patroni-app \ +DATABASE_USER_KEY_NAME=app-db-username \ +DATABASE_PASSWORD_KEY_NAME=app-db-password \ +BACKUP_STRATEGY=rolling \ +BACKUP_DIR=/backups \ +DAILY_BACKUPS=30 \ +WEEKLY_BACKUPS=8 \ +MONTHLY_BACKUPS=2 | oc apply -f - -n 30b186-test + diff --git a/openshift/templates/backup-container-2.6.1/db-backup-cronjob-2.6.1.yaml b/openshift/templates/backup-container-2.6.1/db-backup-cronjob-2.6.1.yaml new file mode 100644 index 00000000..e36f9628 --- /dev/null +++ b/openshift/templates/backup-container-2.6.1/db-backup-cronjob-2.6.1.yaml @@ -0,0 +1,253 @@ +--- +kind: "Template" +apiVersion: "template.openshift.io/v1" +metadata: + name: "{$JOB_NAME}-cronjob-template" + annotations: + description: "Scheduled Task to perform a Database Backup" + tags: "cronjob,backup" +parameters: + - name: "JOB_NAME" + displayName: "Job Name" + description: "Name of the Scheduled Job to Create." + value: "backup-postgres" + required: true + - name: "JOB_PERSISTENT_STORAGE_NAME" + displayName: "Backup Persistent Storage Name" + description: "Pre-Created PVC to use for backup target" + value: "bk-devex-von-tools-a9vlgd1jpsg1" + required: true + - name: "SCHEDULE" + displayName: "Cron Schedule" + description: "Cron Schedule to Execute the Job (using local cluster system TZ)" + # Currently targeting 1:00 AM Daily + value: "0 1 * * *" + required: true + - name: "SOURCE_IMAGE_NAME" + displayName: "Source Image Name" + description: "The name of the image to use for this resource." 
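+    # Combined with IMAGE_REGISTRY and IMAGE_NAMESPACE below, this resolves to
+    # docker.io/bcgovimages/backup-container:${TAG_NAME}, i.e. the 2.6.1 tag
+    # used in cronjob.md above. (Annotation inferred from the template's
+    # defaults, not stated in the original file.)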
+    required: true
+    value: "backup-container"
+  - name: "IMAGE_REGISTRY"
+    description: "The base OpenShift docker registry"
+    displayName: "Docker Image Registry"
+    required: true
+    # Set value to "docker-registry.default.svc:5000" if using OCP3
+    value: "docker.io"
+  - name: "IMAGE_NAMESPACE"
+    displayName: "Image Namespace"
+    description: "The namespace of the OpenShift project containing the imagestream for the application."
+    required: true
+    value: "bcgovimages"
+  - name: "TAG_NAME"
+    displayName: "Environment TAG name"
+    description: "The TAG name for this environment, e.g., dev, test, prod"
+    required: true
+    value: "dev"
+  - name: "DATABASE_SERVICE_NAME"
+    displayName: "Database Service Name"
+    description: "The name of the database service."
+    required: true
+    value: "postgresql"
+  - name: "DATABASE_DEFAULT_PORT"
+    displayName: "Database Service Port"
+    description: "The configured port for the database service"
+    required: true
+    value: "5432"
+  - name: "DATABASE_NAME"
+    displayName: "Database Name"
+    description: "The name of the database."
+    required: true
+    value: "MyDatabase"
+  - name: "DATABASE_DEPLOYMENT_NAME"
+    displayName: "Database Deployment Name"
+    description: "The name associated to the database deployment resources. In particular, this is used to wire up the credentials associated to the database."
+    required: true
+    value: "postgresql"
+  - name: DATABASE_USER_KEY_NAME
+    displayName: Database User Key Name
+    description:
+      The database user key name stored in database deployment resources specified
+      by DATABASE_DEPLOYMENT_NAME.
+    required: true
+    value: database-user
+  - name: DATABASE_PASSWORD_KEY_NAME
+    displayName: Database Password Key Name
+    description:
+      The database password key name stored in database deployment resources
+      specified by DATABASE_DEPLOYMENT_NAME.
+    required: true
+    value: database-password
+  - name: "BACKUP_STRATEGY"
+    displayName: "Backup Strategy"
+    description: "The strategy to use for backups; for example daily, or rolling."
+    required: true
+    value: "rolling"
+  - name: "BACKUP_DIR"
+    displayName: "The root backup directory"
+    description: "The name of the root backup directory"
+    required: true
+    value: "/backups/"
+  - name: "NUM_BACKUPS"
+    displayName: "The number of backup files to be retained"
+    description: "The number of backup files to be retained. Used for the `daily` backup strategy. Ignored when using the `rolling` backup strategy."
+    required: false
+    value: "5"
+  - name: "DAILY_BACKUPS"
+    displayName: "Number of Daily Backups to Retain"
+    description: "The number of daily backup files to be retained. Used for the `rolling` backup strategy."
+    required: false
+    value: "7"
+  - name: "WEEKLY_BACKUPS"
+    displayName: "Number of Weekly Backups to Retain"
+    description: "The number of weekly backup files to be retained. Used for the `rolling` backup strategy."
+    required: false
+    value: "4"
+  - name: "MONTHLY_BACKUPS"
+    displayName: "Number of Monthly Backups to Retain"
+    description: "The number of monthly backup files to be retained. Used for the `rolling` backup strategy."
+    required: false
+    value: "1"
+  - name: "JOB_SERVICE_ACCOUNT"
+    displayName: "Service Account Name"
+    description: "Name of the Service Account To Execute the Job As."
+ value: "default" + required: true + - name: "SUCCESS_JOBS_HISTORY_LIMIT" + displayName: "Successful Job History Limit" + description: "The number of successful jobs that will be retained" + value: "5" + required: true + - name: "FAILED_JOBS_HISTORY_LIMIT" + displayName: "Failed Job History Limit" + description: "The number of failed jobs that will be retained" + value: "2" + required: true + - name: "JOB_BACKOFF_LIMIT" + displayName: "Job Backoff Limit" + description: "The number of attempts to try for a successful job outcome" + value: "0" + required: false +objects: +- kind: ConfigMap + apiVersion: v1 + metadata: + name: "${JOB_NAME}-config" + labels: + template: "${JOB_NAME}-config-template" + cronjob: "${JOB_NAME}" + data: + DATABASE_SERVICE_NAME: "${DATABASE_SERVICE_NAME}" + DEFAULT_PORT: "${DATABASE_DEFAULT_PORT}" + POSTGRESQL_DATABASE: "${DATABASE_NAME}" + # BACKUP_STRATEGY: "daily" + BACKUP_STRATEGY: "rolling" + RETENTION.NUM_BACKUPS: "${NUM_BACKUPS}" + RETENTION.DAILY_BACKUPS: "${DAILY_BACKUPS}" + RETENTION.WEEKLY_BACKUPS: "${WEEKLY_BACKUPS}" + RETENTION.MONTHLY_BACKUPS: "${MONTHLY_BACKUPS}" + +- kind: "CronJob" + apiVersion: "batch/v1" + metadata: + name: "${JOB_NAME}" + labels: + template: "${JOB_NAME}-cronjob" + cronjob: "${JOB_NAME}" + spec: + schedule: "${SCHEDULE}" + concurrencyPolicy: "Forbid" + successfulJobsHistoryLimit: "${{SUCCESS_JOBS_HISTORY_LIMIT}}" + failedJobsHistoryLimit: "${{FAILED_JOBS_HISTORY_LIMIT}}" + jobTemplate: + metadata: + labels: + template: "${JOB_NAME}-job" + cronjob: "${JOB_NAME}" + spec: + backoffLimit: ${{JOB_BACKOFF_LIMIT}} + template: + metadata: + labels: + template: "${JOB_NAME}-job" + cronjob: "${JOB_NAME}" + spec: + containers: + - name: "${JOB_NAME}-cronjob" + image: "${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/${SOURCE_IMAGE_NAME}:${TAG_NAME}" + # image: backup + command: + - "/bin/bash" + - "-c" + - "/backup.sh -1" + volumeMounts: + - mountPath: "${BACKUP_DIR}" + name: "backup" + env: + - name: BACKUP_DIR + value: "${BACKUP_DIR}/db-backups-by-cron/" + - name: BACKUP_STRATEGY + valueFrom: + configMapKeyRef: + name: "${JOB_NAME}-config" + key: BACKUP_STRATEGY + - name: NUM_BACKUPS + valueFrom: + configMapKeyRef: + name: "${JOB_NAME}-config" + key: RETENTION.NUM_BACKUPS + optional: true + - name: DAILY_BACKUPS + valueFrom: + configMapKeyRef: + name: "${JOB_NAME}-config" + key: RETENTION.DAILY_BACKUPS + optional: true + - name: WEEKLY_BACKUPS + valueFrom: + configMapKeyRef: + name: "${JOB_NAME}-config" + key: RETENTION.WEEKLY_BACKUPS + optional: true + - name: MONTHLY_BACKUPS + valueFrom: + configMapKeyRef: + name: "${JOB_NAME}-config" + key: RETENTION.MONTHLY_BACKUPS + optional: true + - name: DATABASE_SERVICE_NAME + valueFrom: + configMapKeyRef: + name: "${JOB_NAME}-config" + key: DATABASE_SERVICE_NAME + - name: DEFAULT_PORT + valueFrom: + configMapKeyRef: + name: "${JOB_NAME}-config" + key: DEFAULT_PORT + optional: true + - name: POSTGRESQL_DATABASE + valueFrom: + configMapKeyRef: + name: "${JOB_NAME}-config" + key: POSTGRESQL_DATABASE + - name: DATABASE_USER + valueFrom: + secretKeyRef: + name: "${DATABASE_DEPLOYMENT_NAME}" + key: "${DATABASE_USER_KEY_NAME}" + - name: DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: "${DATABASE_DEPLOYMENT_NAME}" + key: "${DATABASE_PASSWORD_KEY_NAME}" + volumes: + - name: backup + persistentVolumeClaim: + claimName: "${JOB_PERSISTENT_STORAGE_NAME}" + restartPolicy: "Never" + terminationGracePeriodSeconds: 30 + activeDeadlineSeconds: 1600 + dnsPolicy: "ClusterFirst" + serviceAccountName: 
"${JOB_SERVICE_ACCOUNT}" + serviceAccount: "${JOB_SERVICE_ACCOUNT}" diff --git a/openshift/templates/knp/knp-env-base.yaml b/openshift/templates/knp/1-knp-base.yaml similarity index 66% rename from openshift/templates/knp/knp-env-base.yaml rename to openshift/templates/knp/1-knp-base.yaml index 0b75d846..b02d498f 100644 --- a/openshift/templates/knp/knp-env-base.yaml +++ b/openshift/templates/knp/1-knp-base.yaml @@ -6,17 +6,6 @@ labels: metadata: name: cthub-network-policy objects: - ## Base Network Policies - - kind: NetworkPolicy - apiVersion: networking.k8s.io/v1 - metadata: - name: deny-by-default - spec: - # The default posture for a security first namespace is to - # deny all traffic. If not added this rule will be added - # by Platform Services during environment cut-over. - podSelector: {} - ingress: [] - apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: diff --git a/openshift/templates/knp/2-allow-crunchy-accept.yaml b/openshift/templates/knp/2-allow-crunchy-accept.yaml new file mode 100644 index 00000000..c5565417 --- /dev/null +++ b/openshift/templates/knp/2-allow-crunchy-accept.yaml @@ -0,0 +1,72 @@ +--- +apiVersion: template.openshift.io/v1 +kind: Template +labels: + template: cthub-network-policy +metadata: + name: allow-crunchy-accept +parameters: + - name: ENVIRONMENT + displayName: null + description: such as dev, test or prod + required: true +objects: + - apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: allow-crunchy-accept + spec: + podSelector: + matchLabels: + postgres-operator.crunchydata.com/cluster: cthub-${ENVIRONMENT}-crunchy + ingress: + - from: + - podSelector: + matchLabels: + postgres-operator.crunchydata.com/cluster: cthub-${ENVIRONMENT}-crunchy + - ports: + - protocol: TCP + port: 5432 + from: + - podSelector: + matchLabels: + app.kubernetes.io/instance: cthub-${ENVIRONMENT} + app.kubernetes.io/name: backend + - ports: + - protocol: TCP + port: 5432 + from: + - podSelector: + matchLabels: + openshift.io/deployer-pod.type: hook-mid + - ports: + - protocol: TCP + port: 5432 + from: + - podSelector: + matchLabels: + cronjob: cthub-db-backup + - ports: + - protocol: TCP + port: 9187 + from: + - namespaceSelector: + matchLabels: + environment: tools + name: 0ab226 + - podSelector: + matchLabels: + name: crunchy-prometheus + - ports: + - protocol: TCP + port: 5432 + from: + - namespaceSelector: + matchLabels: + environment: ${ENVIRONMENT} + name: 30b186 + - podSelector: + matchLabels: + app: metabase + policyTypes: + - Ingress \ No newline at end of file diff --git a/openshift/templates/knp/README.md b/openshift/templates/knp/README.md deleted file mode 100644 index 0e2bf6dd..00000000 --- a/openshift/templates/knp/README.md +++ /dev/null @@ -1,34 +0,0 @@ - -## For Aporeto network security policies - -### remove all Aporeto network security policies -oc get nsp -n -oc delete nsp,en --all -n - -### Apply generic Aporeto network security policies -oc process -f nsp-generic.yaml NAMESPACE_PREFIX= ENVIRONMENT= | oc apply -f - -n -Note: once it is applied, the application will NOT be blocked by Aporeto. Aporeto should become transparent. - -## For the new network policies - -### For tools project, apply quick start -oc process -f knp-quick-start.yaml NAMESPACE_PREFIX= ENVIRONMENT= | oc apply -f - -n -Note : the quick start include three knps: deny-by-default, allow-from-openshift-ingress and allow-all-internal. Once the quick start is applied, the application will NOT be blocked by Openshift network policies. 
- -### For environment projects -oc process -f knp-env-base.yaml ENVIRONMENT= | oc create -f - -n -oc process -f knp-env-non-pr.yaml ENVIRONMENT= | oc create -f - -n -#### For Dev -Apply knp-env-pr.yaml through pipeline -#### For Test and Prod -oc process -f knp-env-pr.yaml SUFFIX=-test ENVIRONMENT=test | oc create -f - -n -oc process -f knp-env-pr.yaml SUFFIX=-prod ENVIRONMENT=prod | oc create -f - -n - -## Setup the new network policies on Test -oc get nsp -n e52f12-test -oc delete nsp,en --all -n e52f12-test -oc process -f nsp-generic.yaml NAMESPACE_PREFIX=e52f12 ENVIRONMENT=test | oc apply -f - -n e52f12-test -oc process -f knp-env-base.yaml ENVIRONMENT=test | oc create -f - -n e52f12-test -oc process -f knp-env-non-pr.yaml ENVIRONMENT=test | oc create -f - -n e52f12-test -oc process -f knp-env-pr.yaml SUFFIX=-test ENVIRONMENT=test | oc create -f - -n e52f12-test - \ No newline at end of file diff --git a/openshift/templates/knp/knp-diagram.drawio b/openshift/templates/knp/knp-diagram.drawio index b00c787c..2a5f2086 100644 --- a/openshift/templates/knp/knp-diagram.drawio +++ b/openshift/templates/knp/knp-diagram.drawio @@ -1 +1,90 @@ -3Vtbe6I8EP41XtqHU0Avrfa0X9vtud3eRYhKi4RCqNpf/wUJKEm0ukVhfXohGUKAeefwzoQ29O54ehbCYHSFHeQ1NMWZNvReQ9NU1bToTyKZMUlLaaWSYeg6TLYQ3LtfiAkVJo1dB0WFiQRjj7hBUWhj30c2KchgGOJJcdoAe8W7BnCIBMG9DT1R+uw6ZJRKW0BZyM+ROxxld1YVdmYMs8lMEI2ggydLIv2koXdDjEl6NJ52kZdoL9NLet3pirP5g4XIJ5tc0Dder7UrcDe4+dP5uL16O+/qTlMF7OHILHtj5FAFsCEOyQgPsQ+9k4X0OMSx76BkWYWOFnMuMQ6oUKXCN0TIjKEJY4KpaETGHjuLpi55WTr+kyx1BNioN2UrzwczNohIiN9RF3s4nD+nrqj0T6dnREUw3UQ4Dm207u2ZQcFwiMiaeXo6L9HM0g2Yms8QHiMSzuiEEHmQuJ9F04HMAof5vAVI9IDhtA1m6bqf0IvZnQYh9gnynYZmevQ9jvshPRomR7qS2CMvBYqiibh7HnWqBN/JyCXoPoBz3U2oYxfRg1GQetrAnSZWsBqATxQSNF2rsuysybyExYmmwcaTJadjotGSv2Wy0pWsmVU6hrLkGOre3ULf0C3UevmFZtUmltUWslatENOFSNaH9rs0kFHeoIjwVhmydAMUQlY2rCpitQRlBpBamu/KlJmwMCErGHrNsgJQi1lBtfaXFXrXX0ow6YNz82LSih9/wc4jalYTYXz66HlWSAZLfCkZLsLMfLRTxqTUPdBcA//s+N2+7Uw+Tn5fniLz7XmQUaa6Z4YcaXU7pDfC8+0VvIyH7xeXD2/urXvT/2hOQNOoigHPL+2EIZwtTQiw65NoaeWbRLCIB4ZVjAcG4Eqeb+a3AGdJ6QMs7Cp/kx+wc0Wa1OKgSUtUAl0fhfUKsjrgg6x2JKYyC4hhNpOVX+GIxGDs+i4Wc1ZbUWpGC9Q2p852xTmrmthXVs5qWz3Fsn6Us7RNc1Zl5YwUN70uOWt9AfpPIl1+5fpX+YwLFZShlZqfpGZVacNv875G3c1qHZuqmiVxJMkql/VIraqablnNbWRd97ZiGzH3wIQ1kQmjKUEhhbhelE0vOgwQCZslIWxWCYStPX19mt0N8ezVB4/x5eShcwEkXTFBXXYcfs49J3UDp5PsdNGh7cEocu3ScvqWdeguOg6tDf1q04bDEqpAgmom2879BP8CfGpXOGtJ35tdtaaEBSa/kFlcKFWMsNDWAUHj7mOuL61NQ/6CK+frW85XuDKqVS43kubvaii33Nl262rS19dKdrXS66R1Ty1uIjTH9LpaJRreCfIUuYfegFR31dK2gsGrFRj8pt3Pehm8IRh8ZI/QmBrs7J/YN+M2dQzJVv+u+BY4V4xxv4/iG+tX+IQfev7LneR7ChhQPXSozCajuN+c9x6bDvoUFElfmRQ1VjRTH/uJlgeu53Ei6LlDP2FsVJGIyo8TBbo29DrsBA1ejrcKoqIPlgCKCoqbmZpkp02V8SUtbxaXjovIg304RsvAZIGeQtNsHy44efzPuJAuAUeWN7RduYwYgmwvjqiumguI2O7zHBwAxJ3Tg4HHaMmpcwEeWUTbGTymAA9NB3CcvL3fj+Y/AkzpJtXhoqTzBY6kzFd1yS7XzlCy1sS3PKM3g7jvufbhe5HGFbr5tx/L+LT36UXtb/NP9t3loScglfMdCWPbb4CTbLLjAPnRyB2Qo2Sf+NRBgYdnNCEF2Dkis4DhNsL4XVoXHgxWgNvOl31Jm3c8ywbrP+cuCp7Puq3p0wv+reCrh8uOhF/TdWEf0gKk7MJEUJNEmSs1Z3FWrpqW5EOIXdXnUtWJvQ1RZd90f4sGJtuI++7L+599lcQVzd8W6utMaLNCvaqer8G3LNucWWza8zX4L3IMbqEVPd+/aINKtS2y+7wgXum5YvzcPC6GKHK/YH++VGI4rFdN1wXHDdBL1ooJjtJ+0jaheQdBQUYbS2IldLj4b6AUy8U/Vekn/wM= \ No newline at end of file + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/openshift/templates/knp/knp-env-non-pr.yaml b/openshift/templates/knp/knp-env-non-pr.yaml deleted file mode 100644 index 4d511845..00000000 --- 
a/openshift/templates/knp/knp-env-non-pr.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- -apiVersion: template.openshift.io/v1 -kind: Template -labels: - template: cthub-network-policy -metadata: - name: cthub-network-policy -parameters: - - name: ENVIRONMENT - displayName: null - description: such as dev, test or prod - required: true -objects: - - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: allow-minio-accepts-nagios - spec: - ## Allow minio to accept communication from nagios - podSelector: - matchLabels: - app: cthub-minio-${ENVIRONMENT} - ingress: - - from: - - podSelector: - matchLabels: - app: nagios - ports: - - protocol: TCP - port: 9000 \ No newline at end of file diff --git a/openshift/templates/knp/knp-env-pr.yaml b/openshift/templates/knp/knp-env-pr.yaml deleted file mode 100644 index c6da19ba..00000000 --- a/openshift/templates/knp/knp-env-pr.yaml +++ /dev/null @@ -1,192 +0,0 @@ ---- -apiVersion: template.openshift.io/v1 -kind: Template -labels: - template: cthub-network-policy -metadata: - name: cthub-network-policy -parameters: - - name: SUFFIX - displayName: null - description: sample is -dev-97 - required: true - - name: ENVIRONMENT - displayName: null - description: such as dev, test or prod - required: true -objects: - - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: allow-backend-accepts-frontend${SUFFIX} - spec: - ## Allow backend to accept communication from frontend - podSelector: - matchLabels: - name: cthub-backend${SUFFIX} - ingress: - - from: - - podSelector: - matchLabels: - name: cthub-frontend${SUFFIX} - ports: - - protocol: TCP - port: 8080 - - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: allow-patroni-accepts-backend${SUFFIX} - spec: - ## Allow patroni to accept communications from backend - podSelector: - matchLabels: - cluster-name: patroni${SUFFIX} - ingress: - - from: - - podSelector: - matchLabels: - name: cthub-backend${SUFFIX} - ports: - - protocol: TCP - port: 5432 - - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: allow-minio-accepts-backend${SUFFIX} - spec: - ## Allow minio to accept communication from backend - podSelector: - matchLabels: - app: cthub-minio-${ENVIRONMENT} - ingress: - - from: - - podSelector: - matchLabels: - name: cthub-backend${SUFFIX} - ports: - - protocol: TCP - port: 9000 - ## Other Network Policies - - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: allow-patroni-accepts-backendmid${SUFFIX} - spec: - ## Allow patroni to accept communications from backend mid lifecycle pod - podSelector: - matchLabels: - cluster-name: patroni${SUFFIX} - ingress: - - from: - - podSelector: - matchLabels: - openshift.io/deployer-pod.type: hook-mid - ports: - - protocol: TCP - port: 5432 - - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: allow-patroni-accepts-patroni-itself${SUFFIX} - spec: - ## Allow patroni to accept communications from other patroni pods - podSelector: - matchLabels: - cluster-name: patroni${SUFFIX} - ingress: - - from: - - podSelector: - matchLabels: - cluster-name: patroni${SUFFIX} - ports: - - protocol: TCP - port: 5432 - - protocol: TCP - port: 8008 - - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: allow-patroni-accepts-backup-container${SUFFIX} - spec: - ## Allow patroni to accept communications from backup container - podSelector: - matchLabels: - cluster-name: patroni${SUFFIX} - ingress: - - from: - - podSelector: - 
matchLabels: - name: patroni-backup - ports: - - protocol: TCP - port: 5432 - - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: allow-patroni-accepts-nagios${SUFFIX} - spec: - ## Allow patroni to accept communications from nagios - podSelector: - matchLabels: - cluster-name: patroni${SUFFIX} - ingress: - - from: - - podSelector: - matchLabels: - app: nagios - ports: - - protocol: TCP - port: 5432 - - protocol: TCP - port: 8008 - - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: allow-patroni-accepts-schemaspy${SUFFIX} - spec: - ## Allow patroni to accept communications from schemaspy - podSelector: - matchLabels: - cluster-name: patroni${SUFFIX} - ingress: - - from: - - podSelector: - matchLabels: - name: schemaspy-public${SUFFIX} - ports: - - protocol: TCP - port: 5432 - - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: allow-patroni-accepts-metabase - spec: - ## Allow patroni to accept communications from backup container - podSelector: - matchLabels: - cluster-name: patroni${SUFFIX} - ingress: - - from: - - podSelector: - matchLabels: - app: metabase - ports: - - protocol: TCP - port: 5432 - - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: allow-backend-accepts-schemaspy${SUFFIX} - spec: - ## Allow backend to accept communication from schemaspy - podSelector: - matchLabels: - name: cthub-backend${SUFFIX} - ingress: - - from: - - podSelector: - matchLabels: - name: schemaspy-public${SUFFIX} - ports: - - protocol: TCP - port: 8080 \ No newline at end of file diff --git a/openshift/templates/knp/knp-quick-start.yaml b/openshift/templates/knp/knp-quick-start.yaml index e7a97a6b..7e121cb0 100644 --- a/openshift/templates/knp/knp-quick-start.yaml +++ b/openshift/templates/knp/knp-quick-start.yaml @@ -15,16 +15,6 @@ parameters: description: the namespace prefix required: true objects: - - kind: NetworkPolicy - apiVersion: networking.k8s.io/v1 - metadata: - name: deny-by-default - spec: - # The default posture for a security first namespace is to - # deny all traffic. If not added this rule will be added - # by Platform Services during environment cut-over. 
-      podSelector: {}
-      ingress: []
   - apiVersion: networking.k8s.io/v1
     kind: NetworkPolicy
     metadata:

From 22dd264d1df841660467e923163ae326def10632 Mon Sep 17 00:00:00 2001
From: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com>
Date: Fri, 26 Apr 2024 14:31:58 -0700
Subject: [PATCH 136/152] Cleanup 0.2.0 (#286)

---
 openshift/templates/cleanup/Dockerfile        |   4 +
 .../templates/cleanup/cleanup-bc-docker.yaml  |  52 ++++++++
 openshift/templates/cleanup/cleanup-cron.yaml | 111 ++++++++++++++++++
 openshift/templates/cleanup/readme.md         |  13 ++
 4 files changed, 180 insertions(+)
 create mode 100644 openshift/templates/cleanup/Dockerfile
 create mode 100644 openshift/templates/cleanup/cleanup-bc-docker.yaml
 create mode 100644 openshift/templates/cleanup/cleanup-cron.yaml
 create mode 100644 openshift/templates/cleanup/readme.md

diff --git a/openshift/templates/cleanup/Dockerfile b/openshift/templates/cleanup/Dockerfile
new file mode 100644
index 00000000..876880aa
--- /dev/null
+++ b/openshift/templates/cleanup/Dockerfile
@@ -0,0 +1,4 @@
+FROM registry.redhat.io/openshift4/ose-cli
+RUN mkdir /.kube && \
+    chgrp -R root /.kube && \
+    chmod -R g+w /.kube

diff --git a/openshift/templates/cleanup/cleanup-bc-docker.yaml b/openshift/templates/cleanup/cleanup-bc-docker.yaml
new file mode 100644
index 00000000..dad1c7ca
--- /dev/null
+++ b/openshift/templates/cleanup/cleanup-bc-docker.yaml
@@ -0,0 +1,52 @@
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+  creationTimestamp: null
+  name: frontend
+parameters:
+  - name: GIT_URL
+    displayName:
+    description: cthub repo
+    required: true
+  - name: GIT_REF
+    displayName:
+    description: cthub branch name of the pr
+    required: true
+objects:
+  - apiVersion: image.openshift.io/v1
+    kind: ImageStream
+    metadata:
+      annotations:
+        description: cleanup
+      creationTimestamp: null
+      name: cthub-cleanup
+    spec:
+      lookupPolicy:
+        local: false
+  - apiVersion: build.openshift.io/v1
+    kind: BuildConfig
+    metadata:
+      name: cthub-cleanup
+      creationTimestamp:
+    spec:
+      output:
+        to:
+          kind: ImageStreamTag
+          name: cthub-cleanup:prod
+      resources:
+        limits:
+          cpu: 1500m
+          memory: 1300Mi
+        requests:
+          cpu: 750m
+          memory: 650Mi
+      source:
+        contextDir: openshift/templates/cleanup
+        git:
+          uri: ${GIT_URL}
+          ref: ${GIT_REF}
+        type: Git
+      strategy:
+        type: Docker
+        dockerStrategy:
+          dockerfilePath: Dockerfile

diff --git a/openshift/templates/cleanup/cleanup-cron.yaml b/openshift/templates/cleanup/cleanup-cron.yaml
new file mode 100644
index 00000000..4ef48112
--- /dev/null
+++ b/openshift/templates/cleanup/cleanup-cron.yaml
@@ -0,0 +1,111 @@
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+  creationTimestamp: null
+  name: cthub-cleanup
+parameters:
+  - name: LICENSE_PLATE
+    description: license plate for the project
+    required: true
+  - name: LOGIN_TOKEN_SECRET
+    description: The secret containing the login token
+    required: true
+objects:
+  - kind: CronJob
+    apiVersion: batch/v1
+    metadata:
+      name: cthub-cleanup
+    spec:
+      schedule: 0 7 * * *
+      concurrencyPolicy: Forbid
+      suspend: false
+      jobTemplate:
+        metadata:
+          creationTimestamp: null
+        spec:
+          template:
+            metadata:
+              creationTimestamp: null
+            spec:
+              containers:
+                - resources:
+                    limits:
+                      cpu: 100m
+                      memory: 100Mi
+                    requests:
+                      cpu: 50m
+                      memory: 50Mi
+                  terminationMessagePath: /dev/termination-log
+                  name: oc
+                  command:
+                    - /bin/sh
+                    - "-c"
+                  env:
+                    - name: LOGIN_TOKEN
+                      valueFrom:
+                        secretKeyRef:
+                          name: ${LOGIN_TOKEN_SECRET}
+                          key: token
+                  imagePullPolicy: Always
+                  terminationMessagePolicy: File
+                  image: >- 
image-registry.openshift-image-registry.svc:5000/${LICENSE_PLATE}-tools/cthub-cleanup:prod
+                  args:
+                    - >
+                      date
+
+                      oc login --token=$(LOGIN_TOKEN) --server=https://api.silver.devops.gov.bc.ca:6443
+
+                      oc version
+
+                      echo ""
+
+                      echo "====> Cleaning up ${LICENSE_PLATE}-tools"
+
+                      echo "==========> Removing expired builds"
+
+                      oc -n ${LICENSE_PLATE}-tools get builds | grep -E "Complete|Failed|Cancelled" | awk '{print $1}' | xargs oc -n ${LICENSE_PLATE}-tools delete build || true
+
+                      echo "==========> Removing expired frontend and backend image tags"
+
+                      oc -n ${LICENSE_PLATE}-tools get imagetags | grep -E "cthub-frontend|cthub-backend" | awk '{print $1}' | xargs oc -n ${LICENSE_PLATE}-tools delete imagetag || true
+
+                      echo "==========> Removing expired pods"
+
+                      oc -n ${LICENSE_PLATE}-tools get pods | grep -E "Completed|Error|ContainerStatusUnknown" | grep -v crunchy | grep -v spilo | awk '{print $1}' | xargs oc -n ${LICENSE_PLATE}-tools delete pod || true
+
+                      namespaces=("${LICENSE_PLATE}-dev" "${LICENSE_PLATE}-test")
+
+                      for namespace in "${namespaces[@]}"; do
+
+                      echo ""
+
+                      echo "====> Cleaning up $namespace"
+
+                      echo "==========> Removing expired pods"
+
+                      oc -n $namespace get pods | grep -E "Completed|Error|ContainerStatusUnknown" | grep -v crunchy | grep -v spilo | grep -v backup | awk '{print $1}' | xargs oc -n $namespace delete pod || true
+
+                      env=$(echo $namespace | awk -F '-' '{print $NF}')
+
+                      runningBackendImageTag=$(oc -n $namespace describe dc/cthub-$env-backend | grep Image | awk -F ':' '{print $4}')
+
+                      echo "==========> Removing expired backend image tags except cthub-backend:$runningBackendImageTag"
+
+                      oc -n $namespace get imagetags | grep cthub-backend | grep -v $runningBackendImageTag | awk '{print $1}' | xargs oc -n $namespace delete imagetag || true
+
+                      runningFrontendImageTag=$(oc -n $namespace describe deployment/cthub-$env-frontend| grep Image | awk -F ':' '{print $4}')
+
+                      echo "==========> Removing expired frontend image tags except cthub-frontend:$runningFrontendImageTag"
+
+                      oc -n $namespace get imagetags | grep cthub-frontend | grep -v $runningFrontendImageTag | awk '{print $1}' | xargs oc -n $namespace delete imagetag || true
+
+                      done
+
+              restartPolicy: OnFailure
+              terminationGracePeriodSeconds: 30
+              dnsPolicy: ClusterFirst
+              securityContext: {}
+              schedulerName: default-scheduler
+      successfulJobsHistoryLimit: 3
+      failedJobsHistoryLimit: 1

diff --git a/openshift/templates/cleanup/readme.md b/openshift/templates/cleanup/readme.md
new file mode 100644
index 00000000..52896d76
--- /dev/null
+++ b/openshift/templates/cleanup/readme.md
@@ -0,0 +1,13 @@
+# Cleanup Cron Job
+
+## cleanup-bc-docker.yaml
+
+The build config that builds the cleanup image, based on the OpenShift 4 oc client image
+
+## cleanup-cron.yaml
+
+The OpenShift cron job that runs periodically to clean up unused resources in the CTHUB spaces
+
+## Dockerfile
+
+The Dockerfile to build a new image on top of registry.redhat.io/openshift4/ose-cli

From 68c76c4f370340149ad57d4a9f912a878992b234 Mon Sep 17 00:00:00 2001
From: JulianForeman <71847719+JulianForeman@users.noreply.github.com>
Date: Wed, 8 May 2024 11:16:47 -0700
Subject: [PATCH 137/152] Adding test for changing a user's permissions (#290)

---
 django/api/tests/test_user.py | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/django/api/tests/test_user.py b/django/api/tests/test_user.py
index b4324f4a..59711280 100644
--- a/django/api/tests/test_user.py
+++ b/django/api/tests/test_user.py
@@ -12,7 +12,7 @@ from 
api.models.user_permission import UserPermission from api.viewsets.user import UserViewSet from api.viewsets.upload import UploadViewset -from api.decorators.permission import check_upload_permission +from api.decorators.permission import check_upload_permission, check_admin_permission class TestUsers(TestCase): def setUp(self): self.factory = APIRequestFactory() @@ -87,3 +87,17 @@ def mock_import_function(request): response = mock_import_function(request_admin) self.assertEqual(response.status_code, 403) # Forbidden! + def test_change_user_permissions(self): + @check_admin_permission() + def mock_change_permission_function(request): + return HttpResponse() + request = self.factory.post('/api/users/update_permissions') + request.user = 'test_admin_user' + response = mock_change_permission_function(request) + self.assertEqual(response.status_code, 200) + + request_uploader = self.factory.post('/api/users/update_permissions') + request_uploader.user = 'test_upload_user' + response = mock_change_permission_function(request_uploader) + self.assertEqual(response.status_code, 403) + From da1eb07e43fc5cef03ec0ca97b49203c4f72d9da Mon Sep 17 00:00:00 2001 From: JulianForeman <71847719+JulianForeman@users.noreply.github.com> Date: Wed, 8 May 2024 11:27:59 -0700 Subject: [PATCH 138/152] Task: Go Electric Rebates Uploader Update #283 (#287) * Updating SUVI model to new GER model. Updating logic for null values to avoid incorrect error messages * Adding in new model file and migration * Removing new dataset from incomplete dataset list --- django/api/constants.py | 32 ++++++++------- .../api/migrations/0022_auto_20240503_1823.py | 41 +++++++++++++++++++ django/api/models/__init__.py | 2 +- ...e_incentives.py => go_electric_rebates.py} | 6 ++- .../services/datasheet_template_generator.py | 4 +- django/api/services/spreadsheet_uploader.py | 3 +- .../api/services/spreadsheet_uploader_prep.py | 20 +-------- django/api/viewsets/upload.py | 1 - 8 files changed, 69 insertions(+), 40 deletions(-) create mode 100644 django/api/migrations/0022_auto_20240503_1823.py rename django/api/models/{speciality_use_vehicle_incentives.py => go_electric_rebates.py} (82%) diff --git a/django/api/constants.py b/django/api/constants.py index ce0ddef6..1e4de116 100644 --- a/django/api/constants.py +++ b/django/api/constants.py @@ -12,7 +12,7 @@ from api.models.ldv_rebates import LdvRebates from api.models.public_charging import PublicCharging from api.models.scrap_it import ScrapIt -from api.models.speciality_use_vehicle_incentives import SpecialityUseVehicleIncentives +from api.models.go_electric_rebates import GoElectricRebates from api.services.spreadsheet_uploader_prep import ( prepare_arc_project_tracking, prepare_hydrogen_fleets, @@ -20,7 +20,7 @@ prepare_ldv_rebates, prepare_public_charging, prepare_scrap_it, - prepare_speciality_use_vehicle_incentives, + prepare_go_electric_rebates, ) @@ -364,23 +364,24 @@ class ScrapItColumnMapping(Enum): scrap_date = "Scrap Date" -class SpecialityUseVehicleIncentiveProgramColumns(Enum): +class GoElectricRebatesColumns(Enum): APPROVALS = "Approvals" DATE = "Date" - FLEET = "Fleet" + FLEET = "Fleet/Individuals" APPLICANT_NAME = "Applicant Name" MAX_INCENTIVE_AMOUNT_REQUESTED = "Max Incentive Amount Requested" CATEGORY = "Category" - INDIVIDUAL = "Individual" INCENTIVE_PAID = "Incentive Paid" TOTAL_PURCHASE_PRICE_PRE_TAX = "Total Purchase Price (pre-tax)" MANUFACTURER = "Manufacturer" MODEL = "Model" + GER_CLASS = "Class" -class SpecialityUseVehicleIncentivesColumnMapping(Enum): +class 
GoElectricRebatesColumnMapping(Enum): approvals = "Approvals" date = "Date" + fleet = "Fleet/Individuals" applicant_name = "Applicant Name" max_incentive_amount_requested = "Max Incentive Amount Requested" category = "Category" @@ -389,6 +390,7 @@ class SpecialityUseVehicleIncentivesColumnMapping(Enum): total_purchase_price = "Total Purchase Price (pre-tax)" manufacturer = "Manufacturer" model = "Model" + ger_class = "Class" FIELD_TYPES = { @@ -547,9 +549,10 @@ class SpecialityUseVehicleIncentivesColumnMapping(Enum): "budget_code": str, "scrap_date": str, }, - "Specialty Use Vehicle Incentive Program": { + "Go Electric Rebates Program": { "approvals": str, - "date": str, + "date": datetime.date, + "fleet": str, "applicant_name": str, "max_incentive_amount_requested": int, "category": str, @@ -558,6 +561,7 @@ class SpecialityUseVehicleIncentivesColumnMapping(Enum): "total_purchase_price": int, "manufacturer": str, "model": str, + "ger_class": str }, } @@ -618,11 +622,11 @@ class SpecialityUseVehicleIncentivesColumnMapping(Enum): "header_row": 5, "preparation_functions": [prepare_scrap_it], }, - "Specialty Use Vehicle Incentive Program": { - "model": SpecialityUseVehicleIncentives, - "columns": SpecialityUseVehicleIncentiveProgramColumns, - "column_mapping": SpecialityUseVehicleIncentivesColumnMapping, - "sheet_name": "Sheet1", - "preparation_functions": [prepare_speciality_use_vehicle_incentives], + "Go Electric Rebates Program": { + "model": GoElectricRebates, + "columns": GoElectricRebatesColumns, + "column_mapping": GoElectricRebatesColumnMapping, + "sheet_name": "Main list", + "preparation_functions": [prepare_go_electric_rebates], }, } diff --git a/django/api/migrations/0022_auto_20240503_1823.py b/django/api/migrations/0022_auto_20240503_1823.py new file mode 100644 index 00000000..e5d284cd --- /dev/null +++ b/django/api/migrations/0022_auto_20240503_1823.py @@ -0,0 +1,41 @@ +# Generated by Django 3.1.6 on 2024-05-03 18:23 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0021_auto_20240326_2152'), + ] + + operations = [ + migrations.CreateModel( + name='GoElectricRebates', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), + ('create_user', models.CharField(default='SYSTEM', max_length=130)), + ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), + ('update_user', models.CharField(max_length=130, null=True)), + ('approvals', models.CharField(blank=True, max_length=20, null=True)), + ('date', models.DateField(blank=True, max_length=20, null=True)), + ('fleet', models.CharField(max_length=20)), + ('applicant_name', models.CharField(blank=True, max_length=250, null=True)), + ('max_incentive_amount_requested', models.IntegerField(blank=True, null=True)), + ('category', models.CharField(blank=True, max_length=250, null=True)), + ('applicant_type', models.CharField(blank=True, max_length=50, null=True)), + ('incentive_paid', models.IntegerField(blank=True, null=True)), + ('total_purchase_price', models.IntegerField(blank=True, null=True)), + ('manufacturer', models.CharField(blank=True, max_length=250, null=True)), + ('model', models.CharField(blank=True, max_length=250, null=True)), + ('ger_class', models.CharField(blank=True, max_length=50, null=True)), + ], + options={ + 'db_table': 'go_electric_rebates', + }, + ), + migrations.DeleteModel( + 
name='SpecialityUseVehicleIncentives', + ), + ] diff --git a/django/api/models/__init__.py b/django/api/models/__init__.py index 2aebd7a9..518b5b7f 100644 --- a/django/api/models/__init__.py +++ b/django/api/models/__init__.py @@ -9,7 +9,7 @@ from . import vehicle_zev_type from . import vin_decoded_information from . import ldv_rebates -from . import speciality_use_vehicle_incentives +from . import go_electric_rebates from . import datasets from . import charger_rebates from . import public_charging diff --git a/django/api/models/speciality_use_vehicle_incentives.py b/django/api/models/go_electric_rebates.py similarity index 82% rename from django/api/models/speciality_use_vehicle_incentives.py rename to django/api/models/go_electric_rebates.py index 13b74877..af672482 100644 --- a/django/api/models/speciality_use_vehicle_incentives.py +++ b/django/api/models/go_electric_rebates.py @@ -3,9 +3,10 @@ from auditable.models import Auditable -class SpecialityUseVehicleIncentives(Auditable): +class GoElectricRebates(Auditable): approvals = models.CharField(blank=True, null=True, max_length=20) date = models.DateField(max_length=20, null=True, blank=True) + fleet = models.CharField(max_length=20) applicant_name = models.CharField(blank=True, null=True, max_length=250) max_incentive_amount_requested = models.IntegerField( null=True, @@ -23,6 +24,7 @@ class SpecialityUseVehicleIncentives(Auditable): ) manufacturer = models.CharField(blank=True, max_length=250, null=True) model = models.CharField(blank=True, max_length=250, null=True) + ger_class = models.CharField(blank=True, null=True, max_length=50) class Meta: - db_table = "speciality_use_vehicle_incentives" + db_table = "go_electric_rebates" diff --git a/django/api/services/datasheet_template_generator.py b/django/api/services/datasheet_template_generator.py index 5a5bdc3a..5ea132f8 100644 --- a/django/api/services/datasheet_template_generator.py +++ b/django/api/services/datasheet_template_generator.py @@ -16,7 +16,7 @@ def generate_template(dataset_name): "LDV Rebates": LDVRebatesColumns, "Public Charging": PublicChargingColumns, "Scrap It": ScrapItColumns, - "Specialty Use Vehicle Incentive Program": SpecialityUseVehicleIncentiveProgramColumns, + "Go Electric Rebates Program": GoElectricRebatesColumns, } if dataset_name not in dataset_column_enum_map: @@ -33,7 +33,7 @@ def generate_template(dataset_name): custom_sheet_names = { "ARC Project Tracking": "Project_Tracking", - "Specialty Use Vehicle Incentive Program": "Sheet1", + "Go Electric Rebates": "Main list", "Public Charging": "Project_applications", "LDV Rebates": "Raw Data", "EV Charging Rebates": "Updated", diff --git a/django/api/services/spreadsheet_uploader.py b/django/api/services/spreadsheet_uploader.py index 6caa6452..30dbaedb 100644 --- a/django/api/services/spreadsheet_uploader.py +++ b/django/api/services/spreadsheet_uploader.py @@ -29,6 +29,7 @@ def trim_all_columns(df): def extract_data(excel_file, sheet_name, header_row): try: df = pd.read_excel(excel_file, sheet_name, header=header_row) + df = df.fillna('TEMP_NULL') df = trim_all_columns(df) return df except Exception as e: @@ -84,7 +85,7 @@ def load_data(df, model, field_types, replace_data, user): expected_type = field_types.get(column) is_nullable = column in nullable_fields - if pd.isna(value) or value == "": + if pd.isna(value) or value == "" or value == 'TEMP_NULL': if is_nullable: row_dict[column] = None else: diff --git a/django/api/services/spreadsheet_uploader_prep.py 
b/django/api/services/spreadsheet_uploader_prep.py index dfda4ed1..9accc46d 100644 --- a/django/api/services/spreadsheet_uploader_prep.py +++ b/django/api/services/spreadsheet_uploader_prep.py @@ -75,17 +75,7 @@ def prepare_scrap_it(df): return df - -def applicant_type(row): - if isinstance((row["Fleet"]), str): - return "Fleet" - elif isinstance((row["Individual"]), str): - return "Individual" - else: - return "" - - -def prepare_speciality_use_vehicle_incentives(df): +def prepare_go_electric_rebates(df): df = df.applymap(lambda s: s.upper() if type(s) == str else s) @@ -95,12 +85,4 @@ def prepare_speciality_use_vehicle_incentives(df): non_num_columns = df.columns.difference(num_columns) df[non_num_columns] = df[non_num_columns].fillna("") - df["Applicant Type"] = df.apply(lambda row: applicant_type(row), axis=1) - - if "Fleet" in df.columns: - df = df.drop(columns=["Fleet"]) - - if "Individual" in df.columns: - df = df.drop(columns=["Individual"]) - return df diff --git a/django/api/viewsets/upload.py b/django/api/viewsets/upload.py index 03c29e20..d81757cd 100644 --- a/django/api/viewsets/upload.py +++ b/django/api/viewsets/upload.py @@ -27,7 +27,6 @@ def datasets_list(self, request): incomplete_datasets = [ "LDV Rebates", - "Specialty Use Vehicle Incentive Program", "Public Charging", "EV Charging Rebates", "Hydrogen Fueling", From e577b06fb8833202cb5a498379adfa1086a741ad Mon Sep 17 00:00:00 2001 From: tim738745 <98717409+tim738745@users.noreply.github.com> Date: Tue, 14 May 2024 10:39:40 -0700 Subject: [PATCH 139/152] feat: 284 and 285 - vin decoding (#289) * feat: 284 and 285 - vin decoding * update migration * add db table comments --- django/api/decoder_constants.py | 32 +++++ django/api/logging_filters.py | 8 ++ .../commands/create_app_user_and_token.py | 21 ++++ .../management/commands/reset_app_token.py | 24 ++++ .../api/migrations/0023_auto_20240514_1721.py | 116 ++++++++++++++++++ django/api/models/__init__.py | 4 + django/api/models/app_user.py | 33 +++++ django/api/models/decoded_vin_record.py | 25 ++++ django/api/models/uploaded_vin_record.py | 28 +++++ django/api/models/uploaded_vins_file.py | 20 +++ django/api/services/decoded_vin_record.py | 73 +++++++++++ django/api/services/minio.py | 28 +++-- django/api/services/uploaded_vin_record.py | 72 +++++++++++ django/api/services/uploaded_vins_file.py | 5 + django/api/settings.py | 47 +++++++ django/api/token_authentication.py | 6 + django/api/urls.py | 5 + django/api/utilities/generic.py | 6 + django/api/viewsets/decoded_vin_record.py | 19 +++ django/api/viewsets/healthcheck.py | 11 ++ django/api/viewsets/upload.py | 9 +- django/requirements.txt | 8 +- django/workers/apps.py | 18 +++ django/workers/external_apis/vinpower.py | 2 + django/workers/external_apis/vpic.py | 38 ++++++ django/workers/scheduled_jobs.py | 42 +++++++ django/workers/tasks.py | 92 ++++++++++++++ docker-compose.yml | 33 +++++ 28 files changed, 814 insertions(+), 11 deletions(-) create mode 100644 django/api/decoder_constants.py create mode 100644 django/api/logging_filters.py create mode 100644 django/api/management/commands/create_app_user_and_token.py create mode 100644 django/api/management/commands/reset_app_token.py create mode 100644 django/api/migrations/0023_auto_20240514_1721.py create mode 100644 django/api/models/app_user.py create mode 100644 django/api/models/decoded_vin_record.py create mode 100644 django/api/models/uploaded_vin_record.py create mode 100644 django/api/models/uploaded_vins_file.py create mode 100644 
django/api/services/decoded_vin_record.py create mode 100644 django/api/services/uploaded_vin_record.py create mode 100644 django/api/services/uploaded_vins_file.py create mode 100644 django/api/token_authentication.py create mode 100644 django/api/utilities/generic.py create mode 100644 django/api/viewsets/decoded_vin_record.py create mode 100644 django/api/viewsets/healthcheck.py create mode 100644 django/workers/apps.py create mode 100644 django/workers/external_apis/vinpower.py create mode 100644 django/workers/external_apis/vpic.py create mode 100644 django/workers/scheduled_jobs.py create mode 100644 django/workers/tasks.py diff --git a/django/api/decoder_constants.py b/django/api/decoder_constants.py new file mode 100644 index 00000000..dc6eca6f --- /dev/null +++ b/django/api/decoder_constants.py @@ -0,0 +1,32 @@ +import os +from enum import Enum +from functools import partial +from api.models.decoded_vin_record import VpicDecodedVinRecord, VinpowerDecodedVinRecord +from workers.external_apis.vpic import batch_decode as vpic_batch_decode +from workers.external_apis.vinpower import batch_decode as vinpower_batch_decode + + +class VPIC(Enum): + NAME = "vpic" + CURRENT_DECODE_SUCCESSFUL = "vpic_current_decode_successful" + NUMBER_OF_CURRENT_DECODE_ATTEMPTS = "vpic_number_of_current_decode_attempts" + MODEL = VpicDecodedVinRecord + BATCH_DECODER = partial(vpic_batch_decode) + + +class VINPOWER(Enum): + NAME = "vinpower" + CURRENT_DECODE_SUCCESSFUL = "vinpower_current_decode_successful" + NUMBER_OF_CURRENT_DECODE_ATTEMPTS = "vinpower_number_of_current_decode_attempts" + MODEL = VinpowerDecodedVinRecord + BATCH_DECODER = partial(vinpower_batch_decode) + + +SERVICES = [VPIC, VINPOWER] + + +def get_service(service_name): + for service in SERVICES: + if service.NAME.value == service_name: + return service + return None diff --git a/django/api/logging_filters.py b/django/api/logging_filters.py new file mode 100644 index 00000000..f04b64e2 --- /dev/null +++ b/django/api/logging_filters.py @@ -0,0 +1,8 @@ +import logging + +class HealthcheckFilter(logging.Filter): + def filter(self, record): + msg = record.getMessage() + if "GET /api/healthcheck HTTP/1.1" in msg: + return False + return True \ No newline at end of file diff --git a/django/api/management/commands/create_app_user_and_token.py b/django/api/management/commands/create_app_user_and_token.py new file mode 100644 index 00000000..37fc5538 --- /dev/null +++ b/django/api/management/commands/create_app_user_and_token.py @@ -0,0 +1,21 @@ +from django.core.management.base import BaseCommand, CommandError +from api.models.app_user import AppUser, AppToken +from django.conf import settings +from django.db import transaction + + +class Command(BaseCommand): + def add_arguments(self, parser): + parser.add_argument("app_name", type=str) + + @transaction.atomic + def handle(self, *args, **options): + app_name = options["app_name"] + + try: + app_user = AppUser.objects.create(app_name=app_name) + token = AppToken.objects.create(user=app_user) + except Exception: + raise CommandError("Error generating user and token") + + self.stdout.write("Generated token {} for app {}".format(token.key, app_name)) \ No newline at end of file diff --git a/django/api/management/commands/reset_app_token.py b/django/api/management/commands/reset_app_token.py new file mode 100644 index 00000000..644d6336 --- /dev/null +++ b/django/api/management/commands/reset_app_token.py @@ -0,0 +1,24 @@ +from django.core.management.base import BaseCommand, CommandError +from 
api.models.app_user import AppUser, AppToken +from django.conf import settings +from django.db import transaction + + +class Command(BaseCommand): + def add_arguments(self, parser): + parser.add_argument("app_name", type=str) + + @transaction.atomic + def handle(self, *args, **options): + app_name = options["app_name"] + + try: + app_user = AppUser.objects.get(app_name=app_name) + AppToken.objects.get(user=app_user).delete() + new_token = AppToken.objects.create(user=app_user) + except Exception: + raise CommandError("Error resetting token") + + self.stdout.write( + "Generated new token {} for app {}".format(new_token.key, app_name) + ) diff --git a/django/api/migrations/0023_auto_20240514_1721.py b/django/api/migrations/0023_auto_20240514_1721.py new file mode 100644 index 00000000..dd3f0be7 --- /dev/null +++ b/django/api/migrations/0023_auto_20240514_1721.py @@ -0,0 +1,116 @@ +# Generated by Django 3.2.25 on 2024-05-14 17:21 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0022_auto_20240503_1823'), + ] + + operations = [ + migrations.CreateModel( + name='AppToken', + fields=[ + ('key', models.CharField(max_length=40, primary_key=True, serialize=False, verbose_name='Key')), + ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')), + ], + options={ + 'db_table': 'app_token', + }, + ), + migrations.CreateModel( + name='AppUser', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), + ('create_user', models.CharField(default='SYSTEM', max_length=130)), + ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), + ('update_user', models.CharField(max_length=130, null=True)), + ('is_active', models.BooleanField(default=True)), + ('app_name', models.CharField(max_length=100, unique=True)), + ], + options={ + 'db_table': 'app_user', + }, + ), + migrations.CreateModel( + name='UploadedVinRecord', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), + ('create_user', models.CharField(default='SYSTEM', max_length=130)), + ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), + ('update_user', models.CharField(max_length=130, null=True)), + ('vin', models.CharField(max_length=17)), + ('postal_code', models.CharField(blank=True, max_length=7, null=True)), + ('data', models.JSONField()), + ('vpic_current_decode_successful', models.BooleanField(default=False)), + ('vpic_number_of_current_decode_attempts', models.IntegerField(default=0)), + ('vinpower_current_decode_successful', models.BooleanField(default=False)), + ('vinpower_number_of_current_decode_attempts', models.IntegerField(default=0)), + ], + options={ + 'db_table': 'uploaded_vin_record', + }, + ), + migrations.CreateModel( + name='UploadedVinsFile', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), + ('create_user', models.CharField(default='SYSTEM', max_length=130)), + ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), + ('update_user', models.CharField(max_length=130, null=True)), + ('filename', models.CharField(max_length=32, unique=True)), + ('chunk_size', 
models.IntegerField(default=25000)), + ('chunks_per_run', models.IntegerField(default=4)), + ('start_index', models.IntegerField(default=0)), + ('processed', models.BooleanField(default=False)), + ], + options={ + 'db_table': 'uploaded_vins_file', + }, + ), + migrations.CreateModel( + name='VinpowerDecodedVinRecord', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), + ('create_user', models.CharField(default='SYSTEM', max_length=130)), + ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), + ('update_user', models.CharField(max_length=130, null=True)), + ('vin', models.CharField(max_length=17, unique=True)), + ('data', models.JSONField()), + ], + options={ + 'db_table': 'vinpower_decoded_vin_record', + }, + ), + migrations.CreateModel( + name='VpicDecodedVinRecord', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)), + ('create_user', models.CharField(default='SYSTEM', max_length=130)), + ('update_timestamp', models.DateTimeField(auto_now=True, null=True)), + ('update_user', models.CharField(max_length=130, null=True)), + ('vin', models.CharField(max_length=17, unique=True)), + ('data', models.JSONField()), + ], + options={ + 'db_table': 'vpic_decoded_vin_record', + }, + ), + migrations.AddConstraint( + model_name='uploadedvinrecord', + constraint=models.UniqueConstraint(fields=('vin', 'postal_code'), name='unique_vin_postal_code'), + ), + migrations.AddField( + model_name='apptoken', + name='user', + field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='auth_token', to='api.appuser', verbose_name='User'), + ), + ] diff --git a/django/api/models/__init__.py b/django/api/models/__init__.py index 518b5b7f..79c55108 100644 --- a/django/api/models/__init__.py +++ b/django/api/models/__init__.py @@ -21,3 +21,7 @@ from . import user from . import permission from . import user_permission +from . import app_user +from . import uploaded_vins_file +from . import uploaded_vin_record +from . 
import decoded_vin_record diff --git a/django/api/models/app_user.py b/django/api/models/app_user.py new file mode 100644 index 00000000..10a75f36 --- /dev/null +++ b/django/api/models/app_user.py @@ -0,0 +1,33 @@ +from django.db import models +from auditable.models import Auditable +from rest_framework.authtoken.models import Token +from django.utils.translation import gettext_lazy as _ + + +class AppUser(Auditable): + is_active = models.BooleanField(default=True) + + app_name = models.CharField(max_length=100, unique=True) + + @property + def is_authenticated(self): + return True + + class Meta: + db_table = "app_user" + + db_table_comment = "represents an external application that integrates this app via API" + + +class AppToken(Token): + user = models.OneToOneField( + AppUser, + related_name="auth_token", + on_delete=models.CASCADE, + verbose_name=_("User"), + ) + + class Meta: + db_table = "app_token" + + db_table_comment = "the token of an external application that integrates this app via API" diff --git a/django/api/models/decoded_vin_record.py b/django/api/models/decoded_vin_record.py new file mode 100644 index 00000000..ab4eb6e6 --- /dev/null +++ b/django/api/models/decoded_vin_record.py @@ -0,0 +1,25 @@ +from django.db import models +from auditable.models import Auditable + + +class DecodedVinRecord(Auditable): + vin = models.CharField(max_length=17, unique=True) + + data = models.JSONField() + + class Meta: + abstract = True + + +class VpicDecodedVinRecord(DecodedVinRecord): + class Meta: + db_table = "vpic_decoded_vin_record" + + db_table_comment = "contains vpic-decoded VIN information" + + +class VinpowerDecodedVinRecord(DecodedVinRecord): + class Meta: + db_table = "vinpower_decoded_vin_record" + + db_table_comment = "contains vinpower-decoded VIN information" diff --git a/django/api/models/uploaded_vin_record.py b/django/api/models/uploaded_vin_record.py new file mode 100644 index 00000000..371b6b38 --- /dev/null +++ b/django/api/models/uploaded_vin_record.py @@ -0,0 +1,28 @@ +from django.db import models +from auditable.models import Auditable + + +class UploadedVinRecord(Auditable): + vin = models.CharField(max_length=17) + + postal_code = models.CharField(max_length=7, null=True, blank=True) + + data = models.JSONField() + + vpic_current_decode_successful = models.BooleanField(default=False) + + vpic_number_of_current_decode_attempts = models.IntegerField(default=0) + + vinpower_current_decode_successful = models.BooleanField(default=False) + + vinpower_number_of_current_decode_attempts = models.IntegerField(default=0) + + class Meta: + db_table = "uploaded_vin_record" + constraints = [ + models.UniqueConstraint( + fields=["vin", "postal_code"], name="unique_vin_postal_code" + ) + ] + + db_table_comment = "represents an uploaded VIN, and associated information" diff --git a/django/api/models/uploaded_vins_file.py b/django/api/models/uploaded_vins_file.py new file mode 100644 index 00000000..5f3345e1 --- /dev/null +++ b/django/api/models/uploaded_vins_file.py @@ -0,0 +1,20 @@ +from django.db import models +from auditable.models import Auditable + + +class UploadedVinsFile(Auditable): + filename = models.CharField(max_length=32, unique=True) + + chunk_size = models.IntegerField(default=25000) + + chunks_per_run = models.IntegerField(default=4) + + start_index = models.IntegerField(default=0) + + processed = models.BooleanField(default=False) + + class Meta: + db_table = "uploaded_vins_file" + + db_table_comment = "represents a file containing VINs, and parsing information" 
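
Taken together, the models above give an external app a full integration path: the management commands added earlier in this patch mint an AppUser/AppToken pair, CustomTokenAuthentication validates the token, and the decoded-vin-records route registered later in this patch serves the decoded data. A hypothetical exchange, assuming the API is served at localhost:8000 as in docker-compose.yml (the app name, token, and VINs are placeholders):

# Mint an app user and its token (the command prints the generated key).
python manage.py create_app_user_and_token example-app

# Rotate the key later if it is ever compromised.
python manage.py reset_app_token example-app

# Fetch decoded data for a set of VINs; "vpic" selects the vPIC decoder.
curl -s -X POST http://localhost:8000/api/decoded-vin-records/get_decoded_vins \
  -H "Authorization: Token <app-token>" \
  -H "Content-Type: application/json" \
  -d '{"service_name": "vpic", "vins": ["<vin-1>", "<vin-2>"]}'
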
+ diff --git a/django/api/services/decoded_vin_record.py b/django/api/services/decoded_vin_record.py new file mode 100644 index 00000000..f8846d3c --- /dev/null +++ b/django/api/services/decoded_vin_record.py @@ -0,0 +1,73 @@ +from api.models.uploaded_vin_record import UploadedVinRecord +from api.decoder_constants import get_service +from api.services.uploaded_vin_record import ( + set_decode_successful, + get_number_of_decode_attempts, + set_number_of_decode_attempts, +) +from django.db import transaction +from django.utils import timezone + + +@transaction.atomic +def save_decoded_data( + uploaded_vin_records, + vins_to_insert, + decoded_records_to_update_map, + service_name, + decoded_data, +): + decoded_records_to_insert = [] + decoded_records_to_update = [] + successful_records = decoded_data["successful_records"] + failed_vins = decoded_data["failed_vins"] + + service = get_service(service_name) + if service: + decoded_vin_model = service.MODEL.value + for uploaded_record in uploaded_vin_records: + vin = uploaded_record.vin + if vin in successful_records: + decoded_datum = successful_records.get(vin) + set_decode_successful(service_name, uploaded_record, True) + if vin in vins_to_insert: + decoded_records_to_insert.append( + decoded_vin_model(vin=vin, data=decoded_datum) + ) + elif vin in decoded_records_to_update_map: + decoded_record_to_update = decoded_records_to_update_map.get(vin) + decoded_record_to_update.update_timestamp = timezone.now() + decoded_record_to_update.data = decoded_datum + decoded_records_to_update.append(decoded_record_to_update) + elif vin in failed_vins: + set_decode_successful(service_name, uploaded_record, False) + + set_number_of_decode_attempts( + service_name, + uploaded_record, + get_number_of_decode_attempts(service_name, uploaded_record) + 1, + ) + + decoded_vin_model.objects.bulk_update( + decoded_records_to_update, ["update_timestamp", "data"] + ) + decoded_vin_model.objects.bulk_create(decoded_records_to_insert) + UploadedVinRecord.objects.bulk_update( + uploaded_vin_records, + [ + "update_timestamp", + service.CURRENT_DECODE_SUCCESSFUL.value, + service.NUMBER_OF_CURRENT_DECODE_ATTEMPTS.value, + ], + ) + + +def get_decoded_vins(service_name, vins): + result = {} + service = get_service(service_name) + if service: + decoded_records_model = service.MODEL.value + records = decoded_records_model.objects.filter(vin__in=vins) + for record in records: + result[record.vin] = record.data + return result diff --git a/django/api/services/minio.py b/django/api/services/minio.py index de1b0fd5..692225f1 100644 --- a/django/api/services/minio.py +++ b/django/api/services/minio.py @@ -3,12 +3,13 @@ from django.conf import settings -MINIO = Minio( - settings.MINIO_ENDPOINT, - access_key=settings.MINIO_ACCESS_KEY, - secret_key=settings.MINIO_SECRET_KEY, - secure=settings.MINIO_USE_SSL, -) +def get_minio_client(): + return Minio( + settings.MINIO_ENDPOINT, + access_key=settings.MINIO_ACCESS_KEY, + secret_key=settings.MINIO_SECRET_KEY, + secure=settings.MINIO_USE_SSL, + ) def get_refined_object_name(object_name): @@ -19,15 +20,24 @@ def get_refined_object_name(object_name): def minio_get_object(object_name): - return MINIO.presigned_get_object( + return get_minio_client().presigned_get_object( bucket_name=settings.MINIO_BUCKET_NAME, object_name=get_refined_object_name(object_name), expires=timedelta(seconds=3600), ) +def get_minio_object(object_name): + try: + client = get_minio_client() + refined_object_name = get_refined_object_name(object_name) + return 
client.get_object(settings.MINIO_BUCKET_NAME, refined_object_name) + except: + raise + + def minio_put_object(object_name): - return MINIO.presigned_put_object( + return get_minio_client().presigned_put_object( bucket_name=settings.MINIO_BUCKET_NAME, object_name=get_refined_object_name(object_name), expires=timedelta(seconds=7200), @@ -35,7 +45,7 @@ def minio_put_object(object_name): def minio_remove_object(object_name): - return MINIO.remove_object( + return get_minio_client().remove_object( bucket_name=settings.MINIO_BUCKET_NAME, object_name=get_refined_object_name(object_name), ) diff --git a/django/api/services/uploaded_vin_record.py b/django/api/services/uploaded_vin_record.py new file mode 100644 index 00000000..ae1bea36 --- /dev/null +++ b/django/api/services/uploaded_vin_record.py @@ -0,0 +1,72 @@ +import pandas as pd +from api.models.uploaded_vin_record import UploadedVinRecord +from api.decoder_constants import get_service + + +def parse_and_save(uploaded_vins_file, file_response): + processed = True + number_of_chunks_processed = 0 + number_of_chunks_to_process = uploaded_vins_file.chunks_per_run + chunksize = uploaded_vins_file.chunk_size + start_index = uploaded_vins_file.start_index + chunks = pd.read_csv(file_response, sep="|", chunksize=chunksize) + + for idx, chunk in enumerate(chunks): + if ( + idx >= start_index + and number_of_chunks_processed < number_of_chunks_to_process + ): + vin_records_to_insert = get_vin_records_to_insert(chunk) + UploadedVinRecord.objects.bulk_create( + vin_records_to_insert, + ignore_conflicts=True, + ) + number_of_chunks_processed = number_of_chunks_processed + 1 + elif idx >= start_index + number_of_chunks_processed: + processed = False + break + + new_start_index = start_index + number_of_chunks_processed + uploaded_vins_file.processed = processed + uploaded_vins_file.start_index = new_start_index + uploaded_vins_file.save() + + +def get_vin_records_to_insert(df): + result = [] + df.fillna("", inplace=True) + for _, row in df.iterrows(): + if row["vin"] != "": + vin = row["vin"] + postal_code = row["postal_code"] + data = row.to_dict() + del data["vin"] + del data["postal_code"] + result.append( + UploadedVinRecord(vin=vin, postal_code=postal_code, data=data) + ) + return result + + +def get_decode_successful(service_name, uploaded_record): + service = get_service(service_name) + if service: + return getattr(uploaded_record, service.CURRENT_DECODE_SUCCESSFUL.value) + + +def set_decode_successful(service_name, uploaded_record, value): + service = get_service(service_name) + if service: + setattr(uploaded_record, service.CURRENT_DECODE_SUCCESSFUL.value, value) + + +def get_number_of_decode_attempts(service_name, uploaded_record): + service = get_service(service_name) + if service: + return getattr(uploaded_record, service.NUMBER_OF_CURRENT_DECODE_ATTEMPTS.value) + + +def set_number_of_decode_attempts(service_name, uploaded_record, value): + service = get_service(service_name) + if service: + setattr(uploaded_record, service.NUMBER_OF_CURRENT_DECODE_ATTEMPTS.value, value) diff --git a/django/api/services/uploaded_vins_file.py b/django/api/services/uploaded_vins_file.py new file mode 100644 index 00000000..ed169f10 --- /dev/null +++ b/django/api/services/uploaded_vins_file.py @@ -0,0 +1,5 @@ +from api.models.uploaded_vins_file import UploadedVinsFile + + +def create_vins_file(filename, **kwargs): + UploadedVinsFile.objects.create(filename=filename, **kwargs) diff --git a/django/api/settings.py b/django/api/settings.py index 73856b9d..dbf9125a 
100644 --- a/django/api/settings.py +++ b/django/api/settings.py @@ -40,6 +40,7 @@ INSTALLED_APPS = [ "api.apps.ApiConfig", + "workers.apps.Config", "tfrs.apps.ApiConfig", "metabase.apps.MetabaseConfig", "corsheaders", @@ -51,8 +52,11 @@ "django.contrib.sessions", "django.contrib.staticfiles", "rest_framework", + "django_q", ] +DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' + MIDDLEWARE = [ "corsheaders.middleware.CorsMiddleware", "django.middleware.security.SecurityMiddleware", @@ -179,3 +183,46 @@ DECODER_ACCESS_KEY = os.getenv("DECODER_ACCESS_KEY") DECODER_SECRET_KEY = os.getenv("DECODER_SECRET_KEY") + +Q_CLUSTER = { + "name": "CTHUB", + "workers": 4, + "timeout": 90, + "retry": 120, + "queue_limit": 50, + "bulk": 10, + "orm": "default", + "save_limit": -1, + "max_attempts": 100, +} + +MAX_DECODE_ATTEMPTS = os.getenv("MAX_DECODE_ATTEMPTS", 5) + +VPIC_ENDPOINT = os.getenv( + "VPIC_ENDPOINT", + "https://vpic.nhtsa.dot.gov/api/vehicles", +) +VPIC_VIN_KEY = os.getenv("VPIC_VIN_KEY", "VIN") +VPIC_ERROR_CODE_NAME = os.getenv("VPIC_ERROR_CODE_NAME", "ErrorCode") +VPIC_SUCCESS_ERROR_CODE = os.getenv("VPIC_SUCCESS_ERROR_CODE", "0") + +LOGGING = { + "version": 1, + "disable_existing_loggers": False, + "filters": { + "healthcheck": { + "()": "api.logging_filters.HealthcheckFilter", + }, + }, + "handlers": { + "console": { + "class": "logging.StreamHandler", + "filters": ["healthcheck"], + } + }, + "loggers": { + "django": { + "handlers": ["console"], + }, + }, +} diff --git a/django/api/token_authentication.py b/django/api/token_authentication.py new file mode 100644 index 00000000..754e1280 --- /dev/null +++ b/django/api/token_authentication.py @@ -0,0 +1,6 @@ +from rest_framework.authentication import TokenAuthentication +from api.models.app_user import AppToken + + +class CustomTokenAuthentication(TokenAuthentication): + model = AppToken diff --git a/django/api/urls.py b/django/api/urls.py index 89abbbf9..fb628427 100644 --- a/django/api/urls.py +++ b/django/api/urls.py @@ -22,6 +22,9 @@ from api.viewsets.minio import MinioViewSet from api.viewsets.upload import UploadViewset from api.viewsets.user import UserViewSet +from api.viewsets.healthcheck import HealthCheckViewset +from api.viewsets.decoded_vin_record import DecodedVinRecordViewset + ROUTER = routers.SimpleRouter(trailing_slash=False) @@ -31,6 +34,8 @@ ROUTER.register(r"minio", MinioViewSet, basename="minio") ROUTER.register(r"users", UserViewSet) +ROUTER.register(r"healthcheck", HealthCheckViewset, basename="healthcheck") +ROUTER.register(r"decoded-vin-records", DecodedVinRecordViewset) urlpatterns = [ path("admin/", admin.site.urls), diff --git a/django/api/utilities/generic.py b/django/api/utilities/generic.py new file mode 100644 index 00000000..20d5a8d7 --- /dev/null +++ b/django/api/utilities/generic.py @@ -0,0 +1,6 @@ +def get_map(key_name, objects): + result = {} + for object in objects: + key = getattr(object, key_name) + result[key] = object + return result diff --git a/django/api/viewsets/decoded_vin_record.py b/django/api/viewsets/decoded_vin_record.py new file mode 100644 index 00000000..96f8dd0a --- /dev/null +++ b/django/api/viewsets/decoded_vin_record.py @@ -0,0 +1,19 @@ +from rest_framework.viewsets import GenericViewSet +from api.token_authentication import CustomTokenAuthentication +from api.models.decoded_vin_record import VpicDecodedVinRecord +from api.services.decoded_vin_record import get_decoded_vins +from rest_framework.decorators import action +from rest_framework.response import Response + + +class 
DecodedVinRecordViewset(GenericViewSet): + authentication_classes = [CustomTokenAuthentication] + + queryset = VpicDecodedVinRecord.objects.all() + + @action(detail=False, methods=["post"]) + def get_decoded_vins(self, request): + service_name = request.data.get("service_name") + vins = request.data.get("vins") + decoded_vins = get_decoded_vins(service_name, vins) + return Response(decoded_vins) diff --git a/django/api/viewsets/healthcheck.py b/django/api/viewsets/healthcheck.py new file mode 100644 index 00000000..1972bbb8 --- /dev/null +++ b/django/api/viewsets/healthcheck.py @@ -0,0 +1,11 @@ +from rest_framework.viewsets import GenericViewSet +from rest_framework.response import Response +from rest_framework import status + + +class HealthCheckViewset(GenericViewSet): + authentication_classes = [] + permission_classes=[] + + def list(self, request): + return Response(status=status.HTTP_200_OK) \ No newline at end of file diff --git a/django/api/viewsets/upload.py b/django/api/viewsets/upload.py index d81757cd..67971719 100644 --- a/django/api/viewsets/upload.py +++ b/django/api/viewsets/upload.py @@ -16,6 +16,7 @@ from api.services.spreadsheet_uploader import import_from_xls import api.constants as constants from api.services.spreadsheet_uploader_prep import * +from api.services.uploaded_vins_file import create_vins_file class UploadViewset(GenericViewSet): @@ -38,7 +39,9 @@ def datasets_list(self, request): datasets = Datasets.objects.all().exclude(name__in=incomplete_datasets) serializer = DatasetsSerializer(datasets, many=True, read_only=True) - return Response(serializer.data) + serializer_data = serializer.data + serializer_data.append({"id": -1, "name": "ICBC Vins"}) + return Response(serializer_data) @action(detail=False, methods=["post"]) @method_decorator(check_upload_permission()) @@ -48,6 +51,10 @@ def import_data(self, request): dataset_selected = request.data.get("datasetSelected") replace_data = request.data.get("replace", False) + if dataset_selected == "ICBC Vins": + create_vins_file(filename) + return Response({"success": True}, status=status.HTTP_200_OK) + try: url = minio_get_object(filename) urllib.request.urlretrieve(url, filename) diff --git a/django/requirements.txt b/django/requirements.txt index f17c6e41..b8a19ac5 100644 --- a/django/requirements.txt +++ b/django/requirements.txt @@ -1,9 +1,15 @@ +arrow==1.3.0 black==24.3.0 -Django==3.1.6 +blessed==1.20.0 +croniter==2.0.1 +Django==3.2.25 psycopg2-binary==2.8.6 djangorestframework==3.12.2 django-filter==2.4.0 django-cors-headers==3.7.0 +django-picklefield==3.1 +django-q==1.3.9 +func-timeout==4.3.5 coverage==5.4 pycodestyle==2.6.0 whitenoise==5.2.0 diff --git a/django/workers/apps.py b/django/workers/apps.py new file mode 100644 index 00000000..c05d62f6 --- /dev/null +++ b/django/workers/apps.py @@ -0,0 +1,18 @@ +from django.apps import AppConfig +import sys + + +class Config(AppConfig): + name = "workers" + + def ready(self): + from workers.scheduled_jobs import ( + schedule_create_minio_bucket, + schedule_read_uploaded_vins_file, + schedule_batch_decode_vins, + ) + + if "qcluster" in sys.argv: + schedule_create_minio_bucket() + schedule_read_uploaded_vins_file() + schedule_batch_decode_vins() diff --git a/django/workers/external_apis/vinpower.py b/django/workers/external_apis/vinpower.py new file mode 100644 index 00000000..735c918d --- /dev/null +++ b/django/workers/external_apis/vinpower.py @@ -0,0 +1,2 @@ +def batch_decode(uploaded_vin_records): + return {"successful_records": [], "failed_vins": []} diff 
--git a/django/workers/external_apis/vpic.py b/django/workers/external_apis/vpic.py
new file mode 100644
index 00000000..fa941a56
--- /dev/null
+++ b/django/workers/external_apis/vpic.py
@@ -0,0 +1,38 @@
+import requests
+from django.conf import settings
+
+
+def batch_decode(uploaded_vin_records):
+    vpic_vin_key = settings.VPIC_VIN_KEY
+    vpic_error_code_name = settings.VPIC_ERROR_CODE_NAME
+    vpic_success_error_code = settings.VPIC_SUCCESS_ERROR_CODE
+    successful_records = {}
+    failed_vins = set()
+
+    url = settings.VPIC_ENDPOINT + "/DecodeVINValuesBatch/"
+
+    request_data = ""
+    for record in uploaded_vin_records:
+        request_data = request_data + record.vin + ";"
+
+    body = {"format": "json", "data": request_data}
+    response = requests.post(url, data=body)
+    response.raise_for_status()
+    data = response.json()["Results"]
+    decoded_vins_map = {}
+    for record in data:
+        vin = record.get(vpic_vin_key)
+        decoded_vins_map[vin] = record
+
+    for record in uploaded_vin_records:
+        vin = record.vin
+        decoded_record = decoded_vins_map.get(vin)
+        if (
+            decoded_record is not None
+            and decoded_record[vpic_error_code_name] == vpic_success_error_code
+        ):
+            successful_records[vin] = decoded_record
+        else:
+            failed_vins.add(vin)
+
+    return {"successful_records": successful_records, "failed_vins": failed_vins}
diff --git a/django/workers/scheduled_jobs.py b/django/workers/scheduled_jobs.py
new file mode 100644
index 00000000..28d878c7
--- /dev/null
+++ b/django/workers/scheduled_jobs.py
@@ -0,0 +1,42 @@
+from django_q.tasks import schedule
+from django.db import IntegrityError
+
+
+def schedule_create_minio_bucket():
+    try:
+        schedule(
+            "workers.tasks.create_minio_bucket",
+            name="create_minio_bucket",
+            schedule_type="O",
+            repeats=1,
+        )
+    except IntegrityError:
+        pass
+
+
+def schedule_read_uploaded_vins_file():
+    try:
+        schedule(
+            "workers.tasks.read_uploaded_vins_file",
+            name="read_uploaded_vins_file",
+            schedule_type="C",
+            cron="*/15 * * * *",
+            q_options={"timeout": 660, "ack_failure": True},
+        )
+    except IntegrityError:
+        pass
+
+
+def schedule_batch_decode_vins():
+    try:
+        schedule(
+            "workers.tasks.batch_decode_vins",
+            "vpic",
+            50,
+            name="batch_decode_vins",
+            schedule_type="C",
+            cron="* * * * *",
+            q_options={"timeout": 60, "ack_failure": True},
+        )
+    except IntegrityError:
+        pass
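
For reference, the vPIC request that batch_decode builds above can be reproduced by hand; a hypothetical invocation with placeholder VINs (the endpoint and the "format"/"data" fields mirror vpic.py and the defaults in settings.py):

# Manually exercise the public vPIC batch endpoint used by the scheduled decode task.
curl -s -X POST "https://vpic.nhtsa.dot.gov/api/vehicles/DecodeVINValuesBatch/" \
  -d "format=json" \
  -d "data=<vin-1>;<vin-2>"

diff --git a/django/workers/tasks.py b/django/workers/tasks.py
new file mode 100644
index 00000000..1efe4076
--- /dev/null
+++ b/django/workers/tasks.py
@@ -0,0 +1,92 @@
+from django.conf import settings
+from api.services.minio import get_minio_client, get_minio_object
+from func_timeout import func_timeout, FunctionTimedOut
+from api.models.uploaded_vins_file import UploadedVinsFile
+from api.models.uploaded_vin_record import UploadedVinRecord
+from api.decoder_constants import get_service
+from api.utilities.generic import get_map
+from api.services.decoded_vin_record import save_decoded_data
+from api.services.uploaded_vin_record import parse_and_save
+from django.db import transaction
+
+
+def create_minio_bucket():
+    bucket_name = settings.MINIO_BUCKET_NAME
+    client = get_minio_client()
+    found = client.bucket_exists(bucket_name)
+    if not found:
+        client.make_bucket(bucket_name)
+
+
+def read_uploaded_vins_file():
+    # TODO: this job will probably have to become more involved; it currently just uploads whatever is in the file while skipping records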
+ # we'll probably have to do an initial, chunked read from the + # file in order to build a map of (vin, postal_code) -> (record chunk index, record index within chunk) of unique records (based on snapshot_date?), + # then we'll have to compare the (vin, postal_code) keys to existing records in the database, and + # determine which ones need to get bulk-inserted, and which ones bulk-updated. + # also have to keep in mind the memory used by any data structures we use + @transaction.atomic + def inner(vins_file, file_response): + if vins_file is not None and file_response is not None: + parse_and_save(vins_file, file_response) + + file_response = None + vins_file = ( + UploadedVinsFile.objects.filter(processed=False).order_by("create_timestamp").first() + ) + if vins_file is not None: + file_response = get_minio_object(vins_file.filename) + try: + func_timeout(600, inner, args=(vins_file, file_response)) + except FunctionTimedOut: + print("reading vins file job timed out") + raise Exception + finally: + if file_response is not None: + file_response.close() + file_response.release_conn() + + +def batch_decode_vins(service_name, batch_size=50): + def inner(): + max_decode_attempts = settings.MAX_DECODE_ATTEMPTS + service = get_service(service_name) + if service: + decoded_vin_model = service.MODEL.value + filters = { + service.CURRENT_DECODE_SUCCESSFUL.value: False, + service.NUMBER_OF_CURRENT_DECODE_ATTEMPTS.value + + "__lt": max_decode_attempts, + } + order_by = [service.NUMBER_OF_CURRENT_DECODE_ATTEMPTS.value, "create_timestamp"] + uploaded_vin_records = UploadedVinRecord.objects.filter(**filters).order_by( + *order_by + )[:batch_size] + uploaded_vins = set() + for uploaded_record in uploaded_vin_records: + uploaded_vins.add(uploaded_record.vin) + vins_to_update = set() + decoded_records_to_update_map = get_map( + "vin", decoded_vin_model.objects.filter(vin__in=uploaded_vins) + ) + for decoded_vin in decoded_records_to_update_map: + vins_to_update.add(decoded_vin) + vins_to_insert = uploaded_vins.difference(vins_to_update) + + decoder = service.BATCH_DECODER.value + decoded_data = decoder(uploaded_vin_records) + + save_decoded_data( + uploaded_vin_records, + vins_to_insert, + decoded_records_to_update_map, + service_name, + decoded_data, + ) + + try: + func_timeout(45, inner) + except FunctionTimedOut: + print("batch decode vins job timed out") + raise Exception diff --git a/docker-compose.yml b/docker-compose.yml index cd960728..52add316 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -35,6 +35,12 @@ services: ports: - 9000:9000 - 9001:9001 + healthcheck: + test: "curl -I --fail --max-time 10 http://localhost:9000/minio/health/live" + start_period: 5s + interval: 5s + timeout: 5s + retries: 3 api: build: ./django command: > @@ -59,9 +65,36 @@ services: - ./django:/api ports: - 8000:8000 + healthcheck: + test: "curl --fail --max-time 5 http://localhost:8000/api/healthcheck" + start_period: 15s + interval: 15s + timeout: 10s + retries: 2 depends_on: db: condition: service_healthy + minio: + condition: service_healthy + workers: + build: ./django + command: > + sh -c "python manage.py qcluster" + env_file: + - minio.env + environment: + - DB_ENGINE=django.db.backends.postgresql + - DB_HOST=db + - DB_NAME=postgres + - DB_PASSWORD=postgres + - DB_PORT=5432 + - DB_USER=postgres + - DJANGO_DEBUG=True + volumes: + - ./django:/api + depends_on: + api: + condition: service_healthy web: build: ./frontend command: npm run start From ff66babc84a43bdbb06e31120ff04b332c9daa98 Mon Sep 17 
00:00:00 2001 From: JulianForeman <71847719+JulianForeman@users.noreply.github.com> Date: Tue, 21 May 2024 10:14:55 -0700 Subject: [PATCH 140/152] Updating model & constants (#302) --- django/api/constants.py | 29 +++-- .../api/migrations/0024_auto_20240516_2114.py | 100 ++++++++++++++++++ .../api/migrations/0025_auto_20240516_2248.py | 21 ++++ django/api/models/go_electric_rebates.py | 28 ++--- 4 files changed, 159 insertions(+), 19 deletions(-) create mode 100644 django/api/migrations/0024_auto_20240516_2114.py create mode 100644 django/api/migrations/0025_auto_20240516_2248.py diff --git a/django/api/constants.py b/django/api/constants.py index 1e4de116..f613d2bf 100644 --- a/django/api/constants.py +++ b/django/api/constants.py @@ -367,30 +367,40 @@ class ScrapItColumnMapping(Enum): class GoElectricRebatesColumns(Enum): APPROVALS = "Approvals" DATE = "Date" - FLEET = "Fleet/Individuals" APPLICANT_NAME = "Applicant Name" MAX_INCENTIVE_AMOUNT_REQUESTED = "Max Incentive Amount Requested" CATEGORY = "Category" + APPLICANT_TYPE = "Fleet/Individuals" INCENTIVE_PAID = "Incentive Paid" TOTAL_PURCHASE_PRICE_PRE_TAX = "Total Purchase Price (pre-tax)" MANUFACTURER = "Manufacturer" MODEL = "Model" - GER_CLASS = "Class" + CITY = "City" + POSTAL_CODE = "Postal Code" + PHONE = "Phone" + EMAIL = "Email" + VIN = "VIN" + VEHICLE_CLASS = "Class" class GoElectricRebatesColumnMapping(Enum): approvals = "Approvals" date = "Date" - fleet = "Fleet/Individuals" applicant_name = "Applicant Name" max_incentive_amount_requested = "Max Incentive Amount Requested" category = "Category" - applicant_type = "Applicant Type" + applicant_type = "Fleet/Individuals" incentive_paid = "Incentive Paid" total_purchase_price = "Total Purchase Price (pre-tax)" manufacturer = "Manufacturer" model = "Model" - ger_class = "Class" + city = "City" + postal_code = "Postal Code" + phone = "Phone" + email = "Email" + vin = "VIN" + vehicle_class = "Class" + FIELD_TYPES = { @@ -552,7 +562,6 @@ class GoElectricRebatesColumnMapping(Enum): "Go Electric Rebates Program": { "approvals": str, "date": datetime.date, - "fleet": str, "applicant_name": str, "max_incentive_amount_requested": int, "category": str, @@ -561,8 +570,14 @@ class GoElectricRebatesColumnMapping(Enum): "total_purchase_price": int, "manufacturer": str, "model": str, - "ger_class": str + "city": str, + "postal_code": str, + "phone": str, + "email": str, + "vin": str, + "vehicle_class": str, }, + } DATASET_CONFIG = { diff --git a/django/api/migrations/0024_auto_20240516_2114.py b/django/api/migrations/0024_auto_20240516_2114.py new file mode 100644 index 00000000..3915815b --- /dev/null +++ b/django/api/migrations/0024_auto_20240516_2114.py @@ -0,0 +1,100 @@ +# Generated by Django 3.2.25 on 2024-05-16 21:14 + +from django.db import migrations, models +import django.utils.timezone + + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0023_auto_20240514_1721'), + ] + + operations = [ + migrations.RenameField( + model_name='goelectricrebates', + old_name='ger_class', + new_name='vehicle_class', + ), + migrations.AddField( + model_name='goelectricrebates', + name='city', + field=models.CharField(default=django.utils.timezone.now, max_length=250), + preserve_default=False, + ), + migrations.AddField( + model_name='goelectricrebates', + name='email', + field=models.CharField(default='a', max_length=50), + preserve_default=False, + ), + migrations.AddField( + model_name='goelectricrebates', + name='flagged', + field=models.CharField(blank=True, max_length=50, 
null=True), + ), + migrations.AddField( + model_name='goelectricrebates', + name='phone', + field=models.CharField(default='123', max_length=20), + preserve_default=False, + ), + migrations.AddField( + model_name='goelectricrebates', + name='postal_code', + field=models.CharField(blank=True, max_length=250, null=True), + ), + migrations.AddField( + model_name='goelectricrebates', + name='vin', + field=models.CharField(blank=True, max_length=100, null=True), + ), + migrations.AlterField( + model_name='goelectricrebates', + name='applicant_name', + field=models.CharField(default='a', max_length=250), + preserve_default=False, + ), + migrations.AlterField( + model_name='goelectricrebates', + name='approvals', + field=models.CharField(default='a', max_length=20), + preserve_default=False, + ), + migrations.AlterField( + model_name='goelectricrebates', + name='category', + field=models.CharField(default='a', max_length=250), + preserve_default=False, + ), + migrations.AlterField( + model_name='goelectricrebates', + name='date', + field=models.DateField(default=django.utils.timezone.now, max_length=20), + preserve_default=False, + ), + migrations.AlterField( + model_name='goelectricrebates', + name='incentive_paid', + field=models.IntegerField(default=1), + preserve_default=False, + ), + migrations.AlterField( + model_name='goelectricrebates', + name='manufacturer', + field=models.CharField(default='a', max_length=250), + preserve_default=False, + ), + migrations.AlterField( + model_name='goelectricrebates', + name='model', + field=models.CharField(default='a', max_length=250), + preserve_default=False, + ), + migrations.AlterField( + model_name='goelectricrebates', + name='total_purchase_price', + field=models.IntegerField(default=1), + preserve_default=False, + ), + ] diff --git a/django/api/migrations/0025_auto_20240516_2248.py b/django/api/migrations/0025_auto_20240516_2248.py new file mode 100644 index 00000000..e10058bd --- /dev/null +++ b/django/api/migrations/0025_auto_20240516_2248.py @@ -0,0 +1,21 @@ +# Generated by Django 3.2.25 on 2024-05-16 22:48 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0024_auto_20240516_2114'), + ] + + operations = [ + migrations.RemoveField( + model_name='goelectricrebates', + name='flagged', + ), + migrations.RemoveField( + model_name='goelectricrebates', + name='fleet', + ), + ] diff --git a/django/api/models/go_electric_rebates.py b/django/api/models/go_electric_rebates.py index af672482..b266e11d 100644 --- a/django/api/models/go_electric_rebates.py +++ b/django/api/models/go_electric_rebates.py @@ -4,27 +4,31 @@ class GoElectricRebates(Auditable): - approvals = models.CharField(blank=True, null=True, max_length=20) - date = models.DateField(max_length=20, null=True, blank=True) - fleet = models.CharField(max_length=20) - applicant_name = models.CharField(blank=True, null=True, max_length=250) + approvals = models.CharField(blank=False, null=False, max_length=20) + date = models.DateField(max_length=20, null=False, blank=False) + applicant_name = models.CharField(blank=False, null=False, max_length=250) max_incentive_amount_requested = models.IntegerField( null=True, blank=True, ) - category = models.CharField(blank=True, max_length=250, null=True) + category = models.CharField(blank=False, max_length=250, null=False) applicant_type = models.CharField(blank=True, max_length=50, null=True) incentive_paid = models.IntegerField( - null=True, - blank=True, + null=False, + blank=False, ) 
total_purchase_price = models.IntegerField( - null=True, - blank=True, + null=False, + blank=False, ) - manufacturer = models.CharField(blank=True, max_length=250, null=True) - model = models.CharField(blank=True, max_length=250, null=True) - ger_class = models.CharField(blank=True, null=True, max_length=50) + manufacturer = models.CharField(blank=False, max_length=250, null=False) + model = models.CharField(blank=False, max_length=250, null=False) + city = models.CharField(blank=False, max_length=250, null=False) + postal_code = models.CharField(blank=True, max_length=250, null=True) + phone = models.CharField(blank=False, max_length=20, null=False) + email = models.CharField(blank=False, max_length=50, null=False) + vin = models.CharField(blank=True, max_length=100, null=True) + vehicle_class = models.CharField(blank=True, null=True, max_length=50) class Meta: db_table = "go_electric_rebates" From a11e976d8a7a0301ecf144e4c11e7ced88c321a0 Mon Sep 17 00:00:00 2001 From: Emily <44536222+emi-hi@users.noreply.github.com> Date: Wed, 22 May 2024 10:57:43 -0700 Subject: [PATCH 141/152] fix: CTHUB 297 Adds success/error message to icbc vin upload (#303) * fix: adds messages to icbc vins upload in import data function, adds basic error messaging to frontend in case errors don't have our standard format * feat: adds check for csv on backend fix: updates frontend to catch keycloak logout and display message to user when attempting upload --- django/api/viewsets/upload.py | 14 +++++++++++--- frontend/src/uploads/UploadContainer.js | 13 ++++++++++--- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/django/api/viewsets/upload.py b/django/api/viewsets/upload.py index 67971719..13704861 100644 --- a/django/api/viewsets/upload.py +++ b/django/api/viewsets/upload.py @@ -5,6 +5,7 @@ from rest_framework.permissions import AllowAny from rest_framework.response import Response from rest_framework.viewsets import GenericViewSet +import pathlib from django.http import HttpResponse from django.core.exceptions import ValidationError from django.utils.decorators import method_decorator @@ -50,11 +51,18 @@ def import_data(self, request): filename = request.data.get("filename") dataset_selected = request.data.get("datasetSelected") replace_data = request.data.get("replace", False) + filepath = request.data.get("filepath") if dataset_selected == "ICBC Vins": - create_vins_file(filename) - return Response({"success": True}, status=status.HTTP_200_OK) - + file_extension = pathlib.Path(filepath).suffix + if file_extension == '.csv': + try: + create_vins_file(filename) + return Response({"success": True, "message": "File successfully uploaded!"}, status=status.HTTP_200_OK) + except Exception as error: + return Response({"success": False, "message": str(error)}) + else: + return Response({"success": False, "message": "File must be a csv."}, status=status.HTTP_400_BAD_REQUEST) try: url = minio_get_object(filename) urllib.request.urlretrieve(url, filename) diff --git a/frontend/src/uploads/UploadContainer.js b/frontend/src/uploads/UploadContainer.js index 315400aa..22528fc0 100644 --- a/frontend/src/uploads/UploadContainer.js +++ b/frontend/src/uploads/UploadContainer.js @@ -45,15 +45,21 @@ const UploadContainer = () => { const showError = (error) => { const { response: errorResponse } = error; - setAlertContent( - `${errorResponse.data.message}\n${errorResponse.data.errors ? 
"Errors: " + errorResponse.data.errors.join("\n") : ""}`, - ); + setAlertContent("There was an issue uploading the file.") + if (errorResponse && errorResponse.data && errorResponse.data.message) { + setAlertContent( + `${errorResponse.data.message}\n${errorResponse.data.errors ? "Errors: " + errorResponse.data.errors.join("\n") : ""}`, + ) + } else if (errorResponse && errorResponse.data && errorResponse.status === 403) { + setAlertContent("There was an error. Please refresh page and ensure you are logged in.") + } setAlertSeverity("error"); setAlert(true); }; const doUpload = () => uploadFiles.forEach((file) => { + let filepath = file.path; setLoading(true); const uploadPromises = uploadFiles.map((file) => { return axios.get(ROUTES_UPLOAD.MINIO_URL).then((response) => { @@ -67,6 +73,7 @@ const UploadContainer = () => { filename, datasetSelected, replace, + filepath, }); }); }); From 0704a0d4a1e00d2e68911965c7a4759031047d64 Mon Sep 17 00:00:00 2001 From: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com> Date: Fri, 24 May 2024 15:34:51 -0700 Subject: [PATCH 142/152] Task queue 0.2.0 (#309) \ --- .github/workflows/dev-task-queue.yaml | 97 +++++++++++++++++++ django/Dockerfile.taskq.Openshift | 17 ++++ .../templates/task-queue/task-queue-bc.yaml | 92 ++++++++++++++++++ 3 files changed, 206 insertions(+) create mode 100644 .github/workflows/dev-task-queue.yaml create mode 100644 django/Dockerfile.taskq.Openshift create mode 100644 openshift/templates/task-queue/task-queue-bc.yaml diff --git a/.github/workflows/dev-task-queue.yaml b/.github/workflows/dev-task-queue.yaml new file mode 100644 index 00000000..7b9756df --- /dev/null +++ b/.github/workflows/dev-task-queue.yaml @@ -0,0 +1,97 @@ +## For each release, the value of workflow name, branches and VERSION need to be adjusted accordingly + +name: CTHUB 0.2.0 Task Queue Dev CI + +on: + push: + branches: [ task-queue-0.2.0 ] + workflow_dispatch: + +env: + VERSION: 0.2.0 + GIT_URL: https://github.com/bcgov/cthub.git + TOOLS_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools + DEV_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev + + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + + set-pre-release: + name: Calculate pre-release number + runs-on: ubuntu-latest + + outputs: + output1: ${{ steps.set-pre-release.outputs.PRE_RELEASE }} + + steps: + - id: set-pre-release + run: echo "PRE_RELEASE=$(date +'%Y%m%d%H%M%S')" >> $GITHUB_OUTPUT + + build: + + name: Build CTHUB task queue + runs-on: ubuntu-latest + needs: set-pre-release + timeout-minutes: 60 + + env: + PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }} + + steps: + + - name: Check out repository + uses: actions/checkout@v4.1.1 + + - name: Log in to Openshift + uses: redhat-actions/oc-login@v1.3 + with: + openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }} + openshift_token: ${{ secrets.OPENSHIFT_TOKEN }} + insecure_skip_tls_verify: true + namespace: ${{ env.TOOLS_NAMESPACE }} + + - name: Build CTHUB task queue + run: | + cd openshift/templates/task-queue + oc process -f ./task-queue-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=task-queue-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }} + sleep 5s + oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-task-queue-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1 + oc tag ${{ env.TOOLS_NAMESPACE 
diff --git a/django/Dockerfile.taskq.Openshift b/django/Dockerfile.taskq.Openshift
new file mode 100644
index 00000000..375e369c
--- /dev/null
+++ b/django/Dockerfile.taskq.Openshift
@@ -0,0 +1,17 @@
+FROM registry.fedoraproject.org/f35/python3
+
+# Add application sources with correct permissions for OpenShift
+USER 0
+WORKDIR /opt/app-root/src
+ADD . .
+RUN chown -R 1001:0 ./
+USER 1001
+
+# # Install the dependencies
+RUN pip install -U "pip>=19.3.1" && \
+    pip install -r requirements.txt
+RUN ls -l \
+    && python -V
+
+# # Run the application
+CMD python manage.py qcluster
\ No newline at end of file
diff --git a/openshift/templates/task-queue/task-queue-bc.yaml b/openshift/templates/task-queue/task-queue-bc.yaml
new file mode 100644
index 00000000..668c3f22
--- /dev/null
+++ b/openshift/templates/task-queue/task-queue-bc.yaml
@@ -0,0 +1,92 @@
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+  creationTimestamp: null
+  name: cthub-task-queue
+parameters:
+  - name: NAME
+    displayName:
+    description: the module name entered when run yo bcdk:pipeline, which is zeva
+    required: true
+  - name: SUFFIX
+    displayName:
+    description: sample is -pr-0
+    required: true
+  - name: VERSION
+    displayName:
+    description: image tag name for output
+    required: true
+  - name: GIT_URL
+    displayName:
+    description: cthub repo
+    required: true
+  - name: GIT_REF
+    displayName:
+    description: cthub branch name of the pr
+    required: true
+objects:
+- apiVersion: image.openshift.io/v1
+  kind: ImageStream
+  metadata:
+    annotations:
+      description: Keeps track of changes in the client / front end image
+    labels:
+      shared: "true"
+    creationTimestamp: null
+    name: ${NAME}-task-queue
+  spec:
+    lookupPolicy:
+      local: false
+  status:
+    dockerImageRepository: ""
+- apiVersion: build.openshift.io/v1
+  kind: BuildConfig
+  metadata:
+    creationTimestamp: null
+    name: ${NAME}-task-queue${SUFFIX}
+  spec:
+    failedBuildsHistoryLimit: 5
+    nodeSelector: null
+    output:
+      to:
+        kind: ImageStreamTag
+        name: ${NAME}-task-queue:${VERSION}
+    postCommit: {}
+    resources:
+      limits:
+        cpu: 2000m
+        memory: 4Gi
+      requests:
+        cpu: 500m
+        memory: 2Gi
+    runPolicy: Serial
+    source:
+      git:
+        ref: ${GIT_REF}
+        uri: ${GIT_URL}
+      type: Git
+      contextDir: django
+    strategy:
+      dockerStrategy:
+        dockerfilePath: ./Dockerfile.taskq.Openshift
+        env:
+          - name: ARTIFACTORY_USER
+            valueFrom:
+              secretKeyRef:
+                name: artifacts-default-idxprm
+                key: username
+          - name: ARTIFACTORY_PASSWORD
+            valueFrom:
+              secretKeyRef:
+                name: artifacts-default-idxprm
+                key: password
+        noCache: true
+        forcePull: true
+      type: Docker
+    successfulBuildsHistoryLimit: 5
+    triggers:
+      - imageChange: {}
+        type: ImageChange
+      - type: ConfigChange
+  status:
+    lastVersion: 0
\ No newline at end of file
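The qcluster command in the Dockerfile above is Django-Q's worker process, so this image runs task consumers rather than the web app. Assuming Django-Q is the queue library in use, producers hand work to a pod built from this image roughly like this (the dotted path is illustrative):

    # Sketch: enqueue a job for the worker started with
    # `python manage.py qcluster`; async_task returns a task id immediately
    # and the worker picks the job up from the shared broker.
    from django_q.tasks import async_task

    task_id = async_task("workers.tasks.read_uploaded_vins_file")
    print(task_id)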
From 0eb128c81c6cec00189cab85a105666537a15266 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Fri, 24 May 2024 15:36:13 -0700
Subject: [PATCH 143/152] add taskq build

---
 .github/workflows/dev-ci.yaml | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/.github/workflows/dev-ci.yaml b/.github/workflows/dev-ci.yaml
index 42bf9729..2c9f1c30 100644
--- a/.github/workflows/dev-ci.yaml
+++ b/.github/workflows/dev-ci.yaml
@@ -72,6 +72,14 @@ jobs:
           sleep 5s
           oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-frontend-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1
           oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
+
+      - name: Build CTHUB Task Queue
+        run: |
+          cd openshift/templates/task-queue
+          oc process -f ./task-queue-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }}
+          sleep 5s
+          oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-task-queue-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1
+          oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
 
   deploy:

From 11d22b3f45bd9c97804c63b8ace48400318fd810 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Fri, 24 May 2024 15:37:47 -0700
Subject: [PATCH 144/152] cleanup

---
 .github/workflows/dev-task-queue.yaml | 97 ---------------------
 1 file changed, 97 deletions(-)
 delete mode 100644 .github/workflows/dev-task-queue.yaml

diff --git a/.github/workflows/dev-task-queue.yaml b/.github/workflows/dev-task-queue.yaml
deleted file mode 100644
index 7b9756df..00000000
--- a/.github/workflows/dev-task-queue.yaml
+++ /dev/null
@@ -1,97 +0,0 @@
-## For each release, the value of workflow name, branches and VERSION need to be adjusted accordingly
-
-name: CTHUB 0.2.0 Task Queue Dev CI
-
-on:
-  push:
-    branches: [ task-queue-0.2.0 ]
-  workflow_dispatch:
-
-env:
-  VERSION: 0.2.0
-  GIT_URL: https://github.com/bcgov/cthub.git
-  TOOLS_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools
-  DEV_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev
-
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
-  cancel-in-progress: true
-
-jobs:
-
-  set-pre-release:
-    name: Calculate pre-release number
-    runs-on: ubuntu-latest
-
-    outputs:
-      output1: ${{ steps.set-pre-release.outputs.PRE_RELEASE }}
-
-    steps:
-      - id: set-pre-release
-        run: echo "PRE_RELEASE=$(date +'%Y%m%d%H%M%S')" >> $GITHUB_OUTPUT
-
-  build:
-
-    name: Build CTHUB task queue
-    runs-on: ubuntu-latest
-    needs: set-pre-release
-    timeout-minutes: 60
-
-    env:
-      PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }}
-
-    steps:
-
-      - name: Check out repository
-        uses: actions/checkout@v4.1.1
-
-      - name: Log in to Openshift
-        uses: redhat-actions/oc-login@v1.3
-        with:
-          openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }}
-          openshift_token: ${{ secrets.OPENSHIFT_TOKEN }}
-          insecure_skip_tls_verify: true
-          namespace: ${{ env.TOOLS_NAMESPACE }}
-
-      - name: Build CTHUB task queue
-        run: |
-          cd openshift/templates/task-queue
-          oc process -f ./task-queue-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=task-queue-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }}
-          sleep 5s
-          oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-task-queue-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1
-          oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
-
-
-  # deploy:
-
-  #   name: Deploy CTHUB task queue on Dev
-  #   runs-on: ubuntu-latest
-  #   timeout-minutes: 60
-  #   needs: [set-pre-release, build]
-
-  #   env:
-  #     PRE_RELEASE: ${{ needs.set-pre-release.outputs.output1 }}
-
-  #   steps:
-
-  #     - name: Checkout Manifest repository
-  #       uses: actions/checkout@v4.1.1
-  #       with:
-  #         repository: bcgov-c/tenant-gitops-30b186
-  #         ref: main
-  #         ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }}
-
-  #     - name: Update task-queue tag
-  #       uses: mikefarah/yq@v4.40.5
-  #       with:
-  #         cmd: yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml
-
-  #     - name: GitHub Commit & Push
-  #       run: |
-  #         git config --global user.email "actions@github.com"
-  #         git config --global user.name "GitHub Actions"
-  #         git add cthub/values-dev.yaml
-  #         git commit -m "Update the image tag to ${{ env.VERSION }}-${{ env.PRE_RELEASE }} on Dev"
-  #         git push
-  
\ No newline at end of file
From ebf6f43ffc423182d68f322c974075636a716f83 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Fri, 24 May 2024 15:56:55 -0700
Subject: [PATCH 145/152] update task queue image tag

---
 .github/workflows/dev-ci.yaml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/.github/workflows/dev-ci.yaml b/.github/workflows/dev-ci.yaml
index 2c9f1c30..42437304 100644
--- a/.github/workflows/dev-ci.yaml
+++ b/.github/workflows/dev-ci.yaml
@@ -110,6 +110,11 @@ jobs:
         with:
           cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml
 
+      - name: Update task-queue tag
+        uses: mikefarah/yq@v4.40.5
+        with:
+          cmd: yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml
+
       - name: GitHub Commit & Push
         run: |
           git config --global user.email "actions@github.com"

From c403ed17808c1addc3f7b7fabbfbf4ab7956f50c Mon Sep 17 00:00:00 2001
From: Emily <44536222+emi-hi@users.noreply.github.com>
Date: Mon, 27 May 2024 10:32:21 -0700
Subject: [PATCH 146/152] feat: adds redirect to login page if user attempts
 any axios call after timing out (#308)

---
 frontend/src/app/utilities/useAxios.js | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/frontend/src/app/utilities/useAxios.js b/frontend/src/app/utilities/useAxios.js
index 5de66a49..fe7f1fd7 100644
--- a/frontend/src/app/utilities/useAxios.js
+++ b/frontend/src/app/utilities/useAxios.js
@@ -1,8 +1,10 @@
 import axios from "axios";
 import useKeycloak from "./useKeycloak";
 import { API_BASE } from "../../config";
+import { useHistory } from "react-router-dom";
 
 const useAxios = (useDefault = false, opts = {}) => {
+  const history = useHistory();
   const keycloak = useKeycloak();
   if (useDefault) {
     return axios.create(opts);
@@ -19,7 +21,8 @@ const useAxios = (useDefault = false, opts = {}) => {
           Authorization: `Bearer ${keycloak.token}`,
         };
       } catch (error) {
-        // do something here?
+        keycloak.logout();
+        history.push("/upload");
       }
     }
     return config;
From 53f2b129a49783f7e05954edaf0fbd0448821b6f Mon Sep 17 00:00:00 2001
From: Emily <44536222+emi-hi@users.noreply.github.com>
Date: Mon, 27 May 2024 10:36:14 -0700
Subject: [PATCH 147/152] feat: disables save button on user permissions until
 any permission is changed (#306)

---
 frontend/src/users/UsersContainer.js       | 3 +++
 frontend/src/users/components/UsersPage.js | 2 ++
 2 files changed, 5 insertions(+)

diff --git a/frontend/src/users/UsersContainer.js b/frontend/src/users/UsersContainer.js
index 38711305..745d31f9 100644
--- a/frontend/src/users/UsersContainer.js
+++ b/frontend/src/users/UsersContainer.js
@@ -19,6 +19,7 @@ const UsersContainer = (props) => {
   const [messageSeverity, setMessageSeverity] = useState("");
   const [userToDelete, setUserToDelete] = useState("");
   const [openDialog, setOpenDialog] = useState(false);
+  const [saveButtonEnabled, setSaveButtonEnabled] = useState(false);
   const axios = useAxios();
 
   const handleAddNewUser = () => {
@@ -56,6 +57,7 @@ const UsersContainer = (props) => {
         userToChange.user_permissions[permissionType] = checked;
       }),
     );
+    setSaveButtonEnabled(true)
   }, []);
 
   const handleDeleteUserClick = (idir) => {
@@ -139,6 +141,7 @@ const UsersContainer = (props) => {
           setMessage={setMessage}
           newUser={newUser}
           handleXClick={handleDeleteUserClick}
+          saveButtonEnabled={saveButtonEnabled}
         />
       );
diff --git a/frontend/src/users/components/UsersPage.js b/frontend/src/users/components/UsersPage.js
index 147e19dc..20304553 100644
--- a/frontend/src/users/components/UsersPage.js
+++ b/frontend/src/users/components/UsersPage.js
@@ -23,6 +23,7 @@ const UsersPage = (props) => {
     newUser,
     setMessage,
     handleXClick,
+    saveButtonEnabled
   } = props;
 
   const userRow = (user) => {
@@ -171,6 +172,7 @@ const UsersPage = (props) => {
             className="button-dark-blue"
             startIcon={}
             onClick={handleSubmitUserUpdates}
+            disabled={!saveButtonEnabled}
           >
             Save
From abe173000e908780b2d794475c7662ceb7caf7a9 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Mon, 27 May 2024 14:52:53 -0700
Subject: [PATCH 148/152] update cleanup cron

---
 openshift/templates/cleanup/cleanup-cron.yaml | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/openshift/templates/cleanup/cleanup-cron.yaml b/openshift/templates/cleanup/cleanup-cron.yaml
index 4ef48112..b848df96 100644
--- a/openshift/templates/cleanup/cleanup-cron.yaml
+++ b/openshift/templates/cleanup/cleanup-cron.yaml
@@ -23,6 +23,7 @@ objects:
           metadata:
             creationTimestamp: null
           spec:
+            backoffLimit: 0
             template:
               metadata:
                 creationTimestamp: null
@@ -84,7 +85,9 @@ objects:
 
                       echo "==========> Removing expired pods"
 
-                      oc -n $namespace get pods | grep -E "Completed|Error|ContainerStatusUnknown" | grep -v crunchy | grep -v spilo | grep -v backup | awk '{print $1}' | xargs oc -n $namespace delete pod || true
+                      oc -n $namespace get pods | grep Completed | grep -v backup | awk '{print $1}' | xargs oc -n $namespace delete pod || true
+
+                      oc -n $namespace get pods | grep -E "Error|ContainerStatusUnknown" | grep -v crunchy | grep -v spilo | grep -v backup | awk '{print $1}' | xargs oc -n $namespace delete pod || true
 
                       env=$(echo $namespace | awk -F '-' '{print $NF}')
@@ -102,7 +105,7 @@ objects:
 
                   done
 
-              restartPolicy: OnFailure
+              restartPolicy: Never
               terminationGracePeriodSeconds: 30
               dnsPolicy: ClusterFirst
               securityContext: {}
From 7dbf76d825edc0fddd35bc1b4025f4af551daeb3 Mon Sep 17 00:00:00 2001
From: tim738745 <98717409+tim738745@users.noreply.github.com>
Date: Mon, 27 May 2024 18:04:19 -0700
Subject: [PATCH 149/152] fix: 285 - update default chunksize for processing
 vin files (#313)

---
 .../0026_alter_uploadedvinsfile_chunk_size.py | 18 +++++++++++++++
 django/api/models/uploaded_vins_file.py       |  2 +-
 django/workers/tasks.py                       | 23 ++++++++++++++-----
 3 files changed, 36 insertions(+), 7 deletions(-)
 create mode 100644 django/api/migrations/0026_alter_uploadedvinsfile_chunk_size.py

diff --git a/django/api/migrations/0026_alter_uploadedvinsfile_chunk_size.py b/django/api/migrations/0026_alter_uploadedvinsfile_chunk_size.py
new file mode 100644
index 00000000..292cb0e6
--- /dev/null
+++ b/django/api/migrations/0026_alter_uploadedvinsfile_chunk_size.py
@@ -0,0 +1,18 @@
+# Generated by Django 3.2.25 on 2024-05-28 00:49
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('api', '0025_auto_20240516_2248'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='uploadedvinsfile',
+            name='chunk_size',
+            field=models.IntegerField(default=5000),
+        ),
+    ]
diff --git a/django/api/models/uploaded_vins_file.py b/django/api/models/uploaded_vins_file.py
index 5f3345e1..a958dd28 100644
--- a/django/api/models/uploaded_vins_file.py
+++ b/django/api/models/uploaded_vins_file.py
@@ -5,7 +5,7 @@
 
 class UploadedVinsFile(Auditable):
     filename = models.CharField(max_length=32, unique=True)
-    chunk_size = models.IntegerField(default=25000)
+    chunk_size = models.IntegerField(default=5000)
     chunks_per_run = models.IntegerField(default=4)
diff --git a/django/workers/tasks.py b/django/workers/tasks.py
index 1efe4076..280a6c8a 100644
--- a/django/workers/tasks.py
+++ b/django/workers/tasks.py
@@ -26,6 +26,11 @@ def read_uploaded_vins_file():
     # then we'll have to compare the (vin, postal_code) keys to existing records in the database, and
     # determine which ones need to get bulk-inserted, and which ones bulk-updated.
     # also have to keep in mind the memory used by any data structures we use
+    def close_file_response(file_response):
+        if file_response is not None:
+            file_response.close()
+            file_response.release_conn()
+
     @transaction.atomic
     def inner(vins_file, file_response):
         if vins_file is not None and file_response is not None:
@@ -33,19 +38,22 @@ def inner(vins_file, file_response):
     file_response = None
 
     vins_file = (
-        UploadedVinsFile.objects.filter(processed=False).order_by("create_timestamp").first()
+        UploadedVinsFile.objects.filter(processed=False)
+        .order_by("create_timestamp")
+        .first()
     )
     if vins_file is not None:
         file_response = get_minio_object(vins_file.filename)
     try:
         func_timeout(600, inner, args=(vins_file, file_response))
+        close_file_response(file_response)
     except FunctionTimedOut:
         print("reading vins file job timed out")
+        close_file_response(file_response)
+        raise Exception
+    except Exception:
+        close_file_response(file_response)
         raise Exception
-    finally:
-        if file_response is not None:
-            file_response.close()
-            file_response.release_conn()
 
 
 def batch_decode_vins(service_name, batch_size=50):
@@ -59,7 +67,10 @@ def inner():
             service.NUMBER_OF_CURRENT_DECODE_ATTEMPTS.value
             + "__lt": max_decode_attempts,
         }
-        order_by = [service.NUMBER_OF_CURRENT_DECODE_ATTEMPTS.value, "create_timestamp"]
+        order_by = [
+            service.NUMBER_OF_CURRENT_DECODE_ATTEMPTS.value,
+            "create_timestamp",
+        ]
         uploaded_vin_records = UploadedVinRecord.objects.filter(**filters).order_by(
             *order_by
         )[:batch_size]
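The func_timeout(600, inner, ...) call above gives each run of the reader a ten-minute wall-clock budget; when the budget is exceeded, FunctionTimedOut is raised in the caller, so the Minio connection can still be released. A self-contained sketch of the same pattern, with illustrative names:

    # Minimal func_timeout usage: run a job under a hard time limit.
    import time

    from func_timeout import func_timeout, FunctionTimedOut


    def slow_job(seconds):
        time.sleep(seconds)
        return "done"


    try:
        # Raises FunctionTimedOut if slow_job runs past 2 seconds.
        print(func_timeout(2, slow_job, args=(1,)))
    except FunctionTimedOut:
        print("job timed out")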
From bead5c5db21316db6bcbffb3bea75489e73422e0 Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Tue, 4 Jun 2024 11:53:22 -0700
Subject: [PATCH 150/152] tag taskq image for test

---
 .github/workflows/test-ci.yaml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/.github/workflows/test-ci.yaml b/.github/workflows/test-ci.yaml
index 302f6347..6a8da706 100644
--- a/.github/workflows/test-ci.yaml
+++ b/.github/workflows/test-ci.yaml
@@ -69,6 +69,7 @@ jobs:
         run: |
           oc tag ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
           oc tag ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
+          oc tag ${{ env.DEV_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
 
       - name: Checkout Manifest repository
         uses: actions/checkout@v4.1.1
@@ -87,6 +88,11 @@ jobs:
         with:
           cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml
 
+      - name: Update task-queue tag
+        uses: mikefarah/yq@v4.40.5
+        with:
+          cmd: yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml
+
       - name: GitHub Commit & Push
         run: |
           git config --global user.email "actions@github.com"

From ee2c0e0730b0777d0f57381700f1f3b53ff9bcba Mon Sep 17 00:00:00 2001
From: Kuan Fan
Date: Wed, 5 Jun 2024 11:40:00 -0700
Subject: [PATCH 151/152] add task queue to prod pipeline

---
 .github/workflows/prod-ci.yaml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/.github/workflows/prod-ci.yaml b/.github/workflows/prod-ci.yaml
index 047bffed..a561b435 100644
--- a/.github/workflows/prod-ci.yaml
+++ b/.github/workflows/prod-ci.yaml
@@ -69,6 +69,7 @@ jobs:
         run: |
           oc tag ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.PROD_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
           oc tag ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.PROD_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
+          oc tag ${{ env.TEST_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.PROD_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
 
       - name: Checkout Manifest repository
         uses: actions/checkout@v4.1.1
@@ -87,6 +88,11 @@ jobs:
         with:
           cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml
 
+      - name: Update task-queue tag
+        uses: mikefarah/yq@v4.40.5
+        with:
+          cmd: yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml
+
       - name: GitHub Commit & Push
         run: |
           git config --global user.email "actions@github.com"
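The mikefarah/yq steps in these pipeline patches each rewrite a single key in the GitOps values file. For comparison, the same edit scripted in Python with ruamel.yaml, which also preserves comments on round-trip, might look like the sketch below; the path and tag are examples, not what the workflow runs:

    # Hypothetical equivalent of:
    #   yq -i '.task-queue.image.tag = "<version>-<pre-release>"' cthub/values-prod.yaml
    from pathlib import Path

    from ruamel.yaml import YAML

    values = Path("cthub/values-prod.yaml")  # assumed manifest checkout path
    yaml = YAML()  # round-trip mode keeps comments and key order

    data = yaml.load(values.read_text())
    data["task-queue"]["image"]["tag"] = "0.2.0-20240605165431"  # example tag
    with values.open("w") as f:
        yaml.dump(data, f)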
This is - # required for things outside of OpenShift (like the Internet) - # to reach your pods. - ingress: - - from: - - namespaceSelector: - matchLabels: - network.openshift.io/policy-group: ingress - podSelector: {} - policyTypes: - - Ingress diff --git a/openshift/templates/knp/2-allow-crunchy-accept.yaml b/openshift/templates/knp/2-allow-crunchy-accepts.yaml similarity index 79% rename from openshift/templates/knp/2-allow-crunchy-accept.yaml rename to openshift/templates/knp/2-allow-crunchy-accepts.yaml index c5565417..d7d0da3a 100644 --- a/openshift/templates/knp/2-allow-crunchy-accept.yaml +++ b/openshift/templates/knp/2-allow-crunchy-accepts.yaml @@ -6,7 +6,7 @@ labels: metadata: name: allow-crunchy-accept parameters: - - name: ENVIRONMENT + - name: ENV displayName: null description: such as dev, test or prod required: true @@ -14,23 +14,23 @@ objects: - apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: allow-crunchy-accept + name: allow-crunchy-accepts-${ENV} spec: podSelector: matchLabels: - postgres-operator.crunchydata.com/cluster: cthub-${ENVIRONMENT}-crunchy + postgres-operator.crunchydata.com/cluster: cthub-${ENV}-crunchy ingress: - from: - podSelector: matchLabels: - postgres-operator.crunchydata.com/cluster: cthub-${ENVIRONMENT}-crunchy + postgres-operator.crunchydata.com/cluster: cthub-${ENV}-crunchy - ports: - protocol: TCP port: 5432 from: - podSelector: matchLabels: - app.kubernetes.io/instance: cthub-${ENVIRONMENT} + app.kubernetes.io/instance: cthub-${ENV} app.kubernetes.io/name: backend - ports: - protocol: TCP @@ -38,14 +38,15 @@ objects: from: - podSelector: matchLabels: - openshift.io/deployer-pod.type: hook-mid + app.kubernetes.io/instance: cthub-${ENV} + app.kubernetes.io/name: task-queue - ports: - protocol: TCP port: 5432 from: - podSelector: matchLabels: - cronjob: cthub-db-backup + openshift.io/deployer-pod.type: hook-mid - ports: - protocol: TCP port: 9187 @@ -61,10 +62,6 @@ objects: - protocol: TCP port: 5432 from: - - namespaceSelector: - matchLabels: - environment: ${ENVIRONMENT} - name: 30b186 - podSelector: matchLabels: app: metabase diff --git a/openshift/templates/knp/knp-diagram.drawio b/openshift/templates/knp/knp-diagram.drawio index 2a5f2086..a60a689e 100644 --- a/openshift/templates/knp/knp-diagram.drawio +++ b/openshift/templates/knp/knp-diagram.drawio @@ -1,88 +1,69 @@ - - - + + + - - - - + - - + + - - + + - + - - - - - - - - - - - - - - + + - - - - - + + - - + + - - - - - - - - + + - + - - + + - - + + - - + + - - + + - - + + + + + + + - - + + - - + + - - + + - - + +