From 4a2d3bcc73e51570d6a7ab63f17312e6bcb8b360 Mon Sep 17 00:00:00 2001 From: Robbie Blaine <4052340+rblaine95@users.noreply.github.com> Date: Fri, 7 Feb 2025 10:47:27 +0200 Subject: [PATCH] =?UTF-8?q?=E2=99=BB=EF=B8=8F=F0=9F=91=B7=20Refactor=20CIC?= =?UTF-8?q?D=20Pipeline=20(#1311)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Set `MISE_JOBS` to 4 when setting up Mise * Sort Docker Builds alphabetically * Move NATS chart from `tilt` to `helm` * Pin Redpanda Connect and update Postgres * Helmfile * JSON Serialize Logs * NATS Job Pod Labels * Toggleable PGAdmin * Configure PGAdmin Ingress * ReplicaCount 1 * Dev Test values * Increase PGPool Max Connections * Don't inject istio sidecar on PGAdmin * Add Init Containers to verify NATS is ready * Rename and deduplicate docker images * `d-cloud` -> `acapy-cloud` * `governance-web`, `multitenant-web`, `public-web`, `tenant-web` are now built once as `acapy-cloud/app` * `dockerfiles/fastapi` -> `dockerfiles/app` * CICD Build depends on Lint, Style, and Unit tests * Remove unneeded `style-check.yml` Workflow * Don't deploy to EKS if draft PR * Cache python venv * Rename `continuous-deploy.yml` to `cicd.yml` * Remove UV * We don't really use it for anything * Bump Metrics Server * `hyperledger/aries-cloudagent-python` -> `openwallet-foundation/acapy` * :memo: `aries-cloudapi` -> `acapy-cloud` * Remove Lint Job (Soon™ to be replaced with Ruff) And more that I've probably forgotten --- .github/actions/deploy-eks/action.yml | 104 ++ .github/actions/test-eks/action.yml | 291 +++++ .github/workflows/cicd.yml | 516 ++++++++ .github/workflows/continuous-deploy.yml | 1089 ----------------- .github/workflows/sonarcloud.yml | 24 +- .github/workflows/style-check.yml | 49 - .mise.toml | 10 +- .pylintrc | 2 + README.md | 4 +- Tiltfile | 6 +- docker-compose-ledger.yaml | 4 +- docker-compose.yaml | 8 +- dockerfiles/{fastapi => app}/Dockerfile | 0 helm/acapy-cloud.yaml.gotmpl | 203 +++ 
helm/acapy-cloud/README.md | 2 +- helm/acapy-cloud/conf/dev/connect-cloud.yaml | 50 + helm/acapy-cloud/conf/dev/endorser.yaml | 149 +++ .../conf/dev/governance-agent-pg-proxy.yaml | 3 + .../conf/dev/governance-agent.yaml | 251 ++++ helm/acapy-cloud/conf/dev/governance-web.yaml | 213 ++++ helm/acapy-cloud/conf/dev/ledger-browser.yaml | 86 ++ helm/acapy-cloud/conf/dev/mediator.yaml | 219 ++++ .../conf/dev/multitenant-agent-pg-proxy.yaml | 3 + .../conf/dev/multitenant-agent.yaml | 247 ++++ .../acapy-cloud/conf/dev/multitenant-web.yaml | 172 +++ helm/acapy-cloud/conf/dev/public-web.yaml | 181 +++ helm/acapy-cloud/conf/dev/tails-server.yaml | 71 ++ helm/acapy-cloud/conf/dev/tenant-web.yaml | 190 +++ helm/acapy-cloud/conf/dev/trust-registry.yaml | 140 +++ helm/acapy-cloud/conf/dev/waypoint.yaml | 157 +++ .../acapy-cloud/conf/local/connect-cloud.yaml | 50 + helm/acapy-cloud/conf/local/endorser.yaml | 25 +- .../conf/local/governance-agent-pg-proxy.yaml | 3 + .../conf/local/governance-agent.yaml | 21 +- .../conf/local/governance-web.yaml | 12 +- .../conf/local/ledger-browser.yaml | 2 +- .../local/multitenant-agent-pg-proxy.yaml | 3 + .../conf/local/multitenant-agent.yaml | 19 +- .../conf/local/multitenant-web.yaml | 12 +- helm/acapy-cloud/conf/local/public-web.yaml | 2 +- helm/acapy-cloud/conf/local/tails-server.yaml | 2 +- helm/acapy-cloud/conf/local/tenant-web.yaml | 2 +- .../conf/local/trust-registry.yaml | 2 +- helm/acapy-cloud/conf/local/waypoint.yaml | 19 +- helm/acapy-cloud/templates/NOTES.txt | 2 +- helm/acapy-cloud/values.yaml | 4 +- helm/acapy-test.yaml.gotmpl | 42 + helm/acapy-test/README.md | 2 +- helm/acapy-test/conf/dev/regression.yaml | 18 + helm/acapy-test/conf/dev/values.yaml | 43 + helm/acapy-test/conf/local/values.yaml | 2 +- helm/acapy-test/values.yaml | 2 +- helm/ledger-nodes/conf/dev/values.yaml | 26 + helm/ledger-nodes/values.yaml | 2 +- {tilt/acapy-cloud => helm}/nats/.helmignore | 0 {tilt/acapy-cloud => helm}/nats/Chart.lock | 0 {tilt/acapy-cloud => 
helm}/nats/Chart.yaml | 0 .../nats/templates/job.yaml | 4 + {tilt/acapy-cloud => helm}/nats/values.yaml | 2 +- .../cloud/streams/cloud-events.yaml | 4 +- .../cloud/streams/state-monitoring.yaml | 2 +- tilt/acapy-cloud/Tiltfile | 27 +- tilt/acapy-cloud/connect-cloud.yaml | 23 - tilt/acapy-cloud/nats/charts/.gitignore | 4 - tilt/acapy-cloud/pgadmin.yaml | 5 +- tilt/acapy-cloud/postgres.yaml | 1 + tilt/metrics/Tiltfile | 2 +- 67 files changed, 3580 insertions(+), 1255 deletions(-) create mode 100644 .github/actions/deploy-eks/action.yml create mode 100644 .github/actions/test-eks/action.yml create mode 100644 .github/workflows/cicd.yml delete mode 100644 .github/workflows/continuous-deploy.yml delete mode 100644 .github/workflows/style-check.yml rename dockerfiles/{fastapi => app}/Dockerfile (100%) create mode 100644 helm/acapy-cloud.yaml.gotmpl create mode 100644 helm/acapy-cloud/conf/dev/connect-cloud.yaml create mode 100644 helm/acapy-cloud/conf/dev/endorser.yaml create mode 100644 helm/acapy-cloud/conf/dev/governance-agent-pg-proxy.yaml create mode 100644 helm/acapy-cloud/conf/dev/governance-agent.yaml create mode 100644 helm/acapy-cloud/conf/dev/governance-web.yaml create mode 100644 helm/acapy-cloud/conf/dev/ledger-browser.yaml create mode 100644 helm/acapy-cloud/conf/dev/mediator.yaml create mode 100644 helm/acapy-cloud/conf/dev/multitenant-agent-pg-proxy.yaml create mode 100644 helm/acapy-cloud/conf/dev/multitenant-agent.yaml create mode 100644 helm/acapy-cloud/conf/dev/multitenant-web.yaml create mode 100644 helm/acapy-cloud/conf/dev/public-web.yaml create mode 100644 helm/acapy-cloud/conf/dev/tails-server.yaml create mode 100644 helm/acapy-cloud/conf/dev/tenant-web.yaml create mode 100644 helm/acapy-cloud/conf/dev/trust-registry.yaml create mode 100644 helm/acapy-cloud/conf/dev/waypoint.yaml create mode 100644 helm/acapy-cloud/conf/local/connect-cloud.yaml create mode 100644 helm/acapy-cloud/conf/local/governance-agent-pg-proxy.yaml create mode 100644 
helm/acapy-cloud/conf/local/multitenant-agent-pg-proxy.yaml create mode 100644 helm/acapy-test.yaml.gotmpl create mode 100644 helm/acapy-test/conf/dev/regression.yaml create mode 100644 helm/acapy-test/conf/dev/values.yaml create mode 100644 helm/ledger-nodes/conf/dev/values.yaml rename {tilt/acapy-cloud => helm}/nats/.helmignore (100%) rename {tilt/acapy-cloud => helm}/nats/Chart.lock (100%) rename {tilt/acapy-cloud => helm}/nats/Chart.yaml (100%) rename {tilt/acapy-cloud => helm}/nats/templates/job.yaml (96%) rename {tilt/acapy-cloud => helm}/nats/values.yaml (99%) delete mode 100644 tilt/acapy-cloud/connect-cloud.yaml delete mode 100644 tilt/acapy-cloud/nats/charts/.gitignore diff --git a/.github/actions/deploy-eks/action.yml b/.github/actions/deploy-eks/action.yml new file mode 100644 index 0000000000..4856ff8c9c --- /dev/null +++ b/.github/actions/deploy-eks/action.yml @@ -0,0 +1,104 @@ +name: Deploy EKS +description: Deploy to EKS + +inputs: + aws-region: + description: "The AWS region where the EKS cluster is located" + required: true + aws-role-arn: + description: "The ARN of the AWS role to assume" + required: true + aws-role-session-name: + description: "The name of the AWS role session" + required: true + clean-start: + description: "Whether to clean up the EKS cluster before deploying" + required: false + default: "false" + cluster-name: + description: "The name of the EKS cluster" + required: true + environment: + description: "The environment to deploy to" + required: true + helm-version: + description: "The version of Helm to use" + required: false + default: v3.17.0 + helmfile-plugins: + description: "The Helmfile plugins to install" + required: false + default: https://github.com/databus23/helm-diff + helmfile-version: + description: "The version of Helmfile to use" + required: false + default: v0.170.1 + image-tag: + description: "The tag of the Docker image to deploy" + required: true + namespace: + description: "The Kubernetes namespace to 
deploy to" + required: true + +runs: + using: composite + + steps: + - name: Configure AWS credentials + # https://github.com/aws-actions/configure-aws-credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-region: ${{ inputs.aws-region }} + role-to-assume: ${{ inputs.aws-role-arn }} + role-session-name: ${{ inputs.aws-role-session-name }} + + - name: Update Kubeconfig + shell: bash + run: aws eks update-kubeconfig --name $CLUSTER_NAME --region $REGION + env: + CLUSTER_NAME: ${{ inputs.cluster-name }} + REGION: ${{ inputs.aws-region }} + + - name: Helmfile Destroy + if: inputs.clean-start == 'true' + # https://github.com/helmfile/helmfile-action + uses: helmfile/helmfile-action@v2.0.2 + with: + helmfile-args: | + destroy \ + --environment ${{ inputs.environment }} \ + -f ./helm/acapy-cloud.yaml.gotmpl \ + --state-values-set namespace=${{ inputs.namespace }} + helm-plugins: ${{ inputs.helmfile-plugins }} + helmfile-version: ${{ inputs.helmfile-version }} + helm-version: ${{ inputs.helm-version }} + + - name: Create Redpanda Connect Stream ConfigMap + shell: bash + # https://docs.redpanda.com/redpanda-connect/get-started/quickstarts/helm-chart/#run-multiple-pipelines-in-streams-mode + run: | + kubectl create configmap connect-cloud-streams \ + --from-file=./resources/connect-processors/cloud/streams/cloud-events.yaml \ + --from-file=./resources/connect-processors/cloud/streams/state-monitoring.yaml \ + --dry-run=client \ + -o yaml \ + -n $NAMESPACE | kubectl apply -f - + kubectl -n $NAMESPACE rollout restart deploy/connect-cloud || true + env: + NAMESPACE: ${{ inputs.namespace }} + + - name: Helmfile Apply + # https://github.com/helmfile/helmfile-action + uses: helmfile/helmfile-action@v2.0.2 + with: + helmfile-args: | + apply \ + --environment ${{ inputs.environment }} \ + -f ./helm/acapy-cloud.yaml.gotmpl \ + --state-values-set image.tag=${{ inputs.image-tag }} \ + --state-values-set image.registry=ghcr.io/${{ github.repository_owner }} \ + 
--state-values-set pgProxyEnabled=${{ inputs.clean-start == 'false' }} \ + --state-values-set namespace=${{ inputs.namespace }} + helm-plugins: ${{ inputs.helmfile-plugins }} + helmfile-version: ${{ inputs.helmfile-version }} + helm-version: ${{ inputs.helm-version }} diff --git a/.github/actions/test-eks/action.yml b/.github/actions/test-eks/action.yml new file mode 100644 index 0000000000..9ba3edd3c7 --- /dev/null +++ b/.github/actions/test-eks/action.yml @@ -0,0 +1,291 @@ +name: Test EKS +description: Run Tests against EKS + +inputs: + clean-start: + description: "Whether this is a clean start or not" + required: false + default: "false" + environment: + description: "The environment to deploy to" + required: true + helm-version: + description: "The version of Helm to use" + required: false + default: v3.17.0 + helmfile-plugins: + description: "The Helmfile plugins to install" + required: false + default: https://github.com/databus23/helm-diff + helmfile-version: + description: "The version of Helmfile to use" + required: false + default: v0.170.1 + image-tag: + description: "The tag of the Docker image to deploy" + required: true + namespace: + description: "The Kubernetes namespace to deploy to" + required: true + pytest-completions: + description: "How many completions to run" + required: false + default: "1" + run-regression-tests: + description: "Whether to run regression tests" + required: false + default: "true" + run-tests: + description: "Whether to run tests" + required: false + default: "true" + +runs: + using: composite + + steps: + - name: Helmfile run regression tests + if: inputs.run-regression-tests == 'true' + id: pytest-regression + # https://github.com/helmfile/helmfile-action + uses: helmfile/helmfile-action@v2.0.2 + with: + helmfile-args: | + apply \ + --environment ${{ inputs.environment }} \ + -f ./helm/acapy-test.yaml.gotmpl \ + --set image.tag=${{ inputs.image-tag }} \ + --set image.registry=ghcr.io/${{ github.repository_owner }} \ + 
--set completions=${{ inputs.pytest-completions }} \ + --state-values-set release=acapy-test-regression \ + --set fullnameOverride=acapy-test-regression \ + --set env.RUN_REGRESSION_TESTS=${{ inputs.run-regression-tests }} \ + --set env.FAIL_ON_RECREATING_FIXTURES=${{ inputs.clean-start != 'true' }} \ + --state-values-set regressionEnabled=${{ inputs.run-regression-tests }} \ + --state-values-set namespace=${{ inputs.namespace }} + helm-plugins: ${{ inputs.helmfile-plugins }} + helmfile-version: ${{ inputs.helmfile-version }} + helm-version: ${{ inputs.helm-version }} + + - name: Helmfile run pytest + if: inputs.run-tests != 'false' + id: pytest + # https://github.com/helmfile/helmfile-action + uses: helmfile/helmfile-action@v2.0.2 + with: + helmfile-args: | + apply \ + --environment ${{ inputs.environment }} \ + -f ./helm/acapy-test.yaml.gotmpl \ + --set image.tag=${{ inputs.image-tag }} \ + --set image.registry=ghcr.io/${{ github.repository_owner }} \ + --set completions=${{ inputs.pytest-completions }} \ + --state-values-set release=acapy-test \ + --set fullnameOverride=acapy-test \ + --state-values-set namespace=${{ inputs.namespace }} + helm-plugins: ${{ inputs.helmfile-plugins }} + helmfile-version: ${{ inputs.helmfile-version }} + helm-version: ${{ inputs.helm-version }} + + - name: Wait for pytest and print logs + if: steps.pytest.outcome == 'success' + shell: bash + run: | + while true; do + # Check if the job is complete or failed + COMPLETION_STATUS=$(kubectl get job $JOB_NAME -n $NAMESPACE -o jsonpath='{.status.succeeded}') + FAILURE_STATUS=$(kubectl get job $JOB_NAME -n $NAMESPACE -o jsonpath='{.status.failed}') + + if [ "$COMPLETION_STATUS" == "${PYTEST_COMPLETIONS}" ] || [ "$FAILURE_STATUS" == "1" ]; then + echo "Job $JOB_NAME has completed." + break + else + echo "Waiting for $JOB_NAME to complete..." 
+ sleep 10 + fi + done + + # Get all pods for the job + pods=$(kubectl get pods -n $NAMESPACE --selector=job-name=$JOB_NAME -o jsonpath='{.items[*].metadata.name}') + + # Loop through the pods and get logs + for pod in $pods + do + echo "Logs for Pod: $pod" + kubectl logs -n $NAMESPACE $pod + done + + env: + JOB_NAME: acapy-test + NAMESPACE: ${{ inputs.namespace }} + PYTEST_COMPLETIONS: ${{ inputs.pytest-completions }} + + - name: Wait for pytest regression and print logs + if: steps.pytest-regression.outcome == 'success' + shell: bash + run: | + while true; do + # Check if the job is complete or failed + COMPLETION_STATUS=$(kubectl get job $JOB_NAME -n $NAMESPACE -o jsonpath='{.status.succeeded}') + FAILURE_STATUS=$(kubectl get job $JOB_NAME -n $NAMESPACE -o jsonpath='{.status.failed}') + + if [ "$COMPLETION_STATUS" == "${PYTEST_COMPLETIONS}" ] || [ "$FAILURE_STATUS" == "1" ]; then + echo "Job $JOB_NAME has completed." + break + else + echo "Waiting for $JOB_NAME to complete..." + sleep 10 + fi + done + + # Get all pods for the job + pods=$(kubectl get pods -n $NAMESPACE --selector=job-name=$JOB_NAME -o jsonpath='{.items[*].metadata.name}') + + # Loop through the pods and get logs + for pod in $pods + do + echo "Logs for Pod: $pod" + kubectl logs -n $NAMESPACE $pod + done + + env: + JOB_NAME: acapy-test-regression + NAMESPACE: ${{ inputs.namespace }} + PYTEST_COMPLETIONS: ${{ inputs.pytest-completions }} + + - name: Copy k8s pytest results + if: steps.pytest.outcome == 'success' || steps.pytest-regression.outcome == 'success' + shell: bash + run: | + echo "apiVersion: v1 + kind: Pod + metadata: + name: $POD_NAME + namespace: $NAMESPACE + labels: + sidecar.istio.io/inject: \"false\" + spec: + containers: + - name: $POD_NAME + image: $CONTAINER_IMAGE + command: [\"sleep\", \"3600\"] + volumeMounts: + - name: pytest-volume + mountPath: $MOUNT_PATH/pytest + - name: pytest-regression-volume + mountPath: $MOUNT_PATH/pytest-regression + volumes: + - name: pytest-volume + 
persistentVolumeClaim: + claimName: $PVC_NAME + - name: pytest-regression-volume + persistentVolumeClaim: + claimName: $PVC_NAME_REGRESSION + restartPolicy: Never" > pytest-results-pod.yaml + + kubectl apply -f pytest-results-pod.yaml + + # Wait for the pod to be ready + echo "Waiting for pod to be ready..." + kubectl -n $NAMESPACE wait --for=condition=ready pod/$POD_NAME --timeout=60s + + # Copy the files from the pod to your local system + echo "Copying files from pod..." + mkdir -p $LOCAL_PATH $LOCAL_PATH_REGRESSION + kubectl -n $NAMESPACE cp $POD_NAME:$MOUNT_PATH/pytest/$OUTPUT_FILE $LOCAL_PATH/$OUTPUT_FILE + kubectl -n $NAMESPACE cp $POD_NAME:$MOUNT_PATH/pytest/$COVERAGE_FILE $LOCAL_PATH/$COVERAGE_FILE + kubectl -n $NAMESPACE cp $POD_NAME:$MOUNT_PATH/pytest-regression/$OUTPUT_FILE $LOCAL_PATH_REGRESSION/$OUTPUT_FILE + kubectl -n $NAMESPACE cp $POD_NAME:$MOUNT_PATH/pytest-regression/$COVERAGE_FILE $LOCAL_PATH_REGRESSION/$COVERAGE_FILE + + # Clean up: delete the temporary pod + echo "Cleaning up..." + kubectl -n $NAMESPACE delete pod $POD_NAME + + echo "Done!" 
+ env: + PVC_NAME: acapy-test + PVC_NAME_REGRESSION: acapy-test-regression + POD_NAME: pytest-results-pod + CONTAINER_IMAGE: busybox + MOUNT_PATH: /mnt + LOCAL_PATH: ./pytest + LOCAL_PATH_REGRESSION: ./pytest-regression + NAMESPACE: ${{ inputs.namespace }} + OUTPUT_FILE: test_output.xml + COVERAGE_FILE: test_coverage.txt + + - name: Pytest coverage comment + if: steps.pytest.outcome == 'success' + # https://github.com/MishaKav/pytest-coverage-comment + uses: MishaKav/pytest-coverage-comment@v1.1.53 + with: + pytest-coverage-path: ./pytest/test_coverage.txt + junitxml-path: ./pytest/test_output.xml + create-new-comment: true + title: "K8s Test Coverage" + # Resolves `Warning: Your comment is too long (maximum is 65536 characters), coverage report will not be added.` + hide-report: ${{ github.event_name != 'pull_request' }} + hide-comment: ${{ github.event_name != 'pull_request' }} + + - name: Pytest regression coverage comment + if: steps.pytest-regression.outcome == 'success' + # https://github.com/MishaKav/pytest-coverage-comment + uses: MishaKav/pytest-coverage-comment@v1.1.53 + with: + pytest-coverage-path: ./pytest-regression/test_coverage.txt + junitxml-path: ./pytest-regression/test_output.xml + create-new-comment: true + title: "K8s Regression Test Coverage" + # Resolves `Warning: Your comment is too long (maximum is 65536 characters), coverage report will not be added.` + hide-report: ${{ github.event_name != 'pull_request' }} + hide-comment: ${{ github.event_name != 'pull_request' }} + + - name: Publish Pytest Report + # https://github.com/mikepenz/action-junit-report + uses: mikepenz/action-junit-report@v5 + if: steps.pytest.outcome == 'success' + with: + check_name: JUnit Test Report + report_paths: "./pytest/test_output.xml" + fail_on_failure: true + detailed_summary: true + require_passed_tests: true + + - name: Publish Pytest Regression Report + uses: mikepenz/action-junit-report@v5 + if: steps.pytest-regression.outcome == 'success' + with: + 
check_name: JUnit Test Report Regression + report_paths: "./pytest-regression/test_output.xml" + fail_on_failure: true + detailed_summary: true + require_passed_tests: true + + - name: Helmfile destroy pytest + # https://github.com/helmfile/helmfile-action + uses: helmfile/helmfile-action@v2.0.2 + if: always() + with: + helmfile-args: | + destroy \ + --environment ${{ inputs.environment }} \ + -f ./helm/acapy-test.yaml.gotmpl \ + --state-values-set release=acapy-test + helm-plugins: ${{ inputs.helmfile-plugins }} + helmfile-version: ${{ inputs.helmfile-version }} + helm-version: ${{ inputs.helm-version }} + + - name: Helmfile destroy pytest regression + # https://github.com/helmfile/helmfile-action + uses: helmfile/helmfile-action@v2.0.2 + if: always() + with: + helmfile-args: | + destroy \ + --environment ${{ inputs.environment }} \ + -f ./helm/acapy-test.yaml.gotmpl \ + --state-values-set release=acapy-test-regression \ + --state-values-set namespace=${{ inputs.namespace }} + helm-plugins: ${{ inputs.helmfile-plugins }} + helmfile-version: ${{ inputs.helmfile-version }} + helm-version: ${{ inputs.helm-version }} diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml new file mode 100644 index 0000000000..264ecb3492 --- /dev/null +++ b/.github/workflows/cicd.yml @@ -0,0 +1,516 @@ +name: CICD + +on: + workflow_dispatch: + inputs: + run-reset-deployments: + description: Reset deployment - Clean start + required: false + default: false + type: boolean + run-tests: + description: Run tests step + required: false + default: true + type: boolean + run-regression-tests: + description: Run regression tests step + required: false + default: true + type: boolean + + push: + branches: + - master + tags: + - "v*" + paths: + - "**" + - "!docs/**" # Ignore changes in the docs folder + - "!**.md" # Ignore changes to any markdown file + pull_request: + branches: + - master + types: + - opened + - reopened + - synchronize + - ready_for_review + paths: + - "**" + - 
"!docs/**" # Ignore changes in the docs folder + - "!**.md" # Ignore changes to any markdown file + +permissions: {} + +env: + TAILSCALE_VERSION: 1.80.0 + HELMFILE_VERSION: v0.170.1 + HELM_VERSION: v3.17.0 + MISE_VERSION: 2025.2.1 + +jobs: + format: + name: Format + runs-on: ubuntu-latest + + concurrency: + group: format-check-${{ github.ref_name }} + cancel-in-progress: true + + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Overwrite .mise.toml # It's not needed in this workflow + run: | + cat < .mise.toml + [tools] + "pipx:black" = "25.1" + "pipx:isort" = "6.0" + python = "3.12" + EOF + + - name: Set up Mise + uses: jdx/mise-action@v2 + with: + version: ${{ env.MISE_VERSION }} + cache: true + experimental: true + install: true + + - name: Check import style with isort + run: isort . --check --profile black --diff + + - name: Check code style with Black + run: black . --check --diff + + - name: Check Tiltfiles with Black + run: | + find . -type f -name "Tiltfile" | while read -r file; do + black --check --diff "$file" + done + + unit: + name: Unit Tests + runs-on: ubuntu-latest + + concurrency: + group: unit-test-${{ github.ref_name }} + cancel-in-progress: true + + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Cache Python venv + uses: actions/cache@v4 + with: + path: .venv + key: python-${{ hashFiles('**/poetry.lock', '.mise.toml') }} + + - name: Set up Mise + uses: jdx/mise-action@v2 + with: + version: ${{ env.MISE_VERSION }} + cache: true + experimental: true + install: true + env: + MISE_JOBS: 4 + - name: Load Mise env + run: | + mise env -s bash \ + | grep -v 'export PATH=' \ + | cut -d' ' -f2 \ + >> "$GITHUB_ENV" + + - name: Run unit tests + run: mise run tests:unit + build: + name: Build + permissions: + packages: write # To push to GHCR.io + runs-on: ubuntu-latest + + needs: + - format + - unit + + concurrency: + group: docker-build-${{ matrix.image }}-${{ github.ref_name }} + 
cancel-in-progress: true + + outputs: + image_version: ${{ steps.meta.outputs.version }} + + strategy: + fail-fast: false + matrix: + image: + - acapy-cloud/app + - acapy-cloud/endorser + - acapy-cloud/governance-agent + - acapy-cloud/ledger-nodes + - acapy-cloud/multitenant-agent + - acapy-cloud/pytest + - acapy-cloud/tails-server + - acapy-cloud/trust-registry + - acapy-cloud/waypoint + include: + - image: acapy-cloud/app + context: . + file: dockerfiles/app/Dockerfile + platforms: linux/amd64,linux/arm64 + - image: acapy-cloud/endorser + context: . + file: dockerfiles/endorser/Dockerfile + platforms: linux/amd64,linux/arm64 + - image: acapy-cloud/governance-agent + context: . + file: dockerfiles/agents/Dockerfile.agent + platforms: linux/amd64 # Pending BBS - linux/arm64 + - image: acapy-cloud/ledger-nodes + context: https://github.com/bcgov/von-network.git#v1.8.0 + file: Dockerfile + platforms: linux/amd64 + - image: acapy-cloud/multitenant-agent + context: . + file: dockerfiles/agents/Dockerfile.author.agent + platforms: linux/amd64 # Pending BBS - linux/arm64 + - image: acapy-cloud/pytest + context: . + file: dockerfiles/tests/Dockerfile + platforms: linux/amd64,linux/arm64 + - image: acapy-cloud/tails-server + context: https://github.com/bcgov/indy-tails-server.git#v1.1.0 + file: docker/Dockerfile.tails-server + platforms: linux/amd64,linux/arm64 + - image: acapy-cloud/trust-registry + context: . + file: dockerfiles/trustregistry/Dockerfile + platforms: linux/amd64,linux/arm64 + - image: acapy-cloud/waypoint + context: . 
+ file: dockerfiles/waypoint/Dockerfile + platforms: linux/amd64,linux/arm64 + + steps: + - name: Check out code + uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + cache-binary: false + + - uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ github.token }} + + - name: Docker Metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: ghcr.io/${{ github.repository_owner }}/${{ matrix.image }} + tags: | + type=raw,value=latest,enable=${{ github.event.repository.default_branch == github.ref_name }} + type=sha,prefix=pr-${{ github.event.pull_request.number }}-,priority=601,enable=${{ github.event_name == 'pull_request' }} + type=sha,prefix={{branch}}-,priority=601,enable=${{ github.event_name == 'push' && github.ref_type == 'branch' }} + type=ref,event=branch,priority=600 + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + + - name: Build and push Docker images + uses: docker/build-push-action@v6 + with: + context: ${{ matrix.context }} + file: ${{ matrix.file }} + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: | + type=gha,scope=build-${{ matrix.image }} + type=registry,ref=ghcr.io/${{ github.repository_owner }}/${{ matrix.image }}:latest + cache-to: type=gha,mode=max,scope=build-${{ matrix.image }} + platforms: ${{ matrix.platforms }} + + test: + name: Local Test + needs: + - build + runs-on: ubuntu-latest + + concurrency: + group: local-test-${{ github.ref_name }} + cancel-in-progress: true + + outputs: + test_success: ${{ steps.test.outputs.test_success }} + + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Cache Python venv + uses: actions/cache@v4 + with: + path: .venv + key: python-${{ hashFiles('**/poetry.lock', '.mise.toml') }} + + - name: Set up 
Mise + uses: jdx/mise-action@v2 + with: + version: ${{ env.MISE_VERSION }} + cache: true + experimental: true # Required for mise tasks + install: true + env: + MISE_JOBS: 4 + - name: Load Mise env + run: | + mise env -s bash \ + | grep -v 'export PATH=' \ + | cut -d' ' -f2 \ + >> "$GITHUB_ENV" + - name: Install dependencies with Poetry + run: mise run poetry:install + env: + MISE_JOBS: 1 + + - name: Start Test Harness + run: mise run tilt:ci + shell: bash + env: + REGISTRY: ghcr.io/${{ github.repository_owner }} + IMAGE_TAG: ${{ needs.build.outputs.image_version }} + + - name: Test with pytest + id: test + run: | + source .venv/bin/activate + set +e + + cp .env.example .env + source .env + + # Any portforwards will not be active after `tilt ci` has exited. + kubectl port-forward svc/ledger-browser 9000:8000 -n cloudapi & + + poetry run pytest \ + --numprocesses 4 \ + --dist loadgroup \ + --durations=0 \ + --ignore ./tilt \ + --cov | tee test_output.txt + + EXIT_CODE=${PIPESTATUS[0]} + set -e + echo "Exit code: $EXIT_CODE" + + # very hacky way to get around the fact that teardown fails even if tests pass + TEARDOWN_ERROR=false + SINGLE_ERROR=false + TEST_FAILURES=0 + + if grep -q "ERROR at teardown" test_output.txt; then + echo "ERROR at teardown" + TEARDOWN_ERROR=true + fi + + if grep -q ", 1 error in" test_output.txt; then + echo "Only 1 error total" + SINGLE_ERROR=true + fi + + # Count the number of test failures + TEST_FAILURES=$(grep -c "^FAILED" test_output.txt || true) + echo "Number of test failures: $TEST_FAILURES" + + if [ "$TEARDOWN_ERROR" = true ] && [ "$SINGLE_ERROR" = true ] && [ "$TEST_FAILURES" -eq 0 ]; then + echo "Tests passed with teardown error" + exit 0 + else + if [ "$EXIT_CODE" -ne 0 ]; then + echo "test_success=false" >> $GITHUB_OUTPUT + else + echo "test_success=true" >> $GITHUB_OUTPUT + fi + exit $EXIT_CODE + fi + + - name: Install coverage and generate report + run: | + source .venv/bin/activate + pip install coverage + coverage report + 
coverage xml + sudo rm -rf tilt/docker + - name: Upload coverage to Codacy + run: bash <(curl -Ls https://coverage.codacy.com/get.sh) report -r coverage.xml + env: + CODACY_PROJECT_TOKEN: ${{ secrets.CODACY_PROJECT_TOKEN }} + - name: Upload coverage file as artifact + uses: actions/upload-artifact@v4 + with: + name: coverage + path: .coverage + include-hidden-files: true + + - name: Get Docker Containers + if: always() + run: docker ps -a + - name: Get Pods + if: always() + run: kubectl get pods --all-namespaces + - name: Get Helm Releases + if: always() + run: helm list --all-namespaces + - name: Connect Cloud Logs + # Connect Cloud can generate a lot of logs... + if: always() + run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=connect-cloud --tail 10000 + - name: Docker Cache Logs + if: always() + run: docker logs docker-cache + - name: Endorser Logs + if: always() + run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=endorser --tail 10000 + - name: Governance Agent Logs + if: always() + run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=governance-agent --tail 10000 + - name: Governance Web Logs + if: always() + run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=governance-web --tail 10000 + - name: Ingress Nginx Logs + if: always() + run: kubectl logs -n ingress-system -l app.kubernetes.io/instance=ingress-nginx --tail 10000 + - name: Ledger Browser Logs + if: always() + run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=ledger-browser --tail 10000 + - name: Ledger Nodes Logs + if: always() + run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=ledger-nodes --tail 10000 + - name: Mediator Logs + if: always() + run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=mediator --tail 10000 + - name: Multitenant Agent Logs + if: always() + run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=multitenant-agent --tail 10000 + - name: Multitenant Web Logs + if: always() + run: kubectl logs -n cloudapi -l 
app.kubernetes.io/instance=multitenant-web --tail 10000 + - name: NATS Logs + if: always() + run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=nats --tail 10000 + - name: PGPool Logs + if: always() + run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=postgres,app.kubernetes.io/component=pgpool --tail 10000 + - name: PostgreSQL Logs + if: always() + run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=postgres,app.kubernetes.io/component=postgresql --tail 10000 + - name: Public Web Logs + if: always() + run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=public-web --tail 10000 + - name: Tails Server Logs + if: always() + run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=tails-server --tail 10000 + - name: Tenant Web Logs + if: always() + run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=tenant-web --tail 10000 + - name: Trust Registry Logs + if: always() + run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=trust-registry --tail 10000 + - name: Waypoint Logs + if: always() + run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=waypoint --tail 10000 + + - name: Tilt Down Destroy + if: always() + run: mise run tilt:down:destroy + + deploy-test-eks: + if: github.actor != 'dependabot[bot]' && github.event.pull_request.draft == false + name: Deploy and Test EKS + runs-on: ubuntu-latest + + environment: + name: dev + + needs: + - build + + permissions: + id-token: write # Required to authenticate with AWS + checks: write # Required for action-junit-report + pull-requests: write # Required to comment on PRs for Pytest coverage comment + + concurrency: + group: deploy-test-eks + cancel-in-progress: false + + timeout-minutes: 60 + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Set up Mise + uses: jdx/mise-action@v2 + with: + version: ${{ env.MISE_VERSION }} + cache: true + experimental: true # Required for mise tasks + install: true + env: + 
MISE_JOBS: 4 + - name: Load Mise env + run: | + mise env -s bash \ + | grep -v 'export PATH=' \ + | cut -d' ' -f2 \ + >> "$GITHUB_ENV" + + - uses: tailscale/github-action@main + with: + authkey: ${{ secrets.TAILSCALE_AUTHKEY }} + version: ${{ env.TAILSCALE_VERSION }} + + - name: Deploy to EKS + uses: ./.github/actions/deploy-eks + with: + aws-region: af-south-1 + aws-role-arn: arn:aws:iam::402177810328:role/cicd + aws-role-session-name: github-cicd + clean-start: ${{ github.event.inputs.run-reset-deployments || false }} + cluster-name: cloudapi-dev + environment: ${{ vars.ENVIRONMENT }} + helm-version: ${{ env.HELM_VERSION }} + helmfile-plugins: https://github.com/databus23/helm-diff + helmfile-version: ${{ env.HELMFILE_VERSION }} + image-tag: ${{ needs.build.outputs.image_version }} + namespace: acapy-cloud-dev + + - name: Run Tests + uses: ./.github/actions/test-eks + with: + clean-start: ${{ github.event.inputs.run-reset-deployments || false }} + environment: ${{ vars.ENVIRONMENT }} + helm-version: ${{ env.HELM_VERSION }} + helmfile-plugins: https://github.com/databus23/helm-diff + helmfile-version: ${{ env.HELMFILE_VERSION }} + image-tag: ${{ needs.build.outputs.image_version }} + namespace: acapy-cloud-dev + pytest-completions: 1 + run-regression-tests: ${{ github.event.inputs.run-regression-tests || true }} + run-tests: ${{ github.event.inputs.run-tests || true }} diff --git a/.github/workflows/continuous-deploy.yml b/.github/workflows/continuous-deploy.yml deleted file mode 100644 index 31aa65949c..0000000000 --- a/.github/workflows/continuous-deploy.yml +++ /dev/null @@ -1,1089 +0,0 @@ -name: CICD - -on: - workflow_dispatch: - inputs: - run-reset-deployments: - description: Reset deployment - Clean start - required: false - default: false - type: boolean - run-tests: - description: Run tests step - required: false - default: true - type: boolean - run-regression-tests: - description: Run regression tests step - required: false - default: true - type: 
boolean - - push: - branches: - - master - tags: - - "v*" - paths: - - "**" - - "!docs/**" # Ignore changes in the docs folder - - "!**.md" # Ignore changes to any markdown file - pull_request: - branches: - - master - types: - - opened - - reopened - - synchronize - - ready_for_review - paths: - - "**" - - "!docs/**" # Ignore changes in the docs folder - - "!**.md" # Ignore changes to any markdown file - -concurrency: - group: cicd - cancel-in-progress: false - -permissions: {} - -env: - TAILSCALE_VERSION: 1.80.0 - HELMFILE_VERSION: v0.170.1 - HELM_VERSION: v3.17.0 - MISE_VERSION: 2025.2.0 - -jobs: - build: - if: github.event.pull_request.draft == false - name: Build - permissions: - packages: write # To push to GHCR.io - runs-on: ubuntu-latest - - outputs: - image_version: ${{ steps.meta.outputs.version }} - - strategy: - fail-fast: false - matrix: - image: - [ - d-cloud/ledger-nodes, - d-cloud/ledger-browser, - d-cloud/tails-server, - d-cloud/governance-agent, - d-cloud/trust-registry, - d-cloud/multitenant-web, - d-cloud/governance-web, - d-cloud/tenant-web, - d-cloud/public-web, - d-cloud/multitenant-agent, - d-cloud/endorser, - d-cloud/pytest, - d-cloud/waypoint, - ] - include: - - image: d-cloud/governance-agent - context: . - file: dockerfiles/agents/Dockerfile.agent - platforms: linux/amd64 # Pending BBS - linux/arm64 - - image: d-cloud/trust-registry - context: . - file: dockerfiles/trustregistry/Dockerfile - platforms: linux/amd64,linux/arm64 - - image: d-cloud/multitenant-web - context: . - file: dockerfiles/fastapi/Dockerfile - platforms: linux/amd64,linux/arm64 - - image: d-cloud/governance-web - context: . - file: dockerfiles/fastapi/Dockerfile - platforms: linux/amd64,linux/arm64 - - image: d-cloud/tenant-web - context: . - file: dockerfiles/fastapi/Dockerfile - platforms: linux/amd64,linux/arm64 - - image: d-cloud/public-web - context: . 
- file: dockerfiles/fastapi/Dockerfile - platforms: linux/amd64,linux/arm64 - - image: d-cloud/multitenant-agent - context: . - file: dockerfiles/agents/Dockerfile.author.agent - platforms: linux/amd64 # Pending BBS - linux/arm64 - - image: d-cloud/ledger-browser - context: https://github.com/bcgov/von-network.git#v1.8.0 - file: Dockerfile - platforms: linux/amd64 - - image: d-cloud/ledger-nodes - context: https://github.com/bcgov/von-network.git#v1.8.0 - file: Dockerfile - platforms: linux/amd64 - - image: d-cloud/endorser - context: . - file: dockerfiles/endorser/Dockerfile - platforms: linux/amd64,linux/arm64 - - image: d-cloud/tails-server - context: https://github.com/bcgov/indy-tails-server.git#v1.1.0 - file: docker/Dockerfile.tails-server - platforms: linux/amd64,linux/arm64 - - image: d-cloud/pytest - context: . - file: dockerfiles/tests/Dockerfile - platforms: linux/amd64,linux/arm64 - - image: d-cloud/waypoint - context: . - file: dockerfiles/waypoint/Dockerfile - platforms: linux/amd64,linux/arm64 - - steps: - - name: Check out code - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - with: - cache-binary: false - - - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ github.token }} - - - name: Docker Metadata - id: meta - uses: docker/metadata-action@v5 - with: - images: ghcr.io/${{ github.repository_owner }}/${{ matrix.image }} - tags: | - type=raw,value=latest,enable=${{ github.event.repository.default_branch == github.ref_name }} - type=sha,prefix=pr-${{ github.event.pull_request.number }}-,priority=601,enable=${{ github.event_name == 'pull_request' }} - type=sha,prefix={{branch}}-,priority=601,enable=${{ github.event_name == 'push' && github.ref_type == 'branch' }} - type=ref,event=branch,priority=600 - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern={{major}}.{{minor}} - - - 
name: Build and push Docker images - uses: docker/build-push-action@v6 - with: - context: ${{ matrix.context }} - file: ${{ matrix.file }} - push: true - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - cache-from: | - type=gha,scope=build-${{ matrix.image }} - type=registry,ref=ghcr.io/${{ github.repository_owner }}/${{ matrix.image }}:latest - cache-to: type=gha,mode=max,scope=build-${{ matrix.image }} - platforms: ${{ matrix.platforms }} - - lint: - name: Lint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - persist-credentials: false - - name: Set up Mise - uses: jdx/mise-action@v2 - with: - version: ${{ env.MISE_VERSION }} - cache: true - experimental: true # Required for mise tasks - install: true - - name: Load Mise env - run: | - mise env -s bash \ - | grep -v 'export PATH=' \ - | cut -d' ' -f2 \ - >> "$GITHUB_ENV" - - name: Install dependencies with Poetry - run: mise run poetry:install - - name: Run Pylint - run: | - poetry run pylint app/ endorser/ shared/ trustregistry/ waypoint/ --rcfile=.pylintrc -r n --msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}" --exit-zero > pylintreport.txt - - test: - if: github.event.pull_request.draft == false - name: Test - needs: - - build - runs-on: ubuntu-latest - outputs: - test_success: ${{ steps.test.outputs.test_success }} - strategy: - fail-fast: true - matrix: - module: - - { test: "app/tests/ endorser trustregistry waypoint --ignore=app/tests/e2e/", id: "1" } # fast tests, ignore e2e - - { test: "app/tests/e2e/issuer/", id: "2" } # then some individual, slower e2e tests - - { test: "app/tests/e2e/verifier/", id: "3" } - - { test: "app/tests/e2e/test_definitions.py app/tests/e2e/test_revocation.py", id: "4" } - - { test: "app/tests/e2e/ \ - --ignore=app/tests/e2e/issuer/ \ - --ignore=app/tests/e2e/verifier/ \ - --ignore=app/tests/e2e/test_definitions.py \ - --ignore=app/tests/e2e/test_revocation.py", id: "5" } # all other e2e 
tests - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - persist-credentials: false - - name: Set up Mise - uses: jdx/mise-action@v2 - with: - version: ${{ env.MISE_VERSION }} - cache: true - experimental: true # Required for mise tasks - install: true - - name: Load Mise env - run: | - mise env -s bash \ - | grep -v 'export PATH=' \ - | cut -d' ' -f2 \ - >> "$GITHUB_ENV" - - name: Install dependencies with Poetry - run: mise run poetry:install - - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ github.token }} - - name: Start Test Harness - run: mise run tilt:ci - shell: bash - env: - REGISTRY: ghcr.io/${{ github.repository_owner }} - IMAGE_TAG: ${{ needs.build.outputs.image_version }} - - name: Test with pytest - id: test - - run: | - source .venv/bin/activate - set +e - - cp .env.example .env - source .env - - # Any portforwards will not be active after `tilt ci` has exited. - kubectl port-forward svc/ledger-browser 9000:8000 -n cloudapi & - - poetry run pytest --durations=0 ${{ matrix.module.test }} --cov | tee test_output.txt - EXIT_CODE=${PIPESTATUS[0]} - set -e - echo "Exit code: $EXIT_CODE" - - mkdir -p coverage-files - mv .coverage coverage-files/.coverage.${{ matrix.module.id }} - - # very hacky way to get around the fact that teardown fails even if tests pass - TEARDOWN_ERROR=false - SINGLE_ERROR=false - TEST_FAILURES=0 - - if grep -q "ERROR at teardown" test_output.txt; then - echo "ERROR at teardown" - TEARDOWN_ERROR=true - fi - - if grep -q ", 1 error in" test_output.txt; then - echo "Only 1 error total" - SINGLE_ERROR=true - fi - - # Count the number of test failures - TEST_FAILURES=$(grep -c "^FAILED" test_output.txt || true) - echo "Number of test failures: $TEST_FAILURES" - - if [ "$TEARDOWN_ERROR" = true ] && [ "$SINGLE_ERROR" = true ] && [ "$TEST_FAILURES" -eq 0 ]; then - echo "Tests passed with teardown error" - exit 0 - else - if [ "$EXIT_CODE" -ne 0 ]; then - 
echo "test_success=false" >> $GITHUB_OUTPUT - else - echo "test_success=true" >> $GITHUB_OUTPUT - fi - exit $EXIT_CODE - fi - pwd - - name: Upload .coverage files as artifact - uses: actions/upload-artifact@v4 - with: - name: coverage-files-${{ matrix.module.id }} - path: coverage-files/.coverage.${{ matrix.module.id }} - include-hidden-files: true - - - name: Get Docker Containers - if: always() - run: docker ps -a - - name: Get Pods - if: always() - run: kubectl get pods --all-namespaces - # Connect Cloud generates a lot of logs... - - name: Connect Cloud Logs - if: always() - run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=connect-cloud --tail 10000 - - name: Docker Cache Logs - if: always() - run: docker logs docker-cache - - name: Endorser Logs - if: always() - run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=endorser --tail 10000 - - name: Governance Agent Logs - if: always() - run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=governance-agent --tail 10000 - - name: Governance Web Logs - if: always() - run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=governance-web --tail 10000 - - name: Ingress Nginx Logs - if: always() - run: kubectl logs -n ingress-system -l app.kubernetes.io/instance=ingress-nginx --tail 10000 - - name: Ledger Browser Logs - if: always() - run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=ledger-browser --tail 10000 - - name: Ledger Nodes Logs - if: always() - run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=ledger-nodes --tail 10000 - - name: Mediator Logs - if: always() - run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=mediator --tail 10000 - - name: Multitenant Agent Logs - if: always() - run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=multitenant-agent --tail 10000 - - name: Multitenant Web Logs - if: always() - run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=multitenant-web --tail 10000 - - name: NATS Logs - if: always() - run: 
kubectl logs -n cloudapi -l app.kubernetes.io/instance=nats --tail 10000 - - name: PGPool Logs - if: always() - run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=postgres,app.kubernetes.io/component=pgpool --tail 10000 - - name: PostgreSQL Logs - if: always() - run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=postgres,app.kubernetes.io/component=postgresql --tail 10000 - - name: Public Web Logs - if: always() - run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=public-web --tail 10000 - - name: Tails Server Logs - if: always() - run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=tails-server --tail 10000 - - name: Tenant Web Logs - if: always() - run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=tenant-web --tail 10000 - - name: Trust Registry Logs - if: always() - run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=trust-registry --tail 10000 - - name: Waypoint Logs - if: always() - run: kubectl logs -n cloudapi -l app.kubernetes.io/instance=waypoint --tail 10000 - - - name: Tilt Down Destroy - if: always() - run: mise run tilt:down:destroy - - status-check: - name: Status Check - runs-on: ubuntu-latest - needs: test - if: always() - steps: - - name: Check if any test failed - run: exit 1 - if: needs.test.outputs.test_success == 'false' - - combine-coverage: - if: github.event.pull_request.draft == false - name: Coverage - runs-on: ubuntu-latest - needs: status-check - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - persist-credentials: false - - name: Install dependencies - run: pip install coverage - - name: Download all .coverage artifacts - uses: actions/download-artifact@v4 - with: - path: coverage-files - pattern: "coverage-*" - - name: Move coverage files to top-level directory - run: | - for dir in coverage-files/coverage-files-*; do - mv "$dir"/.coverage.* . 
- done - - name: Combine coverage files - run: | - coverage combine - coverage report - - name: Generate XML coverage report - run: | - coverage xml - - name: Upload coverage to Codacy - run: bash <(curl -Ls https://coverage.codacy.com/get.sh) report -r coverage.xml - env: - CODACY_PROJECT_TOKEN: ${{ secrets.CODACY_PROJECT_TOKEN }} - - deploy: - if: github.actor != 'dependabot[bot]' && github.event.pull_request.draft == false - name: Deploy to EKS - environment: - name: dev - needs: - - build - permissions: - id-token: write # Required to authenticate with AWS - checks: write # Required for action-junit-report - pull-requests: write # Required to comment on PRs for Pytest coverage comment - runs-on: ubuntu-latest - - timeout-minutes: 30 - - outputs: - output: ${{ steps.updated_deployments.outputs.success }} - - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Set up Mise - uses: jdx/mise-action@v2 - with: - version: ${{ env.MISE_VERSION }} - cache: true - experimental: true # Required for mise tasks - install: true - - name: Load Mise env - run: | - mise env -s bash \ - | grep -v 'export PATH=' \ - | cut -d' ' -f2 \ - >> "$GITHUB_ENV" - - - name: Authenticate GitHub CLI - run: echo "${{ secrets.CHARTS_PAT }}" | gh auth login --with-token - - - name: Set branch name - id: set_branch_name - run: | - if [[ "${{ github.event_name }}" == "pull_request" ]]; then - echo "BRANCH_NAME=${HEAD_REF}" >> $GITHUB_ENV - else - echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/} | cut -d/ -f2-)" >> $GITHUB_ENV - fi - env: - HEAD_REF: ${{ github.head_ref }} - - - name: Check if branch exists - id: check_branch - run: | - DEFAULT_BRANCH=master - SANITIZED_BRANCH_NAME=$(echo "$BRANCH_NAME" | sed 's/\//%2F/g') - if gh api "repos/didx-xyz/charts/git/ref/heads/$SANITIZED_BRANCH_NAME" &> /dev/null; then - echo "branch_exists=true" >> $GITHUB_ENV - echo "branch_name=$BRANCH_NAME" >> $GITHUB_ENV - else - echo "branch_exists=false" 
>> $GITHUB_ENV - echo "branch_name=$DEFAULT_BRANCH" >> $GITHUB_ENV - fi - - - name: Checkout Charts - uses: actions/checkout@v4 - with: - repository: didx-xyz/charts - token: ${{ secrets.CHARTS_PAT }} - path: charts - ref: ${{ env.branch_name }} - persist-credentials: false - - - name: Install dependencies - run: sudo apt-get install -y postgresql-client - - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v4 - with: - aws-region: af-south-1 - role-to-assume: arn:aws:iam::402177810328:role/cicd - role-session-name: github-cicd - - - name: Update Kubeconfig - run: aws eks update-kubeconfig --name cloudapi-dev - - - uses: tailscale/github-action@main - with: - authkey: ${{ secrets.TAILSCALE_AUTHKEY }} - version: ${{ env.TAILSCALE_VERSION }} - - - name: Helmfile Destroy - id: destroy_deployments - if: github.event.inputs.run-reset-deployments == 'true' - uses: helmfile/helmfile-action@v2.0.2 - with: - helmfile-args: | - destroy \ - --environment ${{ vars.ENVIRONMENT }} \ - -f ./charts/helmfiles/aries-cloudapi-python.yaml.gotmpl - helm-plugins: | - https://github.com/databus23/helm-diff - helmfile-version: ${{ env.HELMFILE_VERSION }} - helm-version: ${{ env.HELM_VERSION }} - env: - IMAGE_TAG: ${{ needs.build.outputs.image_version }} - - - name: Drop DBs - id: drop_dbs - if: github.event.inputs.run-reset-deployments == 'true' - env: - DB_PASSWORD: ${{ secrets.DB_PASSWORD }} - DB_HOST: ${{ secrets.DB_HOST }} - DB_USER: ${{ secrets.DB_USER}} - DB_PORT: ${{ secrets.DB_PORT }} - DB_EXCLUDE: ${{ secrets.DB_EXCLUDE }} - GA_ACAPY_WALLET_NAME: ${{ secrets.GA_ACAPY_WALLET_NAME }} - MT_ACAPY_WALLET_NAME: ${{ secrets.MT_ACAPY_WALLET_NAME }} - TRUST_REGISTRY_DB_OWNER: ${{ secrets.TRUST_REGISTRY_DB_OWNER }} - run: | - bash ./scripts/aurora-delete.sh -o $GA_ACAPY_WALLET_NAME -d - bash ./scripts/aurora-delete.sh -o $MT_ACAPY_WALLET_NAME -d - bash ./scripts/aurora-delete.sh -o $TRUST_REGISTRY_DB_OWNER -d - bash ./scripts/aurora-delete.sh -o 
$TRUST_REGISTRY_DB_OWNER -c - - - name: Reset NATS - if: github.event.inputs.run-reset-deployments == 'true' - env: - NAMESPACE: dev-cloudapi - run: | - kubectl get secret \ - -n ${NAMESPACE} \ - ${{ secrets.NATS_SECRET_NAME }} \ - -o jsonpath='{.data.cloudapi-nats-admin\.creds}' \ - | base64 -d > nats.creds - - nats --creds ./nats.creds \ - --server ${{ secrets.NATS_URL }} \ - stream rm cloudapi_aries_events -f - nats --creds ./nats.creds \ - --server ${{ secrets.NATS_URL }} \ - stream rm acapy_events -f - nats --creds ./nats.creds \ - --server ${{ secrets.NATS_URL }} \ - stream rm cloudapi_aries_state_monitoring -f - - nats --creds ./nats.creds \ - --server ${{ secrets.NATS_URL }} \ - stream add cloudapi_aries_events --subjects "cloudapi.aries.events.*.*" \ - --defaults \ - --storage file \ - --replicas 3 \ - --compression s2 - - nats --creds ./nats.creds \ - --server ${{ secrets.NATS_URL }} \ - stream add acapy_events --subjects "acapy.>" \ - --defaults \ - --storage file \ - --replicas 3 \ - --compression s2 - - nats --creds ./nats.creds \ - --server ${{ secrets.NATS_URL }} \ - stream add cloudapi_aries_state_monitoring --subjects "cloudapi.aries.state_monitoring.*.*.>" \ - --defaults \ - --storage file \ - --replicas 3 \ - --compression s2 \ - --retention limits \ - --discard old \ - --max-age 1m \ - --dupe-window 1m \ - --max-msgs-per-subject 1000 - - rm -f ./nats.creds - - - name: Helmfile Apply # Apply default Helmfile (without RDS proxy) when resetting deployments. 
- if: github.event.inputs.run-reset-deployments == 'true' - uses: helmfile/helmfile-action@v2.0.2 - with: - helmfile-args: | - apply \ - --environment ${{ vars.ENVIRONMENT }} \ - -f ./charts/helmfiles/aries-cloudapi-python.yaml.gotmpl \ - --state-values-set image.tag=${{ env.IMAGE_TAG }} \ - --state-values-set image.registry=ghcr.io/${{ github.repository_owner }} - helm-plugins: | - https://github.com/databus23/helm-diff - helmfile-version: ${{ env.HELMFILE_VERSION }} - helm-version: ${{ env.HELM_VERSION }} - env: - IMAGE_TAG: ${{ needs.build.outputs.image_version }} - - - name: Helmfile Apply (RDS Proxy) - if: github.event.inputs.run-reset-deployments != 'true' - uses: helmfile/helmfile-action@v2.0.2 - with: - helmfile-args: | - apply \ - --environment ${{ vars.ENVIRONMENT }} \ - -f ./charts/helmfiles/aries-cloudapi-python.yaml.gotmpl \ - --state-values-set image.tag=${{ env.IMAGE_TAG }} \ - --state-values-set image.registry=ghcr.io/${{ github.repository_owner }} \ - --state-values-set rdsProxyEnabled=true - helm-plugins: | - https://github.com/databus23/helm-diff - helmfile-version: ${{ env.HELMFILE_VERSION }} - helm-version: ${{ env.HELM_VERSION }} - env: - IMAGE_TAG: ${{ needs.build.outputs.image_version }} - test-eks: - if: github.actor != 'dependabot[bot]' && github.event.pull_request.draft == false - name: Run Pytest on EKS - environment: - name: dev - needs: - - build - - deploy - permissions: - id-token: write # Required to authenticate with AWS - checks: write # Required for action-junit-report - pull-requests: write # Required to comment on PRs for Pytest coverage comment - runs-on: ubuntu-latest - - timeout-minutes: 20 - - env: - OUTPUT_FILE: test_output.xml - COVERAGE_FILE: test_coverage.txt - PYTEST_COMPLETIONS: 1 - - outputs: - output: ${{ steps.updated_deployments.outputs.success }} - - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Set up Mise - uses: jdx/mise-action@v2 - with: - version: 
${{ env.MISE_VERSION }} - cache: true - experimental: true # Required for mise tasks - install: true - - - name: Authenticate GitHub CLI - run: echo "${PAT}" | gh auth login --with-token - env: - PAT: ${{ secrets.CHARTS_PAT }} - - - name: Set branch name - id: set_branch_name - run: | - if [[ "${{ github.event_name }}" == "pull_request" ]]; then - echo "BRANCH_NAME=${HEAD_REF}" >> $GITHUB_ENV - else - echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/} | cut -d/ -f2-)" >> $GITHUB_ENV - fi - env: - HEAD_REF: ${{ github.head_ref }} - - - name: Check if branch exists - id: check_branch - run: | - DEFAULT_BRANCH=master - - if gh api repos/didx-xyz/charts/git/ref/heads/$BRANCH_NAME; then - echo "branch_exists=true" >> $GITHUB_ENV - echo "branch_name=$BRANCH_NAME" >> $GITHUB_ENV - else - echo "branch_exists=false" >> $GITHUB_ENV - echo "branch_name=$DEFAULT_BRANCH" >> $GITHUB_ENV - fi - - - name: Checkout Charts - uses: actions/checkout@v4 - with: - repository: didx-xyz/charts - token: ${{ secrets.CHARTS_PAT }} - path: charts - ref: ${{ env.branch_name }} - persist-credentials: false - - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v4 - with: - aws-region: af-south-1 - role-to-assume: arn:aws:iam::402177810328:role/cicd - role-session-name: github-cicd - - - name: Update Kubeconfig - run: aws eks update-kubeconfig --name cloudapi-dev - - - uses: tailscale/github-action@main - with: - authkey: ${{ secrets.TAILSCALE_AUTHKEY }} - version: ${{ env.TAILSCALE_VERSION }} - - - name: Helmfile init regression pytest - if: github.event.inputs.run-reset-deployments == 'true' && github.event.inputs.run-regression-tests == 'true' - id: pytest-init-regression - uses: helmfile/helmfile-action@v2.0.2 - with: - helmfile-args: | - apply \ - --environment ${{ vars.ENVIRONMENT }} \ - -f ./charts/helmfiles/aries-capi-test.yaml \ - --set image.tag=${{ env.IMAGE_TAG }} \ - --set image.registry=ghcr.io/${{ github.repository_owner }} \ - --set completions=${{ 
env.PYTEST_COMPLETIONS }} \ - --state-values-set release=cloudapi-pytest-regression \ - --set fullnameOverride=cloudapi-pytest-regression \ - --set env.RUN_REGRESSION_TESTS="true" \ - --state-values-set regressionEnabled=true - helm-plugins: | - https://github.com/databus23/helm-diff - helmfile-version: ${{ env.HELMFILE_VERSION }} - helm-version: ${{ env.HELM_VERSION }} - env: - IMAGE_TAG: ${{ needs.build.outputs.image_version }} - - - name: Helmfile run regression pytest - if: ${{ github.event_name != 'workflow_dispatch' || (github.event_name == 'workflow_dispatch' && github.event.inputs.run-reset-deployments == 'false' && github.event.inputs.run-regression-tests == 'true') }} - id: pytest-run-regression - uses: helmfile/helmfile-action@v2.0.2 - with: - helmfile-args: | - apply \ - --environment ${{ vars.ENVIRONMENT }} \ - -f ./charts/helmfiles/aries-capi-test.yaml \ - --set image.tag=${{ env.IMAGE_TAG }} \ - --set image.registry=ghcr.io/${{ github.repository_owner }} \ - --set completions=${{ env.PYTEST_COMPLETIONS }} \ - --state-values-set release=cloudapi-pytest-regression \ - --set fullnameOverride=cloudapi-pytest-regression \ - --set env.RUN_REGRESSION_TESTS="true" \ - --set env.FAIL_ON_RECREATING_FIXTURES="true" \ - --state-values-set regressionEnabled=true - helm-plugins: | - https://github.com/databus23/helm-diff - helmfile-version: ${{ env.HELMFILE_VERSION }} - helm-version: ${{ env.HELM_VERSION }} - env: - IMAGE_TAG: ${{ needs.build.outputs.image_version }} - - - name: Helmfile run pytest - if: ${{ github.event_name != 'workflow_dispatch' || (github.event_name == 'workflow_dispatch' && github.event.inputs.run-tests != 'false') }} - id: pytest - uses: helmfile/helmfile-action@v2.0.2 - with: - helmfile-args: | - apply \ - --environment ${{ vars.ENVIRONMENT }} \ - -f ./charts/helmfiles/aries-capi-test.yaml \ - --set image.tag=${{ env.IMAGE_TAG }} \ - --set image.registry=ghcr.io/${{ github.repository_owner }} \ - --set completions=${{ env.PYTEST_COMPLETIONS 
}} \ - --state-values-set release=cloudapi-pytest \ - --set fullnameOverride=cloudapi-pytest - helm-plugins: | - https://github.com/databus23/helm-diff - helmfile-version: ${{ env.HELMFILE_VERSION }} - helm-version: ${{ env.HELM_VERSION }} - env: - IMAGE_TAG: ${{ needs.build.outputs.image_version }} - - - name: Wait for pytest and print logs - if: steps.pytest.outcome == 'success' - run: | - while true; do - # Check if the job is complete or failed - COMPLETION_STATUS=$(kubectl get job $JOB_NAME -n $NAMESPACE -o jsonpath='{.status.succeeded}') - FAILURE_STATUS=$(kubectl get job $JOB_NAME -n $NAMESPACE -o jsonpath='{.status.failed}') - - if [ "$COMPLETION_STATUS" == "${PYTEST_COMPLETIONS}" ] || [ "$FAILURE_STATUS" == "1" ]; then - echo "Job $JOB_NAME has completed." - break - else - echo "Waiting for job to complete..." - sleep 10 - fi - done - - # Get all pods for the job - pods=$(kubectl get pods -n $NAMESPACE --selector=job-name=$JOB_NAME -o jsonpath='{.items[*].metadata.name}') - - # Loop through the pods and get logs - for pod in $pods - do - echo "Logs for Pod: $pod" - kubectl logs -n $NAMESPACE $pod - done - - env: - JOB_NAME: cloudapi-pytest - NAMESPACE: dev-cloudapi - PYTEST_COMPLETIONS: ${{ env.PYTEST_COMPLETIONS }} - - - name: Wait for pytest regression and print logs - if: steps.pytest-init-regression.outcome == 'success' || steps.pytest-run-regression.outcome == 'success' - run: | - while true; do - # Check if the job is complete or failed - COMPLETION_STATUS=$(kubectl get job $JOB_NAME -n $NAMESPACE -o jsonpath='{.status.succeeded}') - FAILURE_STATUS=$(kubectl get job $JOB_NAME -n $NAMESPACE -o jsonpath='{.status.failed}') - - if [ "$COMPLETION_STATUS" == "${PYTEST_COMPLETIONS}" ] || [ "$FAILURE_STATUS" == "1" ]; then - echo "Job $JOB_NAME has completed." - break - else - echo "Waiting for job to complete..." 
- sleep 10 - fi - done - - # Get all pods for the job - pods=$(kubectl get pods -n $NAMESPACE --selector=job-name=$JOB_NAME -o jsonpath='{.items[*].metadata.name}') - - # Loop through the pods and get logs - for pod in $pods - do - echo "Logs for Pod: $pod" - kubectl logs -n $NAMESPACE $pod - done - - env: - JOB_NAME: cloudapi-pytest-regression - NAMESPACE: dev-cloudapi - PYTEST_COMPLETIONS: ${{ env.PYTEST_COMPLETIONS }} - - - name: Copy k8s pytest results - if: steps.pytest.outcome == 'success' || steps.pytest-init-regression.outcome == 'success' || steps.pytest-run-regression.outcome == 'success' - run: | - echo "apiVersion: v1 - kind: Pod - metadata: - name: $POD_NAME - namespace: $NAMESPACE - labels: - sidecar.istio.io/inject: \"false\" - spec: - containers: - - name: $POD_NAME - image: $CONTAINER_IMAGE - command: [\"sleep\", \"3600\"] - volumeMounts: - - name: pytest-volume - mountPath: $MOUNT_PATH/pytest - - name: pytest-regression-volume - mountPath: $MOUNT_PATH/pytest-regression - volumes: - - name: pytest-volume - persistentVolumeClaim: - claimName: $PVC_NAME - - name: pytest-regression-volume - persistentVolumeClaim: - claimName: $PVC_NAME_REGRESSION - restartPolicy: Never" > pytest-results-pod.yaml - - kubectl apply -f pytest-results-pod.yaml - - # Wait for the pod to be ready - echo "Waiting for pod to be ready..." - kubectl -n $NAMESPACE wait --for=condition=ready pod/$POD_NAME --timeout=60s - - # Copy the files from the pod to your local system - echo "Copying files from pod..." 
- mkdir -p $LOCAL_PATH $LOCAL_PATH_REGRESSION - kubectl -n $NAMESPACE cp $POD_NAME:$MOUNT_PATH/pytest/$OUTPUT_FILE $LOCAL_PATH/$OUTPUT_FILE - kubectl -n $NAMESPACE cp $POD_NAME:$MOUNT_PATH/pytest/$COVERAGE_FILE $LOCAL_PATH/$COVERAGE_FILE - kubectl -n $NAMESPACE cp $POD_NAME:$MOUNT_PATH/pytest-regression/$OUTPUT_FILE $LOCAL_PATH_REGRESSION/$OUTPUT_FILE - kubectl -n $NAMESPACE cp $POD_NAME:$MOUNT_PATH/pytest-regression/$COVERAGE_FILE $LOCAL_PATH_REGRESSION/$COVERAGE_FILE - - # Clean up: delete the temporary pod - echo "Cleaning up..." - kubectl -n $NAMESPACE delete pod $POD_NAME - - echo "Done!" - env: - PVC_NAME: cloudapi-pytest - PVC_NAME_REGRESSION: cloudapi-pytest-regression - POD_NAME: pytest-results-pod - CONTAINER_IMAGE: busybox - MOUNT_PATH: /mnt - LOCAL_PATH: ./pytest - LOCAL_PATH_REGRESSION: ./pytest-regression - NAMESPACE: dev-cloudapi - OUTPUT_FILE: test_output.xml - COVERAGE_FILE: test_coverage.txt - - - name: Pytest coverage comment - if: steps.pytest.outcome == 'success' - uses: MishaKav/pytest-coverage-comment@v1.1.53 - with: - pytest-coverage-path: ./pytest/test_coverage.txt - junitxml-path: ./pytest/test_output.xml - create-new-comment: true - title: "K8s Test Coverage" - # Resolves `Warning: Your comment is too long (maximum is 65536 characters), coverage report will not be added.` - hide-report: ${{ github.event_name != 'pull_request' }} - hide-comment: ${{ github.event_name != 'pull_request' }} - - - name: Pytest regression coverage comment - if: steps.pytest-init-regression.outcome == 'success' || steps.pytest-run-regression.outcome == 'success' - uses: MishaKav/pytest-coverage-comment@v1.1.53 - with: - pytest-coverage-path: ./pytest-regression/test_coverage.txt - junitxml-path: ./pytest-regression/test_output.xml - create-new-comment: true - title: "K8s Regression Test Coverage" - # Resolves `Warning: Your comment is too long (maximum is 65536 characters), coverage report will not be added.` - hide-report: ${{ github.event_name != 
'pull_request' }} - hide-comment: ${{ github.event_name != 'pull_request' }} - - - name: Publish Pytest Report - uses: mikepenz/action-junit-report@v5 - if: steps.pytest.outcome == 'success' - with: - check_name: JUnit Test Report - report_paths: "./pytest/test_output.xml" - fail_on_failure: true - detailed_summary: true - require_passed_tests: true - - - name: Publish Pytest Regression Report - uses: mikepenz/action-junit-report@v5 - if: steps.pytest-init-regression.outcome == 'success' || steps.pytest-run-regression.outcome == 'success' - with: - check_name: JUnit Test Report Regression - report_paths: "./pytest-regression/test_output.xml" - fail_on_failure: true - detailed_summary: true - require_passed_tests: true - - - name: Helmfile destroy pytest - uses: helmfile/helmfile-action@v2.0.2 - if: always() - with: - helmfile-args: | - destroy \ - --environment ${{ vars.ENVIRONMENT }} \ - -f ./charts/helmfiles/aries-capi-test.yaml \ - --state-values-set release=cloudapi-pytest - helm-plugins: | - https://github.com/databus23/helm-diff - helmfile-version: ${{ env.HELMFILE_VERSION }} - helm-version: ${{ env.HELM_VERSION }} - env: - IMAGE_TAG: ${{ needs.build.outputs.image_version }} - - - name: Helmfile destroy pytest regression - uses: helmfile/helmfile-action@v2.0.2 - if: always() - with: - helmfile-args: | - destroy \ - --environment ${{ vars.ENVIRONMENT }} \ - -f ./charts/helmfiles/aries-capi-test.yaml \ - --state-values-set release=cloudapi-pytest-regression - helm-plugins: | - https://github.com/databus23/helm-diff - helmfile-version: ${{ env.HELMFILE_VERSION }} - helm-version: ${{ env.HELM_VERSION }} - env: - IMAGE_TAG: ${{ needs.build.outputs.image_version }} - k6: - if: github.actor != 'dependabot[bot]' && github.event.pull_request.draft == false - name: K6 - environment: - name: dev - needs: - - build - - deploy - permissions: - id-token: write - packages: write - runs-on: ubuntu-latest - - timeout-minutes: 10 - - outputs: - image_version: ${{ 
steps.meta.outputs.version }} - - steps: - - name: Check out code - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - with: - cache-binary: false - - - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ github.token }} - - - name: Docker Metadata - id: meta - uses: docker/metadata-action@v5 - with: - images: ghcr.io/${{ github.repository_owner }}/xk6 - tags: | - type=raw,value=latest,enable=${{ github.event.repository.default_branch == github.ref_name }} - type=sha,prefix=pr-${{ github.event.pull_request.number }}-,priority=601,enable=${{ github.event_name == 'pull_request' }} - type=sha,prefix={{branch}}-,priority=601,enable=${{ github.event_name == 'push' && github.ref_type == 'branch' }} - type=ref,event=branch,priority=600 - type=ref,event=pr - type=semver,pattern={{version}} - type=semver,pattern={{major}}.{{minor}} - - - name: Build and push Docker images - id: build_image - uses: docker/build-push-action@v6 - with: - context: . 
- file: ./scripts/k6/Dockerfile - push: true - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - cache-from: | - type=gha,scope=build-xk6 - type=registry,ref=ghcr.io/${{ github.repository_owner }}/xk6:latest - cache-to: type=gha,mode=max,scope=build-xk6 - - - name: Prepare output directory - run: mkdir -p ${{ github.workspace }}/scripts/k6/output && chmod 777 ${{ github.workspace }}/scripts/k6/output - - - name: Run k6 tests - run: | - docker run --rm \ - -v ${{ github.workspace }}/scripts/k6:/scripts \ - -e CLIENT_ID=${{ secrets.CLIENT_ID }} \ - -e GOVERNANCE_CLIENT_ID=${{ secrets.GOVERNANCE_CLIENT_ID }} \ - -e CLIENT_SECRET=${{ secrets.CLIENT_SECRET }} \ - -e GOVERNANCE_CLIENT_SECRET=${{ secrets.GOVERNANCE_CLIENT_SECRET }} \ - -e CLOUDAPI_URL=${{ secrets.CLOUDAPI_URL }} \ - -e OAUTH_ENDPOINT=${{ secrets.OAUTH_ENDPOINT }} \ - -e GOVERNANCE_OAUTH_ENDPOINT=${{ secrets.GOVERNANCE_OAUTH_ENDPOINT }} \ - --workdir /scripts \ - --entrypoint /bin/sh \ - ghcr.io/${{ github.repository_owner }}/xk6:${VERSION} \ - /scripts/run_tests.sh - shell: bash - env: - VERSION: ${{ steps.meta.outputs.version }} diff --git a/.github/workflows/sonarcloud.yml b/.github/workflows/sonarcloud.yml index 081b91b3ba..f1b0b0f4bd 100644 --- a/.github/workflows/sonarcloud.yml +++ b/.github/workflows/sonarcloud.yml @@ -35,15 +35,17 @@ on: branches: [master] workflow_dispatch: -permissions: - pull-requests: read # allows SonarCloud to decorate PRs with analysis results +permissions: {} jobs: - Analysis: + analysis: + name: Analysis runs-on: ubuntu-latest + permissions: + pull-requests: read # allows SonarCloud to decorate PRs with analysis results + steps: - # Checkout the repository - name: Checkout Repository uses: actions/checkout@v4 with: @@ -56,12 +58,9 @@ jobs: [tools] poetry = "2.0" python = "3.12" - uv = "0.5" [settings] experimental = true - jobs = 1 - pipx_uvx = true python_compile = false [env] @@ -75,6 +74,12 @@ jobs: depends = ["poetry:install:*"] EOF + - 
name: Cache Python venv + uses: actions/cache@v4 + with: + path: .venv + key: python-${{ hashFiles('**/poetry.lock', '.mise.toml') }} + - name: Set up Mise uses: jdx/mise-action@v2 with: @@ -82,9 +87,10 @@ jobs: experimental: true install: true - # Install dependencies - name: Install dependencies with Poetry run: mise run poetry:install + env: + MISE_JOBS: 1 # Run tests and generate coverage report - name: Run Tests with Coverage @@ -106,7 +112,7 @@ jobs: with: # Additional arguments for the SonarCloud scanner args: > - -Dsonar.projectKey=didx-xyz_aries-cloudapi-python + -Dsonar.projectKey=didx-xyz_acapy-cloud -Dsonar.organization=didx-xyz -Dsonar.coverage.exclusions=**/tests/** -Dsonar.python.coverage.reportPaths=coverage.xml diff --git a/.github/workflows/style-check.yml b/.github/workflows/style-check.yml deleted file mode 100644 index 786f62d21a..0000000000 --- a/.github/workflows/style-check.yml +++ /dev/null @@ -1,49 +0,0 @@ -name: Python code style check - -on: - pull_request: - branches: - - master - -permissions: {} - -jobs: - style: - name: style - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Overwrite .mise.toml # It's not needed in this workflow - run: | - cat < .mise.toml - [tools] - "pipx:black" = "25.1" - "pipx:isort" = "6.0" - python = "3.12" - uv = "0.5" - - [settings] - pipx_uvx = true - EOF - - - name: Set up Mise - uses: jdx/mise-action@v2 - with: - cache: true - experimental: true - install: true - - - name: Check import style with isort - run: isort . --check --profile black --diff - - - name: Check code style with Black - run: black . --check --diff - - - name: Check Tiltfiles with Black - run: | - find . 
-type f -name "Tiltfile" | while read -r file; do - black --check --diff "$file" - done diff --git a/.mise.toml b/.mise.toml index 6d2d94b0eb..ff138b64d9 100644 --- a/.mise.toml +++ b/.mise.toml @@ -11,7 +11,6 @@ pre-commit = "4.1" python = "3.12" tilt = "0.33" usage = "latest" -uv = "0.5" [settings] experimental = true @@ -19,14 +18,12 @@ experimental = true # Prevents `poetry install` from running in parallel which # results in multiple threads trying to write to the same file. jobs = 1 -# Use UV instead of pipx for installing Python binaries -pipx_uvx = true # Install precompiled python binary python_compile = false [env] DOCKER_HOST_OVERRIDE = "unix:///var/run/docker.sock" -KIND_CLUSTER_NAME = "aries-cloudapi" +KIND_CLUSTER_NAME = "acapy-cloud" KIND_K8S_CONTEXT = "kind-{{env.KIND_CLUSTER_NAME}}" KIND_DOCKER_CACHE = "docker-cache" KIND_DOCKER_REGISTRY = "docker-registry" @@ -101,7 +98,7 @@ poetry run pytest . --ignore ./tilt $@ [tasks."tests:unit"] description = "Run unit tests" depends = ["poetry:install"] -run = "poetry run pytest app --ignore=app/tests/e2e $@" +run = "poetry run pytest -n auto --dist loadgroup -o 'norecursedirs=e2e' $@" [tasks."tests:e2e"] description = "Run e2e tests" @@ -110,8 +107,7 @@ run = """ #!/bin/bash cp .env.example .env source .env -# pytest app/tests/e2e -poetry run pytest --dist loadfile app/tests/e2e $@ +poetry run pytest -n auto --dist loadgroup $(find . -type d -path '*/tests/e2e' -not -path './tilt*')$@ """ [tasks.fmt] diff --git a/.pylintrc b/.pylintrc index 9bc8aa7cd5..b7ae8d8f08 100644 --- a/.pylintrc +++ b/.pylintrc @@ -1,3 +1,5 @@ +# To be replaced with ruff + [MAIN] # Analyse import fallback blocks. 
This can be used to support both Python 2 and diff --git a/README.md b/README.md index 2061cfd73d..1720aa9ee9 100644 --- a/README.md +++ b/README.md @@ -3,8 +3,8 @@ ![Python](https://img.shields.io/badge/python-3.12-blue.svg) [![Toolset: Mise](https://img.shields.io/badge/toolset-Mise-orange.svg?style=flat)](https://mise.jdx.dev/) [![Dev Experience: Tilt](https://img.shields.io/badge/devex-Tilt-blue.svg?style=flat)](https://tilt.dev) -[![Codacy Badge](https://app.codacy.com/project/badge/Grade/ceca5ac566f74a3a8bfb3095074117ad)](https://app.codacy.com/gh/didx-xyz/aries-cloudapi-python/dashboard?utm_source=gh&utm_medium=referral&utm_content=&utm_campaign=Badge_grade) -[![Codacy Badge](https://app.codacy.com/project/badge/Coverage/ceca5ac566f74a3a8bfb3095074117ad)](https://app.codacy.com/gh/didx-xyz/aries-cloudapi-python/dashboard?utm_source=gh&utm_medium=referral&utm_content=&utm_campaign=Badge_coverage) +[![Codacy Badge](https://app.codacy.com/project/badge/Grade/ceca5ac566f74a3a8bfb3095074117ad)](https://app.codacy.com/gh/didx-xyz/acapy-cloud/dashboard?utm_source=gh&utm_medium=referral&utm_content=&utm_campaign=Badge_grade) +[![Codacy Badge](https://app.codacy.com/project/badge/Coverage/ceca5ac566f74a3a8bfb3095074117ad)](https://app.codacy.com/gh/didx-xyz/acapy-cloud/dashboard?utm_source=gh&utm_medium=referral&utm_content=&utm_campaign=Badge_coverage) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![DIDx GitHub](https://img.shields.io/badge/GitHub-DIDx-181717.svg?style=flat&logo=github)](https://github.com/didx-xyz) diff --git a/Tiltfile b/Tiltfile index 0be430f3d5..3171de5354 100644 --- a/Tiltfile +++ b/Tiltfile @@ -25,8 +25,8 @@ update_settings( max_parallel_updates=5, ) -# Restrict to `kind-aries-cloudapi` kube context -kind_cluster_name = "kind-aries-cloudapi" +# Restrict to `kind-acapy-cloud` kube context +kind_cluster_name = "kind-acapy-cloud" allow_k8s_contexts([kind_cluster_name]) if 
config.tilt_subcommand in ("up", "ci"): @@ -86,7 +86,7 @@ expose = cfg.get("expose") setup_cloudapi(build_enabled, expose) if config.tilt_subcommand not in ("down"): - # _FORCE_ Kube Context to `kind-aries-cloudapi` + # _FORCE_ Kube Context to `kind-acapy-cloud` local( "kubectl config use-context " + kind_cluster_name, dir=os.path.dirname(__file__) ) diff --git a/docker-compose-ledger.yaml b/docker-compose-ledger.yaml index 63cc805205..dd562dd2cb 100644 --- a/docker-compose-ledger.yaml +++ b/docker-compose-ledger.yaml @@ -2,7 +2,7 @@ services: ledger-browser: - image: ${REGISTRY:-ghcr.io/didx-xyz}/d-cloud/ledger-nodes:${IMAGE_TAG:-latest} + image: ${REGISTRY:-ghcr.io/didx-xyz}/acapy-cloud/ledger-nodes:${IMAGE_TAG:-latest} container_name: ledger-browser platform: linux/amd64 command: "bash -c './scripts/start_webserver.sh'" @@ -35,7 +35,7 @@ services: ledger-nodes: condition: service_healthy ledger-nodes: - image: ${REGISTRY:-ghcr.io/didx-xyz}/d-cloud/ledger-nodes:${IMAGE_TAG:-latest} + image: ${REGISTRY:-ghcr.io/didx-xyz}/acapy-cloud/ledger-nodes:${IMAGE_TAG:-latest} container_name: ledger-nodes platform: linux/amd64 command: "bash -c './scripts/start_nodes.sh'" diff --git a/docker-compose.yaml b/docker-compose.yaml index 3e891e40d1..01dec52599 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -106,7 +106,7 @@ services: container_name: governance-multitenant-web build: context: . - dockerfile: ./dockerfiles/fastapi/Dockerfile + dockerfile: ./dockerfiles/app/Dockerfile ports: - 0.0.0.0:8100:8000 volumes: @@ -134,7 +134,7 @@ services: container_name: governance-ga-web build: context: . - dockerfile: ./dockerfiles/fastapi/Dockerfile + dockerfile: ./dockerfiles/app/Dockerfile ports: - 0.0.0.0:8200:8000 volumes: @@ -160,7 +160,7 @@ services: container_name: governance-tenant-web build: context: . 
- dockerfile: ./dockerfiles/fastapi/Dockerfile + dockerfile: ./dockerfiles/app/Dockerfile ports: - 0.0.0.0:8300:8000 volumes: @@ -188,7 +188,7 @@ services: container_name: governance-public-web build: context: . - dockerfile: ./dockerfiles/fastapi/Dockerfile + dockerfile: ./dockerfiles/app/Dockerfile ports: - 0.0.0.0:8400:8000 volumes: diff --git a/dockerfiles/fastapi/Dockerfile b/dockerfiles/app/Dockerfile similarity index 100% rename from dockerfiles/fastapi/Dockerfile rename to dockerfiles/app/Dockerfile diff --git a/helm/acapy-cloud.yaml.gotmpl b/helm/acapy-cloud.yaml.gotmpl new file mode 100644 index 0000000000..21a71a9ad6 --- /dev/null +++ b/helm/acapy-cloud.yaml.gotmpl @@ -0,0 +1,203 @@ +environments: + local: + values: + - namespace: cloudapi + image: + registry: ghcr.io/didx-xyz + tag: master + ddInjectEnabled: false + ddProfilingEnabled: + default: false + overrides: {} + deployLedger: true + natsReplicaCount: 1 + nodeSelector: {} + pgProxyEnabled: false + pgAdmin: + enabled: true + ingress: {} + tolerations: [] + dev: + values: + - namespace: acapy-cloud-dev + image: + registry: ghcr.io/didx-xyz + tag: master + ddInjectEnabled: false + ddProfilingEnabled: + default: false + overrides: {} + # multitenant-agent: true + # governance-agent: true + # multitenant-web: true + # tenant-web: true + deployLedger: true + natsReplicaCount: 1 + nodeSelector: {} + pgProxyEnabled: false + pgAdmin: + enabled: true + ingress: + ingressClassName: nginx-internal + hosts: + - host: pgadmin-acapy-cloud.dev.didxtech.com + paths: + - path: / + pathType: Prefix + tolerations: [] +--- +{{- $releases := list + "endorser" + "governance-agent" + "governance-web" + "mediator" + "multitenant-agent" + "multitenant-web" + "public-web" + "tails-server" + "tenant-web" + "trust-registry" + "waypoint" +}} + +releases: +{{- range $index, $release := $releases }} + - name: {{ $release }} + labels: + app: {{ $release }} + namespace: {{ $.Values.namespace }} + chart: ./acapy-cloud + version: 
0.2.0 + values: + - ./acapy-cloud/conf/{{ $.Environment.Name }}/{{ $release }}.yaml + {{- if $.Values.pgProxyEnabled -}} + {{- if or (eq $release "governance-agent") (eq $release "multitenant-agent") }} + - ./acapy-cloud/conf/{{ $.Environment.Name }}/{{ $release }}-pg-proxy.yaml + {{- end }} + {{- end }} + - deploymentLabels: + tags.datadoghq.com/env: {{ $.Environment.Name }} + podLabels: + admission.datadoghq.com/enabled: {{ quote $.Values.ddInjectEnabled }} + tags.datadoghq.com/env: {{ $.Environment.Name }} + - nodeSelector: {{ toYaml $.Values.nodeSelector | nindent 10 }} + tolerations: {{ toYaml $.Values.tolerations | nindent 10 }} + set: + {{- if not (eq $release "mediator") }} + - name: image.registry + value: {{ $.Values.image.registry }} + - name: image.tag + value: {{ $.Values.image.tag }} + {{- end }} + - name: env.DD_PROFILING_ENABLED + value: {{ index $.Values.ddProfilingEnabled.overrides $release | default $.Values.ddProfilingEnabled.default }} +{{- end }} +{{- if .Values.deployLedger }} + - name: ledger-nodes + labels: + app: ledger-nodes + namespace: {{ .Values.namespace }} + chart: ./ledger-nodes + version: 0.2.0 + values: + - ./ledger-nodes/conf/{{ .Environment.Name }}/values.yaml + - deploymentLabels: + tags.datadoghq.com/env: {{ .Environment.Name }} + podLabels: + admission.datadoghq.com/enabled: "false" + tags.datadoghq.com/env: {{ .Environment.Name }} + sidecar.istio.io/inject: "false" + - nodeSelector: {{ toYaml .Values.nodeSelector | nindent 10 }} + tolerations: {{ toYaml .Values.tolerations | nindent 10 }} + set: + - name: image.registry + value: {{ .Values.image.registry }} + - name: image.tag + value: {{ .Values.image.tag }} + - name: ledger-browser + labels: + app: ledger-browser + namespace: {{ .Values.namespace }} + chart: ./acapy-cloud + version: 0.2.0 + values: + - ./acapy-cloud/conf/{{ .Environment.Name }}/ledger-browser.yaml + - deploymentLabels: + tags.datadoghq.com/env: {{ .Environment.Name }} + podLabels: + 
admission.datadoghq.com/enabled: "false" + tags.datadoghq.com/env: {{ .Environment.Name }} + - nodeSelector: {{ toYaml .Values.nodeSelector | nindent 10 }} + tolerations: {{ toYaml .Values.tolerations | nindent 10 }} + set: + - name: image.registry + value: {{ .Values.image.registry }} + - name: image.tag + value: {{ .Values.image.tag }} +{{- end }} + # https://github.com/redpanda-data/helm-charts/tree/main/charts/connect + - name: connect-cloud + labels: + app: connect-cloud + namespace: {{ .Values.namespace }} + chart: redpanda/connect + version: 3.0.3 + values: + - ./acapy-cloud/conf/{{ $.Environment.Name }}/connect-cloud.yaml + - fullnameOverride: connect-cloud + deployment: + podLabels: + admission.datadoghq.com/enabled: "false" + tags.datadoghq.com/env: {{ .Environment.Name }} + podAnnotations: + ad.datadoghq.com/logs_exclude: "true" # Disable datadog log shipping + sidecar.istio.io/inject: "false" + + # https://github.com/bitnami/charts/tree/main/bitnami/nats + - name: nats + labels: + app: nats + namespace: {{ .Values.namespace }} + chart: ./nats + values: + - ./nats/values.yaml + - nats: + replicaCount: {{ default 1 .Values.natsReplicaCount }} + + # https://github.com/bitnami/charts/tree/main/bitnami/postgresql-ha + - name: postgres + labels: + app: postgres + namespace: {{ .Values.namespace }} + chart: oci://registry-1.docker.io/bitnamicharts/postgresql-ha + version: 15.1.7 + values: + - ../tilt/acapy-cloud/postgres.yaml + + # https://github.com/rowanruseler/helm-charts/tree/main/charts/pgadmin4 + - name: pgadmin + labels: + app: pgadmin + namespace: {{ .Values.namespace }} + chart: runix/pgadmin4 + version: 1.34.0 + installed: {{ .Values.pgAdmin.enabled }} + values: + - ../tilt/acapy-cloud/pgadmin.yaml + {{- with .Values.pgAdmin.ingress }} + - ingress: + {{ toYaml . 
| nindent 10 }} + {{- end }} +--- +repositories: + - name: redpanda + url: https://charts.redpanda.com + - name: runix + url: https://rowanruseler.github.io/helm-charts +--- +helmDefaults: + timeout: 600 + wait: true + atomic: true + cleanupOnFail: true + createNamespace: false diff --git a/helm/acapy-cloud/README.md b/helm/acapy-cloud/README.md index 6e40c3bce5..d6d589d86f 100644 --- a/helm/acapy-cloud/README.md +++ b/helm/acapy-cloud/README.md @@ -151,7 +151,7 @@ For local development: 3. Install using Helm: ```bash -helm install my-release . -f values-.yaml +helm install my-release . -f ./conf/local/.yaml ``` ## Additional Notes diff --git a/helm/acapy-cloud/conf/dev/connect-cloud.yaml b/helm/acapy-cloud/conf/dev/connect-cloud.yaml new file mode 100644 index 0000000000..8c873469b9 --- /dev/null +++ b/helm/acapy-cloud/conf/dev/connect-cloud.yaml @@ -0,0 +1,50 @@ +# https://github.com/redpanda-data/helm-charts/tree/main/charts/connect +image: + tag: 4 +updateStrategy: + type: Recreate + +logger: + level: info + format: json + add_timestamp: true + static_fields: + "@service": connect-cloud + +http: + debug_endpoints: true + +streams: + enabled: true + streamsConfigMap: connect-cloud-streams + +deployment: + podLabels: + sidecar.istio.io/inject: "false" + +env: + - name: GOVERNANCE_ACAPY_LABEL + value: Governance + +initContainers: + - name: nc-nats + image: busybox + command: ['sh', '-c', 'until nc -z nats 4222; do echo waiting for nats; sleep 2; done;'] + - name: nats-check + image: bitnami/natscli + command: + - sh + - -c + - | + until nats --server nats://nats:4222 str info acapy_events >/dev/null 2>&1; do + echo waiting for nats acapy_events stream; + sleep 2; + done + until nats --server nats://nats:4222 str info cloudapi_aries_events >/dev/null 2>&1; do + echo waiting for nats cloudapi_aries_events stream; + sleep 2; + done + until nats --server nats://nats:4222 str info cloudapi_aries_state_monitoring >/dev/null 2>&1; do + echo waiting for nats 
cloudapi_aries_state_monitoring stream; + sleep 2; + done diff --git a/helm/acapy-cloud/conf/dev/endorser.yaml b/helm/acapy-cloud/conf/dev/endorser.yaml new file mode 100644 index 0000000000..5997aa0f15 --- /dev/null +++ b/helm/acapy-cloud/conf/dev/endorser.yaml @@ -0,0 +1,149 @@ +fullnameOverride: endorser + +replicaCount: 1 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/endorser.logs: '[{"source": "python.uvicorn", "service": "endorser"}]' + proxy.istio.io/config: |- + proxyMetadata: + ISTIO_META_IDLE_TIMEOUT: 0s +podLabels: + admission.datadoghq.com/enabled: "true" + +image: + name: acapy-cloud/endorser + pullPolicy: Always + tag: master + +initContainers: + - name: nc-nats + image: busybox + command: ['sh', '-c', 'until nc -z nats 4222; do echo waiting for nats; sleep 2; done;'] + - name: nats-check + image: bitnami/natscli + command: + - sh + - -c + - | + until nats --server $NATS_SERVER str info $NATS_STREAM >/dev/null 2>&1; do echo waiting for nats stream; sleep 2; done; + env: + - name: NATS_SERVER + value: "{{ .Values.env.NATS_SERVER }}" + - name: NATS_STREAM + value: "{{ .Values.env.NATS_STREAM }}" + +command: + - poetry + - run + - uvicorn + - endorser.main:app + - --log-config=/tmp/log_conf.yaml + - --reload + - --host + - 0.0.0.0 + - --port + - 3009 + +service: + name: endorser + port: 3009 + containerPort: 3009 + appProtocol: http + +livenessProbe: + httpGet: + path: /health/live + port: endorser +readinessProbe: + httpGet: + path: /health/ready + port: endorser + +autoscaling: + enabled: false + +# resources: +# requests: +# cpu: 50m +# memory: 128Mi +# limits: +# cpu: 250m +# memory: 256Mi + +secretData: + ACAPY_GOVERNANCE_AGENT_API_KEY: adminApiKey + +env: + LOG_LEVEL: info + PYTHONPATH: / + + ACAPY_GOVERNANCE_AGENT_URL: http://governance-agent:3021 + + # Trust registry + TRUST_REGISTRY_URL: http://trust-registry:8000 + + GOVERNANCE_ACAPY_LABEL: Governance + ENABLE_SERIALIZE_LOGS: "TRUE" + + NATS_CREDS_FILE: "" + 
NATS_SERVER: nats://nats:4222 + NATS_SUBJECT: cloudapi.aries.events + NATS_STREAM: cloudapi_aries_events + ENDORSER_DURABLE_CONSUMER: endorser + +podSecurityContext: + fsGroup: 65534 +securityContext: + runAsUser: 0 + +extraVolumes: + - name: logs + emptyDir: {} +extraVolumeMounts: + - name: logs + mountPath: /logs + +podAntiAffinityPreset: soft +nodeAffinityPreset: + type: soft + key: node.kubernetes.io/lifecycle + values: + - spot + +configFiles: + log_conf.yaml: + path: /tmp/log_conf.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + default: + "()": uvicorn.logging.DefaultFormatter + format: '%(asctime)s %(name)s %(levelname)s %(message)s' + use_colors: null + access: + "()": uvicorn.logging.AccessFormatter + format: '%(asctime)s %(name)s %(levelname)s %(client_addr)s - "%(request_line)s" %(status_code)s' + handlers: + default: + formatter: default + class: logging.StreamHandler + stream: ext://sys.stderr + access: + formatter: access + class: logging.StreamHandler + stream: ext://sys.stdout + loggers: + uvicorn: + level: INFO + handlers: + - default + propagate: no + uvicorn.error: + level: INFO + uvicorn.access: + level: INFO + handlers: + - access + propagate: no diff --git a/helm/acapy-cloud/conf/dev/governance-agent-pg-proxy.yaml b/helm/acapy-cloud/conf/dev/governance-agent-pg-proxy.yaml new file mode 100644 index 0000000000..0408cf3d1e --- /dev/null +++ b/helm/acapy-cloud/conf/dev/governance-agent-pg-proxy.yaml @@ -0,0 +1,3 @@ +secretData: + ACAPY_WALLET_STORAGE_CONFIG: '{ "max_connections":10, "min_connections":1, "url":"cloudapi-pgpool:5432" }' + WALLET_DB_HOST: cloudapi-pgpool diff --git a/helm/acapy-cloud/conf/dev/governance-agent.yaml b/helm/acapy-cloud/conf/dev/governance-agent.yaml new file mode 100644 index 0000000000..afc5dff764 --- /dev/null +++ b/helm/acapy-cloud/conf/dev/governance-agent.yaml @@ -0,0 +1,251 @@ +fullnameOverride: governance-agent + +replicaCount: 1 + +podAnnotations: + sidecar.istio.io/proxyCPU: 
10m + ad.datadoghq.com/governance-agent.logs: '[{"source": "python", "service": "governance-agent", "auto_multi_line_detection": true}]' + +image: + name: acapy-cloud/governance-agent + pullPolicy: Always + tag: master + +podLabels: + admission.datadoghq.com/enabled: "false" + +command: + - aca-py + - start + - --inbound-transport + - http + - 0.0.0.0 + - 3020 + - --admin + - 0.0.0.0 + - 3021 + - --plugin + - nats_events.v1_0.nats_queue.events + - --plugin-config-value + - nats_queue.connection.connection_url="$(NATS_SERVER)" + +lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - sleep 10 + +ingressDomain: acapy-cloud.dev.didxtech.com +ingress: + internal: + enabled: true + className: nginx-internal + rules: + - host: governance-agent-didcomm-{{ .Values.ingressDomain}} + paths: + - path: / + port: 3020 # didcomm + internal-e2e: + enabled: true + className: nginx-internal + rules: + - host: governance-agent-{{ .Values.ingressDomain}} + paths: + - path: / + port: 3021 # http + +service: + # if set, will run Pods on Node Network + hostNetwork: false + port: 3021 # http + containerPort: 3021 # http + appProtocol: tcp + +addPorts: + - port: 3020 # didcomm + containerPort: 3020 # didcomm + protocol: TCP + +livenessProbe: + httpGet: + path: /status/live + port: "{{ trunc 15 .Release.Name }}" + initialDelaySeconds: 300 + timeoutSeconds: 30 +readinessProbe: + httpGet: + path: /status/ready + port: "{{ trunc 15 .Release.Name }}" + initialDelaySeconds: 5 + timeoutSeconds: 30 + +# resources: +# requests: +# cpu: 100m +# memory: 256Mi +# limits: +# cpu: 500m +# memory: 512Mi + +initContainers: + - name: wait-for-ledger-browser + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://ledger-browser:8000/status -o /dev/null; do + echo "waiting for ledger-browser to be healthy" + sleep 10 + done + - name: register-did + image: curlimages/curl:latest + env: + - name: ACAPY_WALLET_SEED + valueFrom: + secretKeyRef: + name: '{{ include 
"acapy-cloud.fullname" . }}-env' + key: ACAPY_WALLET_SEED + command: + - "/bin/sh" + - "-c" + - | + STATUS=$(curl -s -X POST \ + -o /dev/null \ + -w "%{http_code}" \ + -H "Content-Type: application/json" -d "{\"seed\": \"$ACAPY_WALLET_SEED\"}" \ + "http://ledger-browser:8000/register" + ) + if [ $STATUS -ne 200 ]; then + echo "Failed to register DID. Status code: $STATUS" + exit 1 + fi + - name: nc-nats + image: busybox + command: ['sh', '-c', 'until nc -z nats 4222; do echo waiting for nats; sleep 2; done;'] + - name: nats-check + image: bitnami/natscli + command: + - sh + - -c + - | + until nats --server $NATS_SERVER str info $NATS_STREAM >/dev/null 2>&1; do echo waiting for nats stream; sleep 2; done; + env: + - name: NATS_SERVER + value: "{{ .Values.env.NATS_SERVER }}" + - name: NATS_STREAM + value: "{{ .Values.env.NATS_STREAM }}" + +persistence: + enabled: false + +autoscaling: + enabled: false + +# Sensitive environment variables are sourced from k8s secrets: +# - generated with secretData, or +# - pre-populated with external tooling +# TODO: Helm secret logic to create new secret if not exist +secretData: + ACAPY_ADMIN_API_KEY: adminApiKey + ACAPY_LABEL: Governance + ACAPY_TENANT_AGENT_API_KEY: adminApiKey + ACAPY_WALLET_KEY: verySecretGovernanceWalletKey + ACAPY_WALLET_NAME: governance + ACAPY_WALLET_SEED: verySecretPaddedWalletSeedPadded + + ACAPY_WALLET_STORAGE_CONFIG: '{ "max_connections":10, "min_connections":1, "url":"cloudapi-postgresql:5432" }' + ACAPY_WALLET_STORAGE_CREDS: '{ "account": "governance", "admin_account": "governance", "admin_password": "governance", "password": "governance" }' + WALLET_DB_ADMIN_PASS: governance + WALLET_DB_ADMIN_USER: governance + WALLET_DB_HOST: cloudapi-postgresql + WALLET_DB_PASS: governance + WALLET_DB_PORT: 5432 + WALLET_DB_USER: governance + +env: + ACAPY_LOG_LEVEL: info + # NATS related + NATS_CREDS_FILE: "" # NATS in Local dev has no auth + NATS_SERVER: nats://nats:4222 + NATS_SUBJECT: cloudapi.aries.events + 
NATS_STREAM: cloudapi_aries_events + # for aca-py + ADMIN_URL: http://governance-agent:3021 + ACAPY_OUTBOUND_TRANSPORT: http + ACAPY_ENDPOINT: http://governance-agent:3020 + # Tails server + # Should be changed further + ACAPY_TAILS_SERVER_BASE_URL: http://tails-server:6543 + ACAPY_WALLET_TYPE: askar + ACAPY_WALLET_STORAGE_TYPE: postgres_storage + ACAPY_AUTO_PROVISION: true + # Ledger + ACAPY_GENESIS_URL: http://ledger-browser:8000/genesis + ACAPY_PUBLIC_INVITES: true + ACAPY_ENDORSER_ROLE: endorser + # ## DO NOT CHANGE VARIABLES BELOW + # ## Unless you know exactly what you are doing + # ## Changes will probably break CloudAPI + # Optional Helper Configurations - See https://github.com/openwallet-foundation/acapy/blob/main/acapy_agent/config/argparse.py + ACAPY_AUTO_ACCEPT_INVITES: false + ACAPY_AUTO_ACCEPT_REQUESTS: false + ACAPY_AUTO_PING_CONNECTION: true + ACAPY_AUTO_RESPOND_MESSAGES: false + ACAPY_AUTO_RESPOND_CREDENTIAL_PROPOSAL: false + ACAPY_AUTO_RESPOND_CREDENTIAL_OFFER: false + ACAPY_AUTO_RESPOND_CREDENTIAL_REQUEST: false + ACAPY_AUTO_RESPOND_PRESENTATION_PROPOSAL: false + ACAPY_AUTO_RESPOND_PRESENTATION_REQUEST: false + ACAPY_AUTO_STORE_CREDENTIAL: true + ACAPY_AUTO_VERIFY_PRESENTATION: true + ACAPY_PRESERVE_EXCHANGE_RECORDS: false + ACAPY_AUTO_ENDORSE_TRANSACTIONS: false + + ACAPY_ACCEPT_TAA: "[service_agreement,1.1]" + + ACAPY_REQUESTS_THROUGH_PUBLIC_DID: true + ACAPY_EMIT_NEW_DIDCOMM_PREFIX: true + ACAPY_EMIT_NEW_DIDCOMM_MIME_TYPE: true + + # ACAPY_LOG_CONFIG: /home/aries/logging_config.yaml + +podAntiAffinityPreset: soft +nodeAffinityPreset: + type: soft + key: node.kubernetes.io/lifecycle + values: + - spot + +configFiles: + logging_config.yml: + path: /home/aries/logging_config.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + json_formatter: + (): pythonjsonlogger.jsonlogger.JsonFormatter + format: '%(asctime)s %(name)s %(levelname)s %(pathname)s:%(lineno)d %(message)s' + handlers: + stream_handler: + class: 
logging.StreamHandler + level: DEBUG + formatter: json_formatter + stream: ext://sys.stderr + timed_file_handler: + class: logging.handlers.TimedRotatingFileHandler + level: DEBUG + formatter: json_formatter + filename: '/home/aries/log/acapy-agent.log' + when: 'd' + interval: 7 + backupCount: 1 + loggers: + '': + level: ERROR + handlers: + - stream_handler + - timed_file_handler diff --git a/helm/acapy-cloud/conf/dev/governance-web.yaml b/helm/acapy-cloud/conf/dev/governance-web.yaml new file mode 100644 index 0000000000..b3e4967a2f --- /dev/null +++ b/helm/acapy-cloud/conf/dev/governance-web.yaml @@ -0,0 +1,213 @@ +fullnameOverride: governance-web + +replicaCount: 1 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/governance-web.logs: '[{"source": "python.uvicorn", "service": "governance-web"}]' + +image: + name: acapy-cloud/app + pullPolicy: Always + tag: master + +command: + - poetry + - run + - uvicorn + - app.main:app + - --log-config=/tmp/log_conf.yaml + - --reload + - --host + - 0.0.0.0 + - --port + - 8000 + +ingressDomain: acapy-cloud.dev.didxtech.com +ingress: + internal: + enabled: true + className: nginx-internal + rules: + - host: governance-web-{{ .Values.ingressDomain }} + paths: + - path: /governance + cloudapi-internal: + enabled: true + className: nginx-internal + annotations: + # Retool needs the below + nginx.ingress.kubernetes.io/cors-allow-headers: x-api-key + nginx.ingress.kubernetes.io/enable-cors: "true" + rules: + - host: '{{ .Values.ingressDomain }}' + paths: + - path: /governance + +service: + # if set, will run Pods on Node Network + hostNetwork: false + port: 8000 + containerPort: 8000 + appProtocol: http + +livenessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" +readinessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" + +# resources: +# requests: +# cpu: 50m +# memory: 256Mi +# limits: +# cpu: 250m +# memory: 256Mi + +initContainers: + - name: wait-governance-agent + 
image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://governance-agent:3020 -o /dev/null; do + echo "waiting for governance-agent to be healthy" + sleep 2 + done + +persistence: + enabled: false + +autoscaling: + enabled: false + + +podLabels: + admission.datadoghq.com/enabled: "true" + sidecar.istio.io/inject: "true" + +secretData: + ACAPY_GOVERNANCE_AGENT_API_KEY: adminApiKey + +env: + LOG_LEVEL: info + ACAPY_GOVERNANCE_AGENT_URL: http://governance-agent:3021 + ACAPY_TENANT_AGENT_URL: http://multitenant-agent:3021 + TRUST_REGISTRY_URL: http://trust-registry:8000 + OPENAPI_NAME: CloudAPI Governance + PYTHONPATH: / + ACAPY_ENDORSER_ALIAS: endorser + ROLE: governance + ROOT_PATH: /governance + ENABLE_SERIALIZE_LOGS: "TRUE" + +podSecurityContext: + fsGroup: 65534 +securityContext: + runAsUser: 0 + +extraVolumes: + - name: logs + emptyDir: {} +extraVolumeMounts: + - name: logs + mountPath: /logs + +lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - sleep 15 + +podAntiAffinityPreset: soft +nodeAffinityPreset: + type: soft + key: node.kubernetes.io/lifecycle + values: + - spot + +configFiles: + log_conf.yaml: + path: /tmp/log_conf.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + default: + "()": uvicorn.logging.DefaultFormatter + format: '%(asctime)s %(name)s %(levelname)s %(message)s' + use_colors: null + access: + "()": uvicorn.logging.AccessFormatter + format: '%(asctime)s %(name)s %(levelname)s %(client_addr)s - "%(request_line)s" %(status_code)s' + handlers: + default: + formatter: default + class: logging.StreamHandler + stream: ext://sys.stderr + access: + formatter: access + class: logging.StreamHandler + stream: ext://sys.stdout + loggers: + uvicorn: + level: INFO + handlers: + - default + propagate: no + uvicorn.error: + level: INFO + uvicorn.access: + level: INFO + handlers: + - access + propagate: no + +istio: + auth: + authn: # Configures cloudapi Keycloak realm as JWT issuer for 
governance + enabled: false + conf: + jwtRules: + - issuer: http://acapy-cloud.dev.didxtech.com/auth/realms/cloudapi + authz: # Authorisation config + enabled: false + conf: + rules: + - to: + - operation: + methods: ["GET"] + paths: + - /governance/docs + - /governance/docs/* + - /governance/openapi.json + - when: + - key: request.auth.claims[iss] + values: + - http://acapy-cloud.dev.didxtech.com/auth/realms/cloudapi + - key: request.auth.claims[client_id] + values: + - governance + to: + - operation: + methods: ["*"] + paths: + - /governance + - /governance/* + - when: # exclude internal admin url from Istio authN, i.e., can use it unauthenticated internally + - key: request.headers[host] + values: + - governance-web-acapy-cloud.dev.didxtech.com + to: + - operation: + methods: ["*"] + paths: + - /governance + - /governance/* diff --git a/helm/acapy-cloud/conf/dev/ledger-browser.yaml b/helm/acapy-cloud/conf/dev/ledger-browser.yaml new file mode 100644 index 0000000000..5e631625f5 --- /dev/null +++ b/helm/acapy-cloud/conf/dev/ledger-browser.yaml @@ -0,0 +1,86 @@ +fullnameOverride: ledger-browser + +replicaCount: 1 + +strategy: + type: Recreate + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/ledger-browser.logs: '[{"source": "grok.ledger-browser", "service": "ledger-browser", "auto_multi_line_detection": true}]' + +podLabels: + admission.datadoghq.com/enabled: "false" + sidecar.istio.io/inject: "false" + +image: + name: acapy-cloud/ledger-nodes + pullPolicy: Always + tag: latest + +ingressDomain: acapy-cloud.dev.didxtech.com +ingress: + internal: + enabled: true + className: nginx-internal + rules: + - host: ledger-browser-{{ .Values.ingressDomain }} + paths: + - path: / + port: 8000 + +service: + hostNetwork: false + port: 8000 + containerPort: 8000 + appProtocol: tcp + +command: + - bash + - -c + - | + cp ./config/sample_aml.json ./config/aml.json + cp ./config/sample_taa.json ./config/taa.json + + if [ !
-f "/home/indy/ledger/sandbox/pool_transactions_genesis" ] && [ -z "${GENESIS_URL}" ] && [ -z "${GENESIS_FILE}" ]; then + echo "Ledger does not exist - Creating genesis data..." + bash ./scripts/init_genesis.sh + fi + + # Remap ports + sed -i \ + 's/"client_port":[0-9]\+/"client_port":9702/g; s/"node_port":[0-9]\+/"node_port":9701/g' \ + /home/indy/ledger/sandbox/pool_transactions_genesis + + python -m server.server + +livenessProbe: + httpGet: + path: /status/text + port: "{{ trunc 15 .Release.Name }}" + timeoutSeconds: 10 +readinessProbe: + httpGet: + path: /status/text + port: "{{ trunc 15 .Release.Name }}" + +# resources: +# requests: +# cpu: 100m +# memory: 386Mi +# limits: +# cpu: 500m +# memory: 386Mi + +secretData: + LEDGER_SEED: 000000000000000000000000Trustee1 + +env: + LOG_LEVEL: info + MAX_FETCH: 50000 + RESYNC_TIME: 120 + REGISTER_NEW_DIDS: True + LEDGER_INSTANCE_NAME: Indy Ledger Browser + +extraConfigmapNamesForEnvFrom: + - ledger-nodes-ips diff --git a/helm/acapy-cloud/conf/dev/mediator.yaml b/helm/acapy-cloud/conf/dev/mediator.yaml new file mode 100644 index 0000000000..2f7808deaf --- /dev/null +++ b/helm/acapy-cloud/conf/dev/mediator.yaml @@ -0,0 +1,219 @@ +fullnameOverride: mediator + +replicaCount: 1 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/mediator.logs: '[{"source": "python", "service": "mediator", "auto_multi_line_detection": true}]' + +image: + registry: ghcr.io/openwallet-foundation + name: acapy-agent + pullPolicy: Always + tag: py3.12-1.2.1 + +podLabels: + admission.datadoghq.com/enabled: "false" + +command: + - aca-py + - start + - --inbound-transport + - http + - 0.0.0.0 + - 3000 + - --inbound-transport + - ws + - 0.0.0.0 + - 3001 + - --outbound-transport + - ws + - --outbound-transport + - http + - --admin + - 0.0.0.0 + - 3002 + - --endpoint + - http://mediator-{{ .Values.ingressDomain }} + - ws://mediator-{{ .Values.ingressDomain }} + +ingressDomain: acapy-cloud.dev.didxtech.com +ingress: + internal: + 
enabled: true + className: nginx-internal + annotations: + # Websockets config + # https://kubernetes.github.io/ingress-nginx/user-guide/miscellaneous/#websockets + # 1 hour proxy read/write timeout + nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" + nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" + nginx.ingress.kubernetes.io/configuration-snippet: | + if ($http_connection ~* "upgrade") { + proxy_pass http://mediator.{{ .Release.Namespace }}.svc.cluster.local:3001; + break; + } + rules: + - host: mediator-{{ .Values.ingressDomain }} + paths: + - path: / + port: 3000 + internal-admin: + enabled: false + className: nginx-internal + rules: + - host: mediator-admin-{{ .Values.ingressDomain }} + paths: + - path: / + port: 3002 + +service: + port: 3000 + containerPort: 3000 + appProtocol: http + +addPorts: + - name: websocket + port: 3001 + containerPort: 3001 + protocol: TCP + appProtocol: http + - name: admin + port: 3002 + containerPort: 3002 + protocol: TCP + appProtocol: http + +livenessProbe: + httpGet: + path: /status/live + port: admin + initialDelaySeconds: 300 + timeoutSeconds: 30 +readinessProbe: + httpGet: + path: /status/ready + port: admin + initialDelaySeconds: 5 + timeoutSeconds: 30 + +# resources: +# requests: +# cpu: 100m +# memory: 256Mi +# limits: +# cpu: 250m +# memory: 384Mi + +initContainers: + - name: wait-governance-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://governance-agent:3020 -o /dev/null; do + echo "waiting for governance-agent to be healthy" + sleep 2 + done + - name: wait-multitenant-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://multitenant-agent:3020; do + echo "waiting for multitenant-agent to be healthy" + sleep 2 + done + +persistence: + enabled: false + +autoscaling: + enabled: false + +# Sensitive environment variables are sourced from k8s secrets: +# - generated with secretData, or +# - pre-populated with external tooling +# TODO: Helm 
secret logic to create new secret if not exist +secretData: + ACAPY_ADMIN_API_KEY: adminApiKey + ACAPY_WALLET_KEY: verySecureMediatorWalletKey + + ACAPY_WALLET_STORAGE_CONFIG: '{ "max_connections":10, "min_connections":1, "url":"cloudapi-postgresql:5432" }' + ACAPY_WALLET_STORAGE_CREDS: '{ "account":"mediator", "admin_account":"mediator", "admin_password":"mediator", "password":"mediator" }' + WALLET_DB_ADMIN_PASS: mediator + WALLET_DB_ADMIN_USER: mediator + WALLET_DB_HOST: cloudapi-postgresql + WALLET_DB_PASS: mediator + WALLET_DB_PORT: 5432 + WALLET_DB_USER: mediator + +env: + ACAPY_LOG_LEVEL: info + ACAPY_LABEL: Aca-Py Mediator + ACAPY_WALLET_NAME: mediator + # Mediator does not use a ledger + ACAPY_NO_LEDGER: true + # Wallet + ACAPY_WALLET_TYPE: askar + ACAPY_AUTO_PROVISION: true + # Mediation + ACAPY_MEDIATION_OPEN: true + ACAPY_ENABLE_UNDELIVERED_QUEUE: true + # Connections + ACAPY_DEBUG_CONNECTIONS: true + ACAPY_AUTO_ACCEPT_INVITES: true + ACAPY_AUTO_ACCEPT_REQUESTS: true + ACAPY_AUTO_PING_CONNECTION: true + # Print admin invite + # ACAPY_MEDIATION_CONNECTIONS_INVITE: true + ACAPY_INVITE_LABEL: Aca-Py Mediator + ACAPY_INVITE_MULTI_USE: true + ACAPY_CONNECTIONS_INVITE: true + + ACAPY_WALLET_STORAGE_TYPE: postgres_storage + + ACAPY_EMIT_NEW_DIDCOMM_PREFIX: true + ACAPY_EMIT_NEW_DIDCOMM_MIME_TYPE: true + + # ACAPY_LOG_CONFIG: /home/aries/logging_config.yaml + +podAntiAffinityPreset: soft +nodeAffinityPreset: + type: soft + key: node.kubernetes.io/lifecycle + values: + - spot + +configFiles: + logging_config.yml: + path: /home/aries/logging_config.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + json_formatter: + (): pythonjsonlogger.jsonlogger.JsonFormatter + format: '%(asctime)s %(name)s %(levelname)s %(pathname)s:%(lineno)d %(message)s' + handlers: + stream_handler: + class: logging.StreamHandler + level: DEBUG + formatter: json_formatter + stream: ext://sys.stderr + timed_file_handler: + class: 
logging.handlers.TimedRotatingFileHandler + level: DEBUG + formatter: json_formatter + filename: '/home/aries/log/acapy-agent.log' + when: 'd' + interval: 7 + backupCount: 1 + loggers: + '': + level: ERROR + handlers: + - stream_handler + - timed_file_handler diff --git a/helm/acapy-cloud/conf/dev/multitenant-agent-pg-proxy.yaml b/helm/acapy-cloud/conf/dev/multitenant-agent-pg-proxy.yaml new file mode 100644 index 0000000000..0408cf3d1e --- /dev/null +++ b/helm/acapy-cloud/conf/dev/multitenant-agent-pg-proxy.yaml @@ -0,0 +1,3 @@ +secretData: + ACAPY_WALLET_STORAGE_CONFIG: '{ "max_connections":10, "min_connections":1, "url":"cloudapi-pgpool:5432" }' + WALLET_DB_HOST: cloudapi-pgpool diff --git a/helm/acapy-cloud/conf/dev/multitenant-agent.yaml b/helm/acapy-cloud/conf/dev/multitenant-agent.yaml new file mode 100644 index 0000000000..2475fec74e --- /dev/null +++ b/helm/acapy-cloud/conf/dev/multitenant-agent.yaml @@ -0,0 +1,247 @@ +fullnameOverride: multitenant-agent + +replicaCount: 1 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/multitenant-agent.logs: '[{"source": "python", "service": "multitenant-agent", "auto_multi_line_detection": true}]' + +image: + name: acapy-cloud/multitenant-agent + tag: master + pullPolicy: Always + +podLabels: + admission.datadoghq.com/enabled: "false" + +command: + - aca-py + - start + - --inbound-transport + - http + - 0.0.0.0 + - 3020 + - --admin + - 0.0.0.0 + - 3021 + - --plugin + - acapy_wallet_groups_plugin + - --auto-promote-author-did + - --plugin + - nats_events.v1_0.nats_queue.events + - --plugin-config-value + - nats_queue.connection.connection_url="$(NATS_SERVER)" + +lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - sleep 10 + +ingressDomain: acapy-cloud.dev.didxtech.com +ingress: + internal: + enabled: true + className: nginx-internal + rules: + - host: multitenant-agent-didcomm-{{ .Values.ingressDomain }} + paths: + - path: / + port: 3020 + internal-e2e: + enabled: true + className: 
nginx-internal + rules: + - host: multitenant-agent-{{ .Values.ingressDomain }} + paths: + - path: / + port: 3021 + +service: + # if set, will run Pods on Node Network + hostNetwork: false + port: 3021 + containerPort: 3021 + appProtocol: tcp + +addPorts: + - port: 3020 + containerPort: 3020 + protocol: TCP + +livenessProbe: + httpGet: + path: /status/live + port: "{{ trunc 15 .Release.Name }}" + initialDelaySeconds: 300 + timeoutSeconds: 30 +readinessProbe: + httpGet: + path: /status/ready + port: "{{ trunc 15 .Release.Name }}" + initialDelaySeconds: 5 + timeoutSeconds: 30 + +# resources: +# requests: +# cpu: 100m +# memory: 256Mi +# limits: +# cpu: 500m +# memory: 512Mi + +initContainers: + - name: wait-for-ledger-browser + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://ledger-browser:8000/status -o /dev/null; do + echo "waiting for ledger-browser to be healthy" + sleep 2 + done + - name: nc-nats + image: busybox + command: ['sh', '-c', 'until nc -z nats 4222; do echo waiting for nats; sleep 2; done;'] + - name: nats-check + image: bitnami/natscli + command: + - sh + - -c + - | + until nats --server $NATS_SERVER str info $NATS_STREAM >/dev/null 2>&1; do echo waiting for nats stream; sleep 2; done; + env: + - name: NATS_SERVER + value: "{{ .Values.env.NATS_SERVER }}" + - name: NATS_STREAM + value: "{{ .Values.env.NATS_STREAM }}" + +persistence: + enabled: true + mountPath: /home/aries/.indy_client + capacity: 1Gi + storageClassName: efs + accessMode: ReadWriteOnce + +autoscaling: + enabled: false + +# Sensitive environment variables are sourced from k8s secrets: +# - generated with secretData, or +# - pre-populated with external tooling +# TODO: Helm secret logic to create new secret if not exist +secretData: + ACAPY_WALLET_KEY: verySecretMultitenantWalletKey + ACAPY_ADMIN_API_KEY: adminApiKey + ACAPY_MULTITENANT_JWT_SECRET: verySecretMultitenantJwtSecret + ACAPY_GOVERNANCE_AGENT_API_KEY: adminApiKey + ACAPY_LABEL:
Multitenant + ACAPY_WALLET_NAME: multitenant + ACAPY_MULTITENANCY_CONFIGURATION: '{ "wallet_type":"single-wallet-askar", "wallet_name":"multitenant" }' + + ACAPY_WALLET_STORAGE_CONFIG: '{ "max_connections":10, "min_connections":1, "url":"cloudapi-postgresql:5432" }' + ACAPY_WALLET_STORAGE_CREDS: '{ "account":"multitenant", "admin_account":"multitenant", "admin_password":"multitenant", "password":"multitenant" }' + WALLET_DB_ADMIN_PASS: multitenant + WALLET_DB_ADMIN_USER: multitenant + WALLET_DB_HOST: cloudapi-postgresql + WALLET_DB_PASS: multitenant + WALLET_DB_PORT: 5432 + WALLET_DB_USER: multitenant + +env: + ACAPY_LOG_LEVEL: info + # NATS related + NATS_CREDS_FILE: "" # NATS in Local dev has no auth + NATS_SERVER: nats://nats:4222 + NATS_SUBJECT: cloudapi.aries.events + NATS_STREAM: cloudapi_aries_events + # for aca-py + ADMIN_URL: http://multitenant-agent:3021 + ACAPY_OUTBOUND_TRANSPORT: http + ACAPY_ADMIN: "[0.0.0.0,3021]" + ACAPY_ENDPOINT: http://multitenant-agent:3020 + # Tails server + ACAPY_TAILS_SERVER_BASE_URL: http://tails-server:6543 + + ACAPY_WALLET_TYPE: askar + ACAPY_WALLET_STORAGE_TYPE: postgres_storage + ACAPY_AUTO_PROVISION: true + # Ledger + ACAPY_GENESIS_URL: http://ledger-browser:8000/genesis + + # Multi-tenant Configuration + ACAPY_MULTITENANT: true + ACAPY_MULTITENANT_ADMIN: false + ACAPY_PUBLIC_INVITES: true + # ## DO NOT CHANGE VARIABLES BELOW + # ## Unless you know exactly what you are doing + # ## Changes will probably break CloudAPI + # Optional Helper Configurations - See https://github.com/openwallet-foundation/acapy/blob/main/acapy_agent/config/argparse.py + ACAPY_AUTO_ACCEPT_INVITES: true + ACAPY_AUTO_ACCEPT_REQUESTS: true + ACAPY_AUTO_PING_CONNECTION: true + ACAPY_AUTO_RESPOND_MESSAGES: false + ACAPY_AUTO_RESPOND_CREDENTIAL_PROPOSAL: false + ACAPY_AUTO_RESPOND_CREDENTIAL_OFFER: false + ACAPY_AUTO_RESPOND_CREDENTIAL_REQUEST: false + ACAPY_AUTO_RESPOND_PRESENTATION_PROPOSAL: false + ACAPY_AUTO_RESPOND_PRESENTATION_REQUEST: false + 
ACAPY_AUTO_STORE_CREDENTIAL: true + ACAPY_AUTO_VERIFY_PRESENTATION: true + ACAPY_PRESERVE_EXCHANGE_RECORDS: false + ACAPY_CREATE_REVOCATION_TRANSACTIONS: true + # Endorser + ACAPY_ENDORSER_ROLE: author + ACAPY_AUTO_REQUEST_ENDORSEMENT: true + ACAPY_AUTO_WRITE_TRANSACTIONS: true + ACAPY_ENDORSER_ALIAS: endorser + + ACAPY_REQUESTS_THROUGH_PUBLIC_DID: true + ACAPY_EMIT_NEW_DIDCOMM_PREFIX: true + ACAPY_EMIT_NEW_DIDCOMM_MIME_TYPE: true + + # ## From mt-agent-env secret + # ACAPY_MULTITENANCY_CONFIGURATION: '{"wallet_type":"askar-profile","wallet_name":"xxx"}' + + # ACAPY_LOG_CONFIG: /home/aries/logging_config.yaml + +podAntiAffinityPreset: soft +nodeAffinityPreset: + type: soft + key: node.kubernetes.io/lifecycle + values: + - spot + +configFiles: + logging_config.yml: + path: /home/aries/logging_config.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + json_formatter: + (): pythonjsonlogger.jsonlogger.JsonFormatter + format: '%(asctime)s %(name)s %(wallet_id)s %(levelname)s %(pathname)s:%(lineno)d %(message)s' + handlers: + stream_handler: + class: logging.StreamHandler + level: DEBUG + formatter: json_formatter + stream: ext://sys.stderr + timed_file_handler: + class: logging.handlers.TimedRotatingFileHandler + level: DEBUG + formatter: json_formatter + filename: '/home/aries/log/acapy-agent.log' + when: 'd' + interval: 7 + backupCount: 1 + loggers: + '': + level: ERROR + handlers: + - stream_handler + - timed_file_handler diff --git a/helm/acapy-cloud/conf/dev/multitenant-web.yaml b/helm/acapy-cloud/conf/dev/multitenant-web.yaml new file mode 100644 index 0000000000..9280757215 --- /dev/null +++ b/helm/acapy-cloud/conf/dev/multitenant-web.yaml @@ -0,0 +1,172 @@ +fullnameOverride: multitenant-web + +replicaCount: 1 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/multitenant-web.logs: '[{"source": "python.uvicorn", "service": "multitenant-web"}]' + +image: + name: acapy-cloud/app + pullPolicy: Always + tag: master + 
+command: + - poetry + - run + - uvicorn + - app.main:app + - --log-config=/tmp/log_conf.yaml + - --reload + - --host + - 0.0.0.0 + - --port + - 8000 + +ingressDomain: acapy-cloud.dev.didxtech.com +ingress: + internal: + enabled: true + className: nginx-internal + rules: + - host: multitenant-web-{{ .Values.ingressDomain }} + paths: + - path: /tenant-admin + cloudapi-internal: + enabled: true + className: nginx-internal + annotations: + # Retool needs the below + nginx.ingress.kubernetes.io/cors-allow-headers: x-api-key + nginx.ingress.kubernetes.io/enable-cors: "true" + rules: + - host: '{{ .Values.ingressDomain }}' + paths: + - path: /tenant-admin + +service: + # if set, will run Pods on Node Network + hostNetwork: false + port: 8000 + containerPort: 8000 + appProtocol: http + +livenessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" +readinessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" + +# resources: +# requests: +# cpu: 50m +# memory: 256Mi +# limits: +# cpu: 250m +# memory: 256Mi + +initContainers: + - name: wait-multitenant-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://multitenant-agent:3020; do + echo "waiting for multitenant-agent to be healthy" + sleep 2 + done + +persistence: + enabled: false + +autoscaling: + enabled: false + +podLabels: + admission.datadoghq.com/enabled: "true" + sidecar.istio.io/inject: "true" + +secretData: + ACAPY_MULTITENANT_JWT_SECRET: verySecretMultitenantJwtSecret + ACAPY_TENANT_AGENT_API_KEY: adminApiKey + +env: + LOG_LEVEL: info + ACAPY_GOVERNANCE_AGENT_URL: http://governance-agent:3021 + ACAPY_TENANT_AGENT_URL: http://multitenant-agent:3021 + TRUST_REGISTRY_URL: http://trust-registry:8000 + OPENAPI_NAME: CloudAPI Multitenant Admin + PYTHONPATH: / + ACAPY_ENDORSER_ALIAS: endorser + ROLE: tenant-admin + ROOT_PATH: /tenant-admin + ENABLE_SERIALIZE_LOGS: "TRUE" + GOVERNANCE_ACAPY_LABEL: Governance + +podSecurityContext: + fsGroup: 65534 
+securityContext: + runAsUser: 0 + +extraVolumes: + - name: logs + emptyDir: {} +extraVolumeMounts: + - name: logs + mountPath: /logs + +lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - sleep 15 + +podAntiAffinityPreset: soft +nodeAffinityPreset: + type: soft + key: node.kubernetes.io/lifecycle + values: + - spot + +configFiles: + log_conf.yaml: + path: /tmp/log_conf.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + default: + "()": uvicorn.logging.DefaultFormatter + format: '%(asctime)s %(name)s %(levelname)s %(message)s' + use_colors: null + access: + "()": uvicorn.logging.AccessFormatter + format: '%(asctime)s %(name)s %(levelname)s %(client_addr)s - "%(request_line)s" %(status_code)s' + handlers: + default: + formatter: default + class: logging.StreamHandler + stream: ext://sys.stderr + access: + formatter: access + class: logging.StreamHandler + stream: ext://sys.stdout + loggers: + uvicorn: + level: INFO + handlers: + - default + propagate: no + uvicorn.error: + level: INFO + uvicorn.access: + level: INFO + handlers: + - access + propagate: no diff --git a/helm/acapy-cloud/conf/dev/public-web.yaml b/helm/acapy-cloud/conf/dev/public-web.yaml new file mode 100644 index 0000000000..1688d0e7ff --- /dev/null +++ b/helm/acapy-cloud/conf/dev/public-web.yaml @@ -0,0 +1,181 @@ +fullnameOverride: public-web + +replicaCount: 1 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/public-web.logs: '[{"source": "python.uvicorn", "service": "public-web"}]' + +image: + name: acapy-cloud/app + pullPolicy: Always + tag: master + +command: + - poetry + - run + - uvicorn + - app.main:app + - --log-config=/tmp/log_conf.yaml + - --reload + - --host + - 0.0.0.0 + - --port + - 8000 + +ingressDomain: acapy-cloud.dev.didxtech.com +ingress: + internal: + enabled: true + className: nginx-internal + rules: + - host: public-web-{{ .Values.ingressDomain }} + paths: + - path: /public + cloudapi-internal: + enabled: true + 
className: nginx-internal + annotations: + # Retool needs the below + nginx.ingress.kubernetes.io/cors-allow-headers: x-api-key + nginx.ingress.kubernetes.io/enable-cors: "true" + rules: + - host: '{{ .Values.ingressDomain }}' + paths: + - path: /public + +service: + # if set, will run Pods on Node Network + hostNetwork: false + port: 8000 + containerPort: 8000 + appProtocol: http + +livenessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" +readinessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" + +# resources: +# requests: +# cpu: 50m +# memory: 256Mi +# limits: +# cpu: 250m +# memory: 256Mi + +initContainers: + - name: wait-governance-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://governance-agent:3020 -o /dev/null; do + echo "waiting for governance-agent to be healthy" + sleep 2 + done + - name: wait-multitenant-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://multitenant-agent:3020; do + echo "waiting for multitenant-agent to be healthy" + sleep 2 + done + +persistence: + enabled: false + +autoscaling: + enabled: false + + +podLabels: + admission.datadoghq.com/enabled: "true" + +extraSecretNamesForEnvFrom: {} + +secretData: {} + +env: + LOG_LEVEL: info + # ACAPY_GOVERNANCE_AGENT_URL: http://governance-agent:3021 + # ACAPY_TENANT_AGENT_URL: http://multitenant-agent:3021 + TRUST_REGISTRY_URL: http://trust-registry:8000 + OPENAPI_NAME: CloudAPI Public + PYTHONPATH: / + # ACAPY_ENDORSER_ALIAS: endorser + ROLE: public + ROOT_PATH: /public + ENABLE_SERIALIZE_LOGS: "TRUE" + +podSecurityContext: + fsGroup: 65534 +securityContext: + runAsUser: 0 + +extraVolumes: + - name: logs + emptyDir: {} +extraVolumeMounts: + - name: logs + mountPath: /logs + +lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - sleep 15 + +podAntiAffinityPreset: soft +nodeAffinityPreset: + type: soft + key: node.kubernetes.io/lifecycle + values: + - spot + +configFiles: 
+ log_conf.yaml: + path: /tmp/log_conf.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + default: + "()": uvicorn.logging.DefaultFormatter + format: '%(asctime)s %(name)s %(levelname)s %(message)s' + use_colors: null + access: + "()": uvicorn.logging.AccessFormatter + format: '%(asctime)s %(name)s %(levelname)s %(client_addr)s - "%(request_line)s" %(status_code)s' + handlers: + default: + formatter: default + class: logging.StreamHandler + stream: ext://sys.stderr + access: + formatter: access + class: logging.StreamHandler + stream: ext://sys.stdout + loggers: + uvicorn: + level: INFO + handlers: + - default + propagate: no + uvicorn.error: + level: INFO + uvicorn.access: + level: INFO + handlers: + - access + propagate: no diff --git a/helm/acapy-cloud/conf/dev/tails-server.yaml b/helm/acapy-cloud/conf/dev/tails-server.yaml new file mode 100644 index 0000000000..6f9a7e812a --- /dev/null +++ b/helm/acapy-cloud/conf/dev/tails-server.yaml @@ -0,0 +1,71 @@ +fullnameOverride: tails-server + +replicaCount: 1 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/tails-server.logs: '[{"source": "python.acapy", "service": "tails-server"}]' +podLabels: + admission.datadoghq.com/enabled: "true" + +image: + name: acapy-cloud/tails-server + # registry: ghcr.io/bcgov + # tag: 1.1 + pullPolicy: Always + tag: master + +ingressDomain: acapy-cloud.dev.didxtech.com +ingress: + internal: + enabled: true + className: nginx-internal + rules: + - host: tails-server-{{ .Values.ingressDomain }} + paths: + - path: / + port: 6543 + +args: + - tails-server + - --host + - 0.0.0.0 + - --port + - 6543 + - --storage-path + - /tails-server-db + - --log-level + - INFO + +service: + port: 6543 + containerPort: 6543 + appProtocol: tcp + +livenessProbe: + tcpSocket: + port: "{{ trunc 15 .Release.Name }}" +readinessProbe: + tcpSocket: + port: "{{ trunc 15 .Release.Name }}" + initialDelaySeconds: 5 + +persistence: + enabled: true + mountPath: 
/tails-server-db + capacity: 10Gi + storageClassName: efs + accessMode: ReadWriteOnce + +podSecurityContext: + fsGroup: 65534 +securityContext: + runAsUser: 65534 + +# resources: +# requests: +# cpu: 50m +# memory: 128Mi +# limits: +# cpu: 250m +# memory: 256Mi diff --git a/helm/acapy-cloud/conf/dev/tenant-web.yaml b/helm/acapy-cloud/conf/dev/tenant-web.yaml new file mode 100644 index 0000000000..0c4c617aa0 --- /dev/null +++ b/helm/acapy-cloud/conf/dev/tenant-web.yaml @@ -0,0 +1,190 @@ +fullnameOverride: tenant-web + +replicaCount: 1 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/tenant-web.logs: '[{"source": "python.uvicorn", "service": "tenant-web"}]' + +image: + name: acapy-cloud/app + pullPolicy: Always + tag: master + +command: + - poetry + - run + - uvicorn + - app.main:app + - --log-config=/tmp/log_conf.yaml + - --reload + - --host + - 0.0.0.0 + - --port + - 8000 + +ingressDomain: acapy-cloud.dev.didxtech.com +ingress: + internal: + enabled: true + className: nginx-internal + rules: + - host: tenant-web-{{ .Values.ingressDomain }} + paths: + - path: /tenant + cloudapi-internal: + enabled: true + className: nginx-internal + annotations: + # Retool needs the below + nginx.ingress.kubernetes.io/cors-allow-headers: x-api-key + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/client-body-timeout: "120" + nginx.ingress.kubernetes.io/proxy-connect-timeout: "120" + nginx.ingress.kubernetes.io/proxy-read-timeout: "120" + nginx.ingress.kubernetes.io/proxy-send-timeout: "120" + rules: + - host: '{{ .Values.ingressDomain }}' + paths: + - path: /tenant + +service: + # if set, will run Pods on Node Network + hostNetwork: false + port: 8000 + containerPort: 8000 + appProtocol: http + +livenessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" +readinessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" + +# resources: +# requests: +# cpu: 50m +# memory: 256Mi +# limits: +# cpu: 
250m +# memory: 256Mi + +initContainers: + - name: wait-governance-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://governance-agent:3020 -o /dev/null; do + echo "waiting for governance-agent to be healthy" + sleep 2 + done + - name: wait-multitenant-agent + image: curlimages/curl + command: + - sh + - -c + - | + until curl -s http://multitenant-agent:3020; do + echo "waiting for multitenant-agent to be healthy" + sleep 2 + done + +persistence: + enabled: false + +autoscaling: + enabled: false + +secretData: + ACAPY_GOVERNANCE_AGENT_API_KEY: adminApiKey + ACAPY_MULTITENANT_JWT_SECRET: verySecretMultitenantJwtSecret + ACAPY_TENANT_AGENT_API_KEY: adminApiKey # This is, potentially, not needed + +podLabels: + admission.datadoghq.com/enabled: "true" + +env: + LOG_LEVEL: info + ACAPY_GOVERNANCE_AGENT_URL: http://governance-agent:3021 + ACAPY_TENANT_AGENT_URL: http://multitenant-agent:3021 + TRUST_REGISTRY_URL: http://trust-registry:8000 + OPENAPI_NAME: CloudAPI Tenant + PYTHONPATH: / + ACAPY_ENDORSER_ALIAS: endorser + ROLE: tenant + ROOT_PATH: /tenant + ACAPY_TAILS_SERVER_BASE_URL: http://tails-server:6543 + ENABLE_SERIALIZE_LOGS: "TRUE" + GOVERNANCE_ACAPY_LABEL: Governance + REGISTRY_CREATION_TIMEOUT: 120 + REGISTRY_SIZE: 100 + WAYPOINT_URL: http://waypoint:3010 + +podSecurityContext: + fsGroup: 65534 +securityContext: + runAsUser: 0 + +extraVolumes: + - name: logs + emptyDir: {} +extraVolumeMounts: + - name: logs + mountPath: /logs + +lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - sleep 15 + +podAntiAffinityPreset: soft +nodeAffinityPreset: + type: soft + key: node.kubernetes.io/lifecycle + values: + - spot + +configFiles: + log_conf.yaml: + path: /tmp/log_conf.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + default: + "()": uvicorn.logging.DefaultFormatter + format: '%(asctime)s %(name)s %(levelname)s %(message)s' + use_colors: null + access: + "()": uvicorn.logging.AccessFormatter 
+ format: '%(asctime)s %(name)s %(levelname)s %(client_addr)s - "%(request_line)s" %(status_code)s' + handlers: + default: + formatter: default + class: logging.StreamHandler + stream: ext://sys.stderr + access: + formatter: access + class: logging.StreamHandler + stream: ext://sys.stdout + loggers: + uvicorn: + level: INFO + handlers: + - default + propagate: no + uvicorn.error: + level: INFO + uvicorn.access: + level: INFO + handlers: + - access + propagate: no diff --git a/helm/acapy-cloud/conf/dev/trust-registry.yaml b/helm/acapy-cloud/conf/dev/trust-registry.yaml new file mode 100644 index 0000000000..763646b780 --- /dev/null +++ b/helm/acapy-cloud/conf/dev/trust-registry.yaml @@ -0,0 +1,140 @@ +fullnameOverride: trust-registry + +replicaCount: 1 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/trust-registry.logs: '[{"source": "python.uvicorn", "service": "trust-registry"}]' +podLabels: + admission.datadoghq.com/enabled: "true" + +image: + name: acapy-cloud/trust-registry + pullPolicy: Always + tag: master + +command: + - poetry + - run + - uvicorn + - trustregistry.main:app + - --log-config=/tmp/log_conf.yaml + - --reload + - --host + - 0.0.0.0 + - --port + - 8000 + +lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - sleep 5 + +ingressDomain: acapy-cloud.dev.didxtech.com +ingress: + internal: + enabled: true + className: nginx-internal + rules: + - host: trust-registry-{{ .Values.ingressDomain }} + paths: + - path: / + port: 8000 + +service: + appProtocol: tcp + hostNetwork: false + port: 8000 + containerPort: 8000 + +livenessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" +readinessProbe: + httpGet: + path: /docs + port: "{{ trunc 15 .Release.Name }}" + +# resources: +# requests: +# cpu: 50m +# memory: 256Mi +# limits: +# cpu: 250m +# memory: 256Mi + +autoscaling: + enabled: false + +secretData: + POSTGRES_DATABASE_URL: 
postgresql://trust-registry:trust-registry@cloudapi-postgresql:5432/trust-registry?sslmode=prefer + +env: + LOG_LEVEL: warning + PYTHONPATH: / + OPENAPI_NAME: Trustregistry + POSTGRES_POOL_SIZE: 10 + POSTGRES_MAX_OVERFLOW: 20 + POSTGRES_POOL_RECYCLE: 600 # 10 minutes + POSTGRES_POOL_TIMEOUT: 30 + ENABLE_SERIALIZE_LOGS: "TRUE" + +podSecurityContext: + fsGroup: 65534 +securityContext: + runAsUser: 0 + +extraVolumes: + - name: logs + emptyDir: {} +extraVolumeMounts: + - name: logs + mountPath: /logs + +podAntiAffinityPreset: soft +nodeAffinityPreset: + type: soft + key: node.kubernetes.io/lifecycle + values: + - spot + +configFiles: + log_conf.yaml: + path: /tmp/log_conf.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + default: + "()": uvicorn.logging.DefaultFormatter + format: '%(asctime)s %(name)s %(levelname)s %(message)s' + use_colors: null + access: + "()": uvicorn.logging.AccessFormatter + format: '%(asctime)s %(name)s %(levelname)s %(client_addr)s - "%(request_line)s" %(status_code)s' + handlers: + default: + formatter: default + class: logging.StreamHandler + stream: ext://sys.stderr + access: + formatter: access + class: logging.StreamHandler + stream: ext://sys.stdout + loggers: + uvicorn: + level: INFO + handlers: + - default + propagate: no + uvicorn.error: + level: INFO + uvicorn.access: + level: INFO + handlers: + - access + propagate: no diff --git a/helm/acapy-cloud/conf/dev/waypoint.yaml b/helm/acapy-cloud/conf/dev/waypoint.yaml new file mode 100644 index 0000000000..cbde807427 --- /dev/null +++ b/helm/acapy-cloud/conf/dev/waypoint.yaml @@ -0,0 +1,157 @@ +fullnameOverride: waypoint + +replicaCount: 1 + +podAnnotations: + sidecar.istio.io/proxyCPU: 10m + ad.datadoghq.com/waypoint.logs: '[{"source": "python.uvicorn", "service": "waypoint"}]' + proxy.istio.io/config: |- + proxyMetadata: + ISTIO_META_IDLE_TIMEOUT: 0s +podLabels: + admission.datadoghq.com/enabled: "true" + +image: + name: acapy-cloud/waypoint + pullPolicy: 
Always + tag: master + +command: + - poetry + - run + - uvicorn + - waypoint.main:app + - --log-config=/tmp/log_conf.yaml + - --reload + - --host + - 0.0.0.0 + - --port + - 3010 + +initContainers: + - name: nc-nats + image: busybox + command: ['sh', '-c', 'until nc -z nats 4222; do echo waiting for nats; sleep 2; done;'] + - name: nats-check + image: bitnami/natscli + command: + - sh + - -c + - | + until nats --server $NATS_SERVER str info $NATS_STREAM >/dev/null 2>&1; do echo waiting for nats stream; sleep 2; done; + env: + - name: NATS_SERVER + value: "{{ .Values.env.NATS_SERVER }}" + - name: NATS_STREAM + value: "{{ .Values.env.NATS_STATE_STREAM }}" + +ingressDomain: acapy-cloud.dev.didxtech.com +ingress: + internal: + enabled: true + className: nginx-internal + rules: + - host: waypoint-{{ .Values.ingressDomain }} + paths: + - path: / + +service: + # if set, will run Pods on Node Network + appProtocol: http + hostNetwork: false + port: 3010 + containerPort: 3010 + +livenessProbe: + httpGet: + path: /health/live + port: "{{ trunc 15 .Release.Name }}" +readinessProbe: + httpGet: + path: /health/ready + port: "{{ trunc 15 .Release.Name }}" + +lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - sleep 15 + +# resources: +# requests: +# cpu: 50m +# memory: 384Mi +# limits: +# cpu: 250m +# memory: 512Mi + +autoscaling: + enabled: false + +env: + LOG_LEVEL: info + OPENAPI_NAME: waypoint + PYTHONPATH: "/" + ENABLE_SERIALIZE_LOGS: "TRUE" + NATS_CREDS_FILE: "" # NATS in Local dev has no auth + NATS_SERVER: nats://nats:4222 + NATS_STATE_SUBJECT: cloudapi.aries.state_monitoring + NATS_STATE_STREAM: cloudapi_aries_state_monitoring + +podSecurityContext: + fsGroup: 65534 +securityContext: + runAsUser: 0 + +extraVolumes: + - name: logs + emptyDir: {} +extraVolumeMounts: + - name: logs + mountPath: /logs + +podAntiAffinityPreset: soft +nodeAffinityPreset: + type: soft + key: node.kubernetes.io/lifecycle + values: + - spot + +configFiles:
log_conf.yaml: + path: /tmp/log_conf.yaml + content: |- + version: 1 + disable_existing_loggers: False + formatters: + default: + "()": uvicorn.logging.DefaultFormatter + format: '%(asctime)s %(name)s %(levelname)s %(message)s' + use_colors: null + access: + "()": uvicorn.logging.AccessFormatter + format: '%(asctime)s %(name)s %(levelname)s %(client_addr)s - "%(request_line)s" %(status_code)s' + handlers: + default: + formatter: default + class: logging.StreamHandler + stream: ext://sys.stderr + access: + formatter: access + class: logging.StreamHandler + stream: ext://sys.stdout + loggers: + uvicorn: + level: INFO + handlers: + - default + propagate: no + uvicorn.error: + level: INFO + uvicorn.access: + level: INFO + handlers: + - access + propagate: no diff --git a/helm/acapy-cloud/conf/local/connect-cloud.yaml b/helm/acapy-cloud/conf/local/connect-cloud.yaml new file mode 100644 index 0000000000..f53d117194 --- /dev/null +++ b/helm/acapy-cloud/conf/local/connect-cloud.yaml @@ -0,0 +1,50 @@ +# https://github.com/redpanda-data/helm-charts/tree/main/charts/connect +image: + tag: 4 +updateStrategy: + type: Recreate + +logger: + level: info + # format: json + add_timestamp: true + static_fields: + "@service": connect-cloud + +http: + debug_endpoints: true + +streams: + enabled: true + streamsConfigMap: connect-cloud-streams + +deployment: + podLabels: + sidecar.istio.io/inject: "false" + +env: + - name: GOVERNANCE_ACAPY_LABEL + value: Governance + +initContainers: + - name: nc-nats + image: busybox + command: ['sh', '-c', 'until nc -z nats 4222; do echo waiting for nats; sleep 2; done;'] + - name: nats-check + image: bitnami/natscli + command: + - sh + - -c + - | + until nats --server nats://nats:4222 str info acapy_events >/dev/null 2>&1; do + echo waiting for nats acapy_events stream; + sleep 2; + done + until nats --server nats://nats:4222 str info cloudapi_aries_events >/dev/null 2>&1; do + echo waiting for nats cloudapi_aries_events stream; + sleep 2; + done + 
until nats --server nats://nats:4222 str info cloudapi_aries_state_monitoring >/dev/null 2>&1; do + echo waiting for nats cloudapi_aries_state_monitoring stream; + sleep 2; + done diff --git a/helm/acapy-cloud/conf/local/endorser.yaml b/helm/acapy-cloud/conf/local/endorser.yaml index f2cf6c5b3d..ba6f9b320c 100644 --- a/helm/acapy-cloud/conf/local/endorser.yaml +++ b/helm/acapy-cloud/conf/local/endorser.yaml @@ -12,10 +12,27 @@ podLabels: admission.datadoghq.com/enabled: "true" image: - name: d-cloud/endorser + name: acapy-cloud/endorser pullPolicy: Always tag: master +initContainers: + - name: nc-nats + image: busybox + command: ['sh', '-c', 'until nc -z nats 4222; do echo waiting for nats; sleep 2; done;'] + - name: nats-check + image: bitnami/natscli + command: + - sh + - -c + - | + until nats --server $NATS_SERVER str info $NATS_STREAM >/dev/null 2>&1; do echo waiting for nats stream >/dev/null 2>&1; sleep 2; done; + env: + - name: NATS_SERVER + value: "{{ .Values.env.NATS_SERVER }}" + - name: NATS_STREAM + value: "{{ .Values.env.NATS_STREAM }}" + command: - poetry - run @@ -69,6 +86,12 @@ env: GOVERNANCE_ACAPY_LABEL: Governance ENABLE_SERIALIZE_LOGS: "FALSE" + NATS_CREDS_FILE: "" + NATS_SERVER: nats://nats:4222 + NATS_SUBJECT: cloudapi.aries.events + NATS_STREAM: cloudapi_aries_events + ENDORSER_DURABLE_CONSUMER: endorser + podSecurityContext: fsGroup: 65534 securityContext: diff --git a/helm/acapy-cloud/conf/local/governance-agent-pg-proxy.yaml b/helm/acapy-cloud/conf/local/governance-agent-pg-proxy.yaml new file mode 100644 index 0000000000..90039fd82d --- /dev/null +++ b/helm/acapy-cloud/conf/local/governance-agent-pg-proxy.yaml @@ -0,0 +1,3 @@ +secretData: + ACAPY_WALLET_STORAGE_CONFIG: '{ "max_connections":10, "min_idle_count":10, "url":"cloudapi-pgpool:5432" }' + WALLET_DB_HOST: cloudapi-pgpool diff --git a/helm/acapy-cloud/conf/local/governance-agent.yaml b/helm/acapy-cloud/conf/local/governance-agent.yaml index 276c9b89a5..376aeec7ce 100644 --- 
a/helm/acapy-cloud/conf/local/governance-agent.yaml +++ b/helm/acapy-cloud/conf/local/governance-agent.yaml @@ -7,7 +7,7 @@ podAnnotations: ad.datadoghq.com/governance-agent.logs: '[{"source": "python", "service": "governance-agent", "auto_multi_line_detection": true}]' image: - name: d-cloud/governance-agent + name: acapy-cloud/governance-agent pullPolicy: Always tag: master @@ -122,6 +122,21 @@ initContainers: echo "Failed to register DID. Status code: $STATUS" exit 1 fi + - name: nc-nats + image: busybox + command: ['sh', '-c', 'until nc -z nats 4222; do echo waiting for nats; sleep 2; done;'] + - name: nats-check + image: bitnami/natscli + command: + - sh + - -c + - | + until nats --server $NATS_SERVER str info $NATS_STREAM >/dev/null 2>&1; do echo waiting for nats stream >/dev/null 2>&1; sleep 2; done; + env: + - name: NATS_SERVER + value: "{{ .Values.env.NATS_SERVER }}" + - name: NATS_STREAM + value: "{{ .Values.env.NATS_STREAM }}" persistence: enabled: false @@ -141,7 +156,7 @@ secretData: ACAPY_WALLET_NAME: governance ACAPY_WALLET_SEED: verySecretPaddedWalletSeedPadded - ACAPY_WALLET_STORAGE_CONFIG: '{ "max_connections": 10, "min_idle_count": 10, "url": "cloudapi-postgresql:5432" }' + ACAPY_WALLET_STORAGE_CONFIG: '{ "max_connections":10, "min_idle_count":10, "url":"cloudapi-postgresql:5432" }' ACAPY_WALLET_STORAGE_CREDS: '{ "account": "governance", "admin_account": "governance", "admin_password": "governance", "password": "governance" }' WALLET_DB_ADMIN_PASS: governance WALLET_DB_ADMIN_USER: governance @@ -174,7 +189,7 @@ env: # ## DO NOT CHANGE VARIABLES BELOW # ## Unless you know exactly what you are doing # ## Changes will probably break CloudAPI - # Optional Helper Configurations - See https://github.com/hyperledger/aries-cloudagent-python/blob/main/aries_cloudagent/config/argparse.py + # Optional Helper Configurations - See https://github.com/openwallet-foundation/acapy/blob/main/acapy_agent/config/argparse.py ACAPY_AUTO_ACCEPT_INVITES: false 
ACAPY_AUTO_ACCEPT_REQUESTS: false ACAPY_AUTO_PING_CONNECTION: true diff --git a/helm/acapy-cloud/conf/local/governance-web.yaml b/helm/acapy-cloud/conf/local/governance-web.yaml index f1d74a85a5..047f380ab9 100644 --- a/helm/acapy-cloud/conf/local/governance-web.yaml +++ b/helm/acapy-cloud/conf/local/governance-web.yaml @@ -7,7 +7,7 @@ podAnnotations: ad.datadoghq.com/governance-web.logs: '[{"source": "python.uvicorn", "service": "governance-web"}]' image: - name: d-cloud/governance-web + name: acapy-cloud/app pullPolicy: Always tag: master @@ -79,16 +79,6 @@ initContainers: echo "waiting for governance-agent to be healthy" sleep 2 done - - name: wait-governance-multitenant-agent - image: curlimages/curl - command: - - sh - - -c - - | - until curl -s http://multitenant-agent:3020; do - echo "waiting for multitenant-agent to be healthy" - sleep 2 - done persistence: enabled: false diff --git a/helm/acapy-cloud/conf/local/ledger-browser.yaml b/helm/acapy-cloud/conf/local/ledger-browser.yaml index 1ae85ff1b9..4e8b74ae46 100644 --- a/helm/acapy-cloud/conf/local/ledger-browser.yaml +++ b/helm/acapy-cloud/conf/local/ledger-browser.yaml @@ -14,7 +14,7 @@ podLabels: sidecar.istio.io/inject: "false" image: - name: ledger-browser + name: acapy-cloud/ledger-nodes pullPolicy: Always tag: latest diff --git a/helm/acapy-cloud/conf/local/multitenant-agent-pg-proxy.yaml b/helm/acapy-cloud/conf/local/multitenant-agent-pg-proxy.yaml new file mode 100644 index 0000000000..90039fd82d --- /dev/null +++ b/helm/acapy-cloud/conf/local/multitenant-agent-pg-proxy.yaml @@ -0,0 +1,3 @@ +secretData: + ACAPY_WALLET_STORAGE_CONFIG: '{ "max_connections":10, "min_idle_count":10, "url":"cloudapi-pgpool:5432" }' + WALLET_DB_HOST: cloudapi-pgpool diff --git a/helm/acapy-cloud/conf/local/multitenant-agent.yaml b/helm/acapy-cloud/conf/local/multitenant-agent.yaml index a98a31165e..4ee84ed426 100644 --- a/helm/acapy-cloud/conf/local/multitenant-agent.yaml +++ 
b/helm/acapy-cloud/conf/local/multitenant-agent.yaml @@ -8,7 +8,7 @@ podAnnotations: ad.datadoghq.com/multitenant-agent.logs: '[{"source": "python", "service": "multitenant-agent", "auto_multi_line_detection": true}]' image: - name: d-cloud/multitenant-agent + name: acapy-cloud/multitenant-agent tag: master pullPolicy: Always @@ -104,6 +104,21 @@ initContainers: echo "waiting for ledger-browser to be healthy" sleep 2 done + - name: nc-nats + image: busybox + command: ['sh', '-c', 'until nc -z nats 4222; do echo waiting for nats; sleep 2; done;'] + - name: nats-check + image: bitnami/natscli + command: + - sh + - -c + - | + until nats --server $NATS_SERVER str info $NATS_STREAM >/dev/null 2>&1; do echo waiting for nats stream >/dev/null 2>&1; sleep 2; done; + env: + - name: NATS_SERVER + value: "{{ .Values.env.NATS_SERVER }}" + - name: NATS_STREAM + value: "{{ .Values.env.NATS_STREAM }}" persistence: enabled: true @@ -165,7 +180,7 @@ env: # ## DO NOT CHANGE VARIABLES BELOW # ## Unless you know exactly what you are doing # ## Changes will probably break CloudAPI - # Optional Helper Configurations - See https://github.com/hyperledger/aries-cloudagent-python/blob/main/aries_cloudagent/config/argparse.py + # Optional Helper Configurations - See https://github.com/openwallet-foundation/acapy/blob/main/acapy_agent/config/argparse.py ACAPY_AUTO_ACCEPT_INVITES: true ACAPY_AUTO_ACCEPT_REQUESTS: true ACAPY_AUTO_PING_CONNECTION: true diff --git a/helm/acapy-cloud/conf/local/multitenant-web.yaml b/helm/acapy-cloud/conf/local/multitenant-web.yaml index fb3a01e62b..ec28933454 100644 --- a/helm/acapy-cloud/conf/local/multitenant-web.yaml +++ b/helm/acapy-cloud/conf/local/multitenant-web.yaml @@ -7,7 +7,7 @@ podAnnotations: ad.datadoghq.com/multitenant-web.logs: '[{"source": "python.uvicorn", "service": "multitenant-web"}]' image: - name: d-cloud/multitenant-web + name: acapy-cloud/app pullPolicy: Always tag: master @@ -79,16 +79,6 @@ initContainers: echo "waiting for 
governance-agent to be healthy" sleep 2 done - - name: wait-multitenant-agent - image: curlimages/curl - command: - - sh - - -c - - | - until curl -s http://multitenant-agent:3020; do - echo "waiting for multitenant-agent to be healthy" - sleep 2 - done persistence: enabled: false diff --git a/helm/acapy-cloud/conf/local/public-web.yaml b/helm/acapy-cloud/conf/local/public-web.yaml index de5e4a0cdb..265b01ccd3 100644 --- a/helm/acapy-cloud/conf/local/public-web.yaml +++ b/helm/acapy-cloud/conf/local/public-web.yaml @@ -7,7 +7,7 @@ podAnnotations: ad.datadoghq.com/public-web.logs: '[{"source": "python.uvicorn", "service": "public-web"}]' image: - name: d-cloud/public-web + name: acapy-cloud/app pullPolicy: Always tag: master diff --git a/helm/acapy-cloud/conf/local/tails-server.yaml b/helm/acapy-cloud/conf/local/tails-server.yaml index 09bfe85cda..9f39162efa 100644 --- a/helm/acapy-cloud/conf/local/tails-server.yaml +++ b/helm/acapy-cloud/conf/local/tails-server.yaml @@ -10,7 +10,7 @@ podLabels: admission.datadoghq.com/enabled: "true" image: - name: d-cloud/tails-server + name: acapy-cloud/tails-server # registry: ghcr.io/bcgov # tag: 1.1 pullPolicy: Always diff --git a/helm/acapy-cloud/conf/local/tenant-web.yaml b/helm/acapy-cloud/conf/local/tenant-web.yaml index ce49346fc4..218fe8a8a1 100644 --- a/helm/acapy-cloud/conf/local/tenant-web.yaml +++ b/helm/acapy-cloud/conf/local/tenant-web.yaml @@ -7,7 +7,7 @@ podAnnotations: ad.datadoghq.com/tenant-web.logs: '[{"source": "python.uvicorn", "service": "tenant-web"}]' image: - name: d-cloud/tenant-web + name: acapy-cloud/app pullPolicy: Always tag: master diff --git a/helm/acapy-cloud/conf/local/trust-registry.yaml b/helm/acapy-cloud/conf/local/trust-registry.yaml index 2c3b9bf461..2fd42446d0 100644 --- a/helm/acapy-cloud/conf/local/trust-registry.yaml +++ b/helm/acapy-cloud/conf/local/trust-registry.yaml @@ -9,7 +9,7 @@ podLabels: admission.datadoghq.com/enabled: "true" image: - name: d-cloud/trust-registry + name: 
acapy-cloud/trust-registry pullPolicy: Always tag: master diff --git a/helm/acapy-cloud/conf/local/waypoint.yaml b/helm/acapy-cloud/conf/local/waypoint.yaml index 7d018de579..2fce7876a2 100644 --- a/helm/acapy-cloud/conf/local/waypoint.yaml +++ b/helm/acapy-cloud/conf/local/waypoint.yaml @@ -12,7 +12,7 @@ podLabels: admission.datadoghq.com/enabled: "true" image: - name: d-cloud/waypoint + name: acapy-cloud/waypoint pullPolicy: Always tag: master @@ -28,6 +28,23 @@ command: - --port - 3010 +initContainers: + - name: nc-nats + image: busybox + command: ['sh', '-c', 'until nc -z nats 4222; do echo waiting for nats; sleep 2; done;'] + - name: nats-check + image: bitnami/natscli + command: + - sh + - -c + - | + until nats --server $NATS_SERVER str info $NATS_STREAM >/dev/null 2>&1; do echo waiting for nats stream >/dev/null 2>&1; sleep 2; done; + env: + - name: NATS_SERVER + value: "{{ .Values.env.NATS_SERVER }}" + - name: NATS_STREAM + value: "{{ .Values.env.NATS_STATE_STREAM }}" + ingressDomain: cloudapi.127.0.0.1.nip.io ingress: internal: diff --git a/helm/acapy-cloud/templates/NOTES.txt b/helm/acapy-cloud/templates/NOTES.txt index f3307b2c5c..2ff144b246 100644 --- a/helm/acapy-cloud/templates/NOTES.txt +++ b/helm/acapy-cloud/templates/NOTES.txt @@ -1,5 +1,5 @@ -[ didx:cloud ] +[ acapy-cloud ] Installed as: {{ .Release.Name }} Namespace: {{ .Release.Namespace }} diff --git a/helm/acapy-cloud/values.yaml b/helm/acapy-cloud/values.yaml index d0317c21c7..9bf5321e18 100644 --- a/helm/acapy-cloud/values.yaml +++ b/helm/acapy-cloud/values.yaml @@ -223,7 +223,7 @@ secretData: {} ### Istio istio: peerAuth: - enabled: false + enabled: true labels: {} annotations: {} conf: @@ -231,7 +231,7 @@ istio: mode: PERMISSIVE sidecar: - enabled: false + enabled: true labels: {} annotations: {} conf: diff --git a/helm/acapy-test.yaml.gotmpl b/helm/acapy-test.yaml.gotmpl new file mode 100644 index 0000000000..823229581f --- /dev/null +++ b/helm/acapy-test.yaml.gotmpl @@ -0,0 +1,42 @@ 
+environments: + local: + values: + - release: acapy-test + namespace: cloudapi + lifecycle: spot + tag: master + ddInjectEnabled: false + regressionEnabled: false + dev: + values: + - release: acapy-test + namespace: acapy-cloud-dev + lifecycle: spot + tag: master + ddInjectEnabled: false + regressionEnabled: false +--- +releases: +- name: {{ .Values.release }} + namespace: {{ .Values.namespace }} + chart: ./acapy-test + values: + - ./acapy-test/conf/{{ .Environment.Name }}/values.yaml + {{- if .Values.regressionEnabled }} + - ./acapy-test/conf/{{ .Environment.Name }}/regression.yaml + {{- end }} + - labels: + tags.datadoghq.com/env: {{ .Environment.Name }} + podLabels: + admission.datadoghq.com/enabled: {{ quote .Values.ddInjectEnabled }} + tags.datadoghq.com/env: {{ .Environment.Name }} + set: + - name: image.tag + value: {{ .Values.tag }} +--- +helmDefaults: + timeout: 180 + wait: true + atomic: true + cleanupOnFail: true + createNamespace: false diff --git a/helm/acapy-test/README.md b/helm/acapy-test/README.md index 21c8c2891d..5328db059a 100644 --- a/helm/acapy-test/README.md +++ b/helm/acapy-test/README.md @@ -42,7 +42,7 @@ The following table lists the configurable parameters of the chart and their def | Parameter | Description | Default | |-----------|-------------|---------| | `image.registry` | Image registry | `ghcr.io/didx-xyz` | -| `image.name` | Image name | `d-cloud/pytest` | +| `image.name` | Image name | `acapy-cloud/pytest` | | `image.tag` | Image tag | `latest` | | `image.pullPolicy` | Image pull policy | `Always` | | `imagePullSecrets` | Image pull secrets | `[]` | diff --git a/helm/acapy-test/conf/dev/regression.yaml b/helm/acapy-test/conf/dev/regression.yaml new file mode 100644 index 0000000000..2a5ec9b0cd --- /dev/null +++ b/helm/acapy-test/conf/dev/regression.yaml @@ -0,0 +1,18 @@ +fullnameOverride: acapy-regression-test + +command: + - sh + - -c + - | + poetry run pytest \ + --junitxml="/mnt/test_output.xml" \ + 
--cov-report=term-missing:skip-covered \ + --cov | tee /mnt/test_coverage.txt + +env: + RUN_REGRESSION_TESTS: true + # `env.FAIL_ON_RECREATING_FIXTURES` -- Fail tests if they try to create new tenants, credentials, or connections + # - Set to `false` on a clean stack (first time running regression tests) + # - Set to `true` on a "dirty" stack (re-running regression tests) + # - This will cause tests to fail on creating new tenants, credentials, or connections as these should already exist + FAIL_ON_RECREATING_FIXTURES: false diff --git a/helm/acapy-test/conf/dev/values.yaml b/helm/acapy-test/conf/dev/values.yaml new file mode 100644 index 0000000000..cc54492b21 --- /dev/null +++ b/helm/acapy-test/conf/dev/values.yaml @@ -0,0 +1,43 @@ +command: + - sh + - -c + - | + poetry run pytest \ + -n 4 \ + --dist loadgroup \ + --junitxml="/mnt/test_output.xml" \ + --cov-report=term-missing:skip-covered \ + --cov | tee /mnt/test_coverage.txt + +env: + ACAPY_GOVERNANCE_AGENT_URL: http://governance-agent:3021 + ACAPY_TAILS_SERVER_BASE_URL: http://tails-server:6543 + ACAPY_TENANT_AGENT_URL: http://multitenant-agent:3021 + CLOUDAPI_URL: http://tenant-web:8000/tenant + GOVERNANCE_ACAPY_LABEL: Governance + GOVERNANCE_FASTAPI_ENDPOINT: http://governance-web:8000/governance + LEDGER_REGISTRATION_URL: http://ledger-browser:8000/register + REGISTRY_CREATION_TIMEOUT: 120 + SKIP_SET_PUBLIC_DID: true # skip set_public_did test to avoid conflict with two public dids, different seed + TENANT_ADMIN_FASTAPI_ENDPOINT: http://multitenant-web:8000/tenant-admin + TENANT_FASTAPI_ENDPOINT: http://tenant-web:8000/tenant + TRUST_REGISTRY_FASTAPI_ENDPOINT: http://public-web:8000/public + TRUST_REGISTRY_URL: http://trust-registry:8000 + WAYPOINT_URL: http://waypoint:3010 + +secretConfig: + ACAPY_GOVERNANCE_AGENT_API_KEY: adminApiKey + ACAPY_MULTITENANT_JWT_SECRET: verySecretMultitenantJwtSecret + ACAPY_TENANT_AGENT_API_KEY: adminApiKey + GOVERNANCE_ACAPY_API_KEY: adminApiKey + TENANT_ACAPY_API_KEY: 
adminApiKey + +persistence: + enabled: true + mountPath: /mnt + capacity: 1Gi + storageClassName: "" +# podSecurityContext: +# fsGroup: 65534 +# securityContext: +# runAsUser: 65534 diff --git a/helm/acapy-test/conf/local/values.yaml b/helm/acapy-test/conf/local/values.yaml index 46f57579a1..d2b627f844 100644 --- a/helm/acapy-test/conf/local/values.yaml +++ b/helm/acapy-test/conf/local/values.yaml @@ -4,7 +4,7 @@ command: - | poetry run pytest \ -n 2 \ - --dist loadfile \ + --dist loadgroup \ --junitxml="/mnt/test_output.xml" \ --cov-report=term-missing:skip-covered \ --cov | tee /mnt/test_coverage.txt diff --git a/helm/acapy-test/values.yaml b/helm/acapy-test/values.yaml index a8c86cd7b0..4fd91b96e7 100644 --- a/helm/acapy-test/values.yaml +++ b/helm/acapy-test/values.yaml @@ -4,7 +4,7 @@ fullnameOverride: acapy-test replicaCount: 1 image: - name: d-cloud/pytest + name: acapy-cloud/pytest registry: ghcr.io/didx-xyz pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. 
diff --git a/helm/ledger-nodes/conf/dev/values.yaml b/helm/ledger-nodes/conf/dev/values.yaml new file mode 100644 index 0000000000..0a4319b9c4 --- /dev/null +++ b/helm/ledger-nodes/conf/dev/values.yaml @@ -0,0 +1,26 @@ +fullnameOverride: ledger-nodes + +podAnnotations: {} +podLabels: + sidecar.istio.io/inject: "false" + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +persistence: + enabled: true + mountPath: /home/indy/ledger + capacity: 2Gi + storageClassName: "" + +updateStrategy: + type: OnDelete + +persistentVolumeClaimRetentionPolicy: + whenDeleted: Delete + whenScaled: Retain diff --git a/helm/ledger-nodes/values.yaml b/helm/ledger-nodes/values.yaml index 28122d1689..ab6419d296 100644 --- a/helm/ledger-nodes/values.yaml +++ b/helm/ledger-nodes/values.yaml @@ -4,7 +4,7 @@ replicaCount: 4 image: - name: d-cloud/ledger-nodes + name: acapy-cloud/ledger-nodes registry: ghcr.io/didx-xyz # Overrides the image tag whose default is the chart appVersion. tag: latest diff --git a/tilt/acapy-cloud/nats/.helmignore b/helm/nats/.helmignore similarity index 100% rename from tilt/acapy-cloud/nats/.helmignore rename to helm/nats/.helmignore diff --git a/tilt/acapy-cloud/nats/Chart.lock b/helm/nats/Chart.lock similarity index 100% rename from tilt/acapy-cloud/nats/Chart.lock rename to helm/nats/Chart.lock diff --git a/tilt/acapy-cloud/nats/Chart.yaml b/helm/nats/Chart.yaml similarity index 100% rename from tilt/acapy-cloud/nats/Chart.yaml rename to helm/nats/Chart.yaml diff --git a/tilt/acapy-cloud/nats/templates/job.yaml b/helm/nats/templates/job.yaml similarity index 96% rename from tilt/acapy-cloud/nats/templates/job.yaml rename to helm/nats/templates/job.yaml index fff3b04b19..058611533f 100644 --- a/tilt/acapy-cloud/nats/templates/job.yaml +++ b/helm/nats/templates/job.yaml @@ -17,6 +17,10 @@ spec: {{- with .Values.nats.podAnnotations }} {{- tpl (toYaml .) 
$ | nindent 8 }} {{- end }} + labels: + {{- with .Values.nats.podLabels }} + {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} spec: restartPolicy: {{ default "Never" .Values.postInstall.restartPolicy }} initContainers: diff --git a/tilt/acapy-cloud/nats/values.yaml b/helm/nats/values.yaml similarity index 99% rename from tilt/acapy-cloud/nats/values.yaml rename to helm/nats/values.yaml index 31c03d9673..c68259ff56 100644 --- a/tilt/acapy-cloud/nats/values.yaml +++ b/helm/nats/values.yaml @@ -163,7 +163,7 @@ nats: ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner ## - storageClass: standard + storageClass: "" ## @param persistence.size PVC Storage Request for NATS data volume ## size: 10Gi diff --git a/resources/connect-processors/cloud/streams/cloud-events.yaml b/resources/connect-processors/cloud/streams/cloud-events.yaml index 259bcaee6e..cf4b0bb260 100644 --- a/resources/connect-processors/cloud/streams/cloud-events.yaml +++ b/resources/connect-processors/cloud/streams/cloud-events.yaml @@ -1,5 +1,5 @@ input: - label: "acapy_events" + label: acapy_events nats_jetstream: urls: - ${NATS_URL:nats://nats:4222} @@ -13,7 +13,7 @@ input: user_credentials_file: ${NATS_AUTH_CREDENTIALS_FILE:""} pipeline: - threads: ${CLOUD_EVENTS_CREATE_PIPELINE_THREADS:-1} + threads: ${CLOUD_EVENTS_CREATE_PIPELINE_THREADS:-1} processors: - log: level: DEBUG diff --git a/resources/connect-processors/cloud/streams/state-monitoring.yaml b/resources/connect-processors/cloud/streams/state-monitoring.yaml index bd8a279307..8429536a5d 100644 --- a/resources/connect-processors/cloud/streams/state-monitoring.yaml +++ b/resources/connect-processors/cloud/streams/state-monitoring.yaml @@ -1,5 +1,5 @@ input: - label: "cloud_events_acapy" + label: cloud_events_acapy nats_jetstream: urls: - ${NATS_URL:nats://nats:4222} diff --git a/tilt/acapy-cloud/Tiltfile b/tilt/acapy-cloud/Tiltfile index af76315a55..86ae8950b7 100644 --- 
a/tilt/acapy-cloud/Tiltfile +++ b/tilt/acapy-cloud/Tiltfile @@ -3,7 +3,7 @@ load("ext://color", "color") load("ext://helm_resource", "helm_resource", "helm_repo") # https://github.com/bitnami/charts/tree/main/bitnami/postgresql-ha -postgres_version = "15.1.6" +postgres_version = "15.1.7" # https://github.com/rowanruseler/helm-charts/tree/main/charts/pgadmin4 pgadmin_version = "1.34.0" @@ -93,7 +93,7 @@ def setup_pgadmin(namespace, ingress_domain): def setup_nats(namespace): print(color.green("Installing NATS...")) - chart_dir = "./tilt/acapy-cloud/nats" + chart_dir = "./helm/nats" values_file = chart_dir + "/values.yaml" ## Setup NATS @@ -106,7 +106,7 @@ def setup_nats(namespace): ] local_resource( name="build-nats", - cmd="cd " + chart_dir + " && helm dep build --skip-refresh", + cmd="helm dep build --skip-refresh " + chart_dir, labels=["10-helm-repos"], allow_parallel=True, ) @@ -145,7 +145,7 @@ def setup_redpanda_connect_cloud(namespace): resource_name = "connect-cloud" project_root = os.getcwd() current_dir = os.path.abspath(os.path.dirname(__file__)) - values_file = current_dir + "/connect-cloud.yaml" + values_file = project_root + "/helm/acapy-cloud/conf/local/connect-cloud.yaml" helm_repo( name="redpanda", @@ -190,7 +190,8 @@ def setup_redpanda_connect_cloud(namespace): resource_deps=["cloudapi-ns"], ) - # Create helm release + ## Create helm release + # https://github.com/redpanda-data/helm-charts/tree/main/charts/connect helm_resource( name=resource_name, chart="redpanda/connect", @@ -202,11 +203,9 @@ def setup_redpanda_connect_cloud(namespace): "--values", values_file, "--set", - "logger.static_fields.@service=" + resource_name, - "--set", - "streams.enabled=true", - "--set", "streams.streamsConfigMap=" + resource_name + "-streams", + "--version", + "3.0.3", ], labels=["03-streaming"], resource_deps=[ @@ -353,7 +352,7 @@ def setup_ledger(namespace, build_enabled, ingress_domain): if build_enabled: print(color.green("Docker Build of ledger-nodes for 
compose deployment...")) custom_build( - registry + "/didx-xyz/d-cloud/ledger-nodes", + registry + "/didx-xyz/acapy-cloud/ledger-nodes", "docker build --network=host -t $EXPECTED_REF " + "--platform=linux/amd64 " + "--file=" @@ -450,7 +449,7 @@ def setup_cloudapi(build_enabled, expose): ), ], "image": { - "dockerfile": "./dockerfiles/fastapi/Dockerfile", + "dockerfile": "./dockerfiles/app/Dockerfile", "live_update": add_live_update( [ ("./app", "/app"), @@ -485,7 +484,7 @@ def setup_cloudapi(build_enabled, expose): ), ], "image": { - "dockerfile": "./dockerfiles/fastapi/Dockerfile", + "dockerfile": "./dockerfiles/app/Dockerfile", "live_update": add_live_update( [ ("./app", "/app"), @@ -504,7 +503,7 @@ def setup_cloudapi(build_enabled, expose): ), ], "image": { - "dockerfile": "./dockerfiles/fastapi/Dockerfile", + "dockerfile": "./dockerfiles/app/Dockerfile", "live_update": add_live_update( [ ("./app", "/app"), @@ -523,7 +522,7 @@ def setup_cloudapi(build_enabled, expose): ), ], "image": { - "dockerfile": "./dockerfiles/fastapi/Dockerfile", + "dockerfile": "./dockerfiles/app/Dockerfile", "live_update": add_live_update( [ ("./app", "/app"), diff --git a/tilt/acapy-cloud/connect-cloud.yaml b/tilt/acapy-cloud/connect-cloud.yaml deleted file mode 100644 index a1069105dd..0000000000 --- a/tilt/acapy-cloud/connect-cloud.yaml +++ /dev/null @@ -1,23 +0,0 @@ -image: - tag: 4 -updateStrategy: - type: Recreate - -logger: - level: info - # format: json - add_timestamp: true - -http: - debug_endpoints: true - -streams: - enabled: true - -deployment: - podLabels: - sidecar.istio.io/inject: "false" - -env: - - name: GOVERNANCE_ACAPY_LABEL - value: Governance diff --git a/tilt/acapy-cloud/nats/charts/.gitignore b/tilt/acapy-cloud/nats/charts/.gitignore deleted file mode 100644 index c20f8835f4..0000000000 --- a/tilt/acapy-cloud/nats/charts/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -# Ignore everything -** -# Except this `.gitignore` -!.gitignore diff --git 
a/tilt/acapy-cloud/pgadmin.yaml b/tilt/acapy-cloud/pgadmin.yaml index 38cd70a07d..b69f68cf1c 100644 --- a/tilt/acapy-cloud/pgadmin.yaml +++ b/tilt/acapy-cloud/pgadmin.yaml @@ -30,8 +30,9 @@ ingress: persistentVolume: enabled: false -podAnnotations: - sidecar.istio.io/proxyCPU: 10m +podAnnotations: {} +podLabels: + sidecar.istio.io/inject: "false" readinessProbe: initialDelaySeconds: 1 diff --git a/tilt/acapy-cloud/postgres.yaml b/tilt/acapy-cloud/postgres.yaml index 40ea7e311d..f902104f62 100644 --- a/tilt/acapy-cloud/postgres.yaml +++ b/tilt/acapy-cloud/postgres.yaml @@ -40,6 +40,7 @@ pgpool: passwords: trust-registry;governance;multitenant;mediator adminUsername: admin adminPassword: admin + maxPool: 1000 # https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 resourcesPreset: none tls: diff --git a/tilt/metrics/Tiltfile b/tilt/metrics/Tiltfile index 84526820a9..c5cd2a2b1c 100644 --- a/tilt/metrics/Tiltfile +++ b/tilt/metrics/Tiltfile @@ -22,7 +22,7 @@ def setup_metrics_server(): "--set", "extraArgs[1]=--kubelet-insecure-tls", "--version", - "7.3.3", + "7.3.4", "--wait", ], labels=["30-monitoring"],