diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml
new file mode 100644
index 000000000..f6082ac84
--- /dev/null
+++ b/.github/actionlint.yaml
@@ -0,0 +1,4 @@
+self-hosted-runner:
+  labels:
+    - self-hosted
+    - tiobe
diff --git a/.github/release.yaml b/.github/release.yaml
new file mode 100644
index 000000000..9ef36aca6
--- /dev/null
+++ b/.github/release.yaml
@@ -0,0 +1,8 @@
+changelog:
+  categories:
+    - title: Features
+      labels:
+        - enhancement
+    - title: Bug fixes
+      labels:
+        - bug
diff --git a/.github/workflows/check_libs.yaml b/.github/workflows/check_libs.yaml
new file mode 100644
index 000000000..77f84e890
--- /dev/null
+++ b/.github/workflows/check_libs.yaml
@@ -0,0 +1,33 @@
+# Copyright 2025 Canonical Ltd.
+# See LICENSE file for licensing details.
+name: Check libs
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+on:
+  pull_request:
+    paths-ignore:
+      - '.gitignore'
+      - '.jujuignore'
+      - 'LICENSE'
+      - '**.md'
+      - 'renovate.json'
+
+jobs:
+  lib-check:
+    name: Check libraries
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    if: ${{ github.event.pull_request.head.repo.full_name == 'canonical/mysql-k8s-operator' }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Check libs
+        uses: canonical/charming-actions/check-libraries@2.7.0
+        with:
+          credentials: "${{ secrets.CHARMHUB_TOKEN }}"
+          github-token: "${{ secrets.GITHUB_TOKEN }}"
diff --git a/.github/workflows/check_pr.yaml b/.github/workflows/check_pr.yaml
new file mode 100644
index 000000000..e3bf9febe
--- /dev/null
+++ b/.github/workflows/check_pr.yaml
@@ -0,0 +1,18 @@
+# Copyright 2025 Canonical Ltd.
+# See LICENSE file for licensing details.
+name: Check pull request
+
+on:
+  pull_request:
+    types:
+      - opened
+      - labeled
+      - unlabeled
+      - edited
+    branches:
+      - main
+
+jobs:
+  check-pr:
+    name: Check pull request
+    uses: canonical/data-platform-workflows/.github/workflows/check_charm_pr.yaml@v30.1.3
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index f3797952b..66231b8a2 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -9,14 +9,18 @@ concurrency:
 on:
   pull_request:
   schedule:
-    - cron: '53 0 * * *' # Daily at 00:53 UTC
+    - cron: '53 0 * * *'  # Daily at 00:53 UTC
   # Triggered on push to branch "main" by .github/workflows/release.yaml
   workflow_call:
+    outputs:
+      artifact-prefix:
+        description: build_charm.yaml `artifact-prefix` output
+        value: ${{ jobs.build.outputs.artifact-prefix }}
 
 jobs:
   lint:
     name: Lint
-    uses: canonical/data-platform-workflows/.github/workflows/lint.yaml@v21.0.1
+    uses: canonical/data-platform-workflows/.github/workflows/lint.yaml@v30.1.3
 
   unit-test:
     name: Unit test charm
@@ -32,79 +36,21 @@ jobs:
       - name: Run tests
         run: tox run -e unit
       - name: Upload Coverage to Codecov
-        uses: codecov/codecov-action@v4
-
-  lib-check:
-    name: Check libraries
-    runs-on: ubuntu-latest
-    timeout-minutes: 5
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-      - run: |
-          # Workaround for https://github.com/canonical/charmcraft/issues/1389#issuecomment-1880921728
-          touch requirements.txt
-      - name: Check libs
-        uses: canonical/charming-actions/check-libraries@2.6.2
-        with:
-          credentials: ${{ secrets.CHARMHUB_TOKEN }}
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          use-labels: false
-          fail-build: ${{ github.event_name == 'pull_request' }}
+        uses: codecov/codecov-action@v5
 
   build:
     name: Build charm
-    uses: canonical/data-platform-workflows/.github/workflows/build_charm.yaml@v21.0.1
-    with:
-      cache: true
+    uses: canonical/data-platform-workflows/.github/workflows/build_charm.yaml@v30.1.3
 
   integration-test:
-    strategy:
-      fail-fast: false
-      matrix:
-        juju:
-          - agent: 2.9.50  # renovate: juju-agent-pin-minor
-            libjuju: ^2
-            allure_on_amd64: false
-          - agent: 3.4.5  # renovate: juju-agent-pin-minor
-            allure_on_amd64: true
-          - snap_channel: 3.6/beta
-            allure_on_amd64: false
-        architecture:
-          - amd64
-        include:
-          - juju:
-              agent: 3.4.5  # renovate: juju-agent-pin-minor
-              allure_on_amd64: true
-            architecture: arm64
-          - juju:
-              snap_channel: 3.6/beta
-              allure_on_amd64: false
-            architecture: arm64
-    name: Integration | ${{ matrix.juju.agent || matrix.juju.snap_channel }} | ${{ matrix.architecture }}
+    name: Integration test charm
     needs:
       - lint
       - unit-test
       - build
-    uses: canonical/data-platform-workflows/.github/workflows/integration_test_charm.yaml@v21.0.1
+    uses: ./.github/workflows/integration_test.yaml
     with:
       artifact-prefix: ${{ needs.build.outputs.artifact-prefix }}
-      architecture: ${{ matrix.architecture }}
-      cloud: microk8s
-      microk8s-snap-channel: 1.31-strict/stable  # renovate: latest microk8s
-      juju-agent-version: ${{ matrix.juju.agent }}
-      juju-snap-channel: ${{ matrix.juju.snap_channel }}
-      libjuju-version-constraint: ${{ matrix.juju.libjuju }}
-      _beta_allure_report: ${{ matrix.juju.allure_on_amd64 && matrix.architecture == 'amd64' }}
-    secrets:
-      # GitHub appears to redact each line of a multi-line secret
-      # Avoid putting `{` or `}` on a line by itself so that it doesn't get redacted in logs
-      integration-test: |
-        { "AWS_ACCESS_KEY": "${{ secrets.AWS_ACCESS_KEY }}",
-          "AWS_SECRET_KEY": "${{ secrets.AWS_SECRET_KEY }}",
-          "GCP_ACCESS_KEY": "${{ secrets.GCP_ACCESS_KEY }}",
-          "GCP_SECRET_KEY": "${{ secrets.GCP_SECRET_KEY }}", }
+    secrets: inherit
     permissions:
-      contents: write  # Needed for Allure Report beta
+      contents: write  # Needed for Allure Report
diff --git a/.github/workflows/integration_test.yaml b/.github/workflows/integration_test.yaml
new file mode 100644
index 000000000..a88dcfbbd
--- /dev/null
+++ b/.github/workflows/integration_test.yaml
@@ -0,0 +1,313 @@
+on:
+  workflow_call:
+    inputs:
+      artifact-prefix:
+        description: |
+          Prefix for charm package GitHub artifact(s)
+          
+          Use canonical/data-platform-workflows build_charm.yaml to build the charm(s)
+        required: true
+        type: string
+
+jobs:
+  collect-integration-tests:
+    name: Collect integration test spread jobs
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Set up environment
+        run: |
+          sudo snap install charmcraft --classic
+          pipx install tox poetry
+      - name: Collect spread jobs
+        id: collect-jobs
+        shell: python
+        run: |
+          import json
+          import os
+          import subprocess
+
+          spread_jobs = (
+              subprocess.run(
+                  ["charmcraft", "test", "--list", "github-ci"], capture_output=True, check=True, text=True
+              )
+              .stdout.strip()
+              .split("\n")
+          )
+          jobs = []
+          for job in spread_jobs:
+              # Example `job`: "github-ci:ubuntu-24.04:tests/spread/test_charm.py:juju36"
+              _, runner, task, variant = job.split(":")
+              # Example: "test_charm.py"
+              task = task.removeprefix("tests/spread/")
+              if runner.endswith("-arm"):
+                  architecture = "arm64"
+              else:
+                  architecture = "amd64"
+              # Example: "test_charm.py:juju36 | amd64"
+              name = f"{task}:{variant} | {architecture}"
+              # ":" character not valid in GitHub Actions artifact
+              name_in_artifact = f"{task}-{variant}-{architecture}"
+              jobs.append({
+                  "spread_job": job,
+                  "name": name,
+                  "name_in_artifact": name_in_artifact,
+                  "runner": runner,
+              })
+          output = f"jobs={json.dumps(jobs)}"
+          print(output)
+          with open(os.environ["GITHUB_OUTPUT"], "a") as file:
+              file.write(output)
+      - name: Generate Allure default test results
+        if: ${{ github.event_name == 'schedule' && github.run_attempt == '1' }}
+        run: tox run -e integration -- tests/integration --allure-default-dir=allure-default-results
+      - name: Upload Allure default results
+        # Default test results in case the integration tests time out or runner set up fails
+        # (So that Allure report will show "unknown"/"failed" test result, instead of omitting the test)
+        if: ${{ github.event_name == 'schedule' && github.run_attempt == '1' }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: allure-default-results-integration-test
+          path: allure-default-results/
+          if-no-files-found: error
+    outputs:
+      jobs: ${{ steps.collect-jobs.outputs.jobs }}
+
+  integration-test:
+    strategy:
+      fail-fast: false
+      matrix:
+        job: ${{ fromJSON(needs.collect-integration-tests.outputs.jobs) }}
+    name: ${{ matrix.job.name }}
+    needs:
+      - collect-integration-tests
+    runs-on: ${{ matrix.job.runner }}
+    timeout-minutes: 217  # Sum of steps `timeout-minutes` + 5
+    steps:
+      - name: Free up disk space
+        timeout-minutes: 1
+        run: |
+          printf '\nDisk usage before cleanup\n'
+          df --human-readable
+          # Based on https://github.com/actions/runner-images/issues/2840#issuecomment-790492173
+          rm -r /opt/hostedtoolcache/
+          printf '\nDisk usage after cleanup\n'
+          df --human-readable
+      - name: Checkout
+        timeout-minutes: 3
+        uses: actions/checkout@v4
+      - name: Set up environment
+        timeout-minutes: 5
+        run: sudo snap install charmcraft --classic
+      # TODO: remove when https://github.com/canonical/charmcraft/issues/2105 and
+      # https://github.com/canonical/charmcraft/issues/2130 fixed
+      - run: |
+          sudo snap install go --classic
+          go install github.com/snapcore/spread/cmd/spread@latest
+      - name: Download packed charm(s)
+        timeout-minutes: 5
+        uses: actions/download-artifact@v4
+        with:
+          pattern: ${{ inputs.artifact-prefix }}-*
+          merge-multiple: true
+      - name: Run spread job
+        timeout-minutes: 180
+        id: spread
+        # TODO: replace with `charmcraft test` when
+        # https://github.com/canonical/charmcraft/issues/2105 and
+        # https://github.com/canonical/charmcraft/issues/2130 fixed
+        run: ~/go/bin/spread -vv -artifacts=artifacts '${{ matrix.job.spread_job }}'
+        env:
+          AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }}
+          AWS_SECRET_KEY: ${{ secrets.AWS_SECRET_KEY }}
+          GCP_ACCESS_KEY: ${{ secrets.GCP_ACCESS_KEY }}
+          GCP_SECRET_KEY: ${{ secrets.GCP_SECRET_KEY }}
+      - name: Upload Allure results
+        timeout-minutes: 3
+        # Only upload results from one spread system & one spread variant
+        # Allure can only process one result per pytest test ID. If parameterization is done via
+        # spread instead of pytest, there will be overlapping pytest test IDs.
+        if: ${{ (success() || (failure() && steps.spread.outcome == 'failure')) && startsWith(matrix.job.spread_job, 'github-ci:ubuntu-24.04:') && endsWith(matrix.job.spread_job, ':juju36') && github.event_name == 'schedule' && github.run_attempt == '1' }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: allure-results-integration-test-${{ matrix.job.name_in_artifact }}
+          path: artifacts/${{ matrix.job.spread_job }}/allure-results/
+          if-no-files-found: error
+      - timeout-minutes: 1
+        if: ${{ success() || (failure() && steps.spread.outcome == 'failure') }}
+        run: snap list
+      - name: Select model
+        timeout-minutes: 1
+        # `!contains(matrix.job.spread_job, 'juju29')` workaround for juju 2 error:
+        # "ERROR cannot acquire lock file to read controller concierge-microk8s: unable to open
+        # /tmp/juju-store-lock-3635383939333230: permission denied"
+        # Unable to workaround error with `sudo rm /tmp/juju-*`
+        if: ${{ !contains(matrix.job.spread_job, 'juju29') && (success() || (failure() && steps.spread.outcome == 'failure')) }}
+        id: juju-switch
+        run: |
+          # sudo needed since spread runs scripts as root
+          # "testing" is default model created by concierge
+          sudo juju switch testing
+          mkdir ~/logs/
+      - name: juju status
+        timeout-minutes: 1
+        if: ${{ !contains(matrix.job.spread_job, 'juju29') && (success() || (failure() && steps.spread.outcome == 'failure')) }}
+        run: sudo juju status --color --relations | tee ~/logs/juju-status.txt
+      - name: juju debug-log
+        timeout-minutes: 3
+        if: ${{ !contains(matrix.job.spread_job, 'juju29') && (success() || (failure() && steps.spread.outcome == 'failure')) }}
+        run: sudo juju debug-log --color --replay --no-tail | tee ~/logs/juju-debug-log.txt
+      - name: jhack tail
+        timeout-minutes: 3
+        if: ${{ !contains(matrix.job.spread_job, 'juju29') && (success() || (failure() && steps.spread.outcome == 'failure')) }}
+        run: sudo jhack tail --printer raw --replay --no-watch | tee ~/logs/jhack-tail.txt
+      - name: Upload logs
+        timeout-minutes: 5
+        if: ${{ !contains(matrix.job.spread_job, 'juju29') && (success() || (failure() && steps.spread.outcome == 'failure')) }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: logs-integration-test-${{ matrix.job.name_in_artifact }}
+          path: ~/logs/
+          if-no-files-found: error
+      - name: Disk usage
+        timeout-minutes: 1
+        if: ${{ success() || (failure() && steps.spread.outcome == 'failure') }}
+        run: df --human-readable
+
+  allure-report:
+    # TODO future improvement: use concurrency group for job
+    name: Publish Allure report
+    if: ${{ !cancelled() && github.event_name == 'schedule' && github.run_attempt == '1' }}
+    needs:
+      - integration-test
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    steps:
+      - name: Download Allure
+        # Following instructions from https://allurereport.org/docs/install-for-linux/#install-from-a-deb-package
+        run: gh release download --repo allure-framework/allure2 --pattern 'allure_*.deb'
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Install Allure
+        run: |
+          sudo apt-get update
+          sudo apt-get install ./allure_*.deb -y
+      # For first run, manually create branch with no history
+      # (e.g.
+      # git checkout --orphan gh-pages-beta
+      # git rm -rf .
+      # touch .nojekyll
+      # git add .nojekyll
+      # git commit -m "Initial commit"
+      # git push origin gh-pages-beta
+      # )
+      - name: Checkout GitHub pages branch
+        uses: actions/checkout@v4
+        with:
+          ref: gh-pages-beta
+          path: repo/
+      - name: Download default test results
+        # Default test results in case the integration tests time out or runner set up fails
+        # (So that Allure report will show "unknown"/"failed" test result, instead of omitting the test)
+        uses: actions/download-artifact@v4
+        with:
+          path: allure-default-results/
+          name: allure-default-results-integration-test
+      - name: Download test results
+        uses: actions/download-artifact@v4
+        with:
+          path: allure-results/
+          pattern: allure-results-integration-test-*
+          merge-multiple: true
+      - name: Combine Allure default results & actual results
+        # For every test: if actual result available, use that. Otherwise, use default result
+        # So that, if actual result not available, Allure report will show "unknown"/"failed" test result
+        # instead of omitting the test
+        shell: python
+        run: |
+          import dataclasses
+          import json
+          import pathlib
+
+
+          @dataclasses.dataclass(frozen=True)
+          class Result:
+              test_case_id: str
+              path: pathlib.Path
+
+              def __eq__(self, other):
+                  if not isinstance(other, type(self)):
+                      return False
+                  return self.test_case_id == other.test_case_id
+
+
+          actual_results = pathlib.Path("allure-results")
+          default_results = pathlib.Path("allure-default-results")
+
+          results: dict[pathlib.Path, set[Result]] = {
+              actual_results: set(),
+              default_results: set(),
+          }
+          for directory, results_ in results.items():
+              for path in directory.glob("*-result.json"):
+                  with path.open("r") as file:
+                      id_ = json.load(file)["testCaseId"]
+                  results_.add(Result(id_, path))
+
+          actual_results.mkdir(exist_ok=True)
+
+          missing_results = results[default_results] - results[actual_results]
+          for default_result in missing_results:
+              # Move to `actual_results` directory
+              default_result.path.rename(actual_results / default_result.path.name)
+      - name: Load test report history
+        run: |
+          if [[ -d repo/_latest/history/ ]]
+          then
+            echo 'Loading history'
+            cp -r repo/_latest/history/ allure-results/
+          fi
+      - name: Create executor.json
+        shell: python
+        run: |
+          # Reverse engineered from https://github.com/simple-elf/allure-report-action/blob/eca283b643d577c69b8e4f048dd6cd8eb8457cfd/entrypoint.sh
+          import json
+
+          DATA = {
+              "name": "GitHub Actions",
+              "type": "github",
+              "buildOrder": ${{ github.run_number }},  # TODO future improvement: use run ID
+              "buildName": "Run ${{ github.run_id }}",
+              "buildUrl": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
+              "reportUrl": "../${{ github.run_number }}/",
+          }
+          with open("allure-results/executor.json", "w") as file:
+              json.dump(DATA, file)
+      - name: Generate Allure report
+        run: allure generate
+      - name: Create index.html
+        shell: python
+        run: |
+          DATA = f"""<!DOCTYPE html>
+          <meta charset="utf-8">
+          <meta http-equiv="cache-control" content="no-cache">
+          <meta http-equiv="refresh" content="0; url=${{ github.run_number }}">
+          """
+          with open("repo/index.html", "w") as file:
+              file.write(DATA)
+      - name: Update GitHub pages branch
+        working-directory: repo/
+        # TODO future improvement: commit message
+        run: |
+          mkdir '${{ github.run_number }}'
+          rm -f _latest
+          ln -s '${{ github.run_number }}' _latest
+          cp -r ../allure-report/. _latest/
+          git add .
+          git config user.name "GitHub Actions"
+          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
+          git commit -m "Allure report ${{ github.run_number }}"
+          # Uses token set in checkout step
+          git push origin gh-pages-beta
diff --git a/.github/workflows/promote.yaml b/.github/workflows/promote.yaml
new file mode 100644
index 000000000..b71206f14
--- /dev/null
+++ b/.github/workflows/promote.yaml
@@ -0,0 +1,36 @@
+# Copyright 2025 Canonical Ltd.
+# See LICENSE file for licensing details.
+name: Promote charm
+
+on:
+  workflow_dispatch:
+    inputs:
+      from-risk:
+        description: Promote from this Charmhub risk
+        required: true
+        type: choice
+        options:
+          - edge
+          - beta
+          - candidate
+      to-risk:
+        description: Promote to this Charmhub risk
+        required: true
+        type: choice
+        options:
+          - beta
+          - candidate
+          - stable
+
+jobs:
+  promote:
+    name: Promote charm
+    uses: canonical/data-platform-workflows/.github/workflows/_promote_charm.yaml@v30.1.3
+    with:
+      track: '8.0'
+      from-risk: ${{ inputs.from-risk }}
+      to-risk: ${{ inputs.to-risk }}
+    secrets:
+      charmhub-token: ${{ secrets.CHARMHUB_TOKEN }}
+    permissions:
+      contents: write  # Needed to edit GitHub releases
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 658086613..0a4230ced 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -21,7 +21,7 @@ jobs:
           # Workaround for https://github.com/canonical/charmcraft/issues/1389#issuecomment-1880921728
           touch requirements.txt
       - name: Check libs
-        uses: canonical/charming-actions/check-libraries@2.6.2
+        uses: canonical/charming-actions/check-libraries@2.6.3
         with:
           credentials: ${{ secrets.CHARMHUB_TOKEN }} # FIXME: current token will expire in 2023-07-04
           github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -32,23 +32,18 @@ jobs:
     uses: ./.github/workflows/ci.yaml
     secrets: inherit
     permissions:
-      contents: write  # Needed for Allure Report beta
-
-  build:
-    name: Build charm
-    uses: canonical/data-platform-workflows/.github/workflows/build_charm.yaml@v21.0.1
+      contents: write  # Needed for Allure Report
 
   release:
     name: Release charm
     needs:
       - lib-check
       - ci-tests
-      - build
-    uses: canonical/data-platform-workflows/.github/workflows/release_charm.yaml@v21.0.1
+    uses: canonical/data-platform-workflows/.github/workflows/release_charm.yaml@v30.1.3
     with:
       channel: 8.0/edge
-      artifact-prefix: ${{ needs.build.outputs.artifact-prefix }}
+      artifact-prefix: ${{ needs.ci-tests.outputs.artifact-prefix }}
     secrets:
       charmhub-token: ${{ secrets.CHARMHUB_TOKEN }}
     permissions:
-      contents: write  # Needed to create GitHub release
+      contents: write  # Needed to create git tags
diff --git a/.github/workflows/sync_docs.yaml b/.github/workflows/sync_docs.yaml
index a75fbee87..116f79a56 100644
--- a/.github/workflows/sync_docs.yaml
+++ b/.github/workflows/sync_docs.yaml
@@ -10,7 +10,7 @@ on:
 jobs:
   sync-docs:
     name: Sync docs from Discourse
-    uses: canonical/data-platform-workflows/.github/workflows/sync_docs.yaml@v21.0.1
+    uses: canonical/data-platform-workflows/.github/workflows/sync_docs.yaml@v30.1.3
     with:
       reviewers: a-velasco
     permissions:
diff --git a/.github/workflows/tiobe_scan.yaml b/.github/workflows/tiobe_scan.yaml
new file mode 100644
index 000000000..ac0a81aea
--- /dev/null
+++ b/.github/workflows/tiobe_scan.yaml
@@ -0,0 +1,53 @@
+# Copyright 2025 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+# Usage documentation: static-code-analysis.md
+
+name: TICS run self-hosted test (github-action)
+
+on:
+  schedule:
+    - cron: "0 2 * * 6" # Every Saturday 2:00 AM UTC
+  workflow_dispatch:
+
+
+jobs:
+  tiobe-scan:
+    name: Tiobe scan
+    runs-on: [self-hosted, tiobe]
+    timeout-minutes: 60
+    steps:
+      - name: Checkout the project
+        uses: actions/checkout@v4
+
+      - name: Install system dependencies
+        run: sudo apt-get update && sudo apt-get install -y python3-venv
+
+      - name: Install pipx
+        run: python3 -m pip install --user pipx && python3 -m pipx ensurepath
+
+      - name: Add pipx to PATH
+        run: echo "${HOME}/.local/bin" >> "${GITHUB_PATH}"
+
+      - name: Install tox and poetry using pipx
+        run: |
+          pipx install tox
+          pipx install poetry
+
+      - name: Run tox tests to create coverage.xml
+        run: tox run -e unit
+
+      - name: Move results to necessary folder for TICS
+        run: |
+          mkdir -p .cover
+          mv coverage.xml .cover/cobertura.xml
+
+      - name: Run TICS analysis with github-action
+        uses: tiobe/tics-github-action@v3
+        with:
+          mode: qserver
+          project: mysql-k8s-operator
+          branchdir: .
+          viewerUrl: https://canonical.tiobe.com/tiobeweb/TICS/api/cfg?name=default
+          ticsAuthToken: ${{ secrets.TICSAUTHTOKEN }}
+          installTics: true
diff --git a/.gitignore b/.gitignore
index 74babdd13..051caa62f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -144,3 +144,5 @@ Makefile
 
 # local pyright settings
 pyrightconfig.json
+
+wt*/
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index f53177389..d626f88ab 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -41,7 +41,7 @@ poetry install
 tox run -e format        # update your code according to linting rules
 tox run -e lint          # code style
 tox run -e unit          # unit tests
-tox run -e integration   # integration tests
+charmcraft test lxd-vm:  # integration tests
 tox                      # runs 'lint' and 'unit' environments
 ```
 
@@ -50,7 +50,7 @@ tox                      # runs 'lint' and 'unit' environments
 Build the charm in this git repository using:
 
 ```shell
-tox run -e build-dev
+charmcraftcache pack
 ```
 
 ### Deploy
diff --git a/charmcraft.yaml b/charmcraft.yaml
index ee6ac68fa..26158cf2c 100644
--- a/charmcraft.yaml
+++ b/charmcraft.yaml
@@ -1,32 +1,89 @@
-# Copyright 2021 Canonical Ltd.
+# Copyright 2022 Canonical Ltd.
 # See LICENSE file for licensing details.
 
 type: charm
-bases:
-  - name: ubuntu
-    channel: "22.04"
-    architectures: [amd64]
-  - name: ubuntu
-    channel: "22.04"
-    architectures: [arm64]
+platforms:
+  ubuntu@22.04:amd64:
+  ubuntu@22.04:arm64:
+# Files implicitly created by charmcraft without a part:
+# - dispatch (https://github.com/canonical/charmcraft/pull/1898)
+# - manifest.yaml
+#   (https://github.com/canonical/charmcraft/blob/9ff19c328e23b50cc06f04e8a5ad4835740badf4/charmcraft/services/package.py#L259)
+# Files implicitly copied/"staged" by charmcraft without a part:
+# - actions.yaml, config.yaml, metadata.yaml
+#   (https://github.com/canonical/charmcraft/blob/9ff19c328e23b50cc06f04e8a5ad4835740badf4/charmcraft/services/package.py#L290-L293
+#   https://github.com/canonical/charmcraft/blob/9ff19c328e23b50cc06f04e8a5ad4835740badf4/charmcraft/services/package.py#L156-L157)
 parts:
+  # "poetry-deps" part name is a magic constant
+  # https://github.com/canonical/craft-parts/pull/901
+  poetry-deps:
+    plugin: nil
+    build-packages:
+      - curl
+    override-build: |
+      # Use environment variable instead of `--break-system-packages` to avoid failing on older
+      # versions of pip that do not recognize `--break-system-packages`
+      # `--user` needed (in addition to `--break-system-packages`) for Ubuntu >=24.04
+      PIP_BREAK_SYSTEM_PACKAGES=true python3 -m pip install --user --upgrade pip==24.3.1  # renovate: charmcraft-pip-latest
+
+      # Use uv to install poetry so that a newer version of Python can be installed if needed by poetry
+      curl --proto '=https' --tlsv1.2 -LsSf https://github.com/astral-sh/uv/releases/download/0.5.15/uv-installer.sh | sh  # renovate: charmcraft-uv-latest
+      # poetry 2.0.0 requires Python >=3.9
+      if ! "$HOME/.local/bin/uv" python find '>=3.9'
+      then
+        # Use first Python version that is >=3.9 and available in an Ubuntu LTS
+        # (to reduce the number of Python versions we use)
+        "$HOME/.local/bin/uv" python install 3.10.12  # renovate: charmcraft-python-ubuntu-22.04
+      fi
+      "$HOME/.local/bin/uv" tool install --no-python-downloads --python '>=3.9' poetry==2.0.0 --with poetry-plugin-export==1.8.0  # renovate: charmcraft-poetry-latest
+
+      ln -sf "$HOME/.local/bin/poetry" /usr/local/bin/poetry
+  # "charm-poetry" part name is arbitrary; use for consistency
+  # Avoid using "charm" part name since that has special meaning to charmcraft
+  charm-poetry:
+    # By default, the `poetry` plugin creates/stages these directories:
+    # - lib, src
+    #   (https://github.com/canonical/charmcraft/blob/9ff19c328e23b50cc06f04e8a5ad4835740badf4/charmcraft/parts/plugins/_poetry.py#L76-L78)
+    # - venv
+    #   (https://github.com/canonical/charmcraft/blob/9ff19c328e23b50cc06f04e8a5ad4835740badf4/charmcraft/parts/plugins/_poetry.py#L95
+    #   https://github.com/canonical/craft-parts/blob/afb0d652eb330b6aaad4f40fbd6e5357d358de47/craft_parts/plugins/base.py#L270)
+    plugin: poetry
+    source: .
+    after:
+      - poetry-deps
+    poetry-export-extra-args: ['--only', 'main,charm-libs']
+    build-packages:
+      - libffi-dev  # Needed to build Python dependencies with Rust from source
+      - libssl-dev  # Needed to build Python dependencies with Rust from source
+      - pkg-config  # Needed to build Python dependencies with Rust from source
+    override-build: |
+      # Workaround for https://github.com/canonical/charmcraft/issues/2068
+      # rustup used to install rustc and cargo, which are needed to build Python dependencies with Rust from source
+      if [[ "$CRAFT_PLATFORM" == ubuntu@20.04:* || "$CRAFT_PLATFORM" == ubuntu@22.04:* ]]
+      then
+        snap install rustup --classic
+      else
+        apt-get install rustup -y
+      fi
+
+      # If Ubuntu version < 24.04, rustup was installed from snap instead of from the Ubuntu
+      # archive—which means the rustup version could be updated at any time. Print rustup version
+      # to build log to make changes to the snap's rustup version easier to track
+      rustup --version
+
+      # rpds-py (Python package) >=0.19.0 requires rustc >=1.76, which is not available in the
+      # Ubuntu 22.04 archive. Install rustc and cargo using rustup instead of the Ubuntu archive
+      rustup set profile minimal
+      rustup default 1.83.0  # renovate: charmcraft-rust-latest
+
+      craftctl default
+      # Include requirements.txt in *.charm artifact for easier debugging
+      cp requirements.txt "$CRAFT_PART_INSTALL/requirements.txt"
+  # "files" part name is arbitrary; use for consistency
   files:
     plugin: dump
     source: .
-    prime:
+    stage:
+      - LICENSE
       - scripts
-  charm:
-    override-pull: |
-      craftctl default
-      if [[ ! -f requirements.txt ]]
-      then
-          echo 'ERROR: Use "tox run -e build-dev" instead of calling "charmcraft pack" directly' >&2
-          exit 1
-      fi
-    charm-strict-dependencies: true
-    build-packages:
-      - libffi-dev
-      - libssl-dev
-      - pkg-config
-      - rustc
-      - cargo
+      - templates
diff --git a/concierge.yaml b/concierge.yaml
new file mode 100644
index 000000000..935ff237a
--- /dev/null
+++ b/concierge.yaml
@@ -0,0 +1,16 @@
+juju:
+  model-defaults:
+    logging-config: <root>=INFO; unit=DEBUG
+providers:
+  microk8s:
+    enable: true
+    bootstrap: true
+    addons:
+      - dns
+      - hostpath-storage
+host:
+  snaps:
+    jhack:
+      channel: latest/edge
+      connections:
+        - jhack:dot-local-share-juju snapd
diff --git a/config.yaml b/config.yaml
index 63cb1dc67..26f5beabb 100644
--- a/config.yaml
+++ b/config.yaml
@@ -12,8 +12,8 @@ options:
     type: string
   profile:
     description: |
-      profile representing the scope of deployment, and used to be able to enable high-level
-      high-level customisation of sysconfigs, resource checks/allocation, warning levels, etc.
+      Profile representing the scope of deployment. Used to enable high-level
+      customisation of sysconfigs, resource checks/allocation, warning levels, etc.
       Allowed values are: “production” and “testing”.
     type: string
     default: production
@@ -37,7 +37,8 @@ options:
     description: The database name for the legacy 'mysql' interface (root level access)
     type: string
   plugin-audit-enabled:
-    description: Enable the audit plugin
+    description: |
+      Audit log plugin state. When the plugin is enabled (the default), audit logs will be enabled.
     type: boolean
     default: true
   plugin-audit-strategy:
@@ -50,6 +51,19 @@ options:
     description: Number of days for binary logs retention
     type: int
     default: 7
+  logs_audit_policy:
+    description: |
+      Audit log policy. Allowed values are: "all", "logins" (default), "queries".
+      Reference: https://docs.percona.com/percona-server/8.0/audit-log-plugin.html#audit_log_policy
+    type: string
+    default: logins
+  logs_retention_period:
+    description: |
+      Specifies the retention period for rotated logs, in days. Accepts an integer value of 3 or
+      greater, or the special value "auto". When set to "auto" (the default), the retention period
+      is 3 days, except when the charm is related to COS, in which case it is 1 day.
+    type: string
+    default: auto
   # Experimental features
   experimental-max-connections:
     type: int
diff --git a/docs/explanation/e-audit-logs.md b/docs/explanation/e-audit-logs.md
index 5342beccc..b883431f4 100644
--- a/docs/explanation/e-audit-logs.md
+++ b/docs/explanation/e-audit-logs.md
@@ -25,6 +25,13 @@ It's recommended to integrate the charm with [COS](/t/9900), from where the logs
     ```
     Valid value are `false` and `true`. By setting it to false, existing logs are still kept in the `archive_audit` directory.
 
+1. `logs_audit_policy` - Audit log policy:
+
+    ```bash
+    juju config mysql-k8s logs_audit_policy=queries
+    ```
+    Valid values are: "all", "logins" (default), and "queries".
+
 1. `plugin-audit-strategy` - By default the audit plugin writes logs in asynchronous mode for better performance.
     To ensure logs are written to disk on more timely fashion, this configuration can be set to semi-synchronous mode:
 
diff --git a/docs/explanation/e-logs.md b/docs/explanation/e-logs.md
index 8a1ed768e..fba87ee0a 100644
--- a/docs/explanation/e-logs.md
+++ b/docs/explanation/e-logs.md
@@ -2,7 +2,7 @@
 
 This explanation goes over the types of logging in MySQL and the configuration parameters for log rotation.
 
-The charm currently has audit, error and general logs enabled by default, while slow query logs are disabled by default. All of these files are rotated if present into a separate dedicated archive folder under the logs directory.
+The charm currently has audit and error logs enabled by default. If present, these files are rotated into a separate, dedicated archive folder under the logs directory.
 
 We do not yet support the rotation of binary logs (binlog, relay log, undo log, redo log, etc).
 
@@ -10,8 +10,6 @@ We do not yet support the rotation of binary logs (binlog, relay log, undo log,
 * [Log types](#log-types)
   * [Audit logs](#audit-logs)
   * [Error logs](#error-logs)
-  * [General logs](#general-logs)
-  * [Slowquery logs](#slowquery-logs)
 * [Log rotation configuration](#log-rotation-configuration)
 * [High Level Design](#high-level-design)
 
@@ -19,33 +17,24 @@ We do not yet support the rotation of binary logs (binlog, relay log, undo log,
 
 ## Log types
 
-The charm stores its logs in `/var/snap/charmed-mysql/common/var/log/mysql`. 
+The charm stores its logs in `/var/log/mysql`. 
 
 ```shell
-$ ls -lahR /var/snap/charmed-mysql/common/var/log/mysql
+$ ls -lahR /var/log/mysql
 
 /var/log/mysql:
 drwxrwx--- 2 mysql mysql 4.0K Oct 23 20:46 archive_audit
 drwxrwx--- 2 mysql mysql 4.0K Oct 23 20:46 archive_error
-drwxrwx--- 2 mysql mysql 4.0K Oct 23 20:46 archive_general
-drwxrwx--- 2 mysql mysql 4.0K Oct 23 20:45 archive_slowquery
 -rw-r----- 1 mysql mysql 1.1K Oct 23 20:46 audit.log
 -rw-r----- 1 mysql mysql 1.1K Oct 23 20:46 error.log
--rw-r----- 1 mysql mysql 1.7K Oct 23 20:46 general.log
 
-/var/snap/charmed-mysql/common/var/log/mysql/archive_audit:
--rw-r----- 1 snap_daemon root         43K Sep  3 01:24 audit.log-20240903_0124
--rw-r----- 1 snap_daemon root        109K Sep  3 01:25 audit.log-20240903_0125
+/var/log/mysql/archive_audit:
+-rw-r----- 1 snap_daemon root         43K Sep  3 01:24 audit.log-20240903_0124.gz
+-rw-r----- 1 snap_daemon root        109K Sep  3 01:25 audit.log-20240903_0125.gz
 
-/var/snap/charmed-mysql/common/var/log/mysql/archive_error:
--rw-r----- 1 mysql mysql 8.7K Oct 23 20:44 error.log-43_2045
--rw-r----- 1 mysql mysql 2.3K Oct 23 20:45 error.log-43_2046
-
-/var/snap/charmed-mysql/common/var/log/mysql/archive_general:
--rw-r----- 1 mysql mysql 8.0M Oct 23 20:45 general.log-43_2045
--rw-r----- 1 mysql mysql 4.6K Oct 23 20:46 general.log-43_2046
-
-/var/snap/charmed-mysql/common/var/log/mysql/archive_slowquery:
+/var/log/mysql/archive_error:
+-rw-r----- 1 mysql mysql 8.7K Oct 23 20:44 error.log-43_2045.gz
+-rw-r----- 1 mysql mysql 2.3K Oct 23 20:45 error.log-43_2046.gz
 ```
 
 It is recommended to set up a [COS integration] so that these log files can be streamed to Loki. This leads to better persistence and security of the logs.
@@ -112,51 +101,27 @@ For more details, see the [Audit Logs explanation].
 ```
 </details>
 
-### General logs
-
-<details>
-<summary>Example of general logs, with format <code>time thread_id command_type query_body</code></summary>
-```shell
-Time                 Id Command    Argument                                                          
-2023-10-23T20:50:02.023329Z        94 Quit                                                        
-2023-10-23T20:50:02.667063Z        95 Connect                                                       
-2023-10-23T20:50:02.667436Z        95 Query     /* xplugin authentication */ SELECT /*+ SET_VAR(SQL_MODE = 'TRADITIONAL') */ @@require_secure_transport, `authentication_string`, `plugin`, (`account_locked
-`='Y') as is_account_locked, (`password_expired`!='N') as `is_password_expired`, @@disconnect_on_expired_password as `disconnect_on_expired_password`, @@offline_mode and (`Super_priv`='N') as `is_offline_
-mode_and_not_super_user`, `ssl_type`, `ssl_cipher`, `x509_issuer`, `x509_subject` FROM mysql.user WHERE 'serverconfig' = `user` AND '%' = `host`                                                            
-2023-10-23T20:50:02.668277Z        95 Query     /* xplugin authentication */ SELECT /*+ SET_VAR(SQL_MODE = 'TRADITIONAL') */ @@require_secure_transport, `authentication_string`, `plugin`, (`account_locked
-`='Y') as is_account_locked, (`password_expired`!='N') as `is_password_expired`, @@disconnect_on_expired_password as `disconnect_on_expired_password`, @@offline_mode and (`Super_priv`='N') as `is_offline_
-mode_and_not_super_user`, `ssl_type`, `ssl_cipher`, `x509_issuer`, `x509_subject` FROM mysql.user WHERE 'serverconfig' = `user` AND '%' = `host`                                                            
-2023-10-23T20:50:02.668778Z        95 Query     select @@lower_case_table_names, @@version, connection_id(), variable_value from performance_schema.session_status where variable_name = 'mysqlx_ssl_cipher'
-2023-10-23T20:50:02.669991Z        95 Query     SET sql_log_bin = 0                       
-2023-10-23T20:50:02.670389Z        95 Query     FLUSH SLOW LOGS                              
-2023-10-23T20:50:02.670924Z        95 Quit  
-```
-</details>
+## Log rotation configuration
 
-### Slowquery logs
+The following are the configuration options exposed by the charm:
 
-<details>
-<summary>Example of a slowquery log</summary>
-```shell
-Time                 Id Command    Argument
-# Time: 2023-10-23T22:22:47.564327Z
-# User@Host: serverconfig[serverconfig] @ localhost [127.0.0.1]  Id:    21
-# Query_time: 15.000332  Lock_time: 0.000000 Rows_sent: 0  Rows_examined: 1
-SET timestamp=1698099752;
-do sleep(15);
-```
-</details>
+| Configuration option | Description | Default value |
+| --- | --- | --- |
+| `plugin-audit-enabled` | Enable or disable the audit log | `true` |
+| `logs_audit_policy` | The audit log policy ("all", "logins", "queries") | `logins` |
+| `logs_retention_period` | The number of days to keep the rotated logs | `auto` |
 
-## Log rotation configuration
+The `logs_retention_period` option accepts an integer value of 3 or greater, or the special value
+`auto`. When set to `auto` (the default), the retention period is 3 days, except when the charm is
+related to COS, in which case it is 1 day.
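+
+For example, assuming the application is deployed under the name `mysql-k8s` (as in the other examples in this documentation), the retention period could be extended to a week with:
+
+```shell
+juju config mysql-k8s logs_retention_period=7
+```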
 
-For each log (audit, error, general and slow query):
+For each log (audit, error):
 
 - The log file is rotated every minute (even if the log files are empty)
 - The rotated log file is formatted with a date suffix of `-%V-%H%M` (-weeknumber-hourminute)
-- The rotated log files are not compressed or mailed
+- The rotated log files are compressed but not mailed
 - The rotated log files are owned by the `snap_daemon` user and group
-- The rotated log files are retained for a maximum of 7 days before being deleted
-- The most recent 10080 rotated log files are retained before older rotated log files are deleted
+- By default, the rotated log files are retained for 3 days before being deleted; this can be configured with the `logs_retention_period` option
 
 The following are logrotate config values used for log rotation:
 
@@ -171,15 +136,15 @@ The following are logrotate config values used for log rotation:
 | `dateformat` | -%V-%H%M |
 | `ifempty` | true |
 | `missingok` | true |
-| `nocompress` | true |
+| `compress` | true |
 | `nomail` | true |
 | `nosharedscripts` | true |
 | `nocopytruncate` | true |
-| `olddir` | archive_error / archive_general / archive_slowquery |
+| `olddir` | archive_<log_name> |
 
 ## High Level Design
 
-There is a cron job on the machine where the charm exists that is triggered every minute and runs `logrotate`. The logrotate utility does *not* use `copytruncate`. Instead, the existing log file is moved into the archive directory by logrotate, and then the logrotate's postrotate script invokes `juju-run` (or `juju-exec` depending on the juju version) to dispatch a custom event. This custom event's handler flushes the MySQL log with the [FLUSH](https://dev.mysql.com/doc/refman/8.0/en/flush.html) statement that will result in a new and empty log file being created under `/var/snap/charmed-mysql/common/var/log/mysql` and the rotated file's descriptor being closed.
+There is a cron job on the machine where the charm exists that is triggered every minute and runs `logrotate`. The logrotate utility does *not* use `copytruncate`. Instead, the existing log file is moved into the archive directory by logrotate, and then the logrotate's postrotate script invokes `juju-run` (or `juju-exec` depending on the juju version) to dispatch a custom event. This custom event's handler flushes the MySQL log with the [FLUSH](https://dev.mysql.com/doc/refman/8.0/en/flush.html) statement that will result in a new and empty log file being created under `/var/log/mysql` and the rotated file's descriptor being closed.
 
 We use a custom event in juju to execute the FLUSH statement in order to avoid storing any credentials on the disk. The charm code has a mechanism that will retrieve credentials from the peer relation databag or juju secrets backend, if available, and keep these credentials in memory for the duration of the event handler.
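+
+As a rough, hypothetical sketch of this flow (the paths, file names, and exact invocation below are illustrative and not taken verbatim from the charm):
+
+```shell
+# Illustrative only — every minute, cron forces a rotation
+* * * * *  logrotate -f /path/to/logrotate.conf
+# logrotate moves audit.log / error.log into archive_<log_name>/, then its
+# postrotate script dispatches a custom juju event (via juju-run or juju-exec);
+# the event handler executes a FLUSH statement so MySQL reopens new, empty log files.
+```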
 
diff --git a/docs/explanation/e-users.md b/docs/explanation/e-users.md
index 3b78224a9..5d9bde476 100644
--- a/docs/explanation/e-users.md
+++ b/docs/explanation/e-users.md
@@ -11,8 +11,8 @@ There are two types of users in MySQL:
 The operator uses the following internal DB users:
 
 * `root` - the [initial/default](https://charmhub.io/mysql/docs/t-manage-passwords) MySQL user. Used for very initial bootstrap only.
-* `clusteradmin` - the user to manage entire MySQL InnoDB ClusterSet.
-* `serverconfig` - the user to manage local MySQL instance.
+* `clusteradmin` - the user to manage replication in the MySQL InnoDB ClusterSet.
+* `serverconfig` - the user that operates MySQL instances.
 * `monitoring` - the user for [COS integration](https://charmhub.io/mysql/docs/h-enable-monitoring).
 * `backups` - the user to [perform/list/restore backups](https://charmhub.io/mysql/docs/h-create-and-list-backups).
 * `mysql_innodb_cluster_#######` - the [internal recovery users](https://dev.mysql.com/doc/mysql-shell/8.0/en/innodb-cluster-user-accounts.html#mysql-innodb-cluster-users-created) which enable connections between the servers in the cluster. Dedicated user created for each Juju unit/InnoDB Cluster member.
@@ -87,16 +87,20 @@ To set a predefined password for the specific user, run:
 <a name="relation-users"></a>
 ## Relation/integration users explanations:
 
-The operator created a dedicated user for every application related/integrated with database. Those users are removed on the juju relation/integration removal request. However, DB data stays in place and can be reused on re-created relations (using new user credentials):
+The operator creates a dedicated user for every application related/integrated with the database.
+The username is composed of the relation ID and a truncated UUID of the model, to ensure there are no
+username clashes in cross-model relations. Usernames are limited to 32 characters as per the [MySQL limit](https://dev.mysql.com/doc/refman/8.0/en/user-names.html).
+Those users are removed when the juju relation/integration is removed.
+However, DB data stays in place and can be reused on re-created relations (using new user credentials):
 
 ```shell
 mysql> select Host,User,account_locked from mysql.user where User like 'relation%';
-+------+------------+----------------+
-| Host | User       | account_locked |
-+------+------------+----------------+
-| %    | relation-8 | N              |
-| %    | relation-9 | N              |
-+------+------------+----------------+
++------+----------------------------+----------------+
+| Host | User                       | account_locked |
++------+----------------------------+----------------+
+| %    | relation-8_99200344b67b4e9 | N              |
+| %    | relation-9_99200344b67b4e9 | N              |
++------+----------------------------+----------------+
 2 row in set (0.00 sec)
 ```
 
@@ -123,4 +127,11 @@ mysql> select Host,User,account_locked from mysql.user where User like 'mysql_ro
 
 When an application charm requests a new user through the relation/integration it can specify that the user should have the `admin` role in the `extra-user-roles` field. The `admin` role enables the new user to read and write to all databases (for the `mysql` system database it can only read data) and also to create and delete non-system databases.
 
-**Note**: `extra-user-roles` is supported by modern interface `mysql_client` only and missing for legacy `mysql` interface. Read more about the supported charm interfaces [here](/t/10249).
\ No newline at end of file
+**Note**: `extra-user-roles` is supported by modern interface `mysql_client` only and missing for legacy `mysql` interface. Read more about the supported charm interfaces [here](/t/10249).
+
+
+<a name="admin-port"></a>
+### Admin Port User Access
+
+The charm mainly uses the `serverconfig` user for internal operations. For connections with this user, a special admin port is used (port `33062`), which enables the charm to operate MySQL even when user connections are saturated.
+For further information on the administrative connection, refer to [MySQL docs](https://dev.mysql.com/doc/refman/8.0/en/administrative-connection-interface.html) on the topic.
\ No newline at end of file
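+
+For illustration, a manual connection over this administrative interface would look roughly like the following (`<unit-address>` is a placeholder, and the `serverconfig` password is retrieved from the charm rather than typed in here):
+
+```shell
+# Hypothetical example — substitute the real unit address and credentials
+mysql -h <unit-address> -P 33062 -u serverconfig -p
+```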
diff --git a/docs/how-to/h-async-deployment.md b/docs/how-to/h-async-deployment.md
index 8dd226079..f05d8fc8c 100644
--- a/docs/how-to/h-async-deployment.md
+++ b/docs/how-to/h-async-deployment.md
@@ -1,5 +1,13 @@
 # Deploy Async replication
 
+The following table shows the source and target controller/model combinations that are currently supported:
+
+|  | AWS | GCP | Azure |
+|---|---|:---:|:---:|
+| AWS | ![ check ] |  |  |
+| GCP |  | ![ check ] |  |
+| Azure |  |  | ![ check ] |
+
 ## Deploy
 
 Deploy two MySQL Clusters, named `Rome` and `Lisbon`:
@@ -92,4 +100,6 @@ The two clusters works independently, this means that it's possible to independe
 juju scale-application db1 3 -m rome
 
 juju scale-application db2 3 -m lisbon
-```
\ No newline at end of file
+```
+
+[check]: https://img.shields.io/badge/%E2%9C%93-brightgreen
\ No newline at end of file
diff --git a/docs/how-to/h-configure-s3-aws.md b/docs/how-to/h-configure-s3-aws.md
index 3a325c3c8..b88a72251 100644
--- a/docs/how-to/h-configure-s3-aws.md
+++ b/docs/how-to/h-configure-s3-aws.md
@@ -1,4 +1,10 @@
-> **:information_source: Hint**: Use [Juju 3](/t/5064). Otherwise replace `juju run ...` with `juju run-action --wait ...` and `juju integrate` with `juju relate` for Juju 2.9.
+[note]
+**Note**: All commands are written for `juju >= v.3.0`
+
+If you are using an earlier version, check the [Juju 3.0 Release Notes](https://juju.is/docs/juju/roadmap#heading--juju-3-0-0---22-oct-2022).
+[/note]
+
+# Configure S3 for AWS
 
 Charmed MySQL K8s backup can be stored on any S3 compatible storage. The S3 access and configurations are managed with the [s3-integrator charm](https://charmhub.io/s3-integrator). Deploy and configure the s3-integrator charm for **[AWS S3](https://aws.amazon.com/s3/)** (click [here](/t/charmed-mysql-how-to-configure-s3-for-radosgw/10319) to backup on Ceph via RadosGW):
 ```shell
diff --git a/docs/how-to/h-configure-s3-radosgw.md b/docs/how-to/h-configure-s3-radosgw.md
index 37170e2ba..b5d4a4e39 100644
--- a/docs/how-to/h-configure-s3-radosgw.md
+++ b/docs/how-to/h-configure-s3-radosgw.md
@@ -1,11 +1,29 @@
-Charmed MySQL K8s backup can be stored on any S3 compatible storage, e.g. on [Ceph](https://ceph.com/en/) via [RadosGW](https://docs.ceph.com/en/latest/man/8/radosgw/). The S3 access and configurations are managed with the [s3-integrator charm](https://charmhub.io/s3-integrator). Deploy and configure the s3-integrator charm for **RadosGW** (click [here](/t/charmed-mysql-k8s-how-to-configure-s3-for-aws/9651) to backup on AWS S3):
+[note]
+**Note**: All commands are written for `juju >= v.3.0`
+
+If you are using an earlier version, check the [Juju 3.0 Release Notes](https://juju.is/docs/juju/roadmap#heading--juju-3-0-0---22-oct-2022).
+[/note]
+
+# Configure S3 for RadosGW
+
+A MySQL K8s backup can be stored on any S3-compatible storage. S3 access and configurations are managed with the [s3-integrator charm](https://charmhub.io/s3-integrator).
+
+This guide will teach you how to deploy and configure the s3-integrator charm on Ceph via [RadosGW](https://docs.ceph.com/en/quincy/man/8/radosgw/), send the configuration to a Charmed MySQL K8s application, and update it. 
+> For AWS, see the guide [How to configure S3 for AWS](/t/9651)
+
+## Configure s3-integrator
+First, install the MinIO client and create a bucket:
 ```shell
-# Install MinIO client and create a bucket:
 mc config host add dest https://radosgw.mycompany.fqdn <access-key> <secret-key> --api S3v4 --lookup path
 mc mb dest/backups-bucket
-
+```
+Then, deploy and run the charm:
+```shell
 juju deploy s3-integrator
-juju run-action s3-integrator/leader sync-s3-credentials access-key=<access-key> secret-key=<secret-key> --wait
+juju run s3-integrator/leader sync-s3-credentials access-key=<access-key> secret-key=<secret-key>
+```
+Lastly, use `juju config` to add your configuration parameters. For example:
+```shell
 juju config s3-integrator \
     endpoint="https://radosgw.mycompany.fqdn" \
     bucket="backups-bucket" \
@@ -15,24 +33,27 @@ juju config s3-integrator \
     s3-uri-style="path"
 ```
 
-To pass these configurations to Charmed MySQL K8s, relate the two applications:
+## Integrate with Charmed MySQL K8s
+
+To pass these configurations to Charmed MySQL K8s, integrate the two applications:
 ```shell
-juju relate s3-integrator mysql-k8s
+juju integrate s3-integrator mysql-k8s
 ```
 
-You can create/list/restore backups now:
-
+You can create, list, and restore backups now:
 ```shell
-juju run-action mysql-k8s/leader list-backups --wait
-juju run-action mysql-k8s/leader create-backup --wait
-juju run-action mysql-k8s/leader list-backups --wait
-juju run-action mysql-k8s/leader restore backup-id=<backup-id-here> --wait
+juju run mysql-k8s/leader list-backups
+juju run mysql-k8s/leader create-backup
+juju run mysql-k8s/leader list-backups
+juju run mysql-k8s/leader restore backup-id=<backup-id-here>
 ```
 
-You can also update your S3 configuration options after relating, using:
+You can also update your S3 configuration options after integrating, using:
 ```shell
 juju config s3-integrator <option>=<value>
 ```
 The s3-integrator charm [accepts many configurations](https://charmhub.io/s3-integrator/configure) - enter whatever configurations are necessary for your S3 storage.
 
-> :tipping_hand_man: **[MicroCeph TIP](https://github.com/canonical/microceph)**: make sure the `region` for `s3-integrator` matches `"sudo microceph.radosgw-admin zonegroup list"` output (use `region="default"` by default).
\ No newline at end of file
+[note]
+**[MicroCeph](https://github.com/canonical/microceph) tip**: make sure the `region` for `s3-integrator` matches the `"sudo microceph.radosgw-admin zonegroup list"` output (use `region="default"` by default).
+[/note]
\ No newline at end of file
diff --git a/docs/how-to/h-create-backup.md b/docs/how-to/h-create-backup.md
index fd0f88248..faf4fbf91 100644
--- a/docs/how-to/h-create-backup.md
+++ b/docs/how-to/h-create-backup.md
@@ -1,22 +1,34 @@
+[note]
+**Note**: All commands are written for `juju >= v.3.0`
+
+If you are using an earlier version, check the [Juju 3.0 Release Notes](https://juju.is/docs/juju/roadmap#heading--juju-3-0-0---22-oct-2022).
+[/note]
+
 # How to create and list backups
 
-Creating and listing backups requires that you:
-* [Have a Charmed MySQL K8s deployed](/t/charmed-mysql-k8s-how-to-manage-units/9659)
+This guide contains recommended steps and useful commands for creating and managing backups to ensure smooth restores.
+
+## Prerequisites
+* A [deployed](/t/9659) MySQL K8s cluster
 * Access to S3 storage
-* [Have configured settings for S3 storage](/t/charmed-mysql-k8s-how-to-configure-s3/9651)
+* [Configured settings for S3 storage](/t/9651)
 
-Once Charmed MySQL K8s is `active` and `idle` (check `juju status`), you can create your first backup with the `create-backup` command:
+---
+
+## Create a backup
+
+Once `juju status` shows Charmed MySQL K8s as `active` and `idle`, you can create your first backup with the `create-backup` command:
 ```shell
-juju run-action mysql-k8s/leader create-backup --wait
+juju run mysql-k8s/leader create-backup
 ```
 
-[note]
 If you have a cluster of one unit, you can run the `create-backup` action on `mysql-k8s/leader` (which will also be the primary unit). 
 
 Otherwise, you must run the `create-backup` action on a non-primary unit (see `juju status` or run `juju run-action mysql-k8s/leader get-cluster-status` to find the primary unit).
-[/note]
+
+## List backups
 
 You can list your available, failed, and in progress backups by running the `list-backups` command:
 ```shell
-juju run-action mysql-k8s/leader list-backups --wait
+juju run mysql-k8s/leader list-backups
 ```
\ No newline at end of file
diff --git a/docs/how-to/h-deploy-airgapped.md b/docs/how-to/h-deploy-airgapped.md
new file mode 100644
index 000000000..30e497a3b
--- /dev/null
+++ b/docs/how-to/h-deploy-airgapped.md
@@ -0,0 +1,111 @@
+# Deploy in an offline or air-gapped environment
+
+An air-gapped environment refers to a system that does not have access to the public internet.
+This guide goes through the special configuration steps for installing Charmed MySQL K8s in an air-gapped environment.
+
+## Requirements
+
+Canonical does not prescribe how you should set up your specific air-gapped environment. However, it is assumed that it meets the following conditions:
+
+* A K8s cluster is running.
+* DNS is configured to the local nameservers.
+* [Juju is configured](https://documentation.ubuntu.com/snap-store-proxy/en/airgap-charmhub/#configure-juju) to use local air-gapped services.
+* The [`store-admin`](https://snapcraft.io/store-admin) tool is installed and configured.
+* [Air-gapped CharmHub](https://documentation.ubuntu.com/snap-store-proxy/en/airgap-charmhub/) is installed and running.
+* Local APT and LXD image caches are reachable.
+* An air-gapped container registry (such as [Artifactory](https://jfrog.com/artifactory/)) is reachable from the K8s cluster over HTTPS
+  *  **Note**: Secure (HTTPS) OCI access is important, otherwise Juju won’t work!
+
+## Air-gapped concept summary
+
+1. [Export](https://documentation.ubuntu.com/snap-store-proxy/en/airgap-charmhub/#export-packages)
+2. [Transfer](https://en.wikipedia.org/wiki/Air_gap_(networking))
+3. [Import](https://documentation.ubuntu.com/snap-store-proxy/en/airgap-charmhub/#import-packages)
+4. [Deploy](/t/9298) 
+
+## Air-gapped day-to-day example
+
+**1.** K8s charms and OCI resources are currently exported in two independent processes.
+> Several improvements are planned: [#1](https://warthogs.atlassian.net/browse/PF-5369), [#2](https://warthogs.atlassian.net/browse/PF-5185)
+
+**1.1.** Charm. The necessary charm(s) can be exported as a bundle OR independently (charm-by-charm). The dedicated `store-admin` tool is designed to simplify the process. At the moment, exporting charms and OCI resources are separate steps, but in the future the `store-admin export` command [could](https://documentation.ubuntu.com/snap-store-proxy/en/airgap-charmhub/#export-charms) also export all the necessary OCI resource(s) from the official CharmHub.
+
+At the moment, `store-admin` exports (and includes in the blob) the OCI resources' metadata only:
+```shell
+store-admin export bundle mysql-k8s-bundle --channel=8.0/edge --series=jammy --arch=amd64
+```
+
+[details="Example output"]
+
+```shell
+> store-admin export bundle mysql-k8s-bundle --channel=8.0/edge --series=jammy --arch=amd64
+Downloading mysql-k8s-bundle revision 45 (8.0/edge)
+  [####################################]  100%
+Downloading data-integrator revision 71 (edge)
+  [####################################]  100%          
+Downloading grafana-agent-k8s revision 93 (edge)
+  [####################################]  100%          
+Downloading resources for grafana-agent-k8s
+Downloading oci-image resource agent-image revision 45
+  [####################################]  100%
+Falling back to OCI image subpath from online Charmhub for 'agent-image' in charm 'grafana-agent-k8s'.
+Downloading mysql-k8s revision 201 (8.0/edge)
+  [####################################]  100%          
+Downloading resources for mysql-k8s
+Downloading oci-image resource mysql-image revision 113
+  [####################################]  100%
+Falling back to OCI image subpath from online Charmhub for 'mysql-image' in charm 'mysql-k8s'.
+Downloading mysql-router-k8s revision 164 (8.0/edge)
+  [####################################]  100%          
+Downloading resources for mysql-router-k8s
+Downloading oci-image resource mysql-router-image revision 57
+  [####################################]  100%
+Falling back to OCI image subpath from online Charmhub for 'mysql-router-image' in charm 'mysql-router-k8s'.
+Downloading mysql-test-app revision 63 (edge)
+  [####################################]  100%          
+Downloading s3-integrator revision 59 (edge)
+  [####################################]  100%          
+Downloading self-signed-certificates revision 200 (edge)
+  [####################################]  100%          
+Downloading sysbench revision 78 (edge)
+  [####################################]  100%          
+Successfully exported charm bundle mysql-k8s-bundle: /home/ubuntu/snap/store-admin/common/export/mysql-k8s-bundle-20241006T231254.tar.gz
+
+```
+
+[/details]
+
+**1.2.** OCI: for the manual OCI export, please follow [the official CharmHub guide](https://documentation.ubuntu.com/snap-store-proxy/en/airgap-charmhub/#export-oci-images).
+
+**2.** Transfer the binary blobs into the air-gapped environment using the method of your choice.
+
+```shell
+cp /home/ubuntu/snap/store-admin/common/export/mysql-k8s-bundle-20241006T231254.tar.gz /media/usb/
+...
+cp /media/usb/mysql-k8s-bundle-20241006T231254.tar.gz /var/snap/snap-store-proxy/common/charms-to-push/
+```
+> **Note**: always verify the [checksum](https://en.wikipedia.org/wiki/Checksum) of the transferred blobs!
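+
+For example, you can compare the SHA-256 checksums of the source and transferred files (a minimal sketch using the paths from the example above):
+```shell
+# compare the output of both commands - they must match
+sha256sum /media/usb/mysql-k8s-bundle-20241006T231254.tar.gz
+sha256sum /var/snap/snap-store-proxy/common/charms-to-push/mysql-k8s-bundle-20241006T231254.tar.gz
+```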
+
+**3.** Upload the charm blobs to the local air-gapped CharmHub:
+```shell
+sudo snap-store-proxy push-charm-bundle /var/snap/snap-store-proxy/common/charms-to-push/mysql-k8s-bundle-20241006T231254.tar.gz
+```
+> **Note**: when [re-importing](https://documentation.ubuntu.com/snap-store-proxy/en/airgap-charmhub/#import-packages) charms or importing other revisions, make sure to provide the `--push-channel-map`.
+
+**4.** Upload the charm OCI images to the local air-gapped OCI registry.
+
+For the manual OCI import, please follow [the official CharmHub guide](https://documentation.ubuntu.com/snap-store-proxy/en/airgap-charmhub/#import-packages).
+
+**5.** [Deploy and enjoy Juju charms the usual way](/t/9298):
+```shell
+juju deploy mysql-k8s --trust
+```
+> **Note**: all air-gap-deployed charm revisions and OCI resource tags/revisions must match the official CharmHub revisions/tags (you can rely on [the official release notes](/t/11878)).
+
+## Additional links
+
+* https://docs.ubuntu.com/snap-store-proxy/en/airgap
+* https://documentation.ubuntu.com/snap-store-proxy/
+* https://documentation.ubuntu.com/snap-store-proxy/en/airgap-charmhub/
+* https://ubuntu.com/kubernetes/docs/install-offline
+* https://charmed-kubeflow.io/docs/install-in-airgapped-environment
\ No newline at end of file
diff --git a/docs/how-to/h-deploy-aks.md b/docs/how-to/h-deploy-aks.md
index 0e0884863..85bcad88c 100644
--- a/docs/how-to/h-deploy-aks.md
+++ b/docs/how-to/h-deploy-aks.md
@@ -3,20 +3,21 @@
 [Azure Kubernetes Service](https://learn.microsoft.com/en-us/azure/aks/) (AKS) allows you to quickly deploy a production ready Kubernetes cluster in Azure. To access the AKS Web interface, go to [https://portal.azure.com/](https://portal.azure.com/).
 
 ## Summary
-* [Install AKS and Juju tooling](#heading--install-aks-juju)
-* [Create a new AKS cluster](#heading--create-aks-cluster)
-* [Bootstrap Juju on AKS](#heading--boostrap-juju)
-* [Deploy charms](#heading--deploy-charms)
-* [Display deployment information](#heading--display-information)
-* [Clean up](#heading--clean-up)
+* [Install AKS and Juju tooling](#install-aks-and-juju-tooling)
+  * [Authenticate](#authenticate)
+* [Create a new AKS cluster](#create-a-new-aks-cluster)
+* [Bootstrap Juju on AKS](#bootstrap-juju-on-aks)
+* [Deploy charms](#deploy-charms)
+* [Display deployment information](#display-deployment-information)
+* [Clean up](#clean-up)
 
 ---
 
-<a href="#heading--install-aks-juju"><h2 id="heading--install-aks-juju"> Install AKS and Juju tooling</h2></a>
+## Install AKS and Juju tooling
 
 Install Juju and Azure CLI tool:
 ```shell
-sudo snap install juju --classic
+sudo snap install juju
 sudo apt install --yes azure-cli
 ```
 Follow the installation guides for:
@@ -46,7 +47,7 @@ Login to your Azure account:
 az login
 ```
 
-<a href="#heading--create-aks-cluster"><h2 id="heading--create-aks-cluster"> Create a new AKS cluster</h2></a>
+## Create a new AKS cluster
 
 Export the deployment name for further use:
 ```shell
@@ -95,7 +96,7 @@ Sample output:
 Merged "aks" as current context in ~/.kube/config
 ```
 
-<a href="#heading--boostrap-juju"><h2 id="heading--boostrap-juju"> Bootstrap Juju on AKS</h2></a>
+## Bootstrap Juju on AKS
 
 Bootstrap Juju controller:
 ```shell
@@ -127,7 +128,7 @@ juju add-model welcome aks
 juju model-config logging-config='<root>=INFO;unit=DEBUG'
 ```
 
-<a href="#heading--deploy-charms"><h2 id="heading--deploy-charms">Deploy charms</h2></a>
+## Deploy charms
 
 The following command deploys MySQL K8s:
 
@@ -157,7 +158,7 @@ mysql-k8s/1   active    idle   10.244.0.15
 mysql-k8s/2   active    idle   10.244.0.16 
 ```
 
-<a href="#heading--display-information"><h2 id="heading--display-information"> Display deployment information</h2></a>
+## Display deployment information
 
 Display information about the current deployments with the following commands:
 ```shell
@@ -178,7 +179,7 @@ NAME                                STATUS   ROLES   AGE   VERSION
 aks-nodepool1-55146003-vmss000000   Ready    agent   11m   v1.28.9
 ```
 
-<a href="#heading--clean-up"><h2 id="heading--clean-up"> Clean up</h2></a>
+## Clean up
 
 [note type="caution"]
 Always clean AKS resources that are no longer necessary -  they could be costly!
diff --git a/docs/how-to/h-deploy-canonical-k8s.md b/docs/how-to/h-deploy-canonical-k8s.md
new file mode 100644
index 000000000..5597cf2fa
--- /dev/null
+++ b/docs/how-to/h-deploy-canonical-k8s.md
@@ -0,0 +1,74 @@
+# How to deploy on Canonical Kubernetes
+
+[Canonical Kubernetes](https://ubuntu.com/kubernetes) is a Kubernetes service built on Ubuntu and optimized for most major public clouds. 
+
+This guide shows you how to deploy Charmed MySQL K8s to Canonical Kubernetes.
+
+## Summary
+This guide assumes you have spare hardware or VMs running Ubuntu 22.04 LTS (or newer).
+
+* [Install Canonical Kubernetes](#install-canonical-kubernetes)
+* [Install Juju](#install-juju)
+* [Deploy Charmed MySQL K8s](#deploy-charmed-mysql-k8s)
+
+---
+
+## Install Canonical Kubernetes
+
+>The following instructions are a complete but summarized version of the steps for installing Canonical K8s. For more thorough instructions and details, see the official Canonical Kubernetes documentation: [Install Canonical Kubernetes from a snap](https://documentation.ubuntu.com/canonical-kubernetes/latest/src/snap/howto/install/snap/).
+
+Install, bootstrap, and check the status of Canonical K8s with the following commands:
+```shell
+sudo snap install k8s --edge --classic
+sudo k8s bootstrap
+sudo k8s status --wait-ready
+```
+
+Once Canonical K8s is up and running, [enable the local storage](https://documentation.ubuntu.com/canonical-kubernetes/latest/snap/tutorial/getting-started/#enable-local-storage) (or any other persistent volume provider to be used by [Juju Storage](https://juju.is/docs/juju/storage) later):
+```shell
+sudo k8s enable local-storage
+sudo k8s status --wait-ready
+```
+
+(Optional) Install the `kubectl` tool and dump the K8s config:
+```shell
+sudo snap install kubectl --classic
+mkdir ~/.kube
+sudo k8s config > ~/.kube/config
+kubectl get namespaces # to test the credentials
+```
+
+## Install Juju
+
+Install Juju and bootstrap the first Juju controller in K8s:
+```shell
+sudo snap install juju --channel 3.6/candidate
+juju add-k8s ck8s --client --context-name="k8s"
+juju bootstrap ck8s
+```
+
+## Deploy Charmed MySQL K8s
+
+```shell
+juju add-model mysql
+juju deploy mysql-k8s --trust
+```
+
+Follow the deployment progress using:
+```shell
+juju status --watch 1s
+```
+
+Example output:
+```shell
+Model   Controller  Cloud/Region  Version  SLA          Timestamp
+mysql   ck8s        ck8s          3.6-rc1  unsupported  18:32:38+01:00
+
+App         Version                  Status  Scale  Charm           Channel     Rev  Address         Exposed  Message
+mysql-k8s   8.0.37-0ubuntu0.22.04.3  active      1  mysql-k8s       8.0/stable  180  10.152.183.146  no       
+
+Unit           Workload  Agent  Address    Ports  Message
+mysql-k8s/0*   active    idle   10.1.0.11         Primary
+```
+
+>**Next steps:** Learn [how to scale your application](/t/9675), [relate with other applications](/t/9671) and [more](/t/9677)!
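+
+For example, a minimal first step from the scaling guide above is to scale the application deployed in this guide to three units:
+```shell
+# scale Charmed MySQL K8s to three units (adjust the count as needed)
+juju scale-application mysql-k8s 3
+```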
\ No newline at end of file
diff --git a/docs/how-to/h-deploy-eks.md b/docs/how-to/h-deploy-eks.md
index b388734b2..bae228a8d 100644
--- a/docs/how-to/h-deploy-eks.md
+++ b/docs/how-to/h-deploy-eks.md
@@ -1,21 +1,39 @@
-# Deploy Charmed MySQL K8s on EKS
+[note]
+**Note**: All commands are written for `juju >= v.3.0`
 
-[Amazon Elastic Kubernetes Service](https://aws.amazon.com/eks/) (EKS) - one of the most popular and fully automated Kubernetes service from Amazon. To access EKS WEB interface, open the Console https://console.aws.amazon.com/eks/home
+If you are using an earlier version, check the [Juju 3.0 Release Notes](https://juju.is/docs/juju/roadmap#heading--juju-3-0-0---22-oct-2022).
+[/note]
 
-# Install EKS and Juju tooling
+# How to deploy on EKS
+
+[Amazon Elastic Kubernetes Service](https://aws.amazon.com/eks/) (EKS) is a popular, fully automated Kubernetes service. To access the EKS Web interface, go to [console.aws.amazon.com/eks/home](https://console.aws.amazon.com/eks/home).
 
-Install:
+## Summary
+* [Install EKS and Juju tooling](#install-eks-and-juju-tooling)
+* [Create a new EKS cluster](#create-a-new-eks-cluster)
+* [Bootstrap Juju on EKS](#bootstrap-juju-on-eks)
+* [Deploy charms](#deploy-charms)
+* [Display deployment information](#display-deployment-information)
+* [Clean up](#clean-up)
 
-* [Juju](https://juju.is/docs/juju/install-juju) (an open source orchestration engine from Canonical)
-* [kubectl](https://kubernetes.io/docs/tasks/tools/) (Kubernetes command line tool)
-* [eksctl](https://eksctl.io/installation/) (the official CLI for Amazon EKS)
-* [AWC CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) (Amazon Web Services Command Line Interface)
+---
 
-Make sure all works:
+## Install EKS and Juju tooling
 
+Install [Juju](https://juju.is/docs/juju/install-juju) and the [`kubectl` CLI tools](https://kubernetes.io/docs/tasks/tools/) via snap:
 ```shell
+sudo snap install juju
+sudo snap install kubectl --classic
+```
+Follow the installation guides for:
+* [eksctl](https://eksctl.io/installation/) - the Amazon EKS CLI
+* [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) - the Amazon Web Services CLI
+
+To check that they are all correctly installed, run the commands below (sample outputs included):
+
+```console
 > juju version
-2.9.45-ubuntu-amd64
+3.1.7-ubuntu-amd64
 
 > kubectl version --client
 Client Version: v1.28.2
@@ -29,7 +47,9 @@ kubectl version: v1.28.2
 aws-cli/2.13.25 Python/3.11.5 Linux/6.2.0-33-generic exe/x86_64.ubuntu.23 prompt/off
 ```
 
-Create IAM account (or legacy Access keys) and login to AWS:
+### Authenticate
+
+Create an IAM account (or use legacy access keys) and login to AWS:
 ```shell
 > aws configure
 AWS Access Key ID [None]: SECRET_ACCESS_KEY_ID
@@ -45,17 +65,19 @@ Default output format [None]:
 }
 ```
 
-# Bootstrap Kubernetes cluster (EKS)
+## Create a new EKS cluster
 
-Export the deployment name to be used further:
+Export the deployment name for further use:
 ```shell
 export JUJU_NAME=eks-$USER-$RANDOM
 ```
 
-Feel free to fine-tune the location (`eu-west-3`) and/or K8s version (`1.27`):
+The following examples in this guide use the location `eu-west-3` and K8s `v1.27` - feel free to change these for your own deployment.
+
+Sample `cluster.yaml`:
 
 ```shell
-cat <<-EOF > cluster.yaml
+> cat <<-EOF > cluster.yaml
 ---
 apiVersion: eksctl.io/v1alpha5
 kind: ClusterConfig
@@ -91,9 +113,12 @@ nodeGroups:
         spotInstancePools: 2
 EOF
 ```
-Bootstrap EKS cluster:
+Bootstrap the EKS cluster with the following command:
+```shell
+eksctl create cluster -f cluster.yaml
+```
+Sample output:
 ```shell
-> eksctl create cluster -f cluster.yaml
 ...
 2023-10-12 11:13:58 [ℹ]  using region eu-west-3
 2023-10-12 11:13:59 [ℹ]  using Kubernetes version 1.27
@@ -101,34 +126,49 @@ Bootstrap EKS cluster:
 2023-10-12 11:40:00 [✔]  EKS cluster "eks-taurus-27506" in "eu-west-3" region is ready
 ```
 
-# Bootstrap Juju on EKS
-> **TIP**: Juju 3.x https://bugs.launchpad.net/juju/+bug/2007848
 
-```shell
-# Add Juju K8s Clous
-> juju add-k8s $JUJU_NAME
+## Bootstrap Juju on EKS
+[note type="caution"]
+There is a [known bug](https://bugs.launchpad.net/juju/+bug/2007848) for `juju v.3.1` users.
+[/note]
 
-# Bootstrap Juju Controller
-> juju bootstrap $JUJU_NAME
+Add the Juju K8s cloud:
+```shell
+juju add-k8s $JUJU_NAME
+```
+Bootstrap Juju controller:
+```shell
+juju bootstrap $JUJU_NAME
+```
+Create a new Juju model (K8s namespace):
+```shell
+juju add-model welcome
+```
+[Optional] Increase the logging level to DEBUG if you are troubleshooting charms:
+```shell
+juju model-config logging-config='<root>=INFO;unit=DEBUG'
+```
 
-# Create a new Juju model (K8s namespace)
-> juju add-model welcome
+## Deploy charms
 
-# (optional) Increase DEBUG level if you are troubleshooting charms 
-> juju model-config logging-config='<root>=INFO;unit=DEBUG'
+The following commands deploy and integrate the [MySQL K8s Bundle](https://charmhub.io/mysql-k8s-bundle) and [MySQL Test App](https://charmhub.io/mysql-test-app):
+```shell
+juju deploy mysql-k8s-bundle --channel 8.0/edge --trust
+juju deploy mysql-test-app
+juju integrate mysql-test-app mysql-k8s:database
 ```
 
-# Deploy Charms
+To track the status of the deployment, run
 ```shell
-> juju deploy mysql-k8s-bundle --channel 8.0/edge --trust
-> juju deploy mysql-test-app
-> juju relate mysql-test-app mysql-k8s:database
-> juju status --watch 1s
+juju status --watch 1s
 ```
 
-# List
-```shell
+## Display deployment information
 
+Display information about the current deployments with the following commands:
+
+```shell
 > juju controllers
 Controller         Model    User   Access     Cloud/Region      Models  Nodes  HA  Version
 eks-taurus-27506*  welcome  admin  superuser  eks-taurus-27506       2      1   -  2.9.45  
@@ -148,21 +188,29 @@ ip-192-168-51-96.eu-west-3.compute.internal    Ready    <none>   19m   v1.27.5-e
 ip-192-168-78-167.eu-west-3.compute.internal   Ready    <none>   19m   v1.27.5-eks-43840fb
 ```
 
-# Cleanup
-**Note**: always clean no-longer necessary EKS resources as they all could be costly!!!
+## Clean up
+[note type="caution"]
+Always clean EKS resources that are no longer necessary - they could be costly!
+[/note]
 
-To [clean](https://docs.aws.amazon.com/eks/latest/userguide/delete-cluster.html) EKS cluster, resources and juju cloud, use:
-```shell
-> juju destroy-controller $JUJU_NAME --yes --destroy-all-models --destroy-storage --force
-> juju remove-cloud $JUJU_NAME
+To clean the EKS cluster, resources and juju cloud, run the following commands:
 
-> kubectl get svc --all-namespaces
-> kubectl delete svc <service-name> # Delete any services that have an associated EXTERNAL-IP value (load balancers, ...)
+```shell
+juju destroy-controller $JUJU_NAME --yes --destroy-all-models --destroy-storage --force
+juju remove-cloud $JUJU_NAME
+```
 
-> eksctl get cluster -A
-> eksctl delete cluster <cluster_name> --region eu-west-3 --force --disable-nodegroup-eviction
+List all services and then delete those that have an associated EXTERNAL-IP value (e.g. load balancers):
+```shell
+kubectl get svc --all-namespaces
+kubectl delete svc <service-name> 
+```
+Next, delete the EKS cluster (source: [Deleting an Amazon EKS cluster](https://docs.aws.amazon.com/eks/latest/userguide/delete-cluster.html)):
+```shell
+eksctl get cluster -A
+eksctl delete cluster <cluster_name> --region eu-west-3 --force --disable-nodegroup-eviction
 ```
-Remove AWS CLI user credentials (to avoid forgetting and leaking):
+Finally, remove AWS CLI user credentials (to avoid forgetting and leaking):
 ```shell
-> rm -f ~/.aws/credentials
+rm -f ~/.aws/credentials
 ```
\ No newline at end of file
diff --git a/docs/how-to/h-deploy-gke.md b/docs/how-to/h-deploy-gke.md
index e77ae38f7..898395a86 100644
--- a/docs/how-to/h-deploy-gke.md
+++ b/docs/how-to/h-deploy-gke.md
@@ -1,80 +1,117 @@
-# Deploy Charmed MySQL K8s on GKE
+# How to deploy on GKE
 
-Google Kubernetes Engine (GKE) - the most scalable and fully automated Kubernetes service from Google. To access GKE WEB interface, open https://console.cloud.google.com/compute/
+[Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine?hl=en) (GKE) is a highly scalable and fully automated Kubernetes service. To access the GKE Web interface, go to [console.cloud.google.com/compute](https://console.cloud.google.com/compute).
 
-# Install GKE and Juju tooling
-Install juju and gcloud tool using SNAP:
-```shell
-> sudo snap install juju --classic
-> sudo snap install kubectl --classic
-> sudo snap install google-cloud-cli --classic
-```
-Login to Google Account
-```shell
-> gcloud auth login
+This guide will walk you through setting up a cluster and deploying MySQL K8s on GKE.
+
+## Summary
+* [Install GKE and Juju tooling](#install-gke-and-juju-tooling)
+  * [Authenticate](#authenticate)
+  * [Configure project ID](#configure-project-id)
+  * [Install additional gcloud CLI tool](#install-additional-gcloud-cli-tool)
+* [Create a new GKE cluster](#create-a-new-gke-cluster)
+* [Bootstrap Juju on GKE](#bootstrap-juju-on-gke)
+* [Deploy charms](#deploy-charms)
+* [List clusters and clouds](#list-clusters-and-clouds)
+* [Clean up](#clean-up)
 
-Go to the following link in your browser:
+---
 
-    https://accounts.google.com/o/oauth2/...
+## Install GKE and Juju tooling
 
-Enter authorization code: 4/0Absad3s...
+Install `juju`, `kubectl`, and Google Cloud command-line tools using snap:
 
-You are now logged in as [your_account@gmail.com].
+```shell
+sudo snap install juju
+sudo snap install kubectl --classic
+sudo snap install google-cloud-cli --classic
 ```
 
-Now you need to associate this installation with GCloud project, using "Project ID" from [resource-management](https://console.cloud.google.com/cloud-resource-manager):
+### Authenticate
+Log in to a Google account with the command
 ```shell
-> gcloud config set project <PROJECT_ID>
+gcloud auth login
+```
+This will open a page in your browser starting with `https://accounts.google.com/o/oauth2/...`, where you can complete the login.
 
-Updated property [core/project].
+If successful, the command prompt will show:
+>```shell
+>You are now logged in as [<account>@gmail.com].
+>```
+
+### Configure project ID
+Next, you must associate this installation with your GCloud project, using the "Project ID" from [resource-management](https://console.cloud.google.com/cloud-resource-manager):
+```shell
+gcloud config set project <PROJECT_ID>
 ```
+Sample output:
+>```shell
+>Updated property [core/project].
+>```
+
+### Install additional gcloud CLI tool
+
+As a last step, install the Debian package `google-cloud-sdk-gke-gcloud-auth-plugin` using this Google guide: [Install the gcloud CLI](https://cloud.google.com/sdk/docs/install#deb).
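+
+For example, assuming the Google Cloud apt repository is already configured on your machine (as described in the linked guide), the plugin can be installed with:
+```shell
+# requires the Google Cloud apt repository to be set up first
+sudo apt install google-cloud-sdk-gke-gcloud-auth-plugin
+```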
+
+## Create a new GKE cluster
 
-As a last step, install the Debian package `google-cloud-sdk-gke-gcloud-auth-plugin` using [Google manual](https://cloud.google.com/sdk/docs/install#deb).
+This guide will use high-availability zone `europe-west1` and compute engine type `n1-standard-4` in command examples. Make sure to choose the zone and resources that best suit your use-case.
 
-# Create new GKE cluster
-The following command will start three [compute engines](https://cloud.google.com/compute/) on Google Cloud (imagine them as three physical servers in clouds) and deploy K8s cluster there.  To simplify the manual, the following command will use high-availability zone `europe-west1` and compute engine type `n1-standard-4` (which can be adopted for your needs if necessary):
+The following command will start three [compute engines](https://cloud.google.com/compute/) on Google Cloud and deploy a K8s cluster there (you can imagine the compute engines as three physical servers in the cloud):
 ```shell
 gcloud container clusters create --zone europe-west1-c $USER-$RANDOM --cluster-version 1.25 --machine-type n1-standard-4 --num-nodes=3 --no-enable-autoupgrade
 ```
 
-Now, let's assign our account as an admin of newly created K8s:
+Next, assign your account as an admin of the newly created K8s cluster:
 ```shell
 kubectl create clusterrolebinding cluster-admin-binding-$USER --clusterrole=cluster-admin --user=$(gcloud config get-value core/account)
 ```
 
-# Bootstrap Juju on GKE
-Bootstrap new juju controller on fresh cluster, copying commands one-by-one:
+## Bootstrap Juju on GKE
+
+> Note: [This known issue](https://bugs.launchpad.net/juju/+bug/2007575) means you must call the Juju binary directly (outside of snap confinement) when adding K8s credentials to Juju, as in the first command below.
+
 ```shell
-> juju add-k8s gke-jun-9 --storage=standard --client
-> juju bootstrap gke-jun-9
-> juju add-model welcome-model
+/snap/juju/current/bin/juju add-k8s gke-jun-9 --storage=standard --client
+juju bootstrap gke-jun-9
+juju add-model welcome-model
 ```
-At this stage Juju is ready to use GKE, check the list of currently running K8s pods and juju status:
+At this stage, Juju is ready to use GKE. Check the list of currently running K8s pods with:
 ```shell
-> kubectl get pods -n welcome-model
-> juju status
+kubectl get pods -n welcome-model
 ```
 
-# Deploy Charms
+## Deploy charms
+
+The following commands deploy and integrate the [MySQL K8s Bundle](https://charmhub.io/mysql-k8s-bundle) and [MySQL Test App](https://charmhub.io/mysql-test-app):
 ```shell
-> juju deploy mysql-k8s-bundle --channel 8.0/edge --trust
-> juju deploy mysql-test-app
-> juju relate mysql-test-app mysql-k8s:database
-> juju status --watch 1s
+juju deploy mysql-k8s-bundle --channel 8.0/edge --trust
+juju deploy mysql-test-app
+juju integrate mysql-test-app mysql-k8s:database
 ```
 
-# List
-To list GKE clusters and juju clouds, use:
+To track the status of the deployment, run
 ```shell
-> gcloud container clusters list
+juju status --watch 1s
+```
 
+## List clusters and clouds
+To list GKE clusters and juju clouds, run:
+```shell
+gcloud container clusters list
+```
+Sample output:
+```shell
 NAME          LOCATION        MASTER_VERSION   MASTER_IP      MACHINE_TYPE   NODE_VERSION     NUM_NODES  STATUS
 mykola-18187  europe-west1-c  1.25.9-gke.2300  31.210.22.127  n1-standard-4  1.25.9-gke.2300  3          RUNNING
 taurus-7485   europe-west1-c  1.25.9-gke.2300  142.142.21.25  n1-standard-4  1.25.9-gke.2300  3          RUNNING
 ```
-Juju can handle multiply clouds simultaneously. The list of clouds with registered credentials on Juju:
+Juju can handle multiple clouds simultaneously. To see a list of clouds with registered credentials on Juju, run:
+```shell
+juju clouds
+```
+Sample output:
 ```shell
-> juju clouds
 Clouds available on the controller:
 Cloud      Regions  Default       Type
 gke-jun-9  1        europe-west1  k8s  
@@ -86,21 +123,24 @@ localhost       1        localhost     lxd   1            built-in  LXD Containe
 microk8s        0                      k8s   1            built-in  A local Kubernetes context
 ```
 
-# Cleanup
-**Note**: always clean no-longer necessary GKE resources as they all could be costly!!!
-
+## Clean up
+[note type="caution"]
+**Warning**: Always clean GKE resources that are no longer necessary - they could be costly!
+[/note]
 To clean GKE clusters and juju clouds, use:
 ```shell
-> juju destroy-controller gke-jun-9-europe-west1 --yes --destroy-all-models --destroy-storage --force
-> juju remove-cloud gke-jun-9
+juju destroy-controller gke-jun-9-europe-west1 --yes --destroy-all-models --destroy-storage --force
+juju remove-cloud gke-jun-9
 
-> gcloud container clusters list
-> gcloud container clusters delete <cluster_name> --zone europe-west1-c
+gcloud container clusters list
+gcloud container clusters delete <cluster_name> --zone europe-west1-c
 ```
 Revoke the GCloud user credentials:
 ```shell
-> gcloud auth revoke your_account@gmail.com
-
-Revoked credentials:
- - your_account@gmail.com
-```
\ No newline at end of file
+gcloud auth revoke your_account@gmail.com
+```
+You should see a confirmation output:
+>```shell
+>Revoked credentials:
+> - your_account@gmail.com
+>```
\ No newline at end of file
diff --git a/docs/how-to/h-deploy-microk8s.md b/docs/how-to/h-deploy-microk8s.md
index fb65d2d6e..e8c6c971b 100644
--- a/docs/how-to/h-deploy-microk8s.md
+++ b/docs/how-to/h-deploy-microk8s.md
@@ -1,30 +1,35 @@
-# Deploy Charmed MySQL K8s
+# Deploy on MicroK8s
 
-Please follow the [Tutorial](/t/9677) to deploy the charm on MicroK8s.
+This guide assumes you have a running Juju and MicroK8s environment. 
 
-Short story for your Ubuntu 22.04 LTS:
+For a detailed walkthrough of setting up an environment and deploying the charm on MicroK8s, refer to the [Tutorial](/t/9677).
+
+--- 
+
+[Bootstrap](https://juju.is/docs/juju/juju-bootstrap) a juju controller and create a [model](https://juju.is/docs/juju/juju-add-model) if you haven't already:
 ```shell
-sudo snap install multipass
-multipass launch --cpus 4 --memory 8G --disk 30G --name my-vm charm-dev # tune CPU/RAM/HDD accordingly to your needs
-multipass shell my-vm
+juju bootstrap microk8s <controller name>
+juju add-model <model name>
+```
 
-juju add-model mysql
-juju deploy mysql-k8s --channel 8.0/stable --trust # --config profile=testing
-juju status --watch 1s
+Deploy MySQL:
+```shell
+juju deploy mysql-k8s --channel 8.0/stable --trust
 ```
+> :warning: The `--trust` flag is necessary to create some K8s resources.
 
-The expected result:
+> See the [`juju deploy` documentation](https://juju.is/docs/juju/juju-deploy) for all available options at deploy time.
+> 
+> See the [Configurations tab](https://charmhub.io/mysql-k8s/configurations) for specific MySQL K8s parameters.
+
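+For example, a minimal sketch of setting a charm config option at deploy time (here the `profile` option with the `testing` value, if that profile fits your use case):
+```shell
+# same deploy command as above, with a config option set at deploy time
+juju deploy mysql-k8s --channel 8.0/stable --trust --config profile=testing
+```
+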
+Sample output of `juju status --watch 1s`:
 ```shell
 Model   Controller  Cloud/Region        Version  SLA          Timestamp
 mysql   overlord    microk8s/localhost  2.9.38   unsupported  22:48:57+01:00
 
 App        Version    Status  Scale  Charm      Channel     Rev  Address         Exposed  Message
-mysql-k8s  8.0.31     active      3  mysql-k8s  8.0/stable  75   10.152.183.234  no       
+mysql-k8s  8.0.31     active      1  mysql-k8s  8.0/stable  75   10.152.183.234  no       
 
 Unit          Workload  Agent  Address      Ports  Message
 mysql-k8s/0*  active    idle   10.1.84.74          Primary
-mysql-k8s/1   active    idle   10.1.84.127
-mysql-k8s/2   active    idle   10.1.84.73
-```
-
-Check the [Testing](/t/11772) reference to test your deployment.
\ No newline at end of file
+```
\ No newline at end of file
diff --git a/docs/how-to/h-deploy-multi-az.md b/docs/how-to/h-deploy-multi-az.md
new file mode 100644
index 000000000..1675c3f8d
--- /dev/null
+++ b/docs/how-to/h-deploy-multi-az.md
@@ -0,0 +1,297 @@
+# Deploy on multiple availability zones (AZ) 
+
+During deployment to Kubernetes, it is important to spread all the
+database copies (K8s pods/Juju units) across different hardware servers,
+or even better, across different cloud [availability zones](https://en.wikipedia.org/wiki/Availability_zone) (AZ). This guarantees that no service-critical components are shared across the DB cluster (avoiding the "all eggs in one basket" scenario).
+
+This guide will take you through deploying a MySQL cluster on GKE using 3 availability zones. All pods will be set up to sit in their dedicated zones only, which effectively guarantees that database copies survive the loss of any single AZ.
+
+[note]
+This documentation assumes that your cloud supports and provides the availability zone concept via the K8s label `topology.kubernetes.io/zone`. This is enabled by default on EKS/GKE/AKS and supported by MicroK8s/Charmed Kubernetes.
+
+See the [Additional resources](#additional-resources) section for more details about AZ on specific clouds.
+[/note]
+
+## Summary
+* [Set up Kubernetes on Google Cloud](#set-up-kubernetes-on-google-cloud)
+* [Deploy MySQL with anti-affinity rules](#deploy-mysql-with-anti-affinity-rules)
+  * [Simulation: A node gets drained](#simulation-a-node-gets-drained)
+  * [Simulation: All nodes get cordoned](#simulation-all-nodes-get-cordoned)
+* [Additional resources](#additional-resources)
+---
+
+## Set up Kubernetes on Google Cloud
+
+Let's deploy the [MySQL Cluster on GKE (us-east4)](/t/10875) using all 3 zones there (`us-east4-a`, `us-east4-b`, `us-east4-c`) and make sure all pods always sit in their dedicated zones only.
+
+[note type="caution"]
+**Warning**: Creating the following GKE resources may cost you money - be sure to monitor your GCloud costs.
+[/note]
+
+Log into Google Cloud and bootstrap nine nodes of managed K8s on GCloud:
+```shell
+gcloud auth login
+gcloud container clusters create --zone us-east4 $USER-$RANDOM --cluster-version 1.29.8 --machine-type n1-standard-4 --num-nodes=2 --total-max-nodes=2 --no-enable-autoupgrade
+
+kubectl config get-contexts
+/snap/juju/current/bin/juju add-k8s gke --client --context-name=$(kubectl config get-contexts | grep gke | awk '{print $2}')
+juju bootstrap gke gke
+juju add-model mymodel
+```
+
+The nodes are spread equally across availability zones (three K8s nodes in each AZ). You can check this with the following `kubectl` commands:
+```shell
+kubectl get nodes
+```
+```shell
+> NAME                                          STATUS   ROLES    AGE     VERSION
+> gke-default-pool-5c034921-fmtj   Ready    <none>   3h56m   v1.29.8-gke.1278000          
+> gke-default-pool-5c034921-t1sx   Ready    <none>   3h56m   v1.29.8-gke.1278000          
+> gke-default-pool-5c034921-zkzm   Ready    <none>   3h56m   v1.29.8-gke.1278000          
+> gke-default-pool-b33634ac-l0c9   Ready    <none>   3h56m   v1.29.8-gke.1278000          
+> gke-default-pool-b33634ac-phjx   Ready    <none>   3h56m   v1.29.8-gke.1278000          
+> gke-default-pool-b33634ac-w2jv   Ready    <none>   3h56m   v1.29.8-gke.1278000          
+> gke-default-pool-d196956a-0zfc   Ready    <none>   3h56m   v1.29.8-gke.1278000          
+> gke-default-pool-d196956a-591j   Ready    <none>   3h56m   v1.29.8-gke.1278000
+> gke-default-pool-d196956a-zm6h   Ready    <none>   3h56m   v1.29.8-gke.1278000
+```
+
+```shell
+kubectl get nodes --show-labels | awk 'NR == 1 {next} {print $1,$2,$6}' | awk -F "[ /]" '{print $1" \t"$NF" \t"$2}'
+```
+```shell
+gke-default-pool-5c034921-fmtj     zone=us-east4-b         Ready
+gke-default-pool-5c034921-t1sx     zone=us-east4-b         Ready
+gke-default-pool-5c034921-zkzm     zone=us-east4-b         Ready
+gke-default-pool-b33634ac-l0c9     zone=us-east4-c         Ready
+gke-default-pool-b33634ac-phjx     zone=us-east4-c         Ready
+gke-default-pool-b33634ac-w2jv     zone=us-east4-c         Ready
+gke-default-pool-d196956a-0zfc     zone=us-east4-a         Ready
+gke-default-pool-d196956a-591j     zone=us-east4-a         Ready
+gke-default-pool-d196956a-zm6h     zone=us-east4-a         Ready
+```
+## Deploy MySQL with anti-affinity rules
+
+Juju provides support for affinity/anti-affinity rules using **constraints**. Read more in this [forum post](/t/4091).
+
+The command below demonstrates how to deploy Charmed MySQL K8s with Juju constraints that create a pod anti-affinity rule:
+
+```shell
+export MYAPP="mydatabase" ; \
+juju deploy mysql-k8s ${MYAPP} --trust -n 3 \
+ --constraints="tags=anti-pod.app.kubernetes.io/name=${MYAPP},anti-pod.topology-key=topology.kubernetes.io/zone"
+```
+
+This will effectively create a K8s pod anti-affinity rule. Check with the following command:
+```shell
+kubectl get pod mydatabase-0 -o yaml -n mymodel
+```
+```yaml
+...
+spec:
+  affinity:
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+      - labelSelector:
+          matchExpressions:
+          - key: app.kubernetes.io/name
+            operator: In
+            values:
+            - mydatabase
+        topologyKey: topology.kubernetes.io/zone
+...
+```
+This example instructs the K8s scheduler to run the K8s pods that match the Juju application name label `app.kubernetes.io/name: mydatabase` on K8s nodes that have different values of the `topology.kubernetes.io/zone` label. In other words, we asked to run all MySQL instances in different availability zones, which is the common recommendation for any database deployment.
+
+The selector and affinity/anti-affinity rules are extremely flexible and often require cloud-specific fine-tuning. The rule of thumb is to always check which K8s node labels are actually available in your environment and choose the appropriate one for your infrastructure.
+
+The example below shows all available labels on GKE. You may want to run database instances on different virtual machines (`kubernetes.io/hostname` label) or in different availability zones (`topology.kubernetes.io/zone` label):
+```shell
+kubectl get node gke-default-pool-b33634ac-l0c9 -o yaml
+```
+```yaml
+...
+  labels:
+    beta.kubernetes.io/arch: amd64
+    beta.kubernetes.io/instance-type: n1-standard-4
+    beta.kubernetes.io/os: linux
+    cloud.google.com/gke-boot-disk: pd-balanced
+    cloud.google.com/gke-container-runtime: containerd
+    cloud.google.com/gke-cpu-scaling-level: "4"
+    cloud.google.com/gke-logging-variant: DEFAULT
+    cloud.google.com/gke-max-pods-per-node: "110"
+    cloud.google.com/gke-nodepool: default-pool
+    cloud.google.com/gke-os-distribution: cos
+    cloud.google.com/gke-provisioning: standard
+    cloud.google.com/gke-stack-type: IPV4
+    cloud.google.com/machine-family: n1
+    cloud.google.com/private-node: "false"
+    failure-domain.beta.kubernetes.io/region: us-east4
+    failure-domain.beta.kubernetes.io/zone: us-east4-c
+    kubernetes.io/arch: amd64
+    kubernetes.io/hostname: gke-default-pool-b33634ac-l0c9
+    kubernetes.io/os: linux
+    node.kubernetes.io/instance-type: n1-standard-4
+    topology.gke.io/zone: us-east4-c
+    topology.kubernetes.io/region: us-east4
+    topology.kubernetes.io/zone: us-east4-c
+...
+``` 
+
+After a successful deployment, `juju status` will show an active application:
+```shell
+Model    Controller  Cloud/Region  Version  SLA          Timestamp
+mymodel  gke         gke/us-east4  3.5.3    unsupported  22:02:32+02:00
+
+App         Version  Status  Scale  Charm      Channel     Rev  Address         Exposed  Message
+mydatabase  8.0.36   active      3  mysql-k8s  8.0/stable  180  34.118.235.169  no       
+
+Unit           Workload  Agent  Address    Ports  Message
+mydatabase/0   active    idle   10.80.5.9         
+mydatabase/1*  active    idle   10.80.6.7         Primary
+mydatabase/2   active    idle   10.80.1.6         
+```
+
+and each pod will sit in a separate AZ out of the box:
+```shell
+kubectl get pods -n mymodel -o wide
+```
+```none
+> NAME             READY   STATUS    RESTARTS   AGE   IP          NODE
+> mydatabase-0     2/2     Running   0          16m   10.80.5.7   gke-default-pool-b33634ac-l0c9  ... # us-east4-c
+> mydatabase-1     2/2     Running   0          24m   10.80.6.7   gke-default-pool-d196956a-0zfc  ... # us-east4-a
+> mydatabase-2     2/2     Running   0          24m   10.80.0.3   gke-default-pool-5c034921-zkzm  ... # us-east4-b
+> ...
+```
+### Simulation: A node gets drained
+Let's drain a node and make sure the rescheduled pod stays in the same AZ:
+```shell
+kubectl drain  --ignore-daemonsets --delete-emptydir-data  gke-default-pool-b33634ac-l0c9
+```
+```shell
+> node/gke-default-pool-b33634ac-l0c9 cordoned                                  
+> ...
+> evicting pod mymodel/mydatabase-0                       
+> ...     
+> pod/mydatabase-0 evicted                                
+> node/gke-default-pool-b33634ac-l0c9 drained
+```
+
+As we can see, the newly rescheduled pod landed on a new node in the same AZ, `us-east4-c`:
+```shell
+kubectl get pods -n mymodel -o wide
+
+> NAME             READY   STATUS    RESTARTS   AGE   IP          NODE                                       
+> mydatabase-0     2/2     Running   0           1m   10.80.5.7   gke-default-pool-b33634ac-phjx  ... # us-east4-c
+> mydatabase-1     2/2     Running   0          35m   10.80.6.7   gke-default-pool-d196956a-0zfc  ... # us-east4-a
+> mydatabase-2     2/2     Running   0          35m   10.80.0.3   gke-default-pool-5c034921-zkzm  ... # us-east4-b
+> ...
+```
+
+### Simulation: All nodes get cordoned
+
+If we lose (cordon) all nodes in an AZ, the pod will stay pending, as the K8s scheduler cannot find a suitable node.
+Let's simulate this:
+```shell
+kubectl drain  --ignore-daemonsets --delete-emptydir-data  gke-default-pool-b33634ac-phjx
+kubectl drain  --ignore-daemonsets --delete-emptydir-data  gke-default-pool-b33634ac-w2jv
+
+kubectl get nodes --show-labels | awk 'NR == 1 {next} {print $1,$2,$6}' | awk -F "[ /]" '{print $1" \t"$NF" \t"$2}'
+> gke-default-pool-5c034921-fmtj     zone=us-east4-b         Ready
+> gke-default-pool-5c034921-t1sx     zone=us-east4-b         Ready
+> gke-default-pool-5c034921-zkzm     zone=us-east4-b         Ready
+> gke-default-pool-b33634ac-l0c9     zone=us-east4-c         Ready,SchedulingDisabled
+> gke-default-pool-b33634ac-phjx     zone=us-east4-c         Ready,SchedulingDisabled
+> gke-default-pool-b33634ac-w2jv     zone=us-east4-c         Ready,SchedulingDisabled
+> gke-default-pool-d196956a-0zfc     zone=us-east4-a         Ready
+> gke-default-pool-d196956a-591j     zone=us-east4-a         Ready
+> gke-default-pool-d196956a-zm6h     zone=us-east4-a         Ready
+```
+
+```shell
+kubectl get pods -n mymodel
+
+> NAME                       READY   STATUS    RESTARTS   AGE
+> mydatabase-0               0/2     Pending   0          2m9s # Pending!!!
+> mydatabase-1               2/2     Running   0          96m
+> mydatabase-2               2/2     Running   0          51m
+
+kubectl describe pod mydatabase-0 -n mymodel  | tail -10
+
+> Events:
+>   Type     Reason             Age    From                Message
+>   ----     ------             ----   ----                -------
+>   Warning  FailedScheduling   3m32s  default-scheduler   0/9 nodes are available: 3 node(s) were unschedulable, 6 node(s) had volume node affinity conflict. preemption: 0/9 nodes are available: 9 Preemption is not helpful for scheduling.
+>   Warning  FailedScheduling   3m30s  default-scheduler   0/9 nodes are available: 3 node(s) were unschedulable, 6 node(s) had volume node affinity conflict. preemption: 0/9 nodes are available: 9 Preemption is not helpful for scheduling.
+>   Warning  FailedScheduling   3m27s  default-scheduler   0/9 nodes are available: 3 node(s) were unschedulable, 6 node(s) had volume node affinity conflict. preemption: 0/9 nodes are available: 9 Preemption is not helpful for scheduling.
+>   Normal   NotTriggerScaleUp  3m33s  cluster-autoscaler  pod didn't trigger scale-up:
+```
+
+The `juju status` output will indicate this problem as well:
+```shell
+Model    Controller  Cloud/Region  Version  SLA          Timestamp
+mymodel  gke         gke/us-east4  3.5.3    unsupported  22:31:00+02:00
+
+App         Version  Status  Scale  Charm      Channel     Rev  Address         Exposed  Message
+mydatabase  8.0.36   active      3  mysql-k8s  8.0/stable  180  34.118.235.169  no       installing agent
+
+Unit           Workload  Agent  Address    Ports  Message
+mydatabase/0   unknown   lost                     agent lost, see 'juju show-status-log mydatabase/0'
+mydatabase/1*  active    idle   10.80.6.7         Primary
+mydatabase/2   active    idle   10.80.1.6         
+```
+
+Let's uncordon all nodes to keep the house clean:
+```shell
+kubectl uncordon gke-default-pool-b33634ac-l0c9
+kubectl uncordon gke-default-pool-b33634ac-phjx
+kubectl uncordon gke-default-pool-b33634ac-w2jv
+```
+
+The K8s scheduler will return the pod to AZ `us-east4-c`, and Juju will automatically rejoin the database unit to the cluster:
+```shell
+Model    Controller  Cloud/Region  Version  SLA          Timestamp
+mymodel  gke         gke/us-east4  3.5.3    unsupported  22:38:23+02:00
+
+App         Version  Status  Scale  Charm      Channel     Rev  Address         Exposed  Message
+mydatabase  8.0.36   active      3  mysql-k8s  8.0/stable  180  34.118.235.169  no   
+
+Unit           Workload  Agent  Address     Ports  Message
+mydatabase/0   active    idle   10.80.5.10         
+mydatabase/1*  active    idle   10.80.6.7          Primary
+mydatabase/2   active    idle   10.80.1.6   
+```
+
+At this point, we can relax and enjoy the protection provided by cloud availability zones!
+
+To survive a complete cloud outage, we recommend setting up [cluster-cluster asynchronous replication](/t/13458).
+
+
+## Remove GKE setup
+
+[note type="caution"]
+**Warning**: Do not forget to remove your GKE test setup - it can be costly!
+[/note]
+
+```shell
+gcloud container clusters list
+gcloud container clusters delete <gke_name> --location <gke_location>
+
+juju unregister gke --no-prompt
+juju remove-cloud gke
+```
+
+## Additional resources
+Below you will find information about AZs on specific clouds, as well as more about node selection on Kubernetes.
+
+### Cloud-specific details about multiple availability zones
+ * [General Kubernetes](https://kubernetes.io/docs/setup/best-practices/multiple-zones/)
+ * [AWS/EKS](https://aws.amazon.com/rds/features/multi-az/)
+ * [GCloud/GKE](https://cloud.google.com/kubernetes-engine/multi-cloud/docs/azure/how-to/create-cluster)
+ * [Azure/AKS](https://learn.microsoft.com/en-us/azure/aks/availability-zones)
+
+### Kubernetes strategies to choose hardware nodes
+ * [Node selector](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/)
+ * [Affinity/anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity)
+ * [Taint and toleration](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)
+* ...
\ No newline at end of file
diff --git a/docs/how-to/h-development-integrate.md b/docs/how-to/h-development-integrate.md
new file mode 100644
index 000000000..a24752fed
--- /dev/null
+++ b/docs/how-to/h-development-integrate.md
@@ -0,0 +1,45 @@
+# How to integrate a database with your charm
+
+Charmed MySQL K8s can be integrated with any charmed application that supports its interfaces. This page provides some guidance and resources for charm developers to develop, integrate, and troubleshoot their charm so that it may connect with MySQL.
+
+## Summary
+* Check supported interfaces 
+* Integrate your charm with MySQL
+* Troubleshooting & testing
+* FAQ
+
+---
+
+## Check supported interfaces
+
+First, we recommend that you check [the supported interfaces](/t/10249) of the current charm. You have the option to use modern (preferred) or legacy interfaces. 
+
+Most existing charms currently use [ops-lib-pgsql](https://github.com/canonical/ops-lib-pgsql) interface (legacy). 
+> See also: [MySQL K8s legacy charm explanation](/t/11236)
+
+For new charms, **Canonical recommends using [data-platform-libs](https://github.com/canonical/data-platform-libs)**.
+
+## Integrate your charm with MySQL
+
+> See also: 
+> * [Juju documentation | Integration](https://juju.is/docs/juju/integration)
+> * [Juju documentation | Integrate your charm with PostgreSQL](https://juju.is/docs/sdk/integrate-your-charm-with-postgresql)
+
+Refer to [mysql-test-app](https://github.com/canonical/mysql-test-app) as a practical example of implementing data-platform-libs interfaces to integrate a charm with Charmed MySQL K8s.
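+
+As a minimal sketch (assuming a hypothetical charm named `my-charm` that already requires the `mysql_client` interface in its metadata), you would fetch the shared library into your charm source and then integrate the deployed applications:
+```shell
+# fetch the data_interfaces charm library into your charm source tree
+charmcraft fetch-lib charms.data_platform_libs.v0.data_interfaces
+# "my-charm" is a placeholder for your own deployed application
+juju integrate my-charm mysql-k8s:database
+```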
+
+## Troubleshooting and testing
+* To learn the basics of charm debugging, start with [Juju | How to debug a charm](https://juju.is/docs/sdk/debug-a-charm)
+* To troubleshoot Charmed MySQL, see the [Troubleshooting](/t/11886) page.
+* To test the charm, check the [Testing](/t/11772) reference
+
+## FAQ
+**Does the requirer need to set anything in relation data?**
+> It depends on the interface. Check the `mysql_client` [interface requirements](https://github.com/canonical/charm-relation-interfaces/blob/main/interfaces/mysql_client/v0/README.md).
+
+**Is there a charm library available, or does my charm need to compile the mysql relation data on its own?**
+> Yes, a library is available: [data-platform-libs](https://github.com/canonical/data-platform-libs).
+
+**How do I obtain the database url/uri?**
+>This feature is [planned](https://warthogs.atlassian.net/browse/DPE-2278) but currently missing.
+
+[Contact us](/t/11868) if you have any questions, issues and/or ideas!
\ No newline at end of file
diff --git a/docs/how-to/h-enable-alert-rules.md b/docs/how-to/h-enable-alert-rules.md
index 68e199f16..3ad390ba7 100644
--- a/docs/how-to/h-enable-alert-rules.md
+++ b/docs/how-to/h-enable-alert-rules.md
@@ -2,7 +2,7 @@
 
 This guide will show how to set up [Pushover](https://pushover.net/) to receive alert notifications from the COS Alert Manager with [Awesome Alert Rules](https://samber.github.io/awesome-prometheus-alerts/).
 
-Charmed MySQL K8s ships a pre-configured and pre-enabled [list of Awesome Alert Rules].
+Charmed MySQL K8s ships a pre-configured and pre-enabled [list of Awesome Alert Rules]. 
 
 <details><summary>Screenshot of alert rules in the Grafana web interface</summary>
 
@@ -74,4 +74,4 @@ Do you have questions? [Contact us]!
 [Contact us]: /t/11868
 [Charmed MySQL K8s operator]: /t/11869
 [COS Monitoring]: /t/9981
-[list of Awesome Alert Rules]: https://github.com/canonical/mysql-k8s-operator/tree/main/src/prometheus_alert_rules
\ No newline at end of file
+[list of Awesome Alert Rules]: /t/15838
\ No newline at end of file
diff --git a/docs/how-to/h-enable-monitoring.md b/docs/how-to/h-enable-monitoring.md
index e70367fe1..8e54f31f6 100644
--- a/docs/how-to/h-enable-monitoring.md
+++ b/docs/how-to/h-enable-monitoring.md
@@ -1,26 +1,39 @@
-# Enable monitoring
-> **:information_source: Hint**: Use [Juju 3](/t/5064). Otherwise replace `juju run ...` with `juju run-action --wait ...` and `juju integrate` with `juju relate` for Juju 2.9.
+# How to enable monitoring (COS)
 
-Enable monitoring requires that you:
+[note]
+**Note**: All commands are written for `juju >= v.3.0`
+
+For more information, check the [Juju 3.0 Release Notes](https://juju.is/docs/juju/roadmap#heading--juju-3-0-0---22-oct-2022).
+[/note]
+
+## Prerequisites
 * [Have a Charmed MySQL K8s deployed](/t/9677)
 * [Deploy `cos-lite` bundle in a Kubernetes environment](https://charmhub.io/topics/canonical-observability-stack/tutorials/install-microk8s)
 
-Switch to COS K8s environment and offer COS interfaces to be cross-model related with Charmed MySQL K8s model:
+## Offer interfaces via the COS controller
+Switch to the COS K8s environment and offer the COS interfaces to be cross-model integrated (related) with the Charmed MySQL K8s model.
+
+To switch to the Kubernetes controller for the COS model, run
 ```shell
-# Switch to Kubernetes controller, for the cos model.
 juju switch <k8s_cos_controller>:<cos_model_name>
-
+```
+To offer the COS interfaces, run
+```shell
 juju offer grafana:grafana-dashboard
 juju offer loki:logging
 juju offer prometheus:receive-remote-write
 ```
 
-Switch to Charmed MySQL K8s model, find offers and consume them:
+## Consume offers via the MySQL model
+Next, we will switch to the Charmed MySQL K8s model, find the offers, and consume them.
+
+We are currently on the Kubernetes controller for the COS model. To switch to the MySQL model, run
 ```shell
-# We are on the Kubernetes controller, for the cos model. Switch to mysql model
 juju switch <k8s_db_controller>:<mysql_model_name>
-
-juju find-offers <k8s_cos_controller>: # Do not miss ':' here!
+```
+To find offers, run the following command (make sure not to miss the ":" at the end!):
+```shell
+juju find-offers <k8s_cos_controller>:
 ```
 
 A similar output should appear, if `k8s` is the k8s controller name and `cos` the model where `cos-lite` has been deployed:
@@ -39,14 +52,20 @@ juju consume k8s:admin/cos.loki
 juju consume k8s:admin/cos.prometheus
 ```
 
-Now, deploy '[grafana-agent-k8s](https://charmhub.io/grafana-agent-k8s)' and integrate (relate) it with Charmed MySQL K8s, later integrate (relate) `grafana-agent-k8s` with consumed COS offers:
+## Deploy and integrate Grafana
+First, deploy [grafana-agent-k8s](https://charmhub.io/grafana-agent-k8s):
 ```shell
 juju deploy grafana-agent-k8s --trust
-
+```
+Then, integrate (relate) it with the consumed COS offers:
+```shell
 juju relate grafana-agent-k8s grafana
 juju relate grafana-agent-k8s loki
 juju relate grafana-agent-k8s prometheus
+```
 
+Finally, integrate (relate) `grafana-agent-k8s` with Charmed MySQL K8s:
+```shell
 juju relate grafana-agent-k8s mysql-k8s:grafana-dashboard
 juju relate grafana-agent-k8s mysql-k8s:logging
 juju relate grafana-agent-k8s mysql-k8s:metrics-endpoint
@@ -54,6 +73,7 @@ juju relate grafana-agent-k8s mysql-k8s:metrics-endpoint
 
 After this is complete, Grafana will show the new dashboards: `MySQL Exporter` and allows access for Charmed MySQL logs on Loki.
 
+### Sample outputs
 The example of `juju status` on Charmed MySQL K8s model:
 ```shell
 Model  Controller   Cloud/Region        Version  SLA          Timestamp
@@ -98,7 +118,8 @@ loki        loki         loki-k8s        60   1/1        logging               l
 prometheus  prometheus   prometheus-k8s  103  1/1        receive-remote-write  prometheus_scrape        requirer
 ```
 
-To connect Grafana WEB interface, follow the COS section "[Browse dashboards](https://charmhub.io/topics/canonical-observability-stack/tutorials/install-microk8s)":
+### Connect Grafana web interface
+To connect to the Grafana web interface, follow the COS section "[Browse dashboards](https://charmhub.io/topics/canonical-observability-stack/tutorials/install-microk8s)":
 ```shell
 juju run grafana/leader get-admin-password --model <k8s_controller>:<cos_model_name>
 ```
diff --git a/docs/how-to/h-enable-tracing.md b/docs/how-to/h-enable-tracing.md
index 5af4930c5..ef84f9c3a 100644
--- a/docs/how-to/h-enable-tracing.md
+++ b/docs/how-to/h-enable-tracing.md
@@ -9,9 +9,9 @@ If you're using `juju 2.9`, check the [`juju 3.0` Release Notes](https://juju.is
 This guide contains the steps to enable tracing with [Grafana Tempo](https://grafana.com/docs/tempo/latest/) for your MySQL K8s application. 
 
 To summarize:
-* [Deploy the Tempo charm in a COS K8s environment](#heading--deploy)
-* [Integrate it with the COS charms](#heading--integrate)
+* [Deploy the Tempo charms and their dependencies in a COS K8s environment](#heading--deploy)
 * [Offer interfaces for cross-model integrations](#heading--offer)
+* [Consume and integrate cross-model integrations](#heading--consume)
 * [View MySQL K8s traces on Grafana](#heading--view)
 
 
@@ -37,31 +37,15 @@ First, switch to the Kubernetes controller where the COS model is deployed:
 ```shell
 juju switch <k8s_controller_name>:<cos_model_name>
 ```
-Then, deploy the [`tempo-k8s`](https://charmhub.io/tempo-k8s) charm:
-```shell
-juju deploy -n 1 tempo-k8s --channel latest/edge
-```
 
-<a href="#heading--integrate"><h2 id="heading--integrate"> Integrate with the COS charms </h2></a>
+Then, deploy the dependencies of Tempo by following [this tutorial](https://discourse.charmhub.io/t/tutorial-deploy-tempo-ha-on-top-of-cos-lite/15489). In particular, we would want to (see the sketch after this list):
+- Deploy the minio charm
+- Deploy the s3-integrator charm
+- Add a bucket in minio using a Python script
+- Configure s3-integrator with the minio credentials
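+
+The following is a minimal sketch of the deployment part of those steps; charm channels, MinIO bucket creation, and the credential configuration are covered in the linked tutorial:
+```shell
+# object storage and S3 integration used by Tempo
+juju deploy minio
+juju deploy s3-integrator
+# next: create a bucket in MinIO and configure s3-integrator with the
+# MinIO credentials, as described in the linked tutorial
+```
+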
 
-Integrate `tempo-k8s` with the COS charms as follows:
+Finally, deploy and integrate with Tempo HA in [a monolithic setup](https://discourse.charmhub.io/t/tutorial-deploy-tempo-ha-on-top-of-cos-lite/15489#heading--deploy-monolithic-setup). 
 
-```shell
-juju integrate tempo-k8s:grafana-dashboard grafana:grafana-dashboard
-juju integrate tempo-k8s:grafana-source grafana:grafana-source
-juju integrate tempo-k8s:ingress traefik:traefik-route
-juju integrate tempo-k8s:metrics-endpoint prometheus:metrics-endpoint
-juju integrate tempo-k8s:logging loki:logging
-```
-If you would like to instrument traces from the COS charms as well, create the following integrations:
-```shell
-juju integrate tempo-k8s:tracing alertmanager:tracing
-juju integrate tempo-k8s:tracing catalogue:tracing
-juju integrate tempo-k8s:tracing grafana:tracing
-juju integrate tempo-k8s:tracing loki:tracing
-juju integrate tempo-k8s:tracing prometheus:tracing
-juju integrate tempo-k8s:tracing traefik:tracing
-```
 
 <a href="#heading--offer"><h2 id="heading--offer"> Offer interfaces </h2></a>
 
@@ -70,7 +54,7 @@ Next, offer interfaces for cross-model integrations from the model where Charmed
 To offer the Tempo integration, run
 
 ```shell
-juju offer tempo-k8s:tracing
+juju offer <tempo_coordinator_k8s_application_name>:tracing
 ```
 
 Then, switch to the Charmed MySQL K8s model, find the offers, and integrate (relate) with them:
@@ -86,41 +70,59 @@ Below is a sample output where `k8s` is the K8s controller name and `cos` is the
 
 ```shell
 Store  URL                            Access  Interfaces
-k8s    admin/cos.tempo-k8s            admin   tracing:tracing
+k8s    admin/cos.tempo                admin   tracing:tracing
 ```
 
 Next, consume this offer so that it is reachable from the current model:
 
 ```shell
-juju consume k8s:admin/cos.tempo-k8s
+juju consume k8s:admin/cos.tempo
+```
+
+<a href="#heading--consume"><h2 id="heading--consume"> Consume interfaces </h2></a>
+
+First, deploy [Grafana Agent K8s](https://charmhub.io/grafana-agent-k8s) from the `latest/edge` channel:
+
+```shell
+juju deploy grafana-agent-k8s --channel latest/edge
+``` 
+
+Then, integrate Grafana Agent K8s with the consumed interface from the previous section:
+
+```shell
+juju integrate grafana-agent-k8s:tracing tempo:tracing
 ```
 
-Relate Charmed MySQL K8s with the above consumed interface:
+Finally, integrate Charmed MySQL K8s with Grafana Agent K8s:
 
 ```shell
-juju integrate mysql-k8s:tracing tempo-k8s:tracing
+juju integrate mysql-k8s:tracing grafana-agent-k8s:tracing-provider
 ```
 
 Wait until the model settles. The following is an example of the `juju status --relations` on the Charmed MySQL K8s model:
 
 ```shell
 Model     Controller  Cloud/Region        Version  SLA          Timestamp
-database  k8s         microk8s/localhost  3.4.3    unsupported  18:28:07Z
+database  k8s         microk8s/localhost  3.5.4    unsupported  16:33:26Z
 
-SAAS       Status  Store  URL
-tempo-k8s  active  k8s    admin/cos.tempo-k8s
+SAAS   Status  Store       URL
+tempo  active  k8s         admin/cos.tempo
 
-App        Version                  Status  Scale  Charm      Channel   Rev  Address        Exposed  Message
-mysql-k8s  8.0.36-0ubuntu0.22.04.1  active      1  mysql-k8s  8.0/edge  150  10.152.183.17  no       
+App                Version                  Status  Scale  Charm              Channel      Rev  Address         Exposed  Message
+grafana-agent-k8s  0.40.4                   active      1  grafana-agent-k8s  latest/edge   93  10.152.183.63   no       grafana-dashboards-provider: off, logging-consumer: off, send-remote-write: off
+mysql-k8s          8.0.37-0ubuntu0.22.04.3  active      1  mysql-k8s                         0  10.152.183.135  no       Primary
 
-Unit          Workload  Agent  Address       Ports  Message
-mysql-k8s/0*  active    idle   10.1.241.207         Primary
+Unit                  Workload  Agent      Address       Ports  Message
+grafana-agent-k8s/0*  active    idle       10.1.241.255         grafana-dashboards-provider: off, logging-consumer: off, send-remote-write: off
+mysql-k8s/0*          active    executing  10.1.241.253         Primary
 
-Integration provider      Requirer                  Interface    Type     Message
-mysql-k8s:database-peers  mysql-k8s:database-peers  mysql_peers  peer     
-mysql-k8s:restart         mysql-k8s:restart         rolling_op   peer     
-mysql-k8s:upgrade         mysql-k8s:upgrade         upgrade      peer     
-tempo-k8s:tracing         mysql-k8s:tracing         tracing      regular  
+Integration provider                Requirer                   Interface              Type     Message
+grafana-agent-k8s:peers             grafana-agent-k8s:peers    grafana_agent_replica  peer     
+grafana-agent-k8s:tracing-provider  mysql-k8s:tracing          tracing                regular  
+mysql-k8s:database-peers            mysql-k8s:database-peers   mysql_peers            peer     
+mysql-k8s:restart                   mysql-k8s:restart          rolling_op             peer     
+mysql-k8s:upgrade                   mysql-k8s:upgrade          upgrade                peer     
+tempo:tracing                       grafana-agent-k8s:tracing  tracing                regular  
 
 ```
 
@@ -136,4 +138,4 @@ Below is a screenshot demonstrating a Charmed MySQL trace:
 
 ![Example MySQL K8s trace with Grafana Tempo|690x382](upload://g5fWq9uz5UM2XXQFTPdeLLSeQHA.jpeg)
 
-Feel free to read through the [Tempo documentation](https://discourse.charmhub.io/t/tempo-k8s-docs-index/14005) at your leisure to explore its deployment and its integrations.
\ No newline at end of file
+Feel free to read through the [Tempo HA documentation](https://discourse.charmhub.io/t/charmed-tempo-ha/15531) at your leisure to explore its deployment and its integrations.
\ No newline at end of file
diff --git a/docs/how-to/h-external-access.md b/docs/how-to/h-external-access.md
new file mode 100644
index 000000000..b5dbbfcd0
--- /dev/null
+++ b/docs/how-to/h-external-access.md
@@ -0,0 +1,13 @@
+# How to connect to the database from outside of Kubernetes
+
+## External K8s application (non-Juju)
+
+[u]Use case[/u]: the client application is a non-Juju application outside of the DB K8s deployment.
+
+To connect to the Charmed MySQL K8s database from outside of the Kubernetes cluster, deploy the MySQL Router K8s charm. Please follow the [MySQL Router K8s documentation](https://charmhub.io/mysql-router-k8s/docs/h-external-access). A minimal sketch of the first steps is shown below.
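+
+This sketch assumes the `8.0/stable` channel and the endpoint names shown; the linked documentation covers exposing the router outside the cluster:
+
+```shell
+# Deploy the router and plug it into the database (channel and endpoint names are assumptions)
+juju deploy mysql-router-k8s --channel 8.0/stable --trust
+juju integrate mysql-router-k8s:backend-database mysql-k8s:database
+```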
+
+## External K8s relation (Juju)
+
+[u]Use case[/u]: the client application is a Juju application outside of the DB K8s deployment (e.g. a hybrid Juju deployment with mixed K8s and VM applications).
+
+In this case, a cross-model (hybrid) integration is necessary. Please [contact](/t/11868) the Data team to discuss the possible options for your use case. In general terms, such an integration follows the standard Juju offer/consume pattern sketched below.
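+
+A generic sketch of the offer/consume mechanics (controller, model, and application names are placeholders only):
+
+```shell
+# On the K8s model hosting the database
+juju offer mysql-k8s:database
+
+# On the model hosting the client application
+juju consume <k8s_controller>:admin/<k8s_model>.mysql-k8s
+juju integrate <your_application> mysql-k8s
+```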
\ No newline at end of file
diff --git a/docs/how-to/h-integrate-db-with-your-charm.md b/docs/how-to/h-integrate-db-with-your-charm.md
deleted file mode 100644
index 283424181..000000000
--- a/docs/how-to/h-integrate-db-with-your-charm.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# How to integrate DB with my charm
-
-Please check [the supported interfaces](/t/10249) of the current charm first. You have options to use modern (preferred) or legacy interfaces. Make sure you are familiar with [Juju integration concepts](https://juju.is/docs/juju/integration).
-
-The most existing charms currently use [ops-lib-mysql](https://github.com/canonical/ops-lib-mysql) interface (legacy). Canonical recommends for new charms to use [data-platform-libs](https://github.com/canonical/data-platform-libs) instead. You can take a look at [mysql-test-app](https://github.com/canonical/mysql-test-app) for more practical examples. Consider to [read the great manual about the charm development](https://juju.is/docs/sdk/integrate-your-charm-with-postgresql). The legacy charm details are well described [here](/t/11236).
-
-
-FAQ:
-* Q: Does the requirer need to set anything in relation data?<br/>A: it depends on the interface. Check the `mysql_client` [interface requirements](https://github.com/canonical/charm-relation-interfaces/blob/main/interfaces/mysql_client/v0/README.md).
-* Q: Is there a charm library available, or does my charm need to compile the mysql relation data on its own?<br/>A: Yes, the library is available: [data-platform-libs](https://github.com/canonical/data-platform-libs).
-* Q: How do I obtain the database url/uri?<br/>A: [it is planned](https://warthogs.atlassian.net/browse/DPE-2278), but currently missing. Meanwhile use [PostgreSQL as an example](https://charmhub.io/postgresql-k8s/docs/h-develop-mycharm).
-
-Troubleshooting:
-* Please start with [Juju troubleshooting guide](https://juju.is/docs/sdk/debug-a-charm).
-* Check Charmed MySQL K8s [troubleshooting hints](/t/11886).
-
-[Contact us](/t/11868) if you have any questions, issues and/or ideas!
\ No newline at end of file
diff --git a/docs/how-to/h-integrate-intro.md b/docs/how-to/h-integrate-intro.md
deleted file mode 100644
index 51e38861d..000000000
--- a/docs/how-to/h-integrate-intro.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# How-to develop the Charm 
-
-Canonical welcomes the contribution to all open-source Juju charms, please follow [the contribution guide](https://github.com/canonical/mysql-k8s-operator/blob/main/CONTRIBUTING.md) to improve Charmed MySQL K8s. 
-
-The '[Reference](https://charmhub.io/mysql-k8s)' section contains information about:
-* [Requirements](/t/11421)
-* [Testing](/t/11772)
-
-Please also find and check topic-specific documents in section "How-to: Develop":
-* [How to integrate MySQL with my charm](/t/11885)
-* How to migrate MySQL data using: [mysqldump](/t/11992), [mydumper](/t/12006) or [backup/restore](/t/12007)
-* [Troubleshooting](/t/11886)
-* [Legacy charm details](/t/11236)
\ No newline at end of file
diff --git a/docs/how-to/h-integrate.md b/docs/how-to/h-integrate.md
new file mode 100644
index 000000000..b18514603
--- /dev/null
+++ b/docs/how-to/h-integrate.md
@@ -0,0 +1,98 @@
+[note]
+**Note**: All commands are written for `juju >= v.3.0`
+
+If you are using an earlier version, check the [Juju 3.0 Release Notes](https://juju.is/docs/juju/roadmap#heading--juju-3-0-0---22-oct-2022).
+[/note]
+
+# How to integrate with another application
+
+[Integrations](https://juju.is/docs/juju/relation) (formerly “relations”) are connections between two applications with compatible endpoints. These connections simplify the creation and management of users, passwords, and other shared data.
+
+This guide shows how to integrate Charmed MySQL K8s with both charmed and non-charmed applications.
+
+> For developer information about how to integrate your own charmed application with MySQL K8s, see [Development > How to integrate with your charm](/t/11885).
+
+## Summary
+* [Integrate with a charmed application](#integrate-with-a-charmed-application)
+  * [Modern interface](#modern-interface)
+  * [Legacy interface](#legacy-interface)
+* [Integrate with a non-charmed application](#integrate-with-a-non-charmed-application)
+* [Rotate application passwords](#rotate-application-passwords)
+
+---
+
+## Integrate with a charmed application
+
+Integrations with charmed applications are supported via the [`mysql_client`](https://github.com/canonical/charm-relation-interfaces/blob/main/interfaces/mysql_client/v0/README.md) interface, and the legacy `mysql` interface.
+
+### Modern `mysql_client` interface
+
+To integrate with a charmed application that supports the `mysql_client` interface, run
+```shell
+juju integrate mysql-k8s <charm>
+```
+
+To remove the integration, run
+```shell
+juju remove-relation mysql-k8s <charm>
+```
+
+### Legacy `mysql` interface
+[note type="caution"]
+Note that this interface is **deprecated**.
+See more information in [Explanation > Legacy charm](/t/11236).
+[/note]
+
+To integrate via the legacy interface, run
+```shell
+juju integrate mysql-k8s:mysql <charm>
+```
+
+Extended permissions can be requested using the `mysql-root` endpoint:
+```shell
+juju integrate mysql-k8s:mysql-root <charm>
+```
+
+## Integrate with a non-charmed application
+
+To integrate with an application outside of Juju, you must use the [`data-integrator` charm](https://charmhub.io/data-integrator) to create the required credentials and endpoints.
+
+Deploy `data-integrator`:
+```shell
+juju deploy data-integrator --config database-name=<name>
+```
+
+Integrate with MySQL:
+```shell
+juju integrate data-integrator mysql-k8s
+```
+
+Use the `get-credentials` action to retrieve credentials from `data-integrator`:
+```shell
+juju run data-integrator/leader get-credentials
+```
+
+## Rotate application passwords
+
+To rotate the passwords of users created for related applications, the relation should be removed and related again. That process will generate a new user and password for the application.
+
+```shell
+juju remove-relation <charm> mysql-k8s
+juju integrate <charm> mysql-k8s
+```
+
+### Internal operator user
+
+The operator user is used internally by the Charmed MySQL K8s application. The `set-password` action can be used to rotate its password.
+
+To set a specific password for the `operator` user, run
+
+```shell
+juju run mysql-k8s/leader set-password password=<password>
+```
+
+To randomly generate a password for the `operator` user, run
+
+```shell
+juju run mysql-k8s/leader set-password
+```
\ No newline at end of file
diff --git a/docs/how-to/h-manage-applications.md b/docs/how-to/h-manage-applications.md
deleted file mode 100644
index d79648e3d..000000000
--- a/docs/how-to/h-manage-applications.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# How to manage related applications
-
-## Modern `mysql_client` interface:
-
-Relations to new applications are supported via the "[mysql_client](https://github.com/canonical/charm-relation-interfaces/blob/main/interfaces/mysql_client/v0/README.md)" interface. To create a relation:
-
-```shell
-juju relate mysql-k8s application
-```
-
-To remove a relation:
-
-```shell
-juju remove-relation mysql-k8s application
-```
-
-## Legacy `mysql` interface:
-
-This charm also supports the legacy relation via the `mysql` interface. Please note that these interface is deprecated.
-
- ```shell
-juju relate mysql-k8s:mysql wordpress-k8s
-```
-
-Also extended permissions can be requested using `mysql-root` endpoint:
-```shell
-juju relate mysql-k8s:mysql-root wordpress-k8s
-```
-
-
-## Rotate applications password
-
-To rotate the passwords of users created for related applications, the relation should be removed and related again. That process will generate a new user and password for the application, while retaining the requested database and data.
-
-```shell
-juju remove-relation application mysql-k8s
-juju add-relation application mysql-k8s
-```
-
-### Internal operator user
-
-The operator user is used internally by the Charmed MySQL Operator, the `set-password` action can be used to rotate its password.
-
-* To set a specific password for the operator user
-
-```shell
-juju run-action mysql-k8s/leader set-password password=<password> --wait
-```
-
-* To randomly generate a password for the operator user
-
-```shell
-juju run-action mysql-k8s/leader set-password --wait
-```
\ No newline at end of file
diff --git a/docs/how-to/h-migrate-cluster.md b/docs/how-to/h-migrate-cluster.md
index b9bfcb2e0..3860cf04e 100644
--- a/docs/how-to/h-migrate-cluster.md
+++ b/docs/how-to/h-migrate-cluster.md
@@ -1,7 +1,16 @@
-# How to restore foreign backup
+[note]
+**Note**: All commands are written for `juju >= v.3.0`
 
-This is a How-To for restoring a backup that was made from the a *different* cluster, (i.e. cluster migration via restore). To perform a basic restore please reference the [Restore How-To](/t/charmed-mysql-k8s-how-to-restore-backup/9663)
+If you are using an earlier version, check the [Juju 3.0 Release Notes](https://juju.is/docs/juju/roadmap#heading--juju-3-0-0---22-oct-2022).
+[/note]
 
+# How to migrate a cluster
+
+This is a guide on how to restore a backup that was made from a different cluster (i.e. cluster migration via restore).
+
+To perform a basic restore, please refer to the [Restore How-To](/t/charmed-mysql-k8s-how-to-restore-backup/9663).
+
+## Prerequisites
 Restoring a backup from a previous cluster to a current cluster requires that you:
 - Have a single unit Charmed MySQL deployed and running
 - Access to S3 storage
@@ -9,16 +18,20 @@ Restoring a backup from a previous cluster to a current cluster requires that yo
 - Have the backups from the previous cluster in your S3-storage
 - Have the passwords from your previous cluster
 
+---
+
+<a href="#heading--manage-cluster-passwords"><h2 id="heading--manage-cluster-passwords">Manage cluster passwords</h2></a>
 When you restore a backup from an old cluster, it will restore the password from the previous cluster to your current cluster. Set the password of your current cluster to the previous cluster’s password:
 ```shell
-juju run-action mysql-k8s/leader set-password username=root password=<previous cluster password> --wait
-juju run-action mysql-k8s/leader set-password username=clusteradmin password=<previous cluster password> --wait
-juju run-action mysql-k8s/leader set-password username=serverconfig password=<previous cluster password> --wait
+juju run mysql-k8s/leader set-password username=root password=<previous cluster password>
+juju run mysql-k8s/leader set-password username=clusteradmin password=<previous cluster password>
+juju run mysql-k8s/leader set-password username=serverconfig password=<previous cluster password>
 ```
 
+## List backups
 To view the available backups to restore you can enter the command `list-backups`:
 ```shell
-juju run-action mysql-k8s/leader list-backups --wait
+juju run mysql-k8s/leader list-backups
 ```
 
 This shows a list of the available backups (it is up to you to identify which `backup-id` corresponds to the previous-cluster):
@@ -29,9 +42,10 @@ This shows a list of the available backups (it is up to you to identify which `b
       YYYY-MM-DDTHH:MM:SSZ  | physical     | finished
 ```
 
+## Restore backup
 To restore your current cluster to the state of the previous cluster, run the `restore` command and pass the correct `backup-id` to the command:
  ```shell
-juju run-action mysql-k8s/leader restore backup-id=YYYY-MM-DDTHH:MM:SSZ --wait
+juju run mysql-k8s/leader restore backup-id=YYYY-MM-DDTHH:MM:SSZ
 ```
 
 Your restore will then be in progress. Once it is complete, your cluster will represent the state of the previous cluster.
\ No newline at end of file
diff --git a/docs/how-to/h-migrate-mysqldump.md b/docs/how-to/h-migrate-mysqldump.md
index ee9aa3403..c6a76902c 100644
--- a/docs/how-to/h-migrate-mysqldump.md
+++ b/docs/how-to/h-migrate-mysqldump.md
@@ -1,6 +1,14 @@
-# DB data migration using 'mysqldump'
+[note]
+**Note**: All commands are written for `juju >= v.3.0`
 
-> :information_source: **NOTE**: This document describes DB **data** migration only!<br/>Use [separate manual](/t/11885) to migrate charm on new juju interfaces, etc.
+For more information, check the [Juju 3.0 Release Notes](https://juju.is/docs/juju/roadmap#heading--juju-3-0-0---22-oct-2022).
+[/note]
+
+# Migrate database data via `mysqldump`
+
+This document describes database **data** migration only!
+
+> For information about integrating charms via juju interfaces, see [How to integrate a database with my charm](/t/11885).
 
 The list of MariaDB/MySQL **legacy VM charms**:
 
@@ -21,7 +29,7 @@ The minor difference in commands necessary for each of the legacy charm, but the
 
 Before the data migration check all [limitations of the modern Charmed MySQL K8s](/t/11421#mysql-gr-limits) charm!<br/>Please check [your application compatibility](/t/11236) with Charmed MySQL K8s before migrating production data from legacy charm!
 
-> :warning: **Tip**: Always test migration in LAB before performing it in Production!
+> :warning: Always perform the migration in a test environment before performing it in production!
 
 ## Do you need to migrate?
 
@@ -32,15 +40,24 @@ A database migration is only required if the output of the following command is
 DB_CHARM= < mydb | charmed-osm-mariadb-k8s >
 juju show-application ${DB_CHARM} | yq '.[] | .charm'
 ```
-> :warning: **Tip**: No migration necessary if the output above is `mysql-k8s`! Still, this manual can be used to copy data between different installations of the same (modern) charm `mysql`, however the [backup/restore](/t/9653) is recommended for migrations between modern charms.
+[note type=caution]
+No migration is necessary if the output above is `mysql-k8s`! 
+
+Still, this manual can be used to copy data between different installations of the same (modern) charm `mysql-k8s`. The [backup/restore method](/t/9653) is recommended for migrations between modern charms.
+[/note]
 
 ## Prerequisites
 
 - Client machine with access to deployed legacy charm
-- Juju version 2.9  (check the [Juju tech details](/t/11984) for the different Juju versions)
+- Juju version 2.9+  (check the [Juju tech details](/t/11984) for the different Juju versions)
 - Enough storage in the cluster to support backup/restore of the databases.
 - `mysql-client` on client machine (install by running `sudo apt install mysql-client`).
-> :warning: **WARNING**:  the most legacy DB charms support old Ubuntu series only, while Juju 3.x does [NOT support](https://discourse.charmhub.io/t/roadmap-releases/5064#heading--juju-3-0-0---22-oct-2022) Ubuntu `bionic`. The migration to the new charm recommended in Juju 2.9.x!
+
+[note type=caution]
+Most legacy database charms support old Ubuntu series only, while Juju 3.x does [NOT support](https://discourse.charmhub.io/t/roadmap-releases/5064#heading--juju-3-0-0---22-oct-2022) Ubuntu Bionic.
+
+It is recommended to use the latest stable revision of the charm on Ubuntu Jammy with Juju 3.x.
+[/note]
 
 ## Obtain existing database credentials
 
@@ -112,14 +129,14 @@ mysql \
   < "${OLD_DB_DUMP}"
 ```
 
-## Relate to modern charm 
+## Integrate with modern charm
 
 ```shell
-# relate your application and new MySQL database charm (using modern `database` endpoint)
-juju relate <your_application> mysql-k8s:database
+# integrate your application and new MySQL database charm (using modern `database` endpoint)
+juju integrate <your_application> mysql-k8s:database
 
 # If the `database` endpoint (mysql_client interface) is not yet supported, use the legacy `mysql` interface:
-juju relate <your_application> mysql-k8s:mysql
+juju integrate <your_application> mysql-k8s:mysql
 ```
 
 ## Verify DB migration
@@ -192,5 +209,6 @@ juju remove-application --destroy-storage < mydb | charmed-osm-mariadb-k8s >
 
 ## Links
 
-DB data migration is also possible using [mydumper](/t/12006).
+Database data migration is also possible using [`mydumper`](/t/12006).
+
 > :tipping_hand_man: This manual is based on the [Kubeflow DB migration guide](https://github.com/canonical/bundle-kubeflow/blob/main/docs/db-migration-guide.md).
\ No newline at end of file
diff --git a/docs/how-to/h-restore-backup.md b/docs/how-to/h-restore-backup.md
index d555e2991..30019c23b 100644
--- a/docs/how-to/h-restore-backup.md
+++ b/docs/how-to/h-restore-backup.md
@@ -1,17 +1,33 @@
+[note]
+**Note**: All commands are written for `juju >= v3.0`
+
+If you are using an earlier version, check the [Juju 3.0 Release Notes](https://juju.is/docs/juju/roadmap#heading--juju-3-0-0---22-oct-2022).
+[/note]
+
 # How to restore backup
 
 This is a How-To for performing a basic restore (restoring a locally made backup).
 To restore a backup that was made from a *different* cluster (i.e. cluster migration via restore), please refer to the [Cluster Migration via Restore How-To](/t/charmed-mysql-k8s-how-to-migrate-cluster-via-restore/9661).
 
-Restoring from a backup requires that you:
+## Prerequisites
+
 - [Scale-down to the single MySQL unit (scale it up after the backup is restored).](/t/charmed-mysql-k8s-how-to-manage-units/9659)
 - Access to S3 storage
 - [Have configured settings for S3 storage](/t/charmed-mysql-k8s-how-to-configure-s3/9651)
 - [Have existing backups in your S3-storage](/t/charmed-mysql-k8s-how-to-create-and-list-backups/9653)
 
+## Summary
+
+* [List backups](#list-backups)
+* [Restore backup](#restore-backup)
+
+---
+
+## List backups
+
 To view the available backups to restore you can enter the command `list-backups`:
 ```shell
-juju run-action mysql-k8s/leader list-backups --wait
+juju run mysql-k8s/leader list-backups
 ```
 
 This should show your available backups
@@ -22,9 +38,11 @@ This should show your available backups
       YYYY-MM-DDTHH:MM:SSZ  | physical     | finished
 ```
 
+## Restore backup
+
 To restore a backup from that list, run the `restore` command and pass the `backup-id` to restore:
  ```shell
-juju run-action mysql-k8s/leader restore backup-id=YYYY-MM-DDTHH:MM:SSZ --wait
+juju run mysql-k8s/leader restore backup-id=YYYY-MM-DDTHH:MM:SSZ
 ```
 
 Your restore will then be in progress.
\ No newline at end of file
diff --git a/docs/how-to/h-rollback-major.md b/docs/how-to/h-rollback-major.md
deleted file mode 100644
index 8c7ed57f0..000000000
--- a/docs/how-to/h-rollback-major.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Major Rollback
-
-> :information_source: **Example**: MySQL 9.0 -> MySQL 8.0
-
-Currently, the charm supports MySQL 8.0 only; therefore, minor rollbacks are only possible. Canonical is NOT planning to support in-place rollbacks for the major MySQL version change as the old MySQL cluster installation will stay nearby and can be reused for the rollback.
\ No newline at end of file
diff --git a/docs/how-to/h-rollback-minor.md b/docs/how-to/h-rollback-minor.md
index 737d9b9db..5f87aa8cc 100644
--- a/docs/how-to/h-rollback-minor.md
+++ b/docs/how-to/h-rollback-minor.md
@@ -1,62 +1,69 @@
-# Minor Rollback
+[note]
+**Note**: All commands are written for `juju >= v.3.0`
 
-> :information_source: **Example**: MySQL 8.0.34 -> MySQL 8.0.33<br/>
-(including simple charm revision bump: from revision 43 to revision 42)
+If you are using an earlier version, check the [Juju 3.0 Release Notes](https://juju.is/docs/juju/roadmap#heading--juju-3-0-0---22-oct-2022).
+[/note]
 
-> **:warning: WARNING**: do NOT trigger `rollback` during the running `upgrade` action! It may cause unpredictable MySQL Cluster state!
+# Perform a minor rollback
+**Example**: MySQL 8.0.34 -> MySQL 8.0.33<br/>
+(including charm revision return: e.g. Revision 43 -> Revision 42)
 
-## Minor rollback steps
+After a `juju refresh`, if there are any version incompatibilities in charm revisions or their dependencies, or any other unexpected failure in the upgrade process, the upgrade will be halted and enter a failure state.
 
-1. **Prepare** "Charmed MySQL K8s" Juju application for the in-place rollback. See the step description below for all technical details executed by charm here.
-2. **Rollback**. Perform the first charm rollback, the first unit only. The unit with the maximal ordinal will be chosen.
-3. **Resume**. Continue rollback to all other units if the first unit rolled-back successfully.
-4. **Check**. Make sure the charm and cluster are in healthy state again.
-
-## Manual Rollback
+Even if the underlying MySQL cluster continues to work, it’s important to roll back the charm to a previous revision so that an update can be attempted again after further inspection of the failure.
 
-After a `juju refresh`, case there any version incompatibilities in charm revisions or it dependencies, or any other unexpected failure in the upgrade process, the upgrade process will be halted an enter a failure state.
+[note type="caution"]
+**Warning:** Do NOT trigger `rollback` while an `upgrade` action is running! It may cause an unpredictable MySQL cluster state!
+[/note]
 
-Although the underlying MySQL Cluster continue to work, it’s important to rollback the charm to previous revision so an update can be later attempted after a further inspection of the failure.
-
-To execute a rollback we take the same procedure as the upgrade, the difference being the charm revision to upgrade to. In case of this tutorial example, one would refresh the charm back to revision `88`, the steps being:
+## Summary of the rollback steps
+1. **Prepare** the Charmed MySQL K8s application for the in-place rollback.
+2. **Rollback**. Perform the charm rollback on the first unit only. The unit with the highest ordinal number will be chosen.
+3. **Resume**. Continue rolling back the rest of the units if the first unit rolled back successfully.
+4. **Check**. Make sure the charm and cluster are in a healthy state again.
 
 ## Step 1: Prepare
 
-It is necessary to re-run `pre-upgrade-check` action on the leader unit, to enter the upgrade recovery state:
+To execute a rollback, we use a similar procedure to the upgrade. The difference is the charm revision to upgrade to. In this guide's example, we will refresh the charm back to revision `88`.
 
-```
+It is necessary to re-run the `pre-upgrade-check` action on the leader unit to enter the upgrade recovery state:
+```shell
 juju run mysql-k8s/leader pre-upgrade-check
 ```
 
 ## Step 2: Rollback
 
 When using a charm from Charmhub:
-
-```
+```shell
 juju refresh mysql-k8s --revision=88
 ```
 
-Case deploying from local charm file, one need to have the previous revision charm file and the `mysql-image` resource, then run:
-
+When deploying from a local charm file, one must have the previous revision charm file and the `mysql-image` resource, then run:
+```shell
+juju refresh mysql-k8s --path=<path to charm file> --resource mysql-image=<image URL>
 ```
+For example:
+```shell
 juju refresh mysql-k8s --path=./mysql-k8s_ubuntu-22.04-amd64.charm \
        --resource mysql-image=ghcr.io/canonical/charmed-mysql@sha256:753477ce39712221f008955b746fcf01a215785a215fe3de56f525380d14ad97
 ```
+> where `mysql-k8s_ubuntu-22.04-amd64.charm` is the previous revision charm file. 
 
-Where `mysql-k8s_ubuntu-22.04-amd64.charm` is the previous revision charm file. The reference for the resource for a given revision can be found at the `metadata.yaml` file in the [charm repository](https://github.com/canonical/mysql-k8s-operator/blob/e4beca6b34313a977eab5ab2c74fa43586f1154c/metadata.yaml#L35).
+The reference for the resource for a given revision can be found in the [`metadata.yaml`](https://github.com/canonical/mysql-k8s-operator/blob/e4beca6b34313a977eab5ab2c74fa43586f1154c/metadata.yaml) file in the charm's repository under the key `upstream-source`.
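+
+For instance, with the repository checked out at the matching revision, the value could be read with `yq` (a sketch; the key path assumes a `resources.mysql-image.upstream-source` layout in that file):
+
+```shell
+yq '.resources.mysql-image.upstream-source' metadata.yaml
+```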
 
-The first unit will be rolled out and should rejoin the cluster after settling down. After the refresh command, the juju controller revision for the application will be back in sync with the running Charmed MySQL K8s revision.
+The first unit will be rolled out and should rejoin the cluster after settling down. After the `refresh` command, the juju controller revision for the application will be back in sync with the running Charmed MySQL K8s revision.
 
 ## Step 3: Resume
 
-There still a need to resume the upgrade on the remaining units, which is done with the `resume-upgrade` action.
-
+To resume the upgrade on the remaining units, use the `resume-upgrade` action:
 ```shell
-juju run-action mysql-k8s/leader resume-upgrade --wait
+juju run mysql-k8s/leader resume-upgrade
 ```
 
-This will rollout the Pods in the remaining units, but to the same charm revision.
+This will roll out the pods in the remaining units to the same charm revision.
 
 ## Step 4: Check
 
-The future [improvement is planned](https://warthogs.atlassian.net/browse/DPE-2620) to check the state on pod/cluster on a low level. At the moment check `juju status` to make sure the cluster [state](/t/11866) is OK.
\ No newline at end of file
+Future [improvements are planned](https://warthogs.atlassian.net/browse/DPE-2621) to check the state of pods/clusters at a low level.
+
+For now, check `juju status` to make sure the cluster [state](/t/11866) is OK.
\ No newline at end of file
diff --git a/docs/how-to/h-scale.md b/docs/how-to/h-scale.md
index d163ef3ef..4e5b8f97e 100644
--- a/docs/how-to/h-scale.md
+++ b/docs/how-to/h-scale.md
@@ -1,35 +1,52 @@
-# How to deploy and manage units
+[note]
+**Note**: All commands are written for `juju >= v.3.0`
 
-## Basic Usage
+If you are using an earlier version, check the [Juju 3.0 Release Notes](https://juju.is/docs/juju/roadmap#heading--juju-3-0-0---22-oct-2022).
+[/note]
 
-To deploy a single unit of MySQL using its default configuration
-```shell
-juju deploy mysql-k8s --channel 8.0 --trust
-```
+# How to scale units
+
+Replication in MySQL is the process of creating copies of the stored data. This provides redundancy, which means the application can provide self-healing capabilities in case one replica fails. In this context, each replica is equivalent to one juju unit.
+
+This guide will show you how to establish and change the number of juju units used to replicate your data.
+
+## Deploy MySQL K8s with replicas
 
-It is customary to use MySQL with replication. Hence usually more than one unit (preferably an odd number to prohibit a "split-brain" scenario) is deployed. To deploy MySQL with multiple replicas, specify the number of desired units with the `-n` option.
+To deploy MySQL K8s with multiple replicas, specify the number of desired units with the `-n` option.
 ```shell
 juju deploy mysql-k8s --channel 8.0 --trust -n <number_of_replicas>
 ```
+> It is recommended to use an odd number to prevent a [split-brain](https://en.wikipedia.org/wiki/Split-brain_(computing)) scenario.
 
-To retrieve primary replica one can use the action `get-primary` on any of the units running MySQL:
+### Primary vs. leader unit 
+
+The MySQL primary server unit may or may not be the same as the [juju leader unit](https://juju.is/docs/juju/leader).
+
+The juju leader unit is represented in `juju status` by an asterisk (*) next to its name.
+
+To retrieve the juju unit that corresponds to the MySQL K8s primary, use the action `get-primary` on any of the units running `mysql-k8s`:
 ```shell
-juju run-action mysql-k8s/leader get-primary --wait
+juju run mysql-k8s/leader get-primary
 ```
 
-Similarly, the primary replica is displayed as a status message in `juju status`, however one should note that this hook gets called on regular time intervals and the primary may be outdated if the status hook has not been called recently.
+Similarly, the primary replica is displayed as a status message in `juju status`. However, note that this message is updated by a hook that runs at regular time intervals, so the reported primary may be outdated if the status hook has not run recently.
 
-Further we highly suggest configuring the status hook to run frequently. In addition to reporting the primary, secondaries, and other statuses, the status hook performs self healing in the case of a network cut. To change the frequency of the update status hook do:
+[note]
+**We highly suggest configuring the `update-status` hook to run frequently.** In addition to reporting the primary, secondaries, and other statuses, the [status hook](https://juju.is/docs/sdk/update-status-event) performs self-healing in the case of a network cut. 
+
+To change the frequency of the `update-status` hook, run
 ```shell
 juju model-config update-status-hook-interval=<time(s/m/h)>
 ```
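+
+For example, to have the hook run every five minutes (the interval here is only an illustration):
+
+```shell
+juju model-config update-status-hook-interval=5m
+```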
-Note that this hook executes a read query to MySQL. On a production level server this should be configured to occur at a frequency that doesn't overload the server with read requests. Similarly the hook should not be configured at too quick of a frequency as this can delay other hooks from running. You can read more about status hooks [here](https://juju.is/docs/sdk/update-status-event).
-
-## Replication
+<!--Note that this hook executes a read query to MySQL. On a production level server, this should be configured to occur at a frequency that doesn't overload the server with read requests. Similarly, the hook should not be configured at too quick of a frequency, as this can delay other hooks from running. -->
+[/note]
 
-Both scaling-up and scaling-down operations are performed using `juju scale-application`:
+## Scale replicas on an existing application
+Both scaling-up and scaling-down operations are performed using `juju scale-application` and specifying the total number of units you want in the cluster:
 ```shell
-juju scale-application mysql-k8s <desired_num_of_units>
+juju scale-application mysql-k8s <total number of units>
 ```
 
-> **:warning: Warning**: be careful with removing all units! It can destroy your data ([Juju storage provider](https://juju.is/docs/juju/storage-provider) dependent)!
\ No newline at end of file
+[note type=negative]
+**Warning**: Do not remove the last unit; it will destroy your data!
+[/note]
\ No newline at end of file
diff --git a/docs/how-to/h-upgrade-intro.md b/docs/how-to/h-upgrade-intro.md
deleted file mode 100644
index ac75102d8..000000000
--- a/docs/how-to/h-upgrade-intro.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Charmed MySQL K8s Upgrade
-
-Please choose the appropriate upgrade/rollback tutorial.
-
-Migration:
-
-* [Major upgrade](/t/11750), e.g. MySQL 8.0 -> MySQL 9.0.
-
-* [Major rollback](/t/11751), e.g. MySQL 9.0 -> MySQL 8.0.
-
-In-place minor upgrade:
-
-* [Minor upgrade](/t/11752), e.g. MySQL 8.0.33 -> MySQL 8.0.34<br/>
-(including charm revision bump 99 -> 102).
-
-* [Minor rollback](/t/11753), e.g. MySQL 8.0.34 -> MySQL 8.0.33<br/>
-(including charm revision return 102 -> 99).
\ No newline at end of file
diff --git a/docs/how-to/h-upgrade-major.md b/docs/how-to/h-upgrade-major.md
deleted file mode 100644
index 8b83607f0..000000000
--- a/docs/how-to/h-upgrade-major.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Major Upgrade
-
-> :information_source: **Example**: MySQL 8.0 -> MySQL 9.0
-
-Currently, the charm supports MySQL 8.0 only; therefore, minor upgrades are only possible. The support of the following [MySQL LTS releases](https://blogs.oracle.com/mysql/post/introducing-mysql-innovation-and-longterm-support-lts-versions) is planned. Canonical is NOT planning to support in-place upgrades for the Major version change. The new MySQL cluster will have to be installed nearby, and the data will be copied from the old to the new installation. After announcing the next MySQL major version support, the appropriate manual will be published here.
\ No newline at end of file
diff --git a/docs/how-to/h-upgrade-minor.md b/docs/how-to/h-upgrade-minor.md
index d346f8145..41190b7cd 100644
--- a/docs/how-to/h-upgrade-minor.md
+++ b/docs/how-to/h-upgrade-minor.md
@@ -4,14 +4,14 @@
 If you are using an earlier version, check the [Juju 3.0 Release Notes](https://juju.is/docs/juju/roadmap#heading--juju-3-0-0---22-oct-2022).
 [/note]
 
-# Minor Upgrade
+# Perform a minor upgrade
 
-> :information_source: **Example**: MySQL 8.0.33 -> MySQL 8.0.34<br/>
-(including simple charm revision bump: from revision 99 to revision 102)
+**Example**: MySQL 8.0.33 -> MySQL 8.0.34<br/>
+(including charm revision bump: e.g. Revision 193 -> Revision 196)
 
-This is part of the [Charmed MySQL K8s Upgrade](/t/11754). Please refer to this page for more information and the overview of the content.
+This is part of the [Upgrade section](/t/11754). Refer to the landing page for more information and an overview of the content.
 
-We strongly recommend to **NOT** perform any other extraordinary operations on Charmed MySQL K8s cluster, while upgrading. As an examples, these may be (but not limited to) the following:
+We strongly recommend **NOT** performing any other extraordinary operations on a Charmed MySQL K8s cluster while upgrading. These include (but are not limited to) the following:
 
 1. Adding or removing units
 2. Creating or destroying new relations
@@ -20,26 +20,33 @@ We strongly recommend to **NOT** perform any other extraordinary operations on C
 
 Concurrency with other operations is not supported, and it can lead the cluster into inconsistent states.
 
-> **:warning: NOTE:** Make sure to have a [backup](/t/9653) of your data when running any type of upgrades.
+[note type=caution]
+Make sure to have a backup of your data when running any type of upgrades!
+See: [How to create a backup](/t/9653)
+[/note]
 
-> **:information_source: TIP:** It’s recommended to deploy your application in conjunction with the [Charmed MySQL Router K8s](https://charmhub.io/mysql-router-k8s). This will ensure minimal service disruption, if any.
+It is recommended to deploy your application in conjunction with [Charmed MySQL Router K8s](https://charmhub.io/mysql-router-k8s). This will ensure minimal service disruption, if any.
 
-## Minor upgrade steps
+## Summary of the upgrade steps
 
-1. **Collect** all necessary pre-upgrade information. It will be necessary for the rollback (if requested). Do NOT skip this step, it is better safe the sorry!
-2. (optional) **Scale-up**. The new unit will be the first one to be updated, and it will simplify the rollback procedure a lot in case of the upgrade failure.
-3. **Prepare** "Charmed MySQL" Juju application for the in-place upgrade. See the step description below for all technical details executed by charm here.
-4. **Upgrade** (phase 1). Once started, only one unit in a cluster will be upgraded. In case of failure, the rollback is simple: remove newly added pod (in step 2).
-5. **Resume** upgrade (phase 2). If the new pod is OK after the refresh, the upgrade can be resumed for all other units in the cluster. All units in a cluster will be executed sequentially: from biggest ordinal to the lowest one.
-6. (optional) Consider to [**Rollback**](/t/11749) in case of disaster. Please inform and include us in your case scenario troubleshooting to trace the source of the issue and prevent it in the future. [Contact us](https://chat.charmhub.io/charmhub/channels/data-platform)!
-7. (optional) **Scale-back**. Remove no longer necessary K8s pod created in step 2 (if any).
-8. Post-upgrade **Check**. Make sure all units are in the proper state and the cluster is healthy.
+1. [**Collect**](#step-1-collect) all necessary pre-upgrade information. It will be necessary for the rollback (if requested). Do not skip this step!
+2. [**Scale-up** (optional)](#step-2-scale-up-optional). The new unit will be the first one to be updated, and it will simplify the rollback procedure a lot in case of the upgrade failure.
+3. [**Prepare**](#step-3-prepare) the Charmed MySQL K8s application for the in-place upgrade.
+4. [**Upgrade**](#step-4-upgrade). Once started, only one unit in the cluster will be upgraded. In case of failure, the rollback is simple: remove the newly added pod (from [step 2](#step-2-scale-up-optional)).
+5. [**Resume** upgrade](#step-5-resume). If the new pod is OK after the refresh, the upgrade can be resumed for all other units in the cluster. All units will be upgraded sequentially, from the largest ordinal number to the lowest.
+6. Consider a [**rollback**](#step-6-rollback-optional) in case of disaster. Please inform and include us in your case scenario troubleshooting to trace the source of the issue and prevent it in the future. [Contact us](https://chat.charmhub.io/charmhub/channels/data-platform)!
+7. [**Scale-back** (optional)](#step-7-scale-back). Remove the K8s pod created in step 2 if it is no longer necessary.
+8. [Post-upgrade **check**](#step-8-check). Make sure all units are in a healthy state.
 
 ## Step 1: Collect
 
-> **:information_source: NOTE:** The step is only valid when deploying from charmhub. If the [local charm](https://juju.is/docs/sdk/deploy-a-charm) deployed (revision is small, e.g. 0-10), make sure the proper/current local revision of the `.charm` file is available BEFORE going further. You might need it for rollback.
+[note]
+This step is only valid when deploying from [charmhub](https://charmhub.io/). 
+
+If a [local charm](https://juju.is/docs/sdk/deploy-a-charm) is deployed (revision is small, e.g. 0-10), make sure the proper/current local revision of the `.charm` file is available BEFORE going further. You might need it for a rollback.
+[/note]
 
-The first step is to record the revision of the running application, as a safety measure for a rollback action. To accomplish this, simply run the `juju status` command and look for the deployed Charmed MySQL revision in the command output, e.g.:
+The first step is to record the revision of the running application as a safety measure for a rollback action. To accomplish this, run the `juju status` command and look for the deployed Charmed MySQL K8s revision in the command output, e.g.:
 
 ```shell
 Model      Controller  Cloud/Region        Version  SLA          Timestamp
@@ -54,19 +61,20 @@ mysql-k8s/1   active    idle   10.1.148.138
 mysql-k8s/2   active    idle   10.1.148.143
 ```
 
-For this example, the current revision is `88` . Store it safely to use in case of rollback!
+For this example, the current revision is `88`. Store it safely to use in case of rollback!
 
 ## Step 2: Scale-up (optional)
 
 Optionally, it is recommended to scale the application up by one unit before starting the upgrade process.
 
-The new unit will be the first one to be updated, and it will assert that the upgrade is possible. In case of failure, having the extra unit will ease the rollback procedure, without disrupting service. More on the [Minor rollback](/t/11753) tutorial.
+The new unit will be the first one to be updated, and it will assert that the upgrade is possible. In case of failure, having the extra unit will ease a future rollback procedure without disrupting service. 
 
 ```shell
-juju scale-application mysql-k8s <current_units_count+1>
+juju scale-application mysql-k8s <total number of units desired>
 ```
+> To scale up by 1 unit, `<total number of units desired>` would be the current number of units + 1 
 
-Wait for the new unit up and ready.
+Wait for the new unit to be ready.
 
 ## Step 3: Prepare
 
@@ -91,20 +99,20 @@ The action will configure the charm to minimize the amount of primary switchover
 
 ## Step 4: Upgrade
 
-Use the [`juju refresh`](https://juju.is/docs/juju/juju-refresh) command to trigger the charm upgrade process. If using juju version 3 or higher, it is necessary to add the `--trust` option.
+Use the [`juju refresh`](https://juju.is/docs/juju/juju-refresh) command to trigger the charm upgrade process.
 
+Example with channel selection:
 ```shell
-# example with channel selection and juju 2.9.x
-juju refresh mysql-k8s --channel 8.0/edge
-
-# example with channel selection and juju 3.x
 juju refresh mysql-k8s --channel 8.0/edge --trust
 
-# example with specific revision selection (do NOT miss OCI resource!)
-juju refresh mysql-k8s --revision=89 --resource mysql-image=...
+```
+
+Example with specific revision selection (do not forget the OCI resource):
+```shell
+juju refresh mysql-k8s --revision=89 --resource mysql-image=... --trust
 ```
 
-> **:information_source: IMPORTANT:** The upgrade will execute only on the highest ordinal unit, for the running example `mysql-k8s/2`, the `juju status` will look like*:
+The upgrade will execute only on the highest ordinal unit.
+
+For the running example `mysql-k8s/2`, `juju status` would look similar to the output below:
 
 ```shell
 Model      Controller  Cloud/Region        Version  SLA          Timestamp
@@ -120,19 +128,25 @@ mysql-k8s/2   active       idle       10.1.148.143         other units upgrading
 mysql-k8s/3   maintenance  executing  10.1.148.145         upgrading unit
 ```
 
-> **:information_source: Note:** It is expected to have some status changes during the process: waiting, maintenance, active. Do NOT trigger `rollback` procedure during the running `upgrade` procedure. Make sure `upgrade` has failed/stopped and cannot be fixed/continued before triggering `rollback`!
+**Do NOT trigger `rollback` procedure during the running `upgrade` procedure.**
+It is expected to have some status changes during the process: `waiting`, `maintenance`, `active`. 
+
+Make sure `upgrade` has failed/stopped and cannot be fixed/continued before triggering `rollback`!
 
-> **:information_source: Note:** The unit should recover shortly after, but the time can vary depending on the amount of data written to the cluster while the unit was not part of the cluster. Please be patient on the huge installations.
+**Please be patient during large installations.**
+Each unit should recover shortly after the upgrade, but time can vary depending on the amount of data written to the cluster while the unit was not part of it. 
 
 ## Step 5: Resume
 
-After the unit is upgraded, the charm will set the unit upgrade state as completed. If deemed necessary the user can further assert the success of the upgrade. Being the unit healthy within the cluster, the next step is to resume the upgrade process, by running:
+After the unit is upgraded, the charm will set the unit upgrade state as completed. 
+
+If the unit is healthy within the cluster, the next step is to resume the upgrade process by running:
 
 ```shell
 juju run mysql-k8s/leader resume-upgrade
 ```
 
-The `resume-upgrade` will rollout the upgrade for the following unit, always from highest from lowest, and for each successful upgraded unit, the process will rollout the next automatically.
+`resume-upgrade` will roll out the upgrade to the next unit, always from the highest ordinal number to the lowest, and for each successfully upgraded unit, the process will automatically roll out the next one.
 
 ```shell
 Model      Controller  Cloud/Region        Version  SLA          Timestamp
@@ -152,23 +166,28 @@ mysql-k8s/3   active       idle       10.1.148.145
 
 This step must be skipped if the upgrade went well!
 
-Although the underlying MySQL Cluster continue to work, it’s important to rollback the charm to previous revision so an update can be later attempted after a further inspection of the failure. Please switch to the dedicated [minor rollback](/t/11753) tutorial if necessary.
+If there was an issue with the upgrade, even if the underlying MySQL cluster continues to work, it’s important to roll back the charm to the previous revision. That way, the update can be attempted again after a further inspection of the failure. 
+
+> See: [How to perform a minor rollback](/t/11753)
 
 ## Step 7: Scale-back
 
 If the application scale was changed for the upgrade procedure, it is now safe to scale it back to the desired unit count:
 
 ```shell
-juju scale-application mysql-k8s <unit_count>
+juju scale-application mysql-k8s <total number of units desired>
 ```
+> To scale down by 1 unit, `<total number of units desired>` would be the current number of units - 1 
 
-An example on the following video:
+Example:
 
 [![asciicast](https://asciinema.org/a/7ZMAsPWU3wv7ynZI1JvgRFG31.png)](https://asciinema.org/a/7ZMAsPWU3wv7ynZI1JvgRFG31)
 
 ## Step 8: Check
 
-The future [improvement is planned](https://warthogs.atlassian.net/browse/DPE-2620) to check the state on pod/cluster on a low level. At the moment check `juju status` to make sure the cluster [state](/t/11866) is OK.
+Future improvements are [planned](https://warthogs.atlassian.net/browse/DPE-2620) to check the state of the pod/cluster at a low level.
+
+For now, use `juju status` to make sure the cluster [state](/t/11866) is OK.
 
 <!---
 **More TODOs:**
diff --git a/docs/how-to/h-upgrade.md b/docs/how-to/h-upgrade.md
new file mode 100644
index 000000000..f03cb5993
--- /dev/null
+++ b/docs/how-to/h-upgrade.md
@@ -0,0 +1,25 @@
+# Upgrade
+
+This section contains documentation about performing upgrades (and rollbacks) on:
+* [MySQL Server (workload)](#mysql-upgrades-workload)
+* [Juju version](#juju-upgrades)
+
+## MySQL upgrades (workload)
+There are two types of in-place workload upgrades:
+* **Major upgrades** -  E.g. MySQL `8.0` -> MySQL `9.0`
+  * *Not supported*
+* **Minor upgrades** -  E.g. MySQL `8.0.33` -> `8.0.34` (includes charm revision bump)
+  * See: [How to perform a minor upgrade](/t/11752)
+  * See: [How to perform a minor rollback](/t/11753)
+
+[note type="caution"]
+This charm only supports in-place **minor** upgrades. 
+
+To upgrade to a major MySQL version, one must install a new cluster separately and migrate the data from the old to the new installation. This documentation will be updated with the migration instructions when a new MySQL version becomes available.
+[/note]
+
+## Juju upgrades
+
+New revisions of the charm may require that you do a major or minor Juju upgrade.
+
+See: [How to upgrade Juju](/t/14333)
\ No newline at end of file
diff --git a/docs/overview.md b/docs/overview.md
index 68795507f..37c5324f7 100644
--- a/docs/overview.md
+++ b/docs/overview.md
@@ -1,22 +1,21 @@
-# Charmed MySQL K8s Documentation
+> This is a **Kubernetes** operator. To deploy on IAAS/VM, see [Charmed MySQL VM](https://charmhub.io/mysql).
 
-[note type="positive"]
-This is **[K8s](https://canonical.com/data/docs)** operator. To deploy in **[IAAS/VM](https://canonical.com/data/docs)**, use [Charmed MySQL](https://charmhub.io/mysql).
-[/note]
+# Charmed MySQL K8s documentation
 
-The Charmed MySQL K8s Operator delivers automated operations management from [day 0 to day 2](https://codilime.com/blog/day-0-day-1-day-2-the-software-lifecycle-in-the-cloud-age/) on the [MySQL Community Edition](https://www.mysql.com/products/community/) relational database. It is an open source, end-to-end, production-ready data platform [on top of Juju](https://juju.is/).
+Charmed MySQL K8s is an open-source software operator that deploys and operates [MySQL Community Edition](https://www.mysql.com/products/community/) relational databases on Kubernetes via [Juju](https://juju.is/). 
 
-![image|690x424](upload://vpevillwv3S9C44LDFBxkGCxpGq.png)
+This new operator, built with the [charm SDK](https://juju.is/docs/sdk), replaces the [**MariaDB**](https://charmhub.io/mariadb), [**OSM MariaDB**](https://charmhub.io/charmed-osm-mariadb-k8s), [**Percona cluster**](https://charmhub.io/percona-cluster) and [**MySQL InnoDB cluster**](https://charmhub.io/mysql-innodb-cluster) operators.
 
-MySQL is the world’s most popular open source database. A relational database stores data in separate tables rather than putting all the data in one big storeroom. The database structure is organized into physical files optimized for speed. The logical data model, with objects such as data tables, views, rows, and columns, offers a flexible programming environment.
+Charmed MySQL K8s includes features such as cluster-to-cluster replication, TLS encryption, password rotation, backups, and easy integration with other applications both inside and outside of Juju. It meets the need of deploying MySQL in a structured and consistent manner while allowing the user flexibility in configuration, simplifying reliable management of MySQL in production environments.
 
-This MySQL operator charm comes in two flavours to deploy and operate MySQL on [physical/virtual machines](https://github.com/canonical/mysql-operator) and [Kubernetes](https://github.com/canonical/mysql-k8s-operator). Both offer features such as replication, TLS, password rotation, and easy to use integration with applications. The Charmed MySQL K8s Operator meets the need of deploying MySQL in a structured and consistent manner while allowing the user flexibility in configuration. It simplifies deployment, scaling, configuration and management of MySQL in production at scale in a reliable way.
+![image|690x424](upload://vpevillwv3S9C44LDFBxkGCxpGq.png)
 
-[note type="positive"]
-**"Charmed MySQL K8s", "MariaDB", "OSM MariaDB", "Percona Cluster" or "Mysql Innodb Cluster"?**
+## In this documentation
 
-This "Charmed MySQL K8s" operator is a new "[Charmed SDK](https://juju.is/docs/sdk)"-based charm to replace a "[MariaDB](https://charmhub.io/mariadb)", "[OSM MariaDB](https://charmhub.io/charmed-osm-mariadb-k8s)", "[Percona Cluster](https://charmhub.io/percona-cluster)" and "[Mysql Innodb Cluster](https://charmhub.io/mysql-innodb-cluster)" operators.<br/>Read more about [legacy charms here](https://discourse.charmhub.io/t/11236).
-[/note]
+| | |
+|--|--|
+|  [Tutorials](/t/9677)</br>  Get started - a hands-on introduction to using Charmed MySQL operator for new users </br> |  [How-to guides](/t/9659) </br> Step-by-step guides covering key operations and common tasks |
+| [Reference](https://charmhub.io/mysql-k8s/actions) </br> Technical information - specifications, APIs, architecture | [Explanation](/t/10249) </br> Concepts - discussion and clarification of key topics  |
 
 ## Project and community
 
@@ -27,13 +26,6 @@ Charmed MySQL K8s is an official distribution of MySQL. It’s an open-source pr
 - Explore [Canonical Data Fabric solutions](https://canonical.com/data)
 - [Contact us](/t/11868) for all further questions
 
-## In this documentation
-
-| | |
-|--|--|
-|  [Tutorials](/t/9677)</br>  Get started - a hands-on introduction to using Charmed MySQL operator for new users </br> |  [How-to guides](/t/9659) </br> Step-by-step guides covering key operations and common tasks |
-| [Reference](https://charmhub.io/mysql-k8s/actions) </br> Technical information - specifications, APIs, architecture | [Explanation](/t/10249) </br> Concepts - discussion and clarification of key topics  |
-
 # Navigation
 
 [details=Navigation]
@@ -41,24 +33,28 @@ Charmed MySQL K8s is an official distribution of MySQL. It’s an open-source pr
 | Level | Path | Navlink |
 |---------|---------|-------------|
 | 1 | tutorial | [Tutorial]() |
-| 2 | t-overview | [1. Introduction](/t/9677) |
-| 2 | t-set-up | [2. Set up the environment](/t/9679) |
-| 2 | t-deploy | [3. Deploy MySQL](/t/9667) |
-| 2 | t-scale | [4. Scale replicas](/t/9675) |
-| 2 | t-manage-passwords | [5. Manage passwords](/t/9673) |
-| 2 | t-integrate | [6. Integrate applications](/t/9671)  |
-| 2 | t-enable-tls | [7. Enable TLS encryption](/t/9669) |
-| 2 | t-clean-up | [8. Clean up the environment](/t/9665) |
+| 2 | t-overview | [Overview](/t/9677) |
+| 2 | t-set-up | [1. Set up the environment](/t/9679) |
+| 2 | t-deploy | [2. Deploy MySQL](/t/9667) |
+| 2 | t-scale | [3. Scale replicas](/t/9675) |
+| 2 | t-manage-passwords | [4. Manage passwords](/t/9673) |
+| 2 | t-integrate | [5. Integrate applications](/t/9671)  |
+| 2 | t-enable-tls | [6. Enable TLS encryption](/t/9669) |
+| 2 | t-clean-up | [7. Clean up the environment](/t/9665) |
 | 1 | how-to | [How To]() |
-| 2 | h-setup | [Set up]() |
-| 3 | h-deploy-microk8s | [Deploy on MicroK8s](/t/11869) |
-| 3 | h-deploy-gke | [Deploy on GKE](/t/10875) |
-| 3 | h-deploy-eks | [Deploy on EKS](/t/12105) |
-| 3 | h-deploy-aks | [Deploy on AKS](/t/14306) |
-| 3 | h-deploy-terraform | [Deploy via Terraform](/t/14926) |
-| 3 |  h-scale | [Scale replicas](/t/9659) |
-| 3 | h-enable-tls | [Enable TLS encryption](/t/9655) |
-| 3 | h-manage-applications | [Manage client applications](/t/9657) |
+| 2 | h-setup | [Deploy]() |
+| 3 | h-deploy-canonical-k8s | [Canonical K8s](/t/15939) |
+| 3 | h-deploy-microk8s | [MicroK8s](/t/11869) |
+| 3 | h-deploy-gke | [GKE](/t/10875) |
+| 3 | h-deploy-eks | [EKS](/t/12105) |
+| 3 | h-deploy-aks | [AKS](/t/14306) |
+| 3 | h-deploy-multi-az | [Multi-AZ](/t/15727) |
+| 3 | h-deploy-terraform | [Terraform](/t/14926) |
+| 3 | h-deploy-airgapped | [Air-gapped](/t/15729) |
+| 2 | h-integrate| [Integrate with another application](/t/9657) |
+| 2 | h-external-access | [External access](/t/15728) |
+| 2 | h-scale | [Scale replicas](/t/9659) |
+| 2 | h-enable-tls | [Enable TLS](/t/9655) |
 | 2 | h-backups | [Back up and restore]() |
 | 3 | h-configure-s3-aws | [Configure S3 AWS](/t/9651) |
 | 3 | h-configure-s3-radosgw | [Configure S3 RadosGW](/t/10319) |
@@ -67,18 +63,14 @@ Charmed MySQL K8s is an official distribution of MySQL. It’s an open-source pr
 | 3 | h-migrate-cluster| [Migrate a cluster](/t/9661) |
 | 2 | h-monitoring | [Monitoring (COS)]() |
 | 3 | h-enable-monitoring | [Enable monitoring](/t/9981) |
+| 3 | h-enable-alert-rules | [Enable alert rules](/t/15488) |
 | 3 | h-enable-tracing | [Enable tracing](/t/14448) |
-| 3 | h-enable-alert-rules | [Enable Alert Rules](/t/15488) |
-| 2 | h-upgrade | [Upgrade]() |
-| 3 | h-upgrade-intro | [Overview](/t/11754) |
+| 2 | h-upgrade | [Upgrade](/t/11754) |
 | 3 | h-upgrade-juju | [Upgrade Juju](/t/14333) |
-| 3 | h-upgrade-major | [Perform a major upgrade](/t/11750) |
-| 3 | h-rollback-major | [Perform a major rollback](/t/11751) |
 | 3 | h-upgrade-minor | [Perform a minor upgrade](/t/11752) |
 | 3 | h-rollback-minor | [Perform a minor rollback](/t/11753) |
-| 2 | h-integrate-your-charm | [Integrate with your charm]() |
-| 3 | h-integrate-intro | [Intro](/t/11884) |
-| 3 | h-integrate-db-with-your-charm | [Integrate a database with your charm](/t/11885) |
+| 2 | h-development| [Development]() |
+| 3 | h-development-integrate | [Integrate with your charm](/t/11885) |
 | 3 | h-migrate-mysqldump | [Migrate data via mysqldump](/t/11992) |
 | 3 | h-migrate-mydumper | [Migrate data via mydumper](/t/12006) |
 | 3 | h-migrate-backup-restore | [Migrate data via backup/restore](/t/12007) |
@@ -91,23 +83,24 @@ Charmed MySQL K8s is an official distribution of MySQL. It’s an open-source pr
 | 3 | h-async-removal | [Removal](/t/13468) |
 | 2 | h-contribute | [Contribute](/t/14655) |
 | 1 | reference | [Reference]() |
-| 2 | r-releases | [Release Notes]() |
-| 3 | r-all-releases | [All releases](/t/11878) |
+| 2 | r-releases | [Release Notes](/t/11878) |
+| 3 | r-revision-210-211 | [Revision 210/211](/t/16133) |
 | 3 | r-revision-180-181 | [Revision 180/181](/t/15276) |
 | 3 | r-revision-153 | [Revision 153](/t/14072) |
 | 3 | r-revision-127 | [Revision 127](/t/13522) |
 | 3 | r-revision-113 | [Revision 113](/t/12221) |
 | 3 | r-revision-99 | [Revision 99](/t/11880) |
 | 3 | r-revision-75 | [Revision 75](/t/11879) |
-| 2 | r-requirements | [Requirements](/t/11421) |
+| 2 | r-system-requirements | [System requirements](/t/11421) |
 | 2 | r-testing | [Testing](/t/11772) |
 | 2 | r-profiles | [Profiles](/t/11892) |
 | 2 | r-plugins-extensions | [Plugins/extensions](/t/15482) |
+| 2 | r-alert-rules | [Alert rules](/t/15838) |
+| 2 | r-statuses | [Statuses](/t/11866) |
 | 2 | r-contacts | [Contacts](/t/11868) |
 | 1 | explanation | [Explanation]() |
 | 2 | e-architecture | [Architecture](/t/11757) |
 | 2 | e-interfaces-endpoints | [Interfaces/endpoints](/t/10249) |
-| 2 | e-statuses | [Statuses](/t/11866) |
 | 2 | e-users | [Users](/t/10791) |
 | 2 | e-logs | [Logs](/t/12080) |
 | 3 | e-audit-logs | [Audit Logs](/t/15423) |
@@ -116,4 +109,10 @@ Charmed MySQL K8s is an official distribution of MySQL. It’s an open-source pr
 | 2 | e-legacy-charm | [Legacy charm](/t/11236) |
 | 1 | search | [Search](https://canonical.com/data/docs/mysql/k8s) |
 
-[/details]
\ No newline at end of file
+[/details]
+
+<!--
+| 2 | h-development| [Development](/t/11884) |
+| 3 | h-upgrade-major | [Perform a major upgrade](/t/11750) |
+| 3 | h-rollback-major | [Perform a major rollback](/t/11751) |
+--> Charmed MySQL K8s Documentation
\ No newline at end of file
diff --git a/docs/reference/r-alert-rules.md b/docs/reference/r-alert-rules.md
new file mode 100644
index 000000000..03cca6962
--- /dev/null
+++ b/docs/reference/r-alert-rules.md
@@ -0,0 +1,23 @@
+# Alert rules
+
+This page contains a markdown version of the alert rules described in the `mysql-k8s-operator` repository.  The following file(s) are the source of truth:
+* [`prometheus_alert_rules/metrics_alert_rules.yaml`](https://github.com/canonical/mysql-k8s-operator/blob/main/src/prometheus_alert_rules/metrics_alert_rules.yaml)
+
+> This documentation describes the latest alert rule expressions. See the YAML file(s) listed above if you require an older version.
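+
+To inspect the exact rule expressions rather than this summary, you can fetch the raw counterpart of the file linked above directly from GitHub. For example:
+
+```shell
+# Print the alert rule definitions (the source of truth for this page)
+curl -sL https://raw.githubusercontent.com/canonical/mysql-k8s-operator/main/src/prometheus_alert_rules/metrics_alert_rules.yaml
+```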
+
+## MySQLExporterK8s
+
+| Alert | Severity | Notes |
+|------|----------|-------|
+| MySQLDown | ![critical] | MySQL instance is down.<br> |
+| MySQLTooManyConnections(>90%) | ![warning] | MySQL instance is using > 90% of `max_connections`.<br>Consider checking the client application responsible for generating those additional connections. |
+| MySQLHighThreadsRunning | ![warning] | MySQL instance is actively using > 80% of `max_connections`.<br>Consider reviewing the value of the `max-connections` config parameter or allocating more resources to your database server. |
+| MySQLHighPreparedStatementsUtilization(>80%) | ![warning] | MySQL instance is using > 80% of `max_prepared_stmt_count`.<br>Too many prepared statements might consume a lot of memory.  |
+| MySQLSlowQueries | ![info] | MySQL instance has a slow query.<br>Consider optimizing the query by reviewing its execution plan, then rewrite the query and add any relevant indexes.  |
+| MySQLInnoDBLogWaits | ![warning] | MySQL instance has long InnoDB log waits.<br>MySQL InnoDB log writes might be stalling. Check I/O activity on your nodes to find the responsible process or query. Consider using iotop and the performance_schema.  |
+| MySQLRestarted | ![info] | MySQL instance restarted.<br>MySQL restarted less than one minute ago. If the restart was unplanned or frequent, check Loki logs (e.g. `error.log`).  |
+
+<!-- Badges -->
+[info]: https://img.shields.io/badge/info-blue
+[warning]: https://img.shields.io/badge/warning-yellow
+[critical]: https://img.shields.io/badge/critical-red
\ No newline at end of file
diff --git a/docs/reference/r-all-releases.md b/docs/reference/r-all-releases.md
deleted file mode 100644
index 72c801819..000000000
--- a/docs/reference/r-all-releases.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Release Notes
-
-Canonical publishes here release notes for production ready revisions available in [CharmHub](https://charmhub.io) [channels](https://juju.is/docs/sdk/channel):
-
-
-| Revision | Channel | Juju 2.x | Juju 3.x |
-|---|---|---|---|
-| [revision 180/182](/t/15276) | `8.0/stable` | 2.9.32+ | 3.4.3/3.5.2+ ([#1](https://bugs.launchpad.net/juju/+bug/2065284)), ([#2](https://bugs.launchpad.net/juju/+bug/2064772)) |
-| [revision 153](/t/14072) | `8.0/stable` | 2.9.32+ | 3.4.3/3.5.2+ ([#1](https://bugs.launchpad.net/juju/+bug/2065284)), ([#2](https://bugs.launchpad.net/juju/+bug/2064772)) |
-| [revision 127](/t/13522) | `8.0/stable` |  2.9.32+ | 3.1.6+ ([#1](https://bugs.launchpad.net/juju/+bug/2029285), [#2](https://bugs.launchpad.net/juju/+bug/2029282)) |
-| [revision 113](/t/12221) | `8.0/stable` |  2.9.32+ | 3.1.6+ ([#1](https://bugs.launchpad.net/juju/+bug/2029285), [#2](https://bugs.launchpad.net/juju/+bug/2029282)) |
-| [revision 99](/t/11880) | `8.0/stable` |  2.9.32+ | 3.1.6+ ([#1](https://bugs.launchpad.net/juju/+bug/2029285), [#2](https://bugs.launchpad.net/juju/+bug/2029282)) |
-| [revision 75](/t/11879) | `8.0/stable` |  2.9.32+ | Not supported |
-
-All other [risks](https://juju.is/docs/sdk/channel#heading--risk) (`candidate`, `beta`, `edge`) are NOT recommended for production usage.
\ No newline at end of file
diff --git a/docs/reference/r-releases.md b/docs/reference/r-releases.md
new file mode 100644
index 000000000..c8794ff4d
--- /dev/null
+++ b/docs/reference/r-releases.md
@@ -0,0 +1,89 @@
+# Release Notes
+
+This page provides high-level overviews of the dependencies and features that are supported by each revision in every stable release.
+
+To learn more about the different release tracks and channels, see the [Juju documentation about channels](https://juju.is/docs/juju/channel#heading--risk).
+
+To see all releases and commits, check the [Charmed MySQL K8s Releases page on GitHub](https://github.com/canonical/mysql-k8s-operator/releases).
+
+| Release | MySQL version | Juju version | [TLS encryption](/t/9655)* | [COS monitoring](/t/9981) | [Minor version upgrades](/t/11752) | [Cross-regional async replication](/t/13458) |
+|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
+| [210], [211] | 8.0.39 | `3.5.4+` | ![check] | ![check] | ![check] | ![check] |
+| [180], [181] | 8.0.37 | `3.4.3+` | ![check] | ![check] | ![check] | ![check] |
+| [153] | 8.0.36 | `3.4.3+` | ![check] | ![check] | ![check] | ![check] |
+| [127] | 8.0.35 | `3.1.6+` |  | ![check] | ![check] |  |
+| [113] | 8.0.34 | `3.1.6+` |  | ![check] | ![check] |  |
+| [99] | 8.0.34 | `3.1.6+` |  | ![check] | ![check] |  |
+| [75] | 8.0.32 | `2.9.32+` |  | ![check] | ![check] |  |
+
+\* **TLS encryption**: Support for **`v2` or higher** of the [`tls-certificates` interface](https://charmhub.io/tls-certificates-interface/libraries/tls_certificates). This means that you can integrate with [modern TLS charms](https://charmhub.io/topics/security-with-x-509-certificates).
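+
+For example, TLS can be enabled on a supported revision by integrating with one of these TLS provider charms. A minimal sketch using `self-signed-certificates` (not suitable for production), as in the tutorial:
+
+```shell
+juju deploy self-signed-certificates
+juju integrate mysql-k8s self-signed-certificates
+```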
+
+> For more details about a particular revision, refer to its dedicated Release Notes page.
+> For more details about each feature/interface, refer to the documentation linked in the column headers.
+
+## Architecture and base
+Several [revisions](https://juju.is/docs/sdk/revision) are released simultaneously for different [bases/series](https://juju.is/docs/juju/base) using the same charm code. In other words, one release contains multiple revisions.
+
+> If you do not specify a revision at deploy time, Juju will automatically choose the revision that matches your base and architecture.
+> 
+> See: [`juju set-constraints`](https://juju.is/docs/juju/juju-set-constraints), [`juju info`](https://juju.is/docs/juju/juju-info) 
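+
+For example, to inspect which revisions and bases are published in each channel, and to pin the architecture at deploy time:
+
+```shell
+# List channels, revisions, and supported bases for the charm
+juju info mysql-k8s
+
+# Deploy a revision built for a specific architecture (amd64 or arm64)
+juju deploy mysql-k8s --trust --constraints arch=arm64
+```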
+
+### Release 210-211
+
+| Revision | amd64 | arm64 | Ubuntu 22.04 LTS
+|:--------:|:-----:|:-----:|:-----:|
+|[210]  |![check] | | ![check]  |
+|[211] |  | ![check]| ![check] |
+
+[details=Older releases]
+### Release 180-181
+
+| Revision | amd64 | arm64 | Ubuntu 22.04 LTS
+|:--------:|:-----:|:-----:|:-----:|
+|[180]  |![check] | | ![check]  |
+|[181] |  | ![check]| ![check] |
+
+### Release 153
+
+| Revision | amd64 | arm64 | Ubuntu 22.04 LTS
+|:--------:|:-----:|:-----:|:-----:|
+|[153] |![check]| | ![check]   |
+
+### Release 127
+
+| Revision | amd64 | arm64 | Ubuntu 22.04 LTS
+|:--------:|:-----:|:-----:|:-----:|
+|[127] |![check]| | ![check]   |
+
+### Release 113
+
+| Revision | amd64 | arm64 | Ubuntu 22.04 LTS
+|:--------:|:-----:|:-----:|:-----:|
+|[113] |![check]| | ![check]   |
+
+### Release 99
+
+| Revision | amd64 | arm64 | Ubuntu 22.04 LTS
+|:--------:|:-----:|:-----:|:-----:|
+|[99] |![check]| | ![check]   |
+
+### Release 75
+
+| Revision | amd64 | arm64 | Ubuntu 22.04 LTS
+|:--------:|:-----:|:-----:|:-----:|
+|[75] |![check]| | ![check]   |
+[/details]
+
+<!-- LINKS -->
+[210]: /t/16133
+[211]: /t/16133
+[180]: /t/15276
+[181]: /t/15276
+[153]: /t/14072
+[127]: /t/13522
+[113]: /t/12221
+[99]: /t/11880
+[75]: /t/11879
+
+<!-- BADGES -->
+[check]: https://img.icons8.com/color/20/checkmark--v1.png
\ No newline at end of file
diff --git a/docs/reference/r-requirements.md b/docs/reference/r-requirements.md
deleted file mode 100644
index 6e068e677..000000000
--- a/docs/reference/r-requirements.md
+++ /dev/null
@@ -1,32 +0,0 @@
-## Juju version
-
-The charm supports both [Juju 2.9 LTS](https://github.com/juju/juju/releases) and [Juju 3.x](https://github.com/juju/juju/releases), however charm revisions may require different Juju versions. Always check the [charm release notes](/t/11878) to find the minimal Juju version for your deployment.
-
-## Kubernetes requirements
-
-* Kubernetes 1.27+
-* Canonical MicroK8s 1.27+ (snap channel 1.27-strict/stable and newer)
-
-## Minimum requirements
-
-Make sure your machine meets the following requirements:
-- Ubuntu 22.04 (Jammy) or later.
-- 8GB of RAM.
-- 2 CPU threads.
-- At least 20GB of available storage.
-- Access to the internet for downloading the required OCI/ROCKs and charms.
-
-## Supported architectures
-
-The charm is based on [ROCK OCI](https://github.com/canonical/charmed-mysql-rock) named "[charmed-mysql](https://github.com/canonical/charmed-mysql-rock/pkgs/container/charmed-mysql)", which is recursively based on SNAP "[charmed-mysql](https://snapcraft.io/charmed-mysql)", which is currently available for `amd64` only! The architecture `arm64` support is planned. Please [contact us](/t/11868) if you are interested in new architecture!
-
-## Networking
-
-At the moment IPv4 is supported only (see more [info](https://warthogs.atlassian.net/browse/DPE-4695)).
-
-[Contact us](/t/11868) if you are interested in IPv6!
-
-<a name="mysql-gr-limits"></a>
-## MySQL Group Replication requirements
-* In order to integrate with this charm, every table created by the integrated application [u]must[/u] have a [u]primary key[/u]. This is required by the [group replication plugin](https://dev.mysql.com/doc/refman/8.0/en/group-replication-requirements.html) enabled in this charm.
-* the count of [Charmed MySQL K8s units](https://dev.mysql.com/doc/refman/8.0/en/group-replication-limitations.html) in a single Juju application is [u]limited to 9[/u]. Unit 10+ will start; however, they will not join the cluster but sleep in a hot-swap reserve.
\ No newline at end of file
diff --git a/docs/reference/r-revision-153.md b/docs/reference/r-revision-153.md
index 82ea59235..57a4e38fa 100644
--- a/docs/reference/r-revision-153.md
+++ b/docs/reference/r-revision-153.md
@@ -41,7 +41,7 @@ Canonical Data issues are now public on both [Jira](https://warthogs.atlassian.n
 * CLI mysql-shell updated to `8.0.36+dfsg-0ubuntu0.22.04.1~ppa4`
 * Backup tools xtrabackup/xbcloud is `8.0.35-30`
 * The Prometheus mysqld-exporter is `0.14.0-0ubuntu0.22.04.1~ppa2`
-* K8s charms [based on our ROCK OCI](https://github.com/canonical/charmed-mysql-rock) (Ubuntu LTS  `22.04.4`) revision `103`
+* K8s charms [based on our ROCK OCI](https://github.com/canonical/charmed-mysql-rock) (Ubuntu LTS  `22.04.4`) `mysql-image` resource-revision `111`
 * Principal charms support the latest Ubuntu 22.04 LTS only
 
 ## Technical notes
diff --git a/docs/reference/r-revision-180-181.md b/docs/reference/r-revision-180-181.md
index f1f170604..87b374db0 100644
--- a/docs/reference/r-revision-180-181.md
+++ b/docs/reference/r-revision-180-181.md
@@ -8,10 +8,10 @@ Dear community,
 Canonical's newest Charmed MySQL K8s operator has been published in the [8.0/stable channel].
 
 Due to the newly added support for arm64 architecture, the MySQL K8s charm now releases two revisions simultaneously:
-* Revision 180 is built for `amd64`
-* Revision 181 is built for `arm64`
+* Revision 180 is built for `amd64` (mysql-image r113)
+* Revision 181 is built for `arm64` (mysql-image r113)
 
-TO make sure you deploy for the right architecture, we recommend setting an [architecture constraint](https://juju.is/docs/juju/constraint#heading--arch) for your entire Juju model.
+To make sure you deploy for the right architecture, we recommend setting an [architecture constraint](https://juju.is/docs/juju/constraint#heading--arch) for your entire Juju model.
 
 Otherwise, you can specify the architecture at deploy time with the `--constraints` flag as follows:
 
@@ -21,7 +21,7 @@ juju deploy mysql-k8s --constraints arch=<arch> --trust
 where `<arch>` can be `amd64` or `arm64`.
 
 [note]
-This release of Charmed MySQL K8s requires Juju `v.3.4.3` or `3.5.2+`. See the [Technical details](#technical-details) section for more information.
+This release of Charmed MySQL K8s requires Juju `v3.4.3+` or `v3.5.2+`. See the [Requirements and compatibility](#requirements-and-compatibility) section for more information.
 [/note]
 
 ---
@@ -54,42 +54,56 @@ This section contains some technical details about the charm's contents and depe
 
 If you are jumping over several stable revisions, check [previous release notes][All releases] before upgrading.
 
-### Requirements
-This release of Charmed MySQL requires Juju `v.3.4.3` or `3.5.2+`. See the guide [How to upgrade Juju for a new database revision].
+## Requirements and compatibility
+This charm revision features the following changes in dependencies:
+* (increased) MySQL version `v8.0.37`
+
+> This release of Charmed MySQL K8s requires Juju `v3.4.3+` or `v3.5.2+`. See the guide [How to upgrade Juju for a new database revision].
 
 See the [system requirements] page for more details about software and hardware prerequisites.
 
-### Packaging
-This charm is based on the [`charmed-mysql` rock]. It packages:
-- mysql-server-8.0 `v8.0.37`
-  - [8.0.37-0ubuntu0.22.04.1]
-- mysql-router `v8.0.37`
-  - [8.0.37-0ubuntu0.22.04.1]
-- mysql-shell `v8.0.37`
-  - [8.0.37+dfsg-0ubuntu0.22.04.1~ppa3]
-- prometheus-mysqld-exporter `v0.14.0`
-  - [0.14.0-0ubuntu0.22.04.1~ppa2]
-- prometheus-mysqlrouter-exporter `v5.0.1`
-  - [5.0.1-0ubuntu0.22.04.1~ppa1]
-- percona-xtrabackup `v8.0.35`
-  - [8.0.35-31-0ubuntu0.22.04.1~ppa3]
-
-### Libraries and interfaces
-* **mysql `v0`**
-  * See the [Libraries tab] in MySQL VM for the API reference. <!--doesn't exist in K8s page-->
-* **grafana_agent `v0`** for integration with Grafana 
-    * Implements  `cos_agent` interface
-* **rolling_ops `v0`** for rolling operations across units 
-    * Implements `rolling_op` interface
-* **tempo_k8s `v1`, `v2`** for integration with Tempo charm
-    * Implements `tracing` interface
-* **tls_certificates_interface `v2`** for integration with TLS charms
-    * Implements `tls-certificates` interface
+### Integration tests
+Below are the charm integrations tested with this revision on different Juju environments and architectures:
+* Juju `v2.9.50` on `amd64`
+* Juju  `v3.4.5` on `amd64` and `arm64`
+
+**Juju `v2.9.50` on `amd64`:**
+
+| Software | Version | |
+|-----|-----|-----|
+| [tls-certificates-operator] | `rev 22`, `legacy/stable` | 
+
+**Juju `v3.4.5` on `amd64` and `arm64`:**
+
+| Software | Version | |
+|-----|-----|-----|
+| [self-signed-certificates] | `rev 155`, `latest/stable` | 
+
+**All:**
+
+| Software | Version | |
+|-----|-----|-----|
+| [microk8s] | `v1.28.12` | |
+| [s3-integrator] | `rev31` | |
+| [mysql-test-app] |  `0.0.2` | |
+| [mongodb-k8s] | `rev36` | |
+| [kafka-k8s] | `rev5` | |
+| [osm-keystone] | `rev10` | |
+| [osm-pol] | `rev337` | |
+| [zookeeper] | `10` | |
 
 See the [`/lib/charms` directory on GitHub] for a full list of supported libraries.
 
 See the [Integrations tab] for a full list of supported integrations/interfaces/endpoints
 
+
+### Packaging
+This charm is based on the [`charmed-mysql` rock] (Charmhub `mysql-image` resource-revision `113`). It packages:
+- mysql-server-8.0: [8.0.37-0ubuntu0.22.04.1]
+- mysql-router `v8.0.37`: [8.0.37-0ubuntu0.22.04.1]
+- mysql-shell `v8.0.37`: [8.0.37+dfsg-0ubuntu0.22.04.1~ppa3]
+- prometheus-mysqld-exporter `v0.14.0`: [0.14.0-0ubuntu0.22.04.1~ppa2]
+- prometheus-mysqlrouter-exporter `v5.0.1`: [5.0.1-0ubuntu0.22.04.1~ppa1]
+- percona-xtrabackup `v8.0.35`: [8.0.35-31-0ubuntu0.22.04.1~ppa3]
+
 ## Contact us
   
 Charmed MySQL K8s is an open source project that warmly welcomes community contributions, suggestions, fixes, and constructive feedback.  
@@ -110,6 +124,22 @@ Charmed MySQL K8s is an open source project that warmly welcomes community contr
 
 [`/lib/charms` directory on GitHub]: https://github.com/canonical/mysql-k8s-operator/tree/main/lib/charms
 
+[juju]: https://juju.is/docs/juju/
+[lxd]: https://documentation.ubuntu.com/lxd/en/latest/
+[data-integrator]: https://charmhub.io/data-integrator
+[s3-integrator]: https://charmhub.io/s3-integrator
+[microk8s]: https://charmhub.io/microk8s
+[tls-certificates-operator]: https://charmhub.io/tls-certificates-operator
+[self-signed-certificates]: https://charmhub.io/self-signed-certificates
+[mysql-test-app]: https://charmhub.io/mysql-test-app
+[landscape-client]: https://charmhub.io/landscape-client
+[ubuntu-advantage]: https://charmhub.io/ubuntu-advantage
+[mongodb-k8s]: https://charmhub.io/mongodb-k8s
+[kafka-k8s]: https://charmhub.io/kafka-k8s
+[osm-keystone]: https://charmhub.io/osm-keystone
+[osm-pol]: https://charmhub.io/osm-pol
+[zookeeper]: https://charmhub.io/zookeeper
+
 [`charmed-mysql` rock]: https://github.com/canonical/charmed-mysql-rock/pkgs/container/charmed-mysql
 [8.0.37-0ubuntu0.22.04.1]: https://launchpad.net/ubuntu/+source/mysql-8.0/8.0.37-0ubuntu0.22.04.3
 [8.0.37+dfsg-0ubuntu0.22.04.1~ppa3]: https://launchpad.net/~data-platform/+archive/ubuntu/mysql-shell
diff --git a/docs/reference/r-revision-210-211.md b/docs/reference/r-revision-210-211.md
new file mode 100644
index 000000000..cb995782d
--- /dev/null
+++ b/docs/reference/r-revision-210-211.md
@@ -0,0 +1,112 @@
+>Reference > Release Notes > [All revisions] > Revision 211/210
+
+# Revision 211/210
+<sub>January 6, 2025</sub>
+
+Dear community,
+
+Canonical's newest Charmed MySQL K8s operator has been published in the [8.0/stable channel]:
+* Revision 210 is built for `amd64` on Ubuntu 22.04 LTS
+* Revision 211 is built for `arm64` on Ubuntu 22.04 LTS
+
+If you are jumping over several stable revisions, check [previous release notes][All revisions] before upgrading.
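+
+If you are upgrading an existing deployment to this revision, follow the [minor upgrade guide](/t/11752) for the full procedure, including pre-upgrade checks. The refresh itself boils down to a single command, sketched below:
+
+```shell
+juju refresh mysql-k8s --channel 8.0/stable
+```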
+
+---
+
+## Highlights 
+* Updated MySQL to `v8.0.39` ([PR #488](https://github.com/canonical/mysql-k8s-operator/pull/488)) ([DPE-4573](https://warthogs.atlassian.net/browse/DPE-4573))
+* Added fully-featured terraform module ([PR #522](https://github.com/canonical/mysql-k8s-operator/pull/522)) ([DPE-5627](https://warthogs.atlassian.net/browse/DPE-5627))
+  * See also: [How to deploy on Terraform](/t/14926)
+* Updated COS alert rule descriptions ([PR #519](https://github.com/canonical/mysql-k8s-operator/pull/519)) ([DPE-5659](https://warthogs.atlassian.net/browse/DPE-5659))
+  * See also: [How to enable alert rules](/t/15488), 
+* Bumped juju versions ([PR #517](https://github.com/canonical/mysql-k8s-operator/pull/517))
+  * `v2.9.50` -> `v2.9.51`
+  * `v3.4.5` -> `v3.5.4`
+
+## Features and improvements
+* Integrated with Tempo HA and tested relay support of tracing traffic through `grafana-agent-k8s` ([PR #518](https://github.com/canonical/mysql-k8s-operator/pull/518)) ([DPE-5312](https://warthogs.atlassian.net/browse/DPE-5312))
+* Adopted admin address throughout charm ([PR #502](https://github.com/canonical/mysql-k8s-operator/pull/502)) ([DPE-5178](https://warthogs.atlassian.net/browse/DPE-5178))
+* Avoided an ambiguous service selector when multiple `mysql` apps in a model have the same cluster-name ([PR #501](https://github.com/canonical/mysql-k8s-operator/pull/501)) ([DPE-4861](https://warthogs.atlassian.net/browse/DPE-4861))
+* Ensured that an uninitialized variable is not referenced in the `_is_cluster_blocked` helper ([PR #507](https://github.com/canonical/mysql-k8s-operator/pull/507)) ([DPE-5481](https://warthogs.atlassian.net/browse/DPE-5481))
+* Recovered from pod restarts during cluster creation at setup time ([PR #499](https://github.com/canonical/mysql-k8s-operator/pull/499))
+* Added timeout on node count query ([PR #514](https://github.com/canonical/mysql-k8s-operator/pull/514)) ([DPE-5582](https://warthogs.atlassian.net/browse/DPE-5582))
+
+## Bugfixes and maintenance
+
+* Fixed a unit-initialized test that may break when run too early ([PR #491](https://github.com/canonical/mysql-k8s-operator/pull/491)) ([DPE-5209](https://warthogs.atlassian.net/browse/DPE-5209))
+* Common credentials fixture and `exec` timeout workaround ([PR #493](https://github.com/canonical/mysql-k8s-operator/pull/493)) ([DPE-5210](https://warthogs.atlassian.net/browse/DPE-5210))
+* Fixed /database requested wait container ([PR #500](https://github.com/canonical/mysql-k8s-operator/pull/500)) ([DPE-5385](https://warthogs.atlassian.net/browse/DPE-5385))
+* Attempted to stabilize failing integration tests ([PR #496](https://github.com/canonical/mysql-k8s-operator/pull/496))
+* Added a test to ensure correct k8s endpoints are created for clusters with the same name ([PR #508](https://github.com/canonical/mysql-k8s-operator/pull/508))
+* Added a check to ensure the peer databag is populated before reconciling mysqld exporter pebble layers ([PR #505](https://github.com/canonical/mysql-k8s-operator/pull/505)) ([DPE-5417](https://warthogs.atlassian.net/browse/DPE-5417))
+* Added base in test_multi_relations to work around a libjuju bug ([PR #506](https://github.com/canonical/mysql-k8s-operator/pull/506)) ([DPE-5480](https://warthogs.atlassian.net/browse/DPE-5480))
+
+[details=Libraries, testing, and CI]
+
+* Increased key log verbosity (s/debug/info/) ([PR #513](https://github.com/canonical/mysql-k8s-operator/pull/513))
+* Run juju 3.6 nightly tests against 3.6/stable ([PR #533](https://github.com/canonical/mysql-k8s-operator/pull/533))
+* Test for multi-relation scale in/out ([PR #489](https://github.com/canonical/mysql-k8s-operator/pull/489)) ([DPE-4613](https://warthogs.atlassian.net/browse/DPE-4613))
+* Test against juju 3.6/candidate + upgrade dpw to v23.0.5 ([PR #527](https://github.com/canonical/mysql-k8s-operator/pull/527))
+* Added workflow for nightly scheduled tests with juju 3.6 ([PR #490](https://github.com/canonical/mysql-k8s-operator/pull/490)) ([DPE-4976](https://warthogs.atlassian.net/browse/DPE-4976))
+* Switch from tox build wrapper to charmcraft.yaml overrides ([PR #509](https://github.com/canonical/mysql-k8s-operator/pull/509))
+* Update canonical/charming-actions action to v2.6.3 ([PR #497](https://github.com/canonical/mysql-k8s-operator/pull/497))
+* Update codecov/codecov-action action to v5 ([PR #526](https://github.com/canonical/mysql-k8s-operator/pull/526))
+* Update data-platform-workflows to v23.1.0 ([PR #532](https://github.com/canonical/mysql-k8s-operator/pull/532))
+* Update dependency canonical/microk8s to v1.31 ([PR #495](https://github.com/canonical/mysql-k8s-operator/pull/495))
+* Update dependency cryptography to v43 [SECURITY] ([PR #498](https://github.com/canonical/mysql-k8s-operator/pull/498))
+
+[/details]
+
+## Requirements and compatibility
+* (increased) MySQL version: `v8.0.37` -> `v8.0.39`
+* (increased) Minimum Juju 2 version: `v2.9.50` -> `v2.9.51`
+* (increased) Minimum Juju 3 version: `v3.4.5` -> `v3.5.4`
+
+See the [system requirements] for more details about Juju versions and other software and hardware prerequisites.
+
+### Packaging
+
+This charm is based on the Charmed MySQL K8s [rock image]. It packages:
+* [mysql-server-8.0] `v8.0.39`
+* [mysql-router] `v8.0.39`
+* [mysql-shell] `v8.0.38`
+* [prometheus-mysqld-exporter] `v0.14.0`
+* [prometheus-mysqlrouter-exporter] `v5.0.1`
+* [percona-xtrabackup] `v8.0.35`
+
+See the [`/lib/charms` directory on GitHub] for more details about all supported libraries.
+
+See the [`metadata.yaml` file on GitHub] for a full list of supported interfaces.
+
+
+<!-- Topics -->
+[All revisions]: /t/11878
+[system requirements]: /t/11421
+
+<!-- GitHub -->
+[`/lib/charms` directory on GitHub]: https://github.com/canonical/mysql-k8s-operator/tree/main/lib/charms
+[`metadata.yaml` file on GitHub]: https://github.com/canonical/mysql-k8s-operator/blob/main/metadata.yaml
+
+<!-- Charmhub -->
+[8.0/stable channel]: https://charmhub.io/mysql-k8s?channel=8.0/stable
+
+<!-- Snap/Rock -->
+[`charmed-mysql` packaging]: https://github.com/canonical/charmed-mysql-rock
+
+[MySQL Libraries tab]: https://charmhub.io/mysql/libraries
+
+[113/114]: https://github.com/canonical/charmed-mysql-snap/releases/tag/rev114
+[rock image]: https://github.com/canonical/charmed-mysql-rock/pkgs/container/charmed-mysql
+
+[mysql-server-8.0]: https://launchpad.net/ubuntu/+source/mysql-8.0/
+[mysql-router]: https://launchpad.net/ubuntu/+source/mysql-8.0/
+[mysql-shell]: https://launchpad.net/~data-platform/+archive/ubuntu/mysql-shell
+[prometheus-mysqld-exporter]: https://launchpad.net/~data-platform/+archive/ubuntu/mysqld-exporter
+[prometheus-mysqlrouter-exporter]: https://launchpad.net/~data-platform/+archive/ubuntu/mysqlrouter-exporter
+[percona-xtrabackup]: https://launchpad.net/~data-platform/+archive/ubuntu/xtrabackup
+
+
+<!-- Badges -->
+[juju-2_amd64]: https://img.shields.io/badge/Juju_2.9.51-amd64-darkgreen?labelColor=ea7d56 
+[juju-3_amd64]: https://img.shields.io/badge/Juju_3.4.6-amd64-darkgreen?labelColor=E95420 
+[juju-3_arm64]: https://img.shields.io/badge/Juju_3.4.6-arm64-blue?labelColor=E95420
\ No newline at end of file
diff --git a/docs/explanation/e-statuses.md b/docs/reference/r-statuses.md
similarity index 93%
rename from docs/explanation/e-statuses.md
rename to docs/reference/r-statuses.md
index da84250c1..fe31c746b 100644
--- a/docs/explanation/e-statuses.md
+++ b/docs/reference/r-statuses.md
@@ -1,6 +1,8 @@
-# Charm Statuses
+# Charm statuses
 
-> :warning: **WARNING** : it is an work-in-progress article. Do NOT use it in production! Contact [Canonical Data Platform team](https://chat.charmhub.io/charmhub/channels/data-platform) if you are interested in the topic.
+[note type="caution"]
+This is a work-in-progress article. Do NOT use it in production! Contact [Canonical Data Platform team](https://chat.charmhub.io/charmhub/channels/data-platform) if you are interested in the topic.
+[/note]
 
 The charm follows [standard Juju applications statuses](https://juju.is/docs/olm/status-values#heading--application-status). Here you can find the expected end-users reaction on different statuses:
 
diff --git a/docs/reference/r-system-requirements.md b/docs/reference/r-system-requirements.md
new file mode 100644
index 000000000..c037f4bd8
--- /dev/null
+++ b/docs/reference/r-system-requirements.md
@@ -0,0 +1,66 @@
+# System requirements
+
+The following are the minimum software and hardware requirements to run Charmed MySQL on Kubernetes.
+
+## Summary
+
+* [Software](#software)
+  * [Juju](#juju)
+  * [MySQL Group replication requirements](#mysql-group-replication-requirements)
+* [Hardware](#hardware)
+* [Networking](#networking)
+
+---
+
+## Software
+* Ubuntu 22.04 (Jammy) or later
+* Kubernetes 1.27+
+* Canonical MicroK8s 1.27+ (snap channel 1.27-strict/stable and newer)
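+
+For example, the MicroK8s requirement can be satisfied with a strictly confined snap channel (a sketch; any 1.27+ strict channel and newer works):
+
+```shell
+sudo snap install microk8s --channel=1.27-strict/stable
+```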
+
+### Juju
+
+The charm supports several Juju releases from [2.9 LTS](https://juju.is/docs/juju/roadmap#juju-juju-29) onwards. The table below shows which minor versions of each major Juju release are supported by the stable Charmhub releases of MySQL K8s. 
+
+| Juju major release | Supported minor versions | Compatible charm revisions |Comment |
+|:--------|:-----|:-----|:-----|
+| ![3.6 LTS] | `3.6.1+` | 213+ |     |
+| ![3.5] | `3.5.2+` | [153]+ |     |
+| ![3.4] | `3.4.3+` | [153]+ | Known issues with `3.4.2`: [bug #1](https://bugs.launchpad.net/juju/+bug/2065284), [bug #2](https://bugs.launchpad.net/juju/+bug/2064772)   |
+| ![3.1] | `3.1.6+` | [99]+ |     |
+| ![2.9 LTS] | `2.9.32+` | [75]+ |     |
+
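+To check which Juju version your client and model are running, a quick sketch:
+
+```shell
+# Juju client version
+juju version
+
+# The model summary printed by `juju status` includes the controller/model agent version
+juju status
+```
+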
+### MySQL Group Replication requirements
+
+* In order to integrate with this charm, every table created by the integrated application **must have a primary key**. This is required by the [group replication plugin](https://dev.mysql.com/doc/refman/8.0/en/group-replication-requirements.html) enabled in this charm.
+* The count of [Charmed MySQL K8s units](https://dev.mysql.com/doc/refman/8.0/en/group-replication-limitations.html) in a single Juju application is limited to 9. Units 10+ will start; however, they will not join the cluster but sleep in a hot-swap reserve.
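+
+For illustration, a table that satisfies the primary key requirement could be created through the `mysql` client as follows (the host, credentials, database, and table name are placeholders):
+
+```shell
+mysql -h <host> -u<username> -p<password> <database> \
+  -e "CREATE TABLE orders (id INT AUTO_INCREMENT PRIMARY KEY, item VARCHAR(64));"
+```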
+
+## Hardware
+
+Make sure your machine meets the following requirements:
+- 8GB of RAM.
+- 2 CPU threads.
+- At least 20GB of available storage.
+
+The charm is based on the [charmed-mysql ROCK OCI](https://github.com/canonical/charmed-mysql-rock), which is in turn based on the [charmed-mysql snap](https://snapcraft.io/charmed-mysql). It currently supports:
+* `amd64`
+* `arm64` (from revision 180+)
+
+[Contact us](/t/11868) if you are interested in a new architecture!
+
+## Networking
+* Access to the internet for downloading the required OCI/ROCKs and charms.
+* Only IPv4 is supported at the moment
+  * See more information about this limitation in [this Jira issue](https://warthogs.atlassian.net/browse/DPE-4695)
+  * [Contact us](/t/11868) if you are interested in IPv6!
+
+<!-- LINKS -->
+[153]: /t/14072
+[99]: /t/11880
+[75]: /t/11879
+
+<!-- BADGES -->
+[2.9 LTS]: https://img.shields.io/badge/2.9_LTS-%23E95420?label=Juju
+[3.1]: https://img.shields.io/badge/3.1-%23E95420?label=Juju
+[3.4]: https://img.shields.io/badge/3.4-%23E95420?label=Juju
+[3.5]: https://img.shields.io/badge/3.5-%23E95420?label=Juju
+[3.6 LTS]: https://img.shields.io/badge/3.6_LTS-%23E95420?label=Juju
\ No newline at end of file
diff --git a/docs/tutorial/t-clean-up.md b/docs/tutorial/t-clean-up.md
index f62432c45..7fba38946 100644
--- a/docs/tutorial/t-clean-up.md
+++ b/docs/tutorial/t-clean-up.md
@@ -1,17 +1,35 @@
-# Cleanup and extra info
+> [Charmed MySQL K8s Tutorial](/t/9677) > 7. Clean up your environment
 
-This is part of the [Charmed MySQL Tutorial](/t/charmed-mysql-k8s-tutorial-overview/9677). Please refer to this page for more information and the overview of the content.
+# Clean up your environment
 
-## Remove and cleanup environment
-If you're done with testing and would like to free up resources on your machine, just remove Multipass VM.
-*Warning: when you remove VM as shown below you will lose all the data in MySQL and any other applications inside Multipass VM!*
+In this tutorial we've successfully deployed and accessed MySQL on MicroK8s, added and removed cluster members, added and removed database users, and enabled a layer of security with TLS.
+
+You may now keep your MySQL K8s deployment running and write to the database, or remove it entirely using the steps in this page.
+
+## Stop your virtual machine
+If you'd like to keep your environment for later, simply stop your VM with
+```shell
+multipass stop my-vm
+```
+
+## Delete your virtual machine
+If you're done with testing and would like to free up resources on your machine, you can remove the VM entirely.
+
+[note type="caution"]
+**Warning**: When you remove the VM as shown below, you will lose all the data in MySQL and any other applications running inside the Multipass VM!
+
+For more information, see the docs for [`multipass delete`](https://multipass.run/docs/delete-command).
+[/note]
+
+**Delete your VM and its data** by running
 ```shell
 multipass delete --purge my-vm
 ```
 
+
 ## Next Steps
-In this tutorial we've successfully deployed MySQL, added/removed cluster members, added/removed users to/from the database, and even enabled and disabled TLS. You may now keep your Charmed MySQL K8s deployment running and write to the database or remove it entirely using the steps in [Remove Charmed MySQL K8s and Juju](#remove-charmed-mysql-and-juju). If you're looking for what to do next you can:
-- Run [Charmed MySQL VM on LXD](https://github.com/canonical/mysql-operator).
+
+- Run [Charmed MySQL on VM/IaaS](https://github.com/canonical/mysql-operator).
 - Check out our Charmed offerings of [PostgreSQL K8s](https://charmhub.io/postgresql-k8s?channel=14) and [Kafka K8s](https://charmhub.io/kafka-k8s?channel=edge).
 - Read about [High Availability Best Practices](https://canonical.com/blog/database-high-availability)
 - [Report](https://github.com/canonical/mysql-k8s-operator/issues) any problems you encountered.
diff --git a/docs/tutorial/t-deploy.md b/docs/tutorial/t-deploy.md
index dcd9d39e8..037acc829 100644
--- a/docs/tutorial/t-deploy.md
+++ b/docs/tutorial/t-deploy.md
@@ -1,26 +1,36 @@
-# Get a Charmed MySQL up and running
+> [Charmed MySQL K8s Tutorial](/t/9677) > 2. Deploy MySQL
 
-This is part of the [Charmed MySQL Tutorial](/t/charmed-mysql-k8s-tutorial-overview/9677). Please refer to this page for more information and the overview of the content.
+# Deploy Charmed MySQL
 
-## Deploy Charmed MySQL K8s
-> :information_source: **Info**: [the minimum Juju version supported is 2.9.44](/t/11421)
+In this section, you will deploy Charmed MySQL K8s, access a unit, and interact with the MySQL databases that exist inside the application.
 
-To deploy Charmed MySQL K8s, all you need to do is run the following command, which will fetch the charm from [Charmhub](https://charmhub.io/mysql-k8s?channel=8.0) and deploy it to your model:
+## Summary
+* [Deploy MySQL](#deploy-mysql)
+* [Access MySQL](#access-mysql)
+  * [Retrieve credentials](#retrieve-credentials)
+  * [Access MySQL via the `mysql` client](#access-mysql-via-the-mysql-client)
 
+---
+
+## Deploy MySQL
+
+To deploy Charmed MySQL K8s, run the following command:
 ```shell
-juju deploy mysql-k8s --channel 8.0 --trust
+juju deploy mysql-k8s --trust
 ```
-Note: `--trust` is required to create some K8s resources.
+> The `--trust` flag is necessary to create some K8s resources.
 
-Juju will now fetch Charmed MySQL K8s and begin deploying it to the Microk8s Kubernetes. This process can take several minutes depending on how provisioned (RAM, CPU, etc) your machine is. You can track the progress by running:
+Juju will now fetch Charmed MySQL K8s from [Charmhub](https://charmhub.io/mysql-k8s?channel=8.0) and deploy it to MicroK8s. This process can take several minutes depending on your machine's resources (RAM, CPU, etc.). You can track the progress by running:
 ```shell
 juju status --watch 1s
 ```
 
-This command is useful for checking the status of Charmed MySQL K8s and gathering information about the machines hosting Charmed MySQL. Some of the helpful information it displays include IP addresses, ports, state, etc. The command updates the status of Charmed MySQL K8s every second and as the application starts you can watch the status and messages of Charmed MySQL K8s change. Wait until the application is ready - when it is ready, `juju status` will show:
+>This command is useful for checking the real-time information about the state of a charm and the machines hosting it. Check the [`juju status` documentation](https://juju.is/docs/juju/juju-status) for more information about its usage.
+
+When the application is ready, `juju status` will show the `mysql-k8s` app as `active` and the `mysql-k8s/0*` unit as `idle`, like the example below:
 ```shell
 Model     Controller  Cloud/Region        Version  SLA          Timestamp
-tutorial  overlord    microk8s/localhost  2.9.44   unsupported  22:33:45+01:00
+tutorial  overlord    microk8s/localhost  3.5.2   unsupported  22:33:45+01:00
 
 App        Version   Status  Scale  Charm      Channel     Rev  Address         Exposed  Message
 mysql-k8s  8.0.31    active      1  mysql-k8s  8.0/stable  36   10.152.183.234  no       Unit is ready: Mode: RW
@@ -28,18 +38,29 @@ mysql-k8s  8.0.31    active      1  mysql-k8s  8.0/stable  36   10.152.183.234
 Unit          Workload  Agent  Address     Ports  Message
 mysql-k8s/0*  active    idle   10.1.84.74         Unit is ready: Mode: RW
 ```
-To exit the screen with `juju status --watch 1s`, enter `Ctrl+c`.
-If you want to further inspect juju logs, can watch for logs with `juju debug-log`.
-More info on logging at [juju logs](https://juju.is/docs/olm/juju-logs).
+> To exit the screen with `juju status --watch 1s`, enter `Ctrl+C`.
+
+You can also watch juju logs with the [`juju debug-log`](https://juju.is/docs/juju/juju-debug-log) command. More information about logging is available in the [juju logs documentation](https://juju.is/docs/olm/juju-logs).
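+
+For example, to replay the log history for the MySQL unit only (a sketch using standard `juju debug-log` flags):
+
+```shell
+juju debug-log --replay --include mysql-k8s/0
+```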
 
 ## Access MySQL
-> **!** *Disclaimer: this part of the tutorial accesses MySQL via the `root` user. **Do not** directly interface with the root user in a production environment. In a production environment always create a separate user using [Data Integrator](https://charmhub.io/data-integrator) and connect to MySQL with that user instead. Later in the section covering Relations we will cover how to access MySQL without the root user.*
+[note type="caution"]
+**Warning:** This part of the tutorial accesses MySQL via the `root` user. 
+
+**Do not directly interface with the `root` user in a production environment.**
+
+In a [later section about integrations](/t/9671), we will cover how to safely access MySQL via a separate user.
+[/note]
 
-The first action most users take after installing MySQL is accessing MySQL. The easiest way to do this is via the [MySQL Command-Line Client](https://dev.mysql.com/doc/refman/8.0/en/mysql.html) `mysql`. Connecting to the database requires that you know the values for `host`, `username` and `password`. To retrieve the necessary fields please run Charmed MySQL K8s action `get-password`:
+The easiest way to access MySQL is via the [MySQL Command-Line Client](https://dev.mysql.com/doc/refman/8.0/en/mysql.html) (`mysql`). For this, we must first retrieve the credentials.
+
+### Retrieve credentials
+Connecting to the database requires that you know the values for `host` (IP address), `username` and `password`. 
+
+To retrieve `username` and `password`, run the [Juju action](https://juju.is/docs/juju/action) `get-password` on the leader unit as follows:
 ```shell
-juju run-action mysql-k8s/leader get-password --wait
+juju run mysql-k8s/leader get-password
 ```
-Running the command should output:
+Example output:
 ```yaml
 unit-mysql-k8s-0:
   UnitId: mysql-k8s/0
@@ -54,12 +75,12 @@ unit-mysql-k8s-0:
     started: 2023-02-15 21:35:55 +0000 UTC
 ```
 
-*Note: to request a password for a different user, use an option `username`:*
+To request a password for a different user, use the option `username`:
 ```shell
-juju run-action mysql-k8s/leader get-password username=myuser --wait
+juju run mysql-k8s/leader get-password username=<username>
 ```
 
-The host’s IP address can be found with `juju status` (the unit hosting the MySQL K8s application):
+To retrieve the host’s IP address, run `juju status`. This should be listed under the "Public address" of the unit hosting the MySQL application:
 ```shell
 ...
 Unit          Workload  Agent  Address     Ports  Message
@@ -67,32 +88,47 @@ mysql-k8s/0*  active    idle   10.1.84.74         Unit is ready: Mode: RW
 ...
 ```
 
-To access the units hosting Charmed MySQL K8s use:
-```shell
-mysql -h 10.1.84.74 -uroot -p<password>
+### Access MySQL via the `mysql` client
+
+To access MySQL on the unit hosting Charmed MySQL K8s, one would normally use the following command:
+
 ```
-*Note: if at any point you'd like to leave the unit hosting Charmed MySQL, enter `Ctrl+d` or type `exit`*.
+mysql -h <ip_address> -u<username> -p<password>
+```
+
+However, this is not possible with the `root` user. For security reasons, the `root` user is restricted to only allow connections from localhost. 
 
-Inside MySQL list DBs available on the host `show databases`:
+The way to access the MySQL server as the `root` user is to first SSH into the primary Juju unit:
 ```shell
-> mysql -h 10.1.84.74 -uroot -psQI3Ojih7uL5UC4J1D9Xuqgx
+juju ssh mysql-k8s/leader
+```
+> In this case, we know the primary unit is the [juju leader unit](https://juju.is/docs/juju/leader), since it is the only existing unit. 
+>
+> In a cluster with more units, **the primary is not necessarily equivalent to the leader**. To identify the primary unit in a cluster, run `juju run mysql-k8s/<any_unit> get-cluster-status`. This will display the entire cluster topology.
 
-Server version: 8.0.31-0ubuntu0.22.04.1 (Ubuntu)
-...
+Once inside the unit, the `root` user can access MySQL by running
+```
+mysql -h 127.0.0.1 -uroot -psQI3Ojih7uL5UC4J1D9Xuqgx
+```
+> Remember, your password will be different from the example above. Make sure to insert it without a space, as `-p<password>`.
 
-mysql> show databases;
-+-------------------------------+
-| Database                      |
-+-------------------------------+
-| information_schema            |
-| mysql                         |
-| mysql_innodb_cluster_metadata |
-| performance_schema            |
-| sys                           |
-+-------------------------------+
-5 rows in set (0.01 sec)
+You will then see the `mysql>` command prompt, similar to the output below:
+```none
+Welcome to the MySQL monitor.  Commands end with ; or \g.
+Your MySQL connection id is 56
+Server version: 8.0.32-0ubuntu0.22.04.2 (Ubuntu)
+
+Copyright (c) 2000, 2023, Oracle and/or its affiliates.
+
+Oracle is a registered trademark of Oracle Corporation and/or its
+affiliates. Other names may be trademarks of their respective
+owners.
+
+Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
+
+mysql>
 ```
-*Note: if at any point you'd like to leave the MySQL client, enter `Ctrl+d` or type `exit`*.
+> If at any point you'd like to leave the mysql client, enter `Ctrl+D` or type `exit`.
 
 You can now interact with MySQL directly using any [MySQL Queries](https://dev.mysql.com/doc/refman/8.0/en/entering-queries.html). For example entering `SELECT VERSION(), CURRENT_DATE;` should output something like:
 ```shell
@@ -105,4 +141,6 @@ mysql> SELECT VERSION(), CURRENT_DATE;
 1 row in set (0.00 sec)
 ```
 
-Feel free to test out any other MySQL queries. When you’re ready to leave the MySQL shell you can just type `exit`. Now you will be in your original shell where you first started the tutorial; here you can interact with Juju and Microk8s.
\ No newline at end of file
+Feel free to test out any other MySQL queries. When you’re ready to leave the MySQL shell you can just type `exit`. Now you will be in your original shell where you first started the tutorial; here you can interact with Juju and MicroK8s.
+
+> Next step: [3. Scale your replicas](/t/9675)
\ No newline at end of file
diff --git a/docs/tutorial/t-enable-tls.md b/docs/tutorial/t-enable-tls.md
index 63e893b3e..88d0219c9 100644
--- a/docs/tutorial/t-enable-tls.md
+++ b/docs/tutorial/t-enable-tls.md
@@ -1,12 +1,14 @@
+> [Charmed MySQL K8s Tutorial](/t/9677) > 6. Enable TLS encryption
+
 # Enable encryption with TLS
 
 [Transport Layer Security (TLS)](https://en.wikipedia.org/wiki/Transport_Layer_Security) is a protocol used to encrypt data exchanged between two applications. Essentially, it secures data transmitted over a network.
 
-Typically, enabling TLS internally within a highly available database or between a highly available database and client/server applications requires a high level of expertise. This has all been encoded into Charmed MySQL K8s so that configuring TLS requires minimal effort on your end.
+Typically, enabling TLS internally within a highly available database or between a highly available database and client/server applications requires a high level of expertise. This has all been encoded into Charmed MySQL so that configuring TLS requires minimal effort on your end.
 
-TLS is enabled by integrating Charmed MySQL K8s with the [Self Signed Certificates Charm](https://charmhub.io/self-signed-certificates). This charm centralises TLS certificate management consistently and handles operations like providing, requesting, and renewing TLS certificates.
+TLS is enabled by integrating Charmed MySQL with the [Self Signed Certificates Charm](https://charmhub.io/self-signed-certificates). This charm centralises TLS certificate management consistently and handles operations like providing, requesting, and renewing TLS certificates.
 
-In this section, you will learn how to enable security in your MySQL K8s deployment using TLS encryption.
+In this section, you will learn how to enable security in your MySQL deployment using TLS encryption.
 
 [note type="caution"]
 **[Self-signed certificates](https://en.wikipedia.org/wiki/Self-signed_certificate) are not recommended for a production environment.**
@@ -14,18 +16,28 @@ In this section, you will learn how to enable security in your MySQL K8s deploym
 Check [this guide](/t/11664) for an overview of the TLS certificates charms available. 
 [/note]
 
+## Summary
+- [Enable TLS](#enable-tls)
+  - [Deploy TLS charm](#deploy-tls-charm)
+  - [Integrate with MySQL](#integrate-with-mysql)
+    - [Check the TLS certificate in use](#check-the-tls-certificate-in-use)
+- [Disable TLS](#disable-tls)
+
 ---
 
-### Configure TLS
-Before enabling TLS on Charmed MySQL K8s we must first deploy the `self-signed-certificates` charm:
+## Enable TLS
+To enable TLS on Charmed MySQL K8s, we must deploy the `self-signed-certificates` charm and integrate it with MySQL.
+
+### Deploy TLS charm
+Deploy the `self-signed-certificates` TLS charm with the following command:
 ```shell
 juju deploy self-signed-certificates --config ca-common-name="Tutorial CA"
 ```
 
-Wait until `self-signed-certificates`  is up and active, use `juju status --watch 1s` to monitor the progress:
+Wait until `self-signed-certificates` is up and active, using `juju status --watch 1s` to monitor its progress:
 ```shell
 Model     Controller  Cloud/Region        Version  SLA          Timestamp
-tutorial  overlord    microk8s/localhost  2.9.38   unsupported  23:04:02+01:00
+tutorial  overlord    microk8s/localhost  3.5.2  unsupported  23:04:02+01:00
 
 App                        Version   Status  Scale  Charm                      Channel      Rev  Address         Exposed  Message
 mysql-k8s                  8.0.31    active      2  mysql-k8s                  8.0/stable   36   10.152.183.234  no       
@@ -37,37 +49,40 @@ mysql-k8s/1                   active    idle   10.1.84.127         Unit is ready
 self-signed-certificates/0*   active    idle   10.1.84.71 
 ```
 
-### Add external TLS certificate
-To enable TLS on Charmed MySQL, relate the two applications:
+### Integrate with MySQL
+To enable TLS on Charmed MySQL, integrate the two applications:
 ```shell
-juju relate mysql-k8s self-signed-certificates
+juju integrate mysql-k8s self-signed-certificates
 ```
-#### Check the TLS certificate in use:
-Use `openssl` to connect to the MySQL and check the TLS certificate in use:
+
+MySQL K8s is now using a TLS certificate generated by the `self-signed-certificates` charm.
+
+#### Check the TLS certificate in use
+Use `openssl` to connect to MySQL and check the TLS certificate in use:
 ```shell
 > openssl s_client -starttls mysql -connect 10.1.84.74:3306 | grep Issuer
 ...
 depth=1 C = US, CN = Tutorial CA
 ...
 ```
-Congratulations! MySQL is now using TLS certificate generated by the external application `self-signed-certificates`.
 
+## Disable TLS
+To remove the external TLS certificate and return to the locally generated one, remove the integration between the applications:
 
-### Remove external TLS certificate
-To remove the external TLS and return to the locally generated one, remove the relation:
 ```shell
 juju remove-relation mysql-k8s self-signed-certificates
 ```
 
-#### Check the TLS certificate in use:
+If you once again check the TLS certificate in use via the OpenSSL client, you will see something similar to the output below:
 ```shell
 > openssl s_client -starttls mysql -connect 10.1.84.74:3306 | grep Issuer
 ```
 
-The output should be similar to:
 ```shell
 ...
 Issuer: CN = MySQL_Server_8.0.31_Auto_Generated_CA_Certificate
 ...
 ```
-The Charmed MySQL K8s application reverted to the certificate that was created locally during the MySQL server installation.
\ No newline at end of file
+The Charmed MySQL K8s application reverted to the certificate that was created locally during the MySQL server installation.
+
+>Next step: [7. Clean up your environment](/t/9665)
\ No newline at end of file
diff --git a/docs/tutorial/t-integrate.md b/docs/tutorial/t-integrate.md
index f03ee86b6..ec64a485a 100644
--- a/docs/tutorial/t-integrate.md
+++ b/docs/tutorial/t-integrate.md
@@ -1,26 +1,43 @@
-# Integrating your Charmed MySQL
+> [Charmed MySQL K8s Tutorial](/t/9677) > 5. Integrate with other applications
 
-This is part of the [Charmed MySQL Tutorial](/t/charmed-mysql-k8s-tutorial-overview/9677). Please refer to this page for more information and the overview of the content.
+# Integrate with other applications
 
-## Integrations (Relations for Juju 2.9)
-Relations, or what Juju 3.0+ documentation [describes as an Integration](https://juju.is/docs/sdk/integration), are the easiest way to create a user for MySQL in Charmed MySQL K8s. Relations automatically create a username, password, and database for the desired user/application. As mentioned earlier in the [Access MySQL section](#access-mysql) it is a better practice to connect to MySQL via a specific user rather than the admin user.
+[Integrations](https://juju.is/docs/sdk/integration), known as "relations" in Juju 2.9, are the easiest way to create a user for a Charmed MySQL application. 
 
-### Data Integrator Charm
-Before relating to a charmed application, we must first deploy our charmed application. In this tutorial we will relate to the [Data Integrator Charm](https://charmhub.io/data-integrator). This is a bare-bones charm that allows for central management of database users, providing support for different kinds of data platforms (e.g. MySQL, PostgreSQL, MongoDB, Kafka, etc) with a consistent, opinionated and robust user experience. In order to deploy the Data Integrator Charm we can use the command `juju deploy` we have learned above:
+Integrations automatically create a username, password, and database for the desired user/application. As mentioned in the [earlier section about accessing MySQL](/t/9667#access-mysql-via-the-mysql-client), it is better practice to connect to MySQL via a specific user instead of the `root` user.
+
+In this section, you will learn how to integrate your Charmed MySQL with another application (charmed or not) via the Data Integrator charm. 
+
+## Summary
+* [Deploy `data-integrator`](#deploy-data-integrator)
+* [Integrate with MySQL](#integrate-with-mysql)
+* [Access the integrated database](#access-the-integrated-database)
+* [Remove the user](#remove-the-user)
+
+---
+
+## Deploy `data-integrator`
+
+In this tutorial, we will relate to the [Data Integrator charm](https://charmhub.io/data-integrator). This is a bare-bones charm that allows for central management of database users. It automatically provides credentials and endpoints that are needed to connect with a charmed database application.
+
+To deploy `data-integrator`, run:
 
 ```shell
-juju deploy data-integrator --channel edge --config database-name=test-database
+juju deploy data-integrator --config database-name=test-database
 ```
-The expected output:
+
+Example output:
 ```shell
-Located charm "data-integrator" in charm-hub, revision 4
-Deploying "data-integrator" from charm-hub charm "data-integrator", revision 4 in channel edge on jammy
+Located charm "data-integrator" in charm-hub, revision 13
+Deploying "data-integrator" from charm-hub charm "data-integrator", revision 3 in channel edge on jammy
 ```
 
-Checking the deployment progress using `juju status` will show you the `blocked` state for newly deployed charm:
+Running `juju status` will show you `data-integrator` in a `blocked` state:
 ```shell
 Model     Controller  Cloud/Region        Version  SLA          Timestamp
-tutorial  overlord    microk8s/localhost  2.9.38   unsupported  22:54:31+01:00
+tutorial  overlord    microk8s/localhost  3.5.2   unsupported  22:54:31+01:00
 
 App              Version    Status   Scale  Charm            Channel     Rev  Address         Exposed  Message
 data-integrator             waiting      1  data-integrator  edge        4    10.152.183.180  no       installing agent
@@ -33,15 +50,19 @@ mysql-k8s/1         active    idle   10.1.84.127         Unit is ready: Mode: RO
 ```
 The `blocked` state is expected due to not-yet established relation (integration) between applications.
 
-### Relate to MySQL
-Now that the Database Integrator Charm has been set up, we can relate it to MySQL. This will automatically create a username, password, and database for the Database Integrator Charm. Relate the two applications with:
+## Integrate with MySQL
+
+Now that the `data-integrator` charm has been set up, we can relate it to MySQL. This will automatically create a username, password, and database for `data-integrator`.
+
+Relate the two applications with:
 ```shell
 juju relate data-integrator mysql-k8s
 ```
+
 Wait for `juju status --watch 1s` to show all applications/units as `active`:
 ```shell
 Model     Controller  Cloud/Region        Version  SLA          Timestamp
-tutorial  overlord    microk8s/localhost  2.9.38   unsupported  22:55:44+01:00
+tutorial  overlord    microk8s/localhost  3.5.2   unsupported  22:55:44+01:00
 
 App              Version    Status   Scale  Charm            Channel     Rev  Address         Exposed  Message
 data-integrator             waiting      1  data-integrator  edge        4    10.152.183.180  no       installing agent
@@ -53,11 +74,12 @@ mysql-k8s/0*        active    idle   10.1.84.74          Unit is ready: Mode: RW
 mysql-k8s/1         active    idle   10.1.84.127         Unit is ready: Mode: RO
 ```
 
-To retrieve information such as the username, password, and database. Enter:
+To retrieve the username, password, and database name, run the following command:
 ```shell
-juju run-action data-integrator/leader get-credentials --wait
+juju run data-integrator/leader get-credentials
 ```
-This should output something like:
+
+Example output:
 ```yaml
 unit-data-integrator-0:
   UnitId: data-integrator/0
@@ -76,10 +98,11 @@ unit-data-integrator-0:
     enqueued: 2023-02-15 21:56:17 +0000 UTC
     started: 2023-02-15 21:56:21 +0000 UTC
 ```
-*Note: your hostnames, usernames, and passwords will likely be different.*
+> Note that your hostnames, usernames, and passwords will be different.
+
+## Access the integrated database
 
-### Access the related database
-Use `endpoints`, `username`, `password` from above to connect newly created database `test-database` on MySQL K8s server:
+Use the `endpoints`, `username`, and `password` from the output above to connect to the newly created database `test-database` on the MySQL server:
 ```shell
 > mysql -h 10.1.84.74 -u relation-3 -p7VRfmGjfUI1pVUPsfbMwmHFm -e "show databases;"
 +--------------------+
@@ -104,34 +127,39 @@ The newly created database `test-database` is also available on all other MySQL
 +--------------------+
 ```
 
-When you relate two applications Charmed MySQL K8s automatically sets up a new user and database for you.
-Please note the database name we specified when we first deployed the `data-integrator` charm: `--config database-name=test-database`.
+When you integrate two applications, Charmed MySQL K8s automatically sets up a new user and database for you. Note the database name we specified when we first deployed the `data-integrator` charm: `--config database-name=test-database`.
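+
+If you want to double-check the configured database name, you can read it back from the charm configuration; this simply prints the value we set at deploy time:
+
+```shell
+juju config data-integrator database-name
+```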
 
-### Remove the user
-To remove the user, remove the relation. Removing the relation automatically removes the user that was created when the relation was created. Enter the following to remove the relation:
+## Remove the user
+To remove the user, remove the integration. Removing the integration automatically removes the user that it created.
+
+To remove the integration, run the following command:
 ```shell
 juju remove-relation mysql-k8s data-integrator
 ```
 
-Now try again to connect to the same MySQL K8s you just used in [Access the related database](#access-the-related-database):
+Try connecting to the same MySQL server you just used in the previous section ([Access the integrated database](#access-the-integrated-database)):
 ```shell
 mysql -h 10.1.84.74 -u relation-3 -p7VRfmGjfUI1pVUPsfbMwmHFm -e "show databases;"
 ```
 
-This will output an error message:
+This will output an error message, since the user no longer exists.
 ```shell
 ERROR 1045 (28000): Access denied for user 'relation-3'@'10.76.203.127' (using password: YES)
-```
-As this user no longer exists. This is expected as `juju remove-relation mysql-k8s data-integrator` also removes the user.
-Note: data stay remain on the server at this stage!
+```
+This is expected, as `juju remove-relation mysql-k8s data-integrator` also removes the user.
 
-Relate the the two applications again if you wanted to recreate the user:
+> **Note**: Data remains on the server at this stage.
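+
+If you'd like to verify this yourself, you can connect with the root credentials retrieved via the `get-password` action in the previous tutorial step, the same way as in the earlier access section (replace `<root-password>` with your actual root password); `test-database` should still be listed:
+
+```shell
+mysql -h 10.1.84.74 -u root -p<root-password> -e "show databases;"
+```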
+
+To create a user again, re-integrate the applications:
 ```shell
-juju relate data-integrator mysql-k8s
+juju integrate data-integrator mysql-k8s
 ```
-Re-relating generates a new user and password:
+
+Re-integrating generates a new user and password. Obtain these credentials as before, with the `get-credentials` action:
 ```shell
-juju run-action data-integrator/leader get-credentials --wait
+juju run data-integrator/leader get-credentials
 ```
-You can connect to the database with this new credentials.
-From here you will see all of your data is still present in the database.
\ No newline at end of file
+
+You can connect to the database with these new credentials. From here, you will see that all of your data is still present in the database.
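+
+For example, with the new `username` and `password` from the fresh `get-credentials` output (placeholders below; your values will differ), the connection command looks the same as before:
+
+```shell
+mysql -h 10.1.84.74 -u <new-username> -p<new-password> -e "show databases;"
+```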
+
+> Next step: [6. Enable TLS encryption](/t/9669)
\ No newline at end of file
diff --git a/docs/tutorial/t-manage-passwords.md b/docs/tutorial/t-manage-passwords.md
index fcd5dbc44..383de3673 100644
--- a/docs/tutorial/t-manage-passwords.md
+++ b/docs/tutorial/t-manage-passwords.md
@@ -1,16 +1,24 @@
-# Manage Passwords
+> [Charmed MySQL K8s Tutorial](/t/9677) > 4. Manage passwords
 
-This is part of the [Charmed MySQL Tutorial](/t/charmed-mysql-k8s-tutorial-overview/9677). Please refer to this page for more information and the overview of the content.
+# Manage passwords
 
-## Passwords
-When we accessed MySQL earlier in this tutorial, we needed to use a password manually. Passwords help to secure our database and are essential for security. Over time it is a good practice to change the password frequently. Here we will go through setting and changing the password for the admin user.
+When we accessed MySQL earlier in this tutorial, we needed to use a password manually. Passwords are essential for keeping our database secure, and it is good practice to change them regularly.
 
-### Retrieve the root password
-As previously mentioned, the root password can be retrieved by running the `get-password` action on the Charmed MySQL K8s application:
+This section will go through setting and changing the password for the admin user.
+
+## Summary
+* [Retrieve the root password](#retrieve-the-root-password)
+* [Rotate the root password](#rotate-the-root-password)
+* [Set the root password](#set-the-root-password)
+
+---
+
+## Retrieve the root password
+The root user's password can be retrieved by running the `get-password` action on the Charmed MySQL K8s application:
 ```shell
-juju run-action mysql-k8s/leader get-password --wait
+juju run mysql-k8s/leader get-password
 ```
-Running the command should output:
+Example output:
 ```yaml
 unit-mysql-k8s-0:
   UnitId: mysql-k8s/0
@@ -25,12 +33,12 @@ unit-mysql-k8s-0:
     started: 2023-02-15 21:51:04 +0000 UTC
 ```
 
-### Rotate the root password
-You can change the root password to a new random password by entering:
+## Rotate the root password
+You can change the root user's password to a new random password by running:
 ```shell
-juju run-action mysql-k8s/leader set-password --wait
+juju run mysql-k8s/leader set-password
 ```
-Running the command should output:
+Example output:
 ```yaml
 unit-mysql-k8s-0:
   UnitId: mysql-k8s/0
@@ -42,34 +50,19 @@ unit-mysql-k8s-0:
     enqueued: 2023-02-15 21:51:34 +0000 UTC
     started: 2023-02-15 21:51:37 +0000 UTC
 ```
-Please notice the `status: completed` above which means the password has been successfully updated. To be sure, please call `get-password` once again:
-```shell
-juju run-action mysql-k8s/leader get-password --wait
-```
-Running the command should output:
-```yaml
-unit-mysql-k8s-0:
-  UnitId: mysql-k8s/0
-  id: "12"
-  results:
-    password: sN6bcP1j7xRfhw4ZDblcIYK1
-    username: root
-  status: completed
-  timing:
-    completed: 2023-02-15 21:52:13 +0000 UTC
-    enqueued: 2023-02-15 21:52:11 +0000 UTC
-    started: 2023-02-15 21:52:12 +0000 UTC
 
-```
-The root password should be different from the previous password.
+The `status: completed` above means the password has been successfully updated. To be sure, call `get-password` once again to check that the root password is different from the previous password.
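+
+As a reminder, this is the same action we used earlier:
+
+```shell
+juju run mysql-k8s/leader get-password
+```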
 
-### Set the root password
-You can change the root password to a specific password by entering:
+## Set the root password
+You can change the root password to a specific password by running `set-password`:
+```shell
+juju run mysql-k8s/leader set-password password=my-password
+```
+Confirm with `get-password`:
 ```shell
-juju run-action mysql-k8s/leader set-password password=my-password --wait && \
-juju run-action mysql-k8s/leader get-password --wait
+juju run mysql-k8s/leader get-password
 ```
-Running the command should output:
+Example output:
 ```yaml
 unit-mysql-k8s-0:
   UnitId: mysql-k8s/0
@@ -92,4 +85,6 @@ unit-mysql-k8s-0:
     enqueued: 2023-02-15 21:52:47 +0000 UTC
     started: 2023-02-15 21:52:47 +0000 UTC
 ```
-The root `password` should match whatever you passed in when you entered the command.
\ No newline at end of file
+The root `password` should match whatever you passed in when you entered the command.
+
+> Next step: [5. Integrate with another application](/t/9671)
\ No newline at end of file
diff --git a/docs/tutorial/t-overview.md b/docs/tutorial/t-overview.md
index 00e5209de..9888deb26 100644
--- a/docs/tutorial/t-overview.md
+++ b/docs/tutorial/t-overview.md
@@ -1,26 +1,32 @@
-# Charmed MySQL K8s tutorial
+# Charmed MySQL K8s Tutorial
 
-The Charmed MySQL K8s Operator delivers automated operations management from [day 0 to day 2](https://codilime.com/blog/day-0-day-1-day-2-the-software-lifecycle-in-the-cloud-age/) on the [MySQL Community Edition](https://www.mysql.com/products/community/) relational database. It is an open source, end-to-end, production-ready data platform [on top of Juju](https://juju.is/). As a first step this tutorial shows you how to get Charmed MySQL K8s up and running, but the tutorial does not stop there. Through this tutorial you will learn a variety of operations, everything from adding replicas to advanced operations such as enabling Transport Layer Security (TLS). In this tutorial we will walk through how to:
-- Set up an environment using [Multipass](https://multipass.run/) with [Microk8s](https://microk8s.io/) and [Juju](https://juju.is/).
-- Deploy MySQL using a single command.
-- Access the admin database directly.
-- Add high availability with MySQL InnoDB Cluster, Group Replication.
-- Request and change the admin password.
-- Automatically create MySQL users via Juju relations.
-- Reconfigure TLS certificate in one command.
+This section of our documentation contains comprehensive, hands-on tutorials to help you learn how to deploy Charmed MySQL on Kubernetes and become familiar with its available operations.
 
-While this tutorial intends to guide and teach you as you deploy Charmed MySQL K8s, it will be most beneficial if you already have a familiarity with:
-- Basic terminal commands.
-- MySQL concepts such as replication and users.
+## Prerequisites
 
-## Step-by-step guide
+While this tutorial intends to guide you as you deploy Charmed MySQL K8s for the first time, it will be most beneficial if:
+- You have some experience using a Linux-based CLI
+- You are familiar with MySQL concepts such as replication and users
+- Your computer fulfils the [minimum system requirements](/t/11421)
 
-Here’s an overview of the steps required with links to our separate tutorials that deal with each individual step:
-* [Set up the environment](/t/9679)
-* [Deploy MySQL](/t/9667)
-* [Managing your units](/t/9675)
-* [Manage passwords](/t/9673)
-* [Relate your MySQL to other applications](/t/9671)
-* [Enable security](/t/9669)
-* [Upgrade charm](/t/11754)
-* [Cleanup your environment](/t/9665)
\ No newline at end of file
+## Tutorial contents
+
+| Step | Details |
+| ------- | ---------- |
+| 1. **[Set up your environment]** | Set up a cloud environment for your deployment using [Multipass](https://multipass.run/) with [Microk8s](https://microk8s.io/) and [Juju](https://juju.is/). |
+| 2. **[Deploy MySQL]** | Learn to deploy MySQL using a single command and access the database directly. |
+| 3. **[Scale your replicas]** | Learn how to enable high availability with [MySQL InnoDB Cluster](https://dev.mysql.com/doc/refman/8.0/en/mysql-innodb-cluster-introduction.html). |
+| 4. **[Manage passwords]** | Learn how to request and change passwords. |
+| 5. **[Integrate MySQL with other applications]** | Learn how to integrate with other applications using the Data Integrator charm, access the integrated database, and manage users. |
+| 6. **[Enable TLS encryption]** | Learn how to enable TLS encryption on your MySQL cluster. |
+| 7. **[Clean up your environment]** | Free up your machine's resources. |
+
+<!-- LINKS -->
+[Set up your environment]: /t/9679
+[Deploy MySQL]: /t/9667
+[Scale your replicas]: /t/9675
+[Manage passwords]: /t/9673
+[Integrate MySQL with other applications]: /t/9671
+[Enable TLS encryption]: /t/9669
+[Upgrade charm]: /t/11754
+[Clean up your environment]: /t/9665
\ No newline at end of file
diff --git a/docs/tutorial/t-scale.md b/docs/tutorial/t-scale.md
index edb0961f2..b10663fe4 100644
--- a/docs/tutorial/t-scale.md
+++ b/docs/tutorial/t-scale.md
@@ -1,23 +1,41 @@
-# Scale your Charmed MySQL
+> [Charmed MySQL K8s Tutorial](/t/9677) > 3. Scale your replicas
 
-This is part of the [Charmed MySQL Tutorial](/t/charmed-mysql-k8s-tutorial-overview/9677). Please refer to this page for more information and the overview of the content.
+# Scale your replicas
 
-## Adding and Removing units
+In this section, you will learn to scale your Charmed MySQL K8s deployment by adding or removing Juju units.
 
-Charmed MySQL K8s operator uses [MySQL InnoDB Cluster](https://dev.mysql.com/doc/refman/8.0/en/mysql-innodb-cluster-introduction.html) for scaling. Being built on MySQL [Group Replication](https://dev.mysql.com/doc/refman/8.0/en/group-replication.html), provides features such as automatic membership management, fault tolerance, automatic failover, and so on. An InnoDB Cluster usually runs in a single-primary mode, with one primary instance (read-write) and multiple secondary instances (read-only). The future versions on Charmed MySQL K8s will take advantage of a multi-primary mode, where multiple instances are primaries. Users can even change the topology of the cluster while InnoDB Cluster is online, to ensure the highest possible availability.
+The Charmed MySQL K8s operator uses [MySQL InnoDB Cluster](https://dev.mysql.com/doc/refman/8.0/en/mysql-innodb-cluster-introduction.html) for scaling. It is built on MySQL [Group Replication](https://dev.mysql.com/doc/refman/8.0/en/group-replication.html), providing features such as automatic membership management, fault tolerance, and automatic failover. 
 
-> **!** *Disclaimer: this tutorial hosts replicas all on the same machine, this should not be done in a production environment. To enable high availability in a production environment, replicas should be hosted on different servers to [maintain isolation](https://canonical.com/blog/database-high-availability).*
+An InnoDB Cluster usually runs in a single-primary mode, with one primary instance (read-write) and multiple secondary instances (read-only). 
 
-### Add cluster members (replicas)
-You can add two replicas to your deployed MySQL application by scaling it to three units using:
+<!-- TODO: clarify "future" Future versions on Charmed MySQL will take advantage of a multi-primary mode, where multiple instances are primaries. Users can even change the topology of the cluster while InnoDB Cluster is online, to ensure the highest possible availability. -->
+
+[note type="caution"]
+**Disclaimer:** This tutorial hosts all replicas on the same machine. **This should not be done in a production environment.**
+
+To enable high availability in a production environment, replicas should be hosted on different servers to [maintain isolation](https://canonical.com/blog/database-high-availability).
+[/note]
+
+## Summary
+* [Add replicas](#add-replicas)
+* [Remove replicas](#remove-replicas)
+
+---
+
+Currently, your deployment has only one [Juju unit](https://juju.is/docs/juju/unit), known in Juju as the leader unit. For each MySQL replica, a new Juju unit (non-leader) is created. All units are members of the same database cluster.
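+
+As a quick check, the leader unit is marked with an asterisk in the `juju status` output (for example, `mysql-k8s/0*`):
+
+```shell
+juju status mysql-k8s
+```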
+
+## Add replicas
+You can add two replicas to your deployed MySQL K8s application by scaling it to a total of three units with `juju scale-application`:
 ```shell
 juju scale-application mysql-k8s 3
 ```
 
-You can now watch the scaling process in live using: `juju status --watch 1s`. It usually takes several minutes for new cluster members to be added. You’ll know that all three nodes are in sync when `juju status` reports `Workload=active` and `Agent=idle`:
+You can now watch the scaling process live using `juju status --watch 1s`. It usually takes several minutes for new cluster members to be added.
+
+You’ll know that all three nodes are in sync when `juju status` reports `Workload=active` and `Agent=idle`:
 ```
 Model     Controller  Cloud/Region        Version  SLA          Timestamp
-tutorial  overlord    microk8s/localhost  2.9.38   unsupported  22:48:57+01:00
+tutorial  overlord    microk8s/localhost  3.5.2   unsupported  22:48:57+01:00
 
 App        Version    Status  Scale  Charm      Channel     Rev  Address         Exposed  Message
 mysql-k8s  8.0.31     active      3  mysql-k8s  8.0/stable  36   10.152.183.234  no       
@@ -28,8 +46,16 @@ mysql-k8s/1   active    idle   10.1.84.127         Unit is ready: Mode: RO
 mysql-k8s/2   active    idle   10.1.84.73          Unit is ready: Mode: RO
 ```
 
-### Remove cluster members (replicas)
-Removing a unit from the application, scales the replicas down. Before we scale down the replicas, list all the units with `juju status`, here you will see three units `mysql-k8s/0`, `mysql-k8s/1`, and `mysql-k8s/2`. Each of these units hosts a MySQL replica. To scale the application down to two units, enter:
+[note]
+The maximum number of Charmed MySQL units in a single Juju application is 9. This is a limitation of MySQL Group replication. Read more about all limitations in the [official MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/group-replication-limitations.html).
+[/note]
+
+## Remove replicas
+Removing a unit from the application scales down the replicas. 
+
+Before we scale down, list all the units with `juju status`. You will see three units: `mysql-k8s/0`, `mysql-k8s/1`, and `mysql-k8s/2`. Each of these units hosts a MySQL K8s replica. 
+
+To scale the application down to two units, run:
 ```shell
 juju scale-application mysql-k8s 2
 ```
@@ -37,7 +63,7 @@ juju scale-application mysql-k8s 2
 You’ll know that the replica was successfully removed when `juju status --watch 1s` reports:
 ```
 Model     Controller  Cloud/Region        Version  SLA          Timestamp
-tutorial  overlord    microk8s/localhost  2.9.38   unsupported  22:48:57+01:00
+tutorial  overlord    microk8s/localhost  3.5.2   unsupported  22:48:57+01:00
 
 App        Version    Status  Scale  Charm      Channel     Rev  Address         Exposed  Message
 mysql-k8s  8.0.31     active      3  mysql-k8s  8.0/stable  36   10.152.183.234  no       
@@ -47,5 +73,4 @@ mysql-k8s/0*  active    idle   10.1.84.74          Unit is ready: Mode: RW
 mysql-k8s/1   active    idle   10.1.84.127         Unit is ready: Mode: RO
 ```
 
-### Scaling limitations
-**Note**: the maximum number of Charmed MySQL K8s units in a single Juju application is 9. It is a limitation of MySQL Group replication, read more about all limitations [here](https://dev.mysql.com/doc/refman/8.0/en/group-replication-limitations.html).
\ No newline at end of file
+> Next step: [4. Manage passwords](/t/9673)
\ No newline at end of file
diff --git a/docs/tutorial/t-set-up.md b/docs/tutorial/t-set-up.md
index 0caee3444..f8c33b438 100644
--- a/docs/tutorial/t-set-up.md
+++ b/docs/tutorial/t-set-up.md
@@ -1,39 +1,63 @@
-# Environment Setup
+> [Charmed MySQL K8s Tutorial](/t/9677) > 1. Set up your environment
 
-This is part of the [Charmed MySQL K8s Tutorial](/t/9677). Please refer to this page for more information and the overview of the content.
+# Set up your environment
 
-## Minimum requirements
-Before we start, make sure your machine meets [the following requirements](/t/11421).
+In this first step, you will set up a development environment with the required components for deploying Charmed MySQL K8s.
 
-## Multipass environment
-[Multipass](https://multipass.run/) is a quick and easy way to launch virtual machines running Ubuntu. It uses "[cloud-init](https://cloud-init.io/)" standard to install and configure all the necessary parts automatically.
+[note]
+Before you start, make sure your machine meets the [minimum system requirements](/t/11421).
+[/note]
 
-Let's install Multipass from [Snap](https://snapcraft.io/multipass) and launch a new VM using "[charm-dev](https://github.com/canonical/multipass-blueprints/blob/main/v1/charm-dev.yaml)" cloud-init config:
+## Summary
+* [Set up Multipass](#set-up-multipass)
+* [Set up Juju](#set-up-juju)
+
+---
+
+## Set up Multipass
+[Multipass](https://multipass.run/) is a quick and easy way to launch virtual machines running Ubuntu. It uses the [cloud-init](https://cloud-init.io/) standard to install and configure all the necessary parts automatically.
+
+Install Multipass from the [snap store](https://snapcraft.io/multipass):
 ```shell
-sudo snap install multipass && \
-multipass launch --cpus 4 --memory 8G --disk 30G --name my-vm charm-dev
+sudo snap install multipass
 ```
-*Note: all 'multipass launch' params are [described here](https://multipass.run/docs/launch-command)*.
 
-Multipass [list of commands](https://multipass.run/docs/multipass-cli-commands) is short and self-explanatory, e.g. show all running VMs:
+Launch a new VM using the [`charm-dev`](https://github.com/canonical/multipass-blueprints/blob/main/v1/charm-dev.yaml) cloud-init config:
 ```shell
-multipass list
+multipass launch --cpus 4 --memory 8G --disk 30G --name my-vm charm-dev
 ```
 
-As soon as new VM started, enter inside using:
+> All `multipass launch` params are described in the [Multipass documentation](https://multipass.run/docs/launch-command).
+
+The list of [Multipass commands](https://multipass.run/docs/multipass-cli-commands) is short and self-explanatory. For example, to show all running VMs, just run `multipass list`.
+
+As soon as the new VM has started, access it with the following command:
 ```shell
 multipass shell my-vm
 ```
-*Note: if at any point you'd like to leave Multipass VM, enter `Ctrl+d` or type `exit`*.
+> If at any point you'd like to leave the Multipass VM, enter `Ctrl+D` or type `exit`.
+
+All necessary components, such as MicroK8s and Juju, are already pre-installed inside the VM. The files `/var/log/cloud-init.log` and `/var/log/cloud-init-output.log` contain all low-level installation details.
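+
+Optionally, you can confirm that these components are ready from inside the VM (exact version numbers will differ):
+
+```shell
+juju version
+microk8s status --wait-ready
+```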
+
+## Set up Juju
 
-All the parts have been pre-installed inside VM already, like MicroK8s and Juju (the files '/var/log/cloud-init.log' and '/var/log/cloud-init-output.log' contain all low-level installation details). The Juju controller can work with different models; models host applications such as Charmed MySQL K8s. Set up a specific model for Charmed MySQL K8s named ‘tutorial’:
+Let's bootstrap a Juju controller on the local MicroK8s cloud. We will call the controller "overlord", but you can give it any name you'd like.
 ```shell
-juju add-model tutorial
+juju bootstrap microk8s overlord
 ```
 
-You can now view the model you created above by entering the command `juju status` into the command line. You should see the following:
+The controller can work with different [Juju models](https://juju.is/docs/juju/model). Set up a specific model for Charmed MySQL K8s named ‘tutorial’:
 ```shell
-Model     Controller  Cloud/Region        Version  SLA          Timestamp
-tutorial  overlord    microk8s/localhost  2.9.38   unsupported  22:30:11+01:00
+juju add-model tutorial
+```
+
+You can now view the model you created above by running `juju status`. You should see something similar to the following output:
+
+```none
+Model    Controller  Cloud/Region         Version  SLA          Timestamp
+tutorial overlord    microk8s/localhost   3.5.2    unsupported  23:20:53+01:00
+
 Model "admin/tutorial" is empty.
-```
\ No newline at end of file
+```
+
+> Next step: [2. Deploy MySQL](/t/9667)
\ No newline at end of file
diff --git a/lib/charms/data_platform_libs/v0/data_interfaces.py b/lib/charms/data_platform_libs/v0/data_interfaces.py
index aaed2e528..971711903 100644
--- a/lib/charms/data_platform_libs/v0/data_interfaces.py
+++ b/lib/charms/data_platform_libs/v0/data_interfaces.py
@@ -331,7 +331,7 @@ def _on_topic_requested(self, event: TopicRequestedEvent):
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 39
+LIBPATCH = 41
 
 PYDEPS = ["ops>=2.0.0"]
 
@@ -391,6 +391,10 @@ class IllegalOperationError(DataInterfacesError):
     """To be used when an operation is not allowed to be performed."""
 
 
+class PrematureDataAccessError(DataInterfacesError):
+    """To be raised when the Relation Data may be accessed (written) before protocol init complete."""
+
+
 ##############################################################################
 # Global helpers / utilities
 ##############################################################################
@@ -605,7 +609,7 @@ def get_group(self, group: str) -> Optional[SecretGroup]:
 class CachedSecret:
     """Locally cache a secret.
 
-    The data structure is precisely re-using/simulating as in the actual Secret Storage
+    The data structure precisely reuses/simulates the one in the actual Secret Storage
     """
 
     KNOWN_MODEL_ERRORS = [MODEL_ERRORS["no_label_and_uri"], MODEL_ERRORS["owner_no_refresh"]]
@@ -1453,6 +1457,8 @@ def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
 class ProviderData(Data):
     """Base provides-side of the data products relation."""
 
+    RESOURCE_FIELD = "database"
+
     def __init__(
         self,
         model: Model,
@@ -1618,6 +1624,15 @@ def _fetch_my_specific_relation_data(
     def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None:
         """Set values for fields not caring whether it's a secret or not."""
         req_secret_fields = []
+
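+        # Guard against premature writes: until the requirer has provided the
+        # resource field (e.g. database/topic/index) on this relation, only
+        # `endpoints`, `read-only-endpoints` and `replset` may be written.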
+        keys = set(data.keys())
+        if self.fetch_relation_field(relation.id, self.RESOURCE_FIELD) is None and (
+            keys - {"endpoints", "read-only-endpoints", "replset"}
+        ):
+            raise PrematureDataAccessError(
+                "Premature access to relation data, update is forbidden before the connection is initialized."
+            )
+
         if relation.app:
             req_secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS)
 
@@ -2348,7 +2363,6 @@ def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> Non
     def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None:
         """Delete data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app."""
         if self.secret_fields and self.deleted_label:
-
             _, normal_fields = self._process_secret_fields(
                 relation,
                 self.secret_fields,
@@ -3290,6 +3304,8 @@ class KafkaRequiresEvents(CharmEvents):
 class KafkaProviderData(ProviderData):
     """Provider-side of the Kafka relation."""
 
+    RESOURCE_FIELD = "topic"
+
     def __init__(self, model: Model, relation_name: str) -> None:
         super().__init__(model, relation_name)
 
@@ -3539,6 +3555,8 @@ class OpenSearchRequiresEvents(CharmEvents):
 class OpenSearchProvidesData(ProviderData):
     """Provider-side of the OpenSearch relation."""
 
+    RESOURCE_FIELD = "index"
+
     def __init__(self, model: Model, relation_name: str) -> None:
         super().__init__(model, relation_name)
 
diff --git a/lib/charms/data_platform_libs/v0/data_models.py b/lib/charms/data_platform_libs/v0/data_models.py
index a1dbb8299..087f6f3c5 100644
--- a/lib/charms/data_platform_libs/v0/data_models.py
+++ b/lib/charms/data_platform_libs/v0/data_models.py
@@ -168,7 +168,7 @@ class MergedDataBag(ProviderDataBag, RequirerDataBag):
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 4
+LIBPATCH = 5
 
 PYDEPS = ["ops>=2.0.0", "pydantic>=1.10,<2"]
 
@@ -209,7 +209,7 @@ def validate_params(cls: Type[T]):
     """
 
     def decorator(
-        f: Callable[[CharmBase, ActionEvent, Union[T, ValidationError]], G]
+        f: Callable[[CharmBase, ActionEvent, Union[T, ValidationError]], G],
     ) -> Callable[[CharmBase, ActionEvent], G]:
         @wraps(f)
         def event_wrapper(self: CharmBase, event: ActionEvent):
@@ -287,7 +287,7 @@ def decorator(
                 Optional[Union[UnitModel, ValidationError]],
             ],
             G,
-        ]
+        ],
     ) -> Callable[[CharmBase, RelationEvent], G]:
         @wraps(f)
         def event_wrapper(self: CharmBase, event: RelationEvent):
diff --git a/lib/charms/data_platform_libs/v0/data_secrets.py b/lib/charms/data_platform_libs/v0/data_secrets.py
index 5147b0f8b..a2c098cb5 100644
--- a/lib/charms/data_platform_libs/v0/data_secrets.py
+++ b/lib/charms/data_platform_libs/v0/data_secrets.py
@@ -17,7 +17,7 @@
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 2
+LIBPATCH = 3
 
 
 APP_SCOPE = "app"
@@ -50,7 +50,7 @@ def generate_secret_label(charm: CharmBase, scope: Scopes) -> str:
 class CachedSecret:
     """Abstraction layer above direct Juju access with caching.
 
-    The data structure is precisely re-using/simulating Juju Secrets behavior, while
+    The data structure is precisely reusing/simulating Juju Secrets behavior, while
     also making sure not to fetch a secret multiple times within the same event scope.
     """
 
diff --git a/lib/charms/grafana_k8s/v0/grafana_dashboard.py b/lib/charms/grafana_k8s/v0/grafana_dashboard.py
index dfc32ddcb..d618c7991 100644
--- a/lib/charms/grafana_k8s/v0/grafana_dashboard.py
+++ b/lib/charms/grafana_k8s/v0/grafana_dashboard.py
@@ -157,7 +157,7 @@ def __init__(self, *args):
         self._on_dashboards_changed,
     )
 
-Dashboards can be retrieved the :meth:`dashboards`:
+Dashboards can be retrieved via the `dashboards` method:
 
 It will be returned in the format of:
 
@@ -175,7 +175,6 @@ def __init__(self, *args):
 The consuming charm should decompress the dashboard.
 """
 
-import base64
 import hashlib
 import json
 import logging
@@ -187,7 +186,7 @@ def __init__(self, *args):
 import tempfile
 import uuid
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Tuple, Union
+from typing import Any, Callable, Dict, List, Optional, Tuple
 
 import yaml
 from ops.charm import (
@@ -209,6 +208,7 @@ def __init__(self, *args):
     StoredState,
 )
 from ops.model import Relation
+from cosl import LZMABase64, DashboardPath40UID
 
 # The unique Charmhub library identifier, never change it
 LIBID = "c49eb9c7dfef40c7b6235ebd67010a3f"
@@ -219,7 +219,9 @@ def __init__(self, *args):
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
 
-LIBPATCH = 36
+LIBPATCH = 42
+
+PYDEPS = ["cosl >= 0.0.50"]
 
 logger = logging.getLogger(__name__)
 
@@ -415,8 +417,7 @@ def __init__(
         self.expected_relation_interface = expected_relation_interface
         self.actual_relation_interface = actual_relation_interface
         self.message = (
-            "The '{}' relation has '{}' as "
-            "interface rather than the expected '{}'".format(
+            "The '{}' relation has '{}' as " "interface rather than the expected '{}'".format(
                 relation_name, actual_relation_interface, expected_relation_interface
             )
         )
@@ -544,357 +545,486 @@ def _validate_relation_by_interface_and_direction(
         raise Exception("Unexpected RelationDirection: {}".format(expected_relation_role))
 
 
-def _encode_dashboard_content(content: Union[str, bytes]) -> str:
-    if isinstance(content, str):
-        content = bytes(content, "utf-8")
+class CharmedDashboard:
+    """A helper class for handling dashboards on the requirer (Grafana) side."""
 
-    return base64.b64encode(lzma.compress(content)).decode("utf-8")
+    @classmethod
+    def _convert_dashboard_fields(cls, content: str, inject_dropdowns: bool = True) -> str:
+        """Make sure values are present for Juju topology.
 
+        Inserts Juju topology variables and selectors into the template, as well as
+        a variable for Prometheus.
+        """
+        dict_content = json.loads(content)
+        datasources = {}
+        existing_templates = False
+
+        template_dropdowns = (
+            TOPOLOGY_TEMPLATE_DROPDOWNS + DATASOURCE_TEMPLATE_DROPDOWNS  # type: ignore
+            if inject_dropdowns
+            else DATASOURCE_TEMPLATE_DROPDOWNS
+        )
 
-def _decode_dashboard_content(encoded_content: str) -> str:
-    return lzma.decompress(base64.b64decode(encoded_content.encode("utf-8"))).decode()
+        # If the dashboard has __inputs, get the names to replace them. These are stripped
+        # from reactive dashboards in GrafanaDashboardAggregator, but charm authors in
+        # newer charms may import them directly from the marketplace
+        if "__inputs" in dict_content:
+            for field in dict_content["__inputs"]:
+                if "type" in field and field["type"] == "datasource":
+                    datasources[field["name"]] = field["pluginName"].lower()
+            del dict_content["__inputs"]
+
+        # If no existing template variables exist, just insert our own
+        if "templating" not in dict_content:
+            dict_content["templating"] = {"list": list(template_dropdowns)}  # type: ignore
+        else:
+            # Otherwise, set a flag so we can go back later
+            existing_templates = True
+            for template_value in dict_content["templating"]["list"]:
+                # Build a list of `datasource_name`: `datasource_type` mappings
+                # The "query" field is actually "prometheus", "loki", "influxdb", etc
+                if "type" in template_value and template_value["type"] == "datasource":
+                    datasources[template_value["name"]] = template_value["query"].lower()
+
+            # Put our own variables in the template
+            for d in template_dropdowns:  # type: ignore
+                if d not in dict_content["templating"]["list"]:
+                    dict_content["templating"]["list"].insert(0, d)
+
+        dict_content = cls._replace_template_fields(dict_content, datasources, existing_templates)
+        return json.dumps(dict_content)
 
+    @classmethod
+    def _replace_template_fields(  # noqa: C901
+        cls, dict_content: dict, datasources: dict, existing_templates: bool
+    ) -> dict:
+        """Make templated fields get cleaned up afterwards.
 
-def _convert_dashboard_fields(content: str, inject_dropdowns: bool = True) -> str:
-    """Make sure values are present for Juju topology.
+        If existing datasource variables are present, try to substitute them.
+        """
+        replacements = {"loki": "${lokids}", "prometheus": "${prometheusds}"}
+        used_replacements = []  # type: List[str]
+
+        # If any existing datasources match types we know, or we didn't find
+        # any templating variables at all, template them.
+        if datasources or not existing_templates:
+            panels = dict_content.get("panels", {})
+            if panels:
+                dict_content["panels"] = cls._template_panels(
+                    panels, replacements, used_replacements, existing_templates, datasources
+                )
 
-    Inserts Juju topology variables and selectors into the template, as well as
-    a variable for Prometheus.
-    """
-    dict_content = json.loads(content)
-    datasources = {}
-    existing_templates = False
-
-    template_dropdowns = (
-        TOPOLOGY_TEMPLATE_DROPDOWNS + DATASOURCE_TEMPLATE_DROPDOWNS  # type: ignore
-        if inject_dropdowns
-        else DATASOURCE_TEMPLATE_DROPDOWNS
-    )
+            # Find panels nested under rows
+            rows = dict_content.get("rows", {})
+            if rows:
+                for row_idx, row in enumerate(rows):
+                    if "panels" in row.keys():
+                        rows[row_idx]["panels"] = cls._template_panels(
+                            row["panels"],
+                            replacements,
+                            used_replacements,
+                            existing_templates,
+                            datasources,
+                        )
+
+                dict_content["rows"] = rows
+
+        # Finally, go back and pop off the templates we stubbed out
+        deletions = []
+        for tmpl in dict_content["templating"]["list"]:
+            if tmpl["name"] and tmpl["name"] in used_replacements:
+                # it might happen that existing template var name is the same as the one we insert (i.e prometheusds or lokids)
+                # in that case, we want to pop the existing one only.
+                if tmpl not in DATASOURCE_TEMPLATE_DROPDOWNS:
+                    deletions.append(tmpl)
+
+        for d in deletions:
+            dict_content["templating"]["list"].remove(d)
+
+        return dict_content
+
+    @classmethod
+    def _template_panels(
+        cls,
+        panels: dict,
+        replacements: dict,
+        used_replacements: list,
+        existing_templates: bool,
+        datasources: dict,
+    ) -> dict:
+        """Iterate through a `panels` object and template it appropriately."""
+        # Go through all the panels. If they have a datasource set, AND it's one
+        # that we can convert to ${lokids} or ${prometheusds}, by stripping off the
+        # ${} templating and comparing the name to the list we built, replace it,
+        # otherwise, leave it alone.
+        #
+        for panel in panels:
+            if "datasource" not in panel or not panel.get("datasource"):
+                continue
+            if not existing_templates:
+                datasource = panel.get("datasource")
+                if isinstance(datasource, str):
+                    if "loki" in datasource:
+                        panel["datasource"] = "${lokids}"
+                    elif "grafana" in datasource:
+                        continue
+                    else:
+                        panel["datasource"] = "${prometheusds}"
+                elif isinstance(datasource, dict):
+                    # In dashboards exported by Grafana 9, datasource type is dict
+                    dstype = datasource.get("type", "")
+                    if dstype == "loki":
+                        panel["datasource"]["uid"] = "${lokids}"
+                    elif dstype == "prometheus":
+                        panel["datasource"]["uid"] = "${prometheusds}"
+                    else:
+                        logger.debug("Unrecognized datasource type '%s'; skipping", dstype)
+                        continue
+                else:
+                    logger.error("Unknown datasource format: skipping")
+                    continue
+            else:
+                if isinstance(panel["datasource"], str):
+                    if panel["datasource"].lower() in replacements.values():
+                        # Already a known template variable
+                        continue
+                    # Strip out variable characters and maybe braces
+                    ds = re.sub(r"(\$|\{|\})", "", panel["datasource"])
+
+                    if ds not in datasources.keys():
+                        # Unknown, non-templated datasource, potentially a Grafana builtin
+                        continue
+
+                    replacement = replacements.get(datasources[ds], "")
+                    if replacement:
+                        used_replacements.append(ds)
+                    panel["datasource"] = replacement or panel["datasource"]
+                elif isinstance(panel["datasource"], dict):
+                    dstype = panel["datasource"].get("type", "")
+                    if panel["datasource"].get("uid", "").lower() in replacements.values():
+                        # Already a known template variable
+                        continue
+                    # Strip out variable characters and maybe braces
+                    ds = re.sub(r"(\$|\{|\})", "", panel["datasource"].get("uid", ""))
+
+                    if ds not in datasources.keys():
+                        # Unknown, non-templated datasource, potentially a Grafana builtin
+                        continue
+
+                    replacement = replacements.get(datasources[ds], "")
+                    if replacement:
+                        used_replacements.append(ds)
+                        panel["datasource"]["uid"] = replacement
+                else:
+                    logger.error("Unknown datasource format: skipping")
+                    continue
+        return panels
 
-    # If the dashboard has __inputs, get the names to replace them. These are stripped
-    # from reactive dashboards in GrafanaDashboardAggregator, but charm authors in
-    # newer charms may import them directly from the marketplace
-    if "__inputs" in dict_content:
-        for field in dict_content["__inputs"]:
-            if "type" in field and field["type"] == "datasource":
-                datasources[field["name"]] = field["pluginName"].lower()
-        del dict_content["__inputs"]
-
-    # If no existing template variables exist, just insert our own
-    if "templating" not in dict_content:
-        dict_content["templating"] = {"list": list(template_dropdowns)}  # type: ignore
-    else:
-        # Otherwise, set a flag so we can go back later
-        existing_templates = True
-        for template_value in dict_content["templating"]["list"]:
-            # Build a list of `datasource_name`: `datasource_type` mappings
-            # The "query" field is actually "prometheus", "loki", "influxdb", etc
-            if "type" in template_value and template_value["type"] == "datasource":
-                datasources[template_value["name"]] = template_value["query"].lower()
+    @classmethod
+    def _inject_labels(cls, content: str, topology: dict, transformer: "CosTool") -> str:
+        """Inject Juju topology into panel expressions via CosTool.
 
-        # Put our own variables in the template
-        for d in template_dropdowns:  # type: ignore
-            if d not in dict_content["templating"]["list"]:
-                dict_content["templating"]["list"].insert(0, d)
+        A dashboard will have a structure approximating:
+            {
+                "__inputs": [],
+                "templating": {
+                    "list": [
+                        {
+                            "name": "prometheusds",
+                            "type": "prometheus"
+                        }
+                    ]
+                },
+                "panels": [
+                    {
+                        "foo": "bar",
+                        "targets": [
+                            {
+                                "some": "field",
+                                "expr": "up{job="foo"}"
+                            },
+                            {
+                                "some_other": "field",
+                                "expr": "sum(http_requests_total{instance="$foo"}[5m])}
+                            }
+                        ],
+                        "datasource": "${someds}"
+                    }
+                ]
+            }
 
-    dict_content = _replace_template_fields(dict_content, datasources, existing_templates)
-    return json.dumps(dict_content)
+        `templating` is used elsewhere in this library, but the structure is not rigid. It is
+        not guaranteed that a panel will actually have any targets (it could be a "spacer" with
+        no datasource, hence no expression). It could have only one target. It could have multiple
+        targets. It could have multiple targets of which only one has an `expr` to evaluate. We need
+        to try to handle all of these concisely.
 
+        `cos-tool` (`github.com/canonical/cos-tool` as a Go module in general)
+        does not know "Grafana-isms", such as using `[$_variable]` to modify the query from the user
+        interface, so we add placeholders (as `5y`, since it must parse, but a dashboard looking for
+        five years for a panel query would be unusual).
 
-def _replace_template_fields(  # noqa: C901
-    dict_content: dict, datasources: dict, existing_templates: bool
-) -> dict:
-    """Make templated fields get cleaned up afterwards.
+        Args:
+            content: dashboard content as a string
+            topology: a dict containing topology values
+            transformer: a 'CosTool' instance
+        Returns:
+            dashboard content with replaced values.
+        """
+        dict_content = json.loads(content)
 
-    If existing datasource variables are present, try to substitute them.
-    """
-    replacements = {"loki": "${lokids}", "prometheus": "${prometheusds}"}
-    used_replacements = []  # type: List[str]
-
-    # If any existing datasources match types we know, or we didn't find
-    # any templating variables at all, template them.
-    if datasources or not existing_templates:
-        panels = dict_content.get("panels", {})
-        if panels:
-            dict_content["panels"] = _template_panels(
-                panels, replacements, used_replacements, existing_templates, datasources
-            )
+        if "panels" not in dict_content.keys():
+            return json.dumps(dict_content)
 
-        # Find panels nested under rows
-        rows = dict_content.get("rows", {})
-        if rows:
-            for row_idx, row in enumerate(rows):
-                if "panels" in row.keys():
-                    rows[row_idx]["panels"] = _template_panels(
-                        row["panels"],
-                        replacements,
-                        used_replacements,
-                        existing_templates,
-                        datasources,
-                    )
-
-            dict_content["rows"] = rows
-
-    # Finally, go back and pop off the templates we stubbed out
-    deletions = []
-    for tmpl in dict_content["templating"]["list"]:
-        if tmpl["name"] and tmpl["name"] in used_replacements:
-            deletions.append(tmpl)
-
-    for d in deletions:
-        dict_content["templating"]["list"].remove(d)
-
-    return dict_content
-
-
-def _template_panels(
-    panels: dict,
-    replacements: dict,
-    used_replacements: list,
-    existing_templates: bool,
-    datasources: dict,
-) -> dict:
-    """Iterate through a `panels` object and template it appropriately."""
-    # Go through all the panels. If they have a datasource set, AND it's one
-    # that we can convert to ${lokids} or ${prometheusds}, by stripping off the
-    # ${} templating and comparing the name to the list we built, replace it,
-    # otherwise, leave it alone.
-    #
-    for panel in panels:
-        if "datasource" not in panel or not panel.get("datasource"):
-            continue
-        if not existing_templates:
-            datasource = panel.get("datasource")
-            if isinstance(datasource, str):
-                if "loki" in datasource:
-                    panel["datasource"] = "${lokids}"
-                elif "grafana" in datasource:
-                    continue
-                else:
-                    panel["datasource"] = "${prometheusds}"
-            elif isinstance(datasource, dict):
-                # In dashboards exported by Grafana 9, datasource type is dict
-                dstype = datasource.get("type", "")
-                if dstype == "loki":
-                    panel["datasource"]["uid"] = "${lokids}"
-                elif dstype == "prometheus":
-                    panel["datasource"]["uid"] = "${prometheusds}"
-                else:
-                    logger.debug("Unrecognized datasource type '%s'; skipping", dstype)
-                    continue
-            else:
-                logger.error("Unknown datasource format: skipping")
+        # Go through all the panels and inject topology labels
+        # Panels may have more than one 'target' where the expressions live, so that must be
+        # accounted for. Additionally, `promql-transform` does not necessarily gracefully handle
+        # expressions with range queries including variables. Exclude these.
+        #
+        # It is not a certainty that the `datasource` field will necessarily reflect the type, so
+        # operate on all fields.
+        panels = dict_content["panels"]
+        topology_with_prefix = {"juju_{}".format(k): v for k, v in topology.items()}
+
+        # We need to use an index so we can insert the changed element back later
+        for panel_idx, panel in enumerate(panels):
+            if not isinstance(panel, dict):
                 continue
-        else:
-            if isinstance(panel["datasource"], str):
-                if panel["datasource"].lower() in replacements.values():
-                    # Already a known template variable
-                    continue
-                # Strip out variable characters and maybe braces
-                ds = re.sub(r"(\$|\{|\})", "", panel["datasource"])
 
-                if ds not in datasources.keys():
-                    # Unknown, non-templated datasource, potentially a Grafana builtin
-                    continue
+            # Use the index to insert it back in the same location
+            panels[panel_idx] = cls._modify_panel(panel, topology_with_prefix, transformer)
 
-                replacement = replacements.get(datasources[ds], "")
-                if replacement:
-                    used_replacements.append(ds)
-                panel["datasource"] = replacement or panel["datasource"]
-            elif isinstance(panel["datasource"], dict):
-                dstype = panel["datasource"].get("type", "")
-                if panel["datasource"].get("uid", "").lower() in replacements.values():
-                    # Already a known template variable
-                    continue
-                # Strip out variable characters and maybe braces
-                ds = re.sub(r"(\$|\{|\})", "", panel["datasource"].get("uid", ""))
+        return json.dumps(dict_content)
 
-                if ds not in datasources.keys():
-                    # Unknown, non-templated datasource, potentially a Grafana builtin
-                    continue
+    @classmethod
+    def _modify_panel(cls, panel: dict, topology: dict, transformer: "CosTool") -> dict:
+        """Inject Juju topology into panel expressions via CosTool.
 
-                replacement = replacements.get(datasources[ds], "")
-                if replacement:
-                    used_replacements.append(ds)
-                    panel["datasource"]["uid"] = replacement
-            else:
-                logger.error("Unknown datasource format: skipping")
-                continue
-    return panels
+        Args:
+            panel: a dashboard panel as a dict
+            topology: a dict containing topology values
+            transformer: a 'CosTool' instance
+        Returns:
+            the panel with injected values
+        """
+        if "targets" not in panel.keys():
+            return panel
 
+        # Pre-compile a regular expression to grab values from inside of []
+        range_re = re.compile(r"\[(?P<value>.*?)\]")
+        # Do the same for any offsets
+        offset_re = re.compile(r"offset\s+(?P<value>-?\s*[$\w]+)")
 
-def _inject_labels(content: str, topology: dict, transformer: "CosTool") -> str:
-    """Inject Juju topology into panel expressions via CosTool.
+        known_datasources = {"${prometheusds}": "promql", "${lokids}": "logql"}
 
-    A dashboard will have a structure approximating:
-        {
-            "__inputs": [],
-            "templating": {
-                "list": [
-                    {
-                        "name": "prometheusds",
-                        "type": "prometheus"
-                    }
-                ]
-            },
-            "panels": [
-                {
-                    "foo": "bar",
-                    "targets": [
-                        {
-                            "some": "field",
-                            "expr": "up{job="foo"}"
-                        },
-                        {
-                            "some_other": "field",
-                            "expr": "sum(http_requests_total{instance="$foo"}[5m])}
-                        }
-                    ],
-                    "datasource": "${someds}"
-                }
-            ]
-        }
+        targets = panel["targets"]
 
-    `templating` is used elsewhere in this library, but the structure is not rigid. It is
-    not guaranteed that a panel will actually have any targets (it could be a "spacer" with
-    no datasource, hence no expression). It could have only one target. It could have multiple
-    targets. It could have multiple targets of which only one has an `expr` to evaluate. We need
-    to try to handle all of these concisely.
+        # We need to use an index so we can insert the changed element back later
+        for idx, target in enumerate(targets):
+            # If there's no expression, we don't need to do anything
+            if "expr" not in target.keys():
+                continue
+            expr = target["expr"]
 
-    `cos-tool` (`github.com/canonical/cos-tool` as a Go module in general)
-    does not know "Grafana-isms", such as using `[$_variable]` to modify the query from the user
-    interface, so we add placeholders (as `5y`, since it must parse, but a dashboard looking for
-    five years for a panel query would be unusual).
+            if "datasource" not in panel.keys():
+                continue
 
-    Args:
-        content: dashboard content as a string
-        topology: a dict containing topology values
-        transformer: a 'CosTool' instance
-    Returns:
-        dashboard content with replaced values.
-    """
-    dict_content = json.loads(content)
+            if isinstance(panel["datasource"], str):
+                if panel["datasource"] not in known_datasources:
+                    continue
+                querytype = known_datasources[panel["datasource"]]
+            elif isinstance(panel["datasource"], dict):
+                if panel["datasource"]["uid"] not in known_datasources:
+                    continue
+                querytype = known_datasources[panel["datasource"]["uid"]]
+            else:
+                logger.error("Unknown datasource format: skipping")
+                continue
 
-    if "panels" not in dict_content.keys():
-        return json.dumps(dict_content)
+            # Capture all values inside `[]` into a list which we'll iterate over later to
+            # put them back in-order. Then apply the regex again and replace everything with
+            # `[5y]` so promql/parser will take it.
+            #
+            # Then do it again for offsets
+            range_values = [m.group("value") for m in range_re.finditer(expr)]
+            expr = range_re.sub(r"[5y]", expr)
+
+            offset_values = [m.group("value") for m in offset_re.finditer(expr)]
+            expr = offset_re.sub(r"offset 5y", expr)
+            # Retrieve the new expression (which may be unchanged if there were no label
+            # matchers in the expression, or if it was unable to be parsed, like logql. It's
+            # virtually impossible to tell from any datasource "name" in a panel what the
+            # actual type is without re-implementing a complete dashboard parser, but no
+            # harm will come from passing invalid promql -- we'll just get the original back.
+            #
+            replacement = transformer.inject_label_matchers(expr, topology, querytype)
 
-    # Go through all the panels and inject topology labels
-    # Panels may have more than one 'target' where the expressions live, so that must be
-    # accounted for. Additionally, `promql-transform` does not necessarily gracefully handle
-    # expressions with range queries including variables. Exclude these.
-    #
-    # It is not a certainty that the `datasource` field will necessarily reflect the type, so
-    # operate on all fields.
-    panels = dict_content["panels"]
-    topology_with_prefix = {"juju_{}".format(k): v for k, v in topology.items()}
+            if replacement == target["expr"]:
+                # promql-transform caught an error. Move on
+                continue
 
-    # We need to use an index so we can insert the changed element back later
-    for panel_idx, panel in enumerate(panels):
-        if not isinstance(panel, dict):
-            continue
+            # Go back and substitute values in [] which were pulled out
+            # Enumerate with an index... again. The same regex is ok, since it will still match
+            # `[(.*?)]`, which includes `[5y]`, our placeholder
+            for i, match in enumerate(range_re.finditer(replacement)):
+                # Replace one-by-one, starting from the left. We build the string back with
+                # `str.replace(string_to_replace, replacement_value, count)`. Limit the count
+                # to one, since we are going one-by-one through the list we saved earlier
+                # in `range_values`.
+                replacement = replacement.replace(
+                    "[{}]".format(match.group("value")),
+                    "[{}]".format(range_values[i]),
+                    1,
+                )
 
-        # Use the index to insert it back in the same location
-        panels[panel_idx] = _modify_panel(panel, topology_with_prefix, transformer)
+            for i, match in enumerate(offset_re.finditer(replacement)):
+                # Replace one-by-one, starting from the left. We build the string back with
+                # `str.replace(string_to_replace, replacement_value, count)`. Limit the count
+                # to one, since we are going one-by-one through the list we saved earlier
+                # in `offset_values`.
+                replacement = replacement.replace(
+                    "offset {}".format(match.group("value")),
+                    "offset {}".format(offset_values[i]),
+                    1,
+                )
 
-    return json.dumps(dict_content)
+            # Use the index to insert it back in the same location
+            targets[idx]["expr"] = replacement
 
+        panel["targets"] = targets
+        return panel
 
-def _modify_panel(panel: dict, topology: dict, transformer: "CosTool") -> dict:
-    """Inject Juju topology into panel expressions via CosTool.
+    @classmethod
+    def _content_to_dashboard_object(
+        cls,
+        *,
+        charm_name,
+        content: str,
+        juju_topology: dict,
+        inject_dropdowns: bool = True,
+        dashboard_alt_uid: Optional[str] = None,
+    ) -> Dict:
+        """Helper method for keeping a consistent stored state schema for the dashboard and some metadata.
 
-    Args:
-        panel: a dashboard panel as a dict
-        topology: a dict containing topology values
-        transformer: a 'CosTool' instance
-    Returns:
-        the panel with injected values
-    """
-    if "targets" not in panel.keys():
-        return panel
+        Args:
+            charm_name: Charm name (although the aggregator passes the app name).
+            content: The compressed dashboard.
+            juju_topology: This is not actually used in the dashboards, but is present to provide a secondary
+              salt to ensure uniqueness in the dict keys in case individual charm units provide dashboards.
+            inject_dropdowns: Whether to auto-render topology dropdowns.
+            dashboard_alt_uid: Alternative uid used for dashboards added programmatically.
+        """
+        ret = {
+            "charm": charm_name,
+            "content": content,
+            "juju_topology": juju_topology if inject_dropdowns else {},
+            "inject_dropdowns": inject_dropdowns,
+        }
 
-    # Pre-compile a regular expression to grab values from inside of []
-    range_re = re.compile(r"\[(?P<value>.*?)\]")
-    # Do the same for any offsets
-    offset_re = re.compile(r"offset\s+(?P<value>-?\s*[$\w]+)")
+        if dashboard_alt_uid is not None:
+            ret["dashboard_alt_uid"] = dashboard_alt_uid
 
-    known_datasources = {"${prometheusds}": "promql", "${lokids}": "logql"}
+        return ret
 
-    targets = panel["targets"]
+    @classmethod
+    def _generate_alt_uid(cls, charm_name: str, key: str) -> str:
+        """Generate alternative uid for dashboards.
 
-    # We need to use an index so we can insert the changed element back later
-    for idx, target in enumerate(targets):
-        # If there's no expression, we don't need to do anything
-        if "expr" not in target.keys():
-            continue
-        expr = target["expr"]
+        Args:
+            charm_name: The name of the charm (not app; from metadata).
+            key: A string used (along with charm.meta.name) to build the hash uid.
 
-        if "datasource" not in panel.keys():
-            continue
+        Returns: A hash string.
+        """
+        raw_dashboard_alt_uid = "{}-{}".format(charm_name, key)
+        return hashlib.shake_256(raw_dashboard_alt_uid.encode("utf-8")).hexdigest(8)
 
-        if isinstance(panel["datasource"], str):
-            if panel["datasource"] not in known_datasources:
-                continue
-            querytype = known_datasources[panel["datasource"]]
-        elif isinstance(panel["datasource"], dict):
-            if panel["datasource"]["uid"] not in known_datasources:
-                continue
-            querytype = known_datasources[panel["datasource"]["uid"]]
+    @classmethod
+    def _replace_uid(
+        cls, *, dashboard_dict: dict, dashboard_path: Path, charm_dir: Path, charm_name: str
+    ):
+        # If we're running this from within an aggregator (such as grafana agent), then the uid was
+        # already rendered there, so we do not want to overwrite it with a uid generated from the aggregator's info.
+        # We overwrite the uid only if it's not a valid "Path40" uid.
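+        # DashboardPath40UID is assumed to derive a deterministic uid from the charm name and the
+        # dashboard's path relative to the charm directory, so repacking keeps uids stable.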
+        if not DashboardPath40UID.is_valid(original_uid := dashboard_dict.get("uid", "")):
+            rel_path = str(
+                dashboard_path.relative_to(charm_dir)
+                if dashboard_path.is_absolute()
+                else dashboard_path
+            )
+            dashboard_dict["uid"] = DashboardPath40UID.generate(charm_name, rel_path)
+            logger.debug(
+                "Processed dashboard '%s': replaced original uid '%s' with '%s'",
+                dashboard_path,
+                original_uid,
+                dashboard_dict["uid"],
+            )
         else:
-            logger.error("Unknown datasource format: skipping")
-            continue
+            logger.debug(
+                "Processed dashboard '%s': kept original uid '%s'", dashboard_path, original_uid
+            )
 
-        # Capture all values inside `[]` into a list which we'll iterate over later to
-        # put them back in-order. Then apply the regex again and replace everything with
-        # `[5y]` so promql/parser will take it.
-        #
-        # Then do it again for offsets
-        range_values = [m.group("value") for m in range_re.finditer(expr)]
-        expr = range_re.sub(r"[5y]", expr)
-
-        offset_values = [m.group("value") for m in offset_re.finditer(expr)]
-        expr = offset_re.sub(r"offset 5y", expr)
-        # Retrieve the new expression (which may be unchanged if there were no label
-        # matchers in the expression, or if tt was unable to be parsed like logql. It's
-        # virtually impossible to tell from any datasource "name" in a panel what the
-        # actual type is without re-implementing a complete dashboard parser, but no
-        # harm will some from passing invalid promql -- we'll just get the original back.
-        #
-        replacement = transformer.inject_label_matchers(expr, topology, querytype)
-
-        if replacement == target["expr"]:
-            # promql-tranform caught an error. Move on
-            continue
-
-        # Go back and substitute values in [] which were pulled out
-        # Enumerate with an index... again. The same regex is ok, since it will still match
-        # `[(.*?)]`, which includes `[5y]`, our placeholder
-        for i, match in enumerate(range_re.finditer(replacement)):
-            # Replace one-by-one, starting from the left. We build the string back with
-            # `str.replace(string_to_replace, replacement_value, count)`. Limit the count
-            # to one, since we are going through one-by-one through the list we saved earlier
-            # in `range_values`.
-            replacement = replacement.replace(
-                "[{}]".format(match.group("value")),
-                "[{}]".format(range_values[i]),
-                1,
+    @classmethod
+    def _add_tags(cls, dashboard_dict: dict, charm_name: str):
+        tags: List[str] = dashboard_dict.get("tags", [])
+        if not any(tag.startswith("charm: ") for tag in tags):
+            tags.append(f"charm: {charm_name}")
+        dashboard_dict["tags"] = tags
+
+    @classmethod
+    def load_dashboards_from_dir(
+        cls,
+        *,
+        dashboards_path: Path,
+        charm_name: str,
+        charm_dir: Path,
+        inject_dropdowns: bool,
+        juju_topology: dict,
+        path_filter: Callable[[Path], bool] = lambda p: True,
+    ) -> dict:
+        """Load dashboards files from directory into a mapping from "dashboard id" to a so-called "dashboard object"."""
+
+        # Path.glob uses fnmatch on the backend, which is pretty limited, so use a
+        # custom function for the filter
+        def _is_dashboard(p: Path) -> bool:
+            return (
+                p.is_file()
+                and p.name.endswith((".json", ".json.tmpl", ".tmpl"))
+                and path_filter(p)
             )
 
-        for i, match in enumerate(offset_re.finditer(replacement)):
-            # Replace one-by-one, starting from the left. We build the string back with
-            # `str.replace(string_to_replace, replacement_value, count)`. Limit the count
-            # to one, since we are going through one-by-one through the list we saved earlier
-            # in `range_values`.
-            replacement = replacement.replace(
-                "offset {}".format(match.group("value")),
-                "offset {}".format(offset_values[i]),
-                1,
+        dashboard_templates = {}
+
+        for path in filter(_is_dashboard, Path(dashboards_path).glob("*")):
+            try:
+                dashboard_dict = json.loads(path.read_bytes())
+            except json.JSONDecodeError as e:
+                logger.error("Failed to load dashboard '%s': %s", path, e)
+                continue
+            if type(dashboard_dict) is not dict:
+                logger.error(
+                    "Invalid dashboard '%s': expected dict, got %s", path, type(dashboard_dict)
+                )
+                continue
+
+            cls._replace_uid(
+                dashboard_dict=dashboard_dict,
+                dashboard_path=path,
+                charm_dir=charm_dir,
+                charm_name=charm_name,
             )
 
-        # Use the index to insert it back in the same location
-        targets[idx]["expr"] = replacement
+            cls._add_tags(dashboard_dict=dashboard_dict, charm_name=charm_name)
 
-    panel["targets"] = targets
-    return panel
+            id = "file:{}".format(path.stem)
+            dashboard_templates[id] = cls._content_to_dashboard_object(
+                charm_name=charm_name,
+                content=LZMABase64.compress(json.dumps(dashboard_dict)),
+                dashboard_alt_uid=cls._generate_alt_uid(charm_name, id),
+                inject_dropdowns=inject_dropdowns,
+                juju_topology=juju_topology,
+            )
+
+        return dashboard_templates
 
 
 def _type_convert_stored(obj):
@@ -1075,16 +1205,19 @@ def add_dashboard(self, content: str, inject_dropdowns: bool = True) -> None:
         # that the stored state is there when this unit becomes leader.
         stored_dashboard_templates: Any = self._stored.dashboard_templates  # pyright: ignore
 
-        encoded_dashboard = _encode_dashboard_content(content)
+        encoded_dashboard = LZMABase64.compress(content)
 
         # Use as id the first chars of the encoded dashboard, so that
         # it is predictable across units.
         id = "prog:{}".format(encoded_dashboard[-24:-16])
 
-        stored_dashboard_templates[id] = self._content_to_dashboard_object(
-            encoded_dashboard, inject_dropdowns
+        stored_dashboard_templates[id] = CharmedDashboard._content_to_dashboard_object(
+            charm_name=self._charm.meta.name,
+            content=encoded_dashboard,
+            dashboard_alt_uid=CharmedDashboard._generate_alt_uid(self._charm.meta.name, id),
+            inject_dropdowns=inject_dropdowns,
+            juju_topology=self._juju_topology,
         )
-        stored_dashboard_templates[id]["dashboard_alt_uid"] = self._generate_alt_uid(id)
 
         if self._charm.unit.is_leader():
             for dashboard_relation in self._charm.model.relations[self._relation_name]:
@@ -1127,38 +1260,22 @@ def _update_all_dashboards_from_dir(
                 if dashboard_id.startswith("file:"):
                     del stored_dashboard_templates[dashboard_id]
 
-            # Path.glob uses fnmatch on the backend, which is pretty limited, so use a
-            # custom function for the filter
-            def _is_dashboard(p: Path) -> bool:
-                return p.is_file() and p.name.endswith((".json", ".json.tmpl", ".tmpl"))
-
-            for path in filter(_is_dashboard, Path(self._dashboards_path).glob("*")):
-                # path = Path(path)
-                id = "file:{}".format(path.stem)
-                stored_dashboard_templates[id] = self._content_to_dashboard_object(
-                    _encode_dashboard_content(path.read_bytes()), inject_dropdowns
+            stored_dashboard_templates.update(
+                CharmedDashboard.load_dashboards_from_dir(
+                    dashboards_path=Path(self._dashboards_path),
+                    charm_name=self._charm.meta.name,
+                    charm_dir=self._charm.charm_dir,
+                    inject_dropdowns=inject_dropdowns,
+                    juju_topology=self._juju_topology,
                 )
-                stored_dashboard_templates[id]["dashboard_alt_uid"] = self._generate_alt_uid(id)
-
-            self._stored.dashboard_templates = stored_dashboard_templates
+            )
 
             if self._charm.unit.is_leader():
                 for dashboard_relation in self._charm.model.relations[self._relation_name]:
                     self._upset_dashboards_on_relation(dashboard_relation)
 
-    def _generate_alt_uid(self, key: str) -> str:
-        """Generate alternative uid for dashboards.
-
-        Args:
-            key: A string used (along with charm.meta.name) to build the hash uid.
-
-        Returns: A hash string.
-        """
-        raw_dashboard_alt_uid = "{}-{}".format(self._charm.meta.name, key)
-        return hashlib.shake_256(raw_dashboard_alt_uid.encode("utf-8")).hexdigest(8)
-
     def _reinitialize_dashboard_data(self, inject_dropdowns: bool = True) -> None:
-        """Triggers a reload of dashboard outside of an eventing workflow.
+        """Triggers a reload of dashboard outside an eventing workflow.
 
         Args:
             inject_dropdowns: a :bool: used to indicate whether topology dropdowns should be added
@@ -1231,17 +1348,6 @@ def _upset_dashboards_on_relation(self, relation: Relation) -> None:
 
         relation.data[self._charm.app]["dashboards"] = json.dumps(stored_data)
 
-    def _content_to_dashboard_object(self, content: str, inject_dropdowns: bool = True) -> Dict:
-        return {
-            "charm": self._charm.meta.name,
-            "content": content,
-            "juju_topology": self._juju_topology if inject_dropdowns else {},
-            "inject_dropdowns": inject_dropdowns,
-        }
-
-    # This is not actually used in the dashboards, but is present to provide a secondary
-    # salt to ensure uniqueness in the dict keys in case individual charm units provide
-    # dashboards
     @property
     def _juju_topology(self) -> Dict:
         return {
@@ -1306,7 +1412,7 @@ def __init__(
         super().__init__(charm, relation_name)
         self._charm = charm
         self._relation_name = relation_name
-        self._tranformer = CosTool(self._charm)
+        self._transformer = CosTool(self._charm)
 
         self._stored.set_default(dashboards={})  # type: ignore
 
@@ -1436,21 +1542,21 @@ def _render_dashboards_and_signal_changed(self, relation: Relation) -> bool:  #
             error = None
             topology = template.get("juju_topology", {})
             try:
-                content = _decode_dashboard_content(template["content"])
+                content = LZMABase64.decompress(template["content"])
                 inject_dropdowns = template.get("inject_dropdowns", True)
                 content = self._manage_dashboard_uid(content, template)
-                content = _convert_dashboard_fields(content, inject_dropdowns)
+                content = CharmedDashboard._convert_dashboard_fields(content, inject_dropdowns)
 
                 if topology:
-                    content = _inject_labels(content, topology, self._tranformer)
+                    content = CharmedDashboard._inject_labels(content, topology, self._transformer)
 
-                content = _encode_dashboard_content(content)
+                content = LZMABase64.compress(content)
             except lzma.LZMAError as e:
                 error = str(e)
                 relation_has_invalid_dashboards = True
             except json.JSONDecodeError as e:
                 error = str(e.msg)
-                logger.warning("Invalid JSON in Grafana dashboard: {}".format(fname))
+                logger.warning("Invalid JSON in Grafana dashboard '{}': {}".format(fname, error))
                 continue
 
             # Prepend the relation name and ID to the dashboard ID to avoid clashes with
@@ -1506,7 +1612,7 @@ def _render_dashboards_and_signal_changed(self, relation: Relation) -> bool:  #
 
         if not coerced_data == stored_data:
             stored_dashboards = self.get_peer_data("dashboards")
-            stored_dashboards[relation.id] = stored_data
+            stored_dashboards[str(relation.id)] = stored_data
             self.set_peer_data("dashboards", stored_dashboards)
             return True
         return None  # type: ignore
@@ -1533,7 +1639,7 @@ def _to_external_object(self, relation_id, dashboard):
             "id": dashboard["original_id"],
             "relation_id": relation_id,
             "charm": dashboard["template"]["charm"],
-            "content": _decode_dashboard_content(dashboard["content"]),
+            "content": LZMABase64.decompress(dashboard["content"]),
         }
 
     @property
@@ -1570,8 +1676,10 @@ def set_peer_data(self, key: str, data: Any) -> None:
 
     def get_peer_data(self, key: str) -> Any:
         """Retrieve information from the peer data bucket instead of `StoredState`."""
-        data = self._charm.peers.data[self._charm.app].get(key, "")  # type: ignore[attr-defined]
-        return json.loads(data) if data else {}
+        if rel := self._charm.peers:  # type: ignore[attr-defined]
+            data = rel.data[self._charm.app].get(key, "")
+            return json.loads(data) if data else {}
+        return {}
 
 
 class GrafanaDashboardAggregator(Object):
@@ -1662,8 +1770,11 @@ def _upset_dashboards_on_event(self, event: RelationEvent) -> None:
             return
 
         for id in dashboards:
-            self._stored.dashboard_templates[id] = self._content_to_dashboard_object(  # type: ignore
-                dashboards[id], event
+            self._stored.dashboard_templates[id] = CharmedDashboard._content_to_dashboard_object(  # type: ignore
+                charm_name=event.app.name,
+                content=dashboards[id],
+                inject_dropdowns=True,
+                juju_topology=self._hybrid_topology(event),
             )
 
         self._stored.id_mappings[event.app.name] = dashboards  # type: ignore
@@ -1824,7 +1935,7 @@ def _handle_reactive_dashboards(self, event: RelationEvent) -> Optional[Dict]:
 
             from jinja2 import DebugUndefined, Template
 
-            content = _encode_dashboard_content(
+            content = LZMABase64.compress(
                 Template(dash, undefined=DebugUndefined).render(datasource=r"${prometheusds}")  # type: ignore
             )
             id = "prog:{}".format(content[-24:-16])
@@ -1855,32 +1966,20 @@ def _maybe_get_builtin_dashboards(self, event: RelationEvent) -> Dict:
             )
 
         if dashboards_path:
-
-            def is_dashboard(p: Path) -> bool:
-                return p.is_file() and p.name.endswith((".json", ".json.tmpl", ".tmpl"))
-
-            for path in filter(is_dashboard, Path(dashboards_path).glob("*")):
-                # path = Path(path)
-                if event.app.name in path.name:  # type: ignore
-                    id = "file:{}".format(path.stem)
-                    builtins[id] = self._content_to_dashboard_object(
-                        _encode_dashboard_content(path.read_bytes()), event
-                    )
+            builtins.update(
+                CharmedDashboard.load_dashboards_from_dir(
+                    dashboards_path=Path(dashboards_path),
+                    charm_name=event.app.name,
+                    charm_dir=self._charm.charm_dir,
+                    inject_dropdowns=True,
+                    juju_topology=self._hybrid_topology(event),
+                    path_filter=lambda path: event.app.name in path.name,
+                )
+            )
 
         return builtins
 
-    def _content_to_dashboard_object(self, content: str, event: RelationEvent) -> Dict:
-        return {
-            "charm": event.app.name,  # type: ignore
-            "content": content,
-            "juju_topology": self._juju_topology(event),
-            "inject_dropdowns": True,
-        }
-
-    # This is not actually used in the dashboards, but is present to provide a secondary
-    # salt to ensure uniqueness in the dict keys in case individual charm units provide
-    # dashboards
-    def _juju_topology(self, event: RelationEvent) -> Dict:
+    def _hybrid_topology(self, event: RelationEvent) -> Dict:
         return {
             "model": self._charm.model.name,
             "model_uuid": self._charm.model.uuid,
@@ -1999,12 +2098,9 @@ def _get_tool_path(self) -> Optional[Path]:
         arch = "amd64" if arch == "x86_64" else arch
         res = "cos-tool-{}".format(arch)
         try:
-            path = Path(res).resolve()
-            path.chmod(0o777)
+            path = Path(res).resolve(strict=True)
             return path
-        except NotImplementedError:
-            logger.debug("System lacks support for chmod")
-        except FileNotFoundError:
+        except (FileNotFoundError, OSError):
             logger.debug('Could not locate cos-tool at: "{}"'.format(res))
         return None
 
diff --git a/lib/charms/mysql/v0/architecture.py b/lib/charms/mysql/v0/architecture.py
new file mode 100644
index 000000000..cb45d3ede
--- /dev/null
+++ b/lib/charms/mysql/v0/architecture.py
@@ -0,0 +1,93 @@
+# Copyright 2024 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Library to provide hardware architecture checks for VMs and K8s charms.
+
+The WrongArchitectureWarningCharm class is designed to be used alongside
+the is_wrong_architecture helper function, as follows:
+
+```python
+from ops import main
+from charms.mysql.v0.architecture import WrongArchitectureWarningCharm, is_wrong_architecture
+
+if __name__ == "__main__":
+    if is_wrong_architecture():
+        main(WrongArchitectureWarningCharm)
+```
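+
+On a supported architecture, the charm's regular entry point runs instead.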
+"""
+
+import logging
+import os
+import pathlib
+import platform
+
+import yaml
+from ops.charm import CharmBase
+from ops.model import BlockedStatus
+
+# The unique Charmhub library identifier, never change it
+LIBID = "827e04542dba4c2a93bdc70ae40afdb1"
+LIBAPI = 0
+LIBPATCH = 1
+
+PYDEPS = ["ops>=2.0.0", "pyyaml>=5.0"]
+
+
+logger = logging.getLogger(__name__)
+
+
+class WrongArchitectureWarningCharm(CharmBase):
+    """A fake charm class that only signals a wrong architecture deploy."""
+
+    def __init__(self, *args):
+        super().__init__(*args)
+
+        hw_arch = platform.machine()
+        self.unit.status = BlockedStatus(
+            f"Charm incompatible with {hw_arch} architecture. "
+            f"If this app is being refreshed, rollback"
+        )
+        raise RuntimeError(
+            f"Incompatible architecture: this charm revision does not support {hw_arch}. "
+            f"If this app is being refreshed, rollback with instructions from Charmhub docs. "
+            f"If this app is being deployed for the first time, remove it and deploy it again "
+            f"using a compatible revision."
+        )
+
+
+def is_wrong_architecture() -> bool:
+    """Checks if charm was deployed on wrong architecture."""
+    charm_path = os.environ.get("CHARM_DIR", "")
+    manifest_path = pathlib.Path(charm_path, "manifest.yaml")
+
+    if not manifest_path.exists():
+        logger.error("Cannot check architecture: manifest file not found in %s", manifest_path)
+        return False
+
+    manifest = yaml.safe_load(manifest_path.read_text())
+
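+    # The charmcraft-generated manifest is expected to look roughly like (illustrative):
+    #   bases:
+    #     - name: ubuntu
+    #       channel: "22.04"
+    #       architectures: [amd64]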
+    manifest_archs = []
+    for base in manifest["bases"]:
+        base_archs = base.get("architectures", [])
+        manifest_archs.extend(base_archs)
+
+    hardware_arch = platform.machine()
+    if ("amd64" in manifest_archs and hardware_arch == "x86_64") or (
+        "arm64" in manifest_archs and hardware_arch == "aarch64"
+    ):
+        logger.debug("Charm architecture matches")
+        return False
+
+    logger.error("Charm architecture does not match")
+    return True
diff --git a/lib/charms/mysql/v0/backups.py b/lib/charms/mysql/v0/backups.py
index ece6f354a..76024878c 100644
--- a/lib/charms/mysql/v0/backups.py
+++ b/lib/charms/mysql/v0/backups.py
@@ -60,9 +60,9 @@ def is_unit_blocked(self) -> bool:
     MySQLDeleteTempRestoreDirectoryError,
     MySQLEmptyDataDirectoryError,
     MySQLExecuteBackupCommandsError,
-    MySQLGetMemberStateError,
     MySQLInitializeJujuOperationsTableError,
     MySQLKillSessionError,
+    MySQLNoMemberStateError,
     MySQLOfflineModeAndHiddenInstanceExistsError,
     MySQLPrepareBackupForRestoreError,
     MySQLRescanClusterError,
@@ -73,6 +73,7 @@ def is_unit_blocked(self) -> bool:
     MySQLSetInstanceOptionError,
     MySQLStartMySQLDError,
     MySQLStopMySQLDError,
+    MySQLUnableToGetMemberStateError,
 )
 from charms.mysql.v0.s3_helpers import (
     fetch_and_check_existence_of_s3_path,
@@ -99,7 +100,7 @@ def is_unit_blocked(self) -> bool:
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 11
+LIBPATCH = 12
 
 
 if typing.TYPE_CHECKING:
@@ -339,7 +340,7 @@ def _can_unit_perform_backup(self) -> Tuple[bool, Optional[str]]:
 
         try:
             state, role = self.charm._mysql.get_member_state()
-        except MySQLGetMemberStateError:
+        except (MySQLNoMemberStateError, MySQLUnableToGetMemberStateError):
             return False, "Error obtaining member state"
 
         if role == "primary" and self.charm.app.planned_units() > 1:
diff --git a/lib/charms/mysql/v0/mysql.py b/lib/charms/mysql/v0/mysql.py
index 147801ce9..c615fa437 100644
--- a/lib/charms/mysql/v0/mysql.py
+++ b/lib/charms/mysql/v0/mysql.py
@@ -83,7 +83,6 @@ def wait_until_mysql_connection(self) -> None:
     TYPE_CHECKING,
     Any,
     Dict,
-    Iterable,
     List,
     Literal,
     Optional,
@@ -134,7 +133,7 @@ def wait_until_mysql_connection(self) -> None:
 # Increment this major API version when introducing breaking changes
 LIBAPI = 0
 
-LIBPATCH = 70
+LIBPATCH = 84
 
 UNIT_TEARDOWN_LOCKNAME = "unit-teardown"
 UNIT_ADD_LOCKNAME = "unit-add"
@@ -147,6 +146,7 @@ def wait_until_mysql_connection(self) -> None:
 GET_MEMBER_STATE_TIME = 10  # seconds
 MAX_CONNECTIONS_FLOOR = 10
 MIM_MEM_BUFFERS = 200 * BYTES_1MiB
+ADMIN_PORT = 33062
 
 SECRET_INTERNAL_LABEL = "secret-id"
 SECRET_DELETED_LABEL = "None"
@@ -275,8 +275,12 @@ class MySQLGrantPrivilegesToUserError(Error):
     """Exception raised when there is an issue granting privileges to user."""
 
 
-class MySQLGetMemberStateError(Error):
-    """Exception raised when there is an issue getting member state."""
+class MySQLNoMemberStateError(Error):
+    """Exception raised when there is no member state."""
+
+
+class MySQLUnableToGetMemberStateError(Error):
+    """Exception raised when unable to get member state."""
 
 
 class MySQLGetClusterEndpointsError(Error):
@@ -619,6 +623,26 @@ def cluster_initialized(self) -> bool:
 
         return False
 
+    @property
+    def only_one_cluster_node_thats_uninitialized(self) -> Optional[bool]:
+        """Check if only a single cluster node exists across all units."""
+        if not self.app_peer_data.get("cluster-name"):
+            return None
+
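+        # Sum the node counts reported by every unit; the cluster is treated as a lone,
+        # uninitialized node when exactly one node exists and none of them report ONLINE.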
+        total_cluster_nodes = 0
+        for unit in self.app_units:
+            total_cluster_nodes += self._mysql.get_cluster_node_count(
+                from_instance=self.get_unit_address(unit)
+            )
+
+        total_online_cluster_nodes = 0
+        for unit in self.app_units:
+            total_online_cluster_nodes += self._mysql.get_cluster_node_count(
+                from_instance=self.get_unit_address(unit), node_status=MySQLMemberState["ONLINE"]
+            )
+
+        return total_cluster_nodes == 1 and total_online_cluster_nodes == 0
+
     @property
     def cluster_fully_initialized(self) -> bool:
         """Returns True if the cluster is fully initialized.
@@ -882,12 +906,31 @@ def __init__(
             self.backups_password,
         ]
 
+    def instance_def(self, user: str, host: Optional[str] = None) -> str:
+        """Return instance definition used on mysqlsh.
+
+        Args:
+            user: User name.
+            host: Host name, default to unit address.
+        """
+        if host and ":" in host:
+            # strip port from address
+            host = host.split(":")[0]
+
+        if user in (self.server_config_user, self.backups_user):
+            # critical operator users use admin address
+            return f"{host or self.instance_address}:{ADMIN_PORT}"
+        elif host != self.instance_address:
+            return f"{host}:3306"
+        return f"{self.socket_uri}"
+
     def render_mysqld_configuration(  # noqa: C901
         self,
         *,
         profile: str,
         audit_log_enabled: bool,
         audit_log_strategy: str,
+        audit_log_policy: str,
         memory_limit: Optional[int] = None,
         experimental_max_connections: Optional[int] = None,
         binlog_retention_days: int,
@@ -948,16 +991,17 @@ def render_mysqld_configuration(  # noqa: C901
         config["mysqld"] = {
             "bind-address": "0.0.0.0",
             "mysqlx-bind-address": "0.0.0.0",
+            "admin_address": self.instance_address,
             "report_host": self.instance_address,
             "max_connections": str(max_connections),
             "innodb_buffer_pool_size": str(innodb_buffer_pool_size),
             "log_error_services": "log_filter_internal;log_sink_internal",
             "log_error": f"{snap_common}/var/log/mysql/error.log",
-            "general_log": "ON",
+            "general_log": "OFF",
             "general_log_file": f"{snap_common}/var/log/mysql/general.log",
-            "slow_query_log_file": f"{snap_common}/var/log/mysql/slowquery.log",
+            "slow_query_log_file": f"{snap_common}/var/log/mysql/slow.log",
             "binlog_expire_logs_seconds": f"{binlog_retention_seconds}",
-            "loose-audit_log_policy": "LOGINS",
+            "loose-audit_log_policy": audit_log_policy.upper(),
             "loose-audit_log_file": f"{snap_common}/var/log/mysql/audit.log",
         }
 
@@ -983,7 +1027,7 @@ def render_mysqld_configuration(  # noqa: C901
             config.write(string_io)
             return string_io.getvalue(), dict(config["mysqld"])
 
-    def configure_mysql_users(self, password_needed: bool = True) -> None:
+    def configure_mysql_users(self) -> None:
         """Configure the MySQL users for the instance."""
         # SYSTEM_USER and SUPER privileges to revoke from the root users
         # Reference: https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_super
@@ -1021,30 +1065,33 @@ def configure_mysql_users(self, password_needed: bool = True) -> None:
 
         try:
             logger.debug(f"Configuring MySQL users for {self.instance_address}")
-            if password_needed:
-                self._run_mysqlcli_script(
-                    "; ".join(configure_users_commands),
-                    password=self.root_password,
-                )
-            else:
-                self._run_mysqlcli_script("; ".join(configure_users_commands))
-        except MySQLClientError as e:
-            logger.exception(
-                f"Failed to configure users for: {self.instance_address} with error {e.message}",
-                exc_info=e,
+            self._run_mysqlcli_script(
+                configure_users_commands,
+                password=self.root_password,
             )
-            raise MySQLConfigureMySQLUsersError(e.message)
+        except MySQLClientError:
+            logger.error(f"Failed to configure users for: {self.instance_address}")
+            raise MySQLConfigureMySQLUsersError
+
+    def _plugin_file_exists(self, plugin_file_name: str) -> bool:
+        """Check if the plugin file exists.
+
+        Args:
+            plugin_file_name: Plugin file name, with the extension.
+
+        """
+        path = self.get_variable_value("plugin_dir")
+        return self._file_exists(f"{path}/{plugin_file_name}")
 
     def install_plugins(self, plugins: list[str]) -> None:
         """Install extra plugins."""
         supported_plugins = {
-            "audit_log": "INSTALL PLUGIN audit_log SONAME 'audit_log.so';",
-            "audit_log_filter": "INSTALL PLUGIN audit_log_filter SONAME 'audit_log_filter.so';",
+            "audit_log": ("INSTALL PLUGIN audit_log SONAME", "audit_log.so"),
+            "audit_log_filter": ("INSTALL PLUGIN audit_log_filter SONAME", "audit_log_filter.so"),
         }
 
-        super_read_only = self.get_variable_value("super_read_only").lower() == "on"
-
         try:
+            super_read_only = self.get_variable_value("super_read_only").lower() == "on"
             installed_plugins = self._get_installed_plugins()
             # disable super_read_only to install plugins
             for plugin in plugins:
@@ -1056,12 +1103,24 @@ def install_plugins(self, plugins: list[str]) -> None:
                     logger.warning(f"{plugin=} is not supported")
                     continue
 
-                command = supported_plugins[plugin]
+                command_prefix, plugin_file = (
+                    supported_plugins[plugin][0],
+                    supported_plugins[plugin][1],
+                )
+
+                if not self._plugin_file_exists(plugin_file):
+                    logger.warning(f"{plugin=} file not found. Skip installation")
+                    continue
+
+                command = f"{command_prefix} '{plugin_file}';"
                 if super_read_only:
                     command = (
-                        f"SET GLOBAL super_read_only=OFF; {command}"
-                        "SET GLOBAL super_read_only=ON;"
+                        "SET GLOBAL super_read_only=OFF",
+                        command,
+                        "SET GLOBAL super_read_only=ON",
                     )
+                else:
+                    command = (command,)
                 logger.info(f"Installing {plugin=}")
                 self._run_mysqlcli_script(
                     command,
@@ -1069,10 +1128,12 @@ def install_plugins(self, plugins: list[str]) -> None:
                     password=self.server_config_password,
                 )
         except MySQLClientError:
-            logger.exception(
-                f"Failed to install {plugin=}",  # type: ignore
-            )
+            logger.error(f"Failed to install {plugin=}")  # type: ignore
             raise MySQLPluginInstallError
+        except MySQLGetVariableError:
+            # workaround for config changed triggered after failed upgrade
+            # the check fails for charms revisions not using admin address
+            logger.warning("Failed to get super_read_only variable. Skip plugin installation")
 
     def uninstall_plugins(self, plugins: list[str]) -> None:
         """Uninstall plugins."""
@@ -1089,16 +1150,19 @@ def uninstall_plugins(self, plugins: list[str]) -> None:
                 command = f"UNINSTALL PLUGIN {plugin};"
                 if super_read_only:
                     command = (
-                        f"SET GLOBAL super_read_only=OFF; {command}"
-                        "SET GLOBAL super_read_only=ON;"
+                        "SET GLOBAL super_read_only=OFF",
+                        command,
+                        "SET GLOBAL super_read_only=ON",
                     )
+                else:
+                    command = (command,)
                 self._run_mysqlcli_script(
                     command,
                     user=self.server_config_user,
                     password=self.server_config_password,
                 )
         except MySQLClientError:
-            logger.exception(
+            logger.error(
                 f"Failed to uninstall {plugin=}",  # type: ignore
             )
             raise MySQLPluginInstallError
@@ -1107,39 +1171,32 @@ def _get_installed_plugins(self) -> set[str]:
         """Return a set of explicitly installed plugins."""
         try:
             output = self._run_mysqlcli_script(
-                "select name from mysql.plugin",
+                ("select name from mysql.plugin",),
                 password=self.root_password,
             )
             return {
-                plugin
-                for plugin in output.splitlines()
-                if plugin not in ["clone", "group_replication"]
+                plugin[0] for plugin in output if plugin[0] not in ["clone", "group_replication"]
             }
         except MySQLClientError:
-            logger.exception(
-                "Failed to get installed plugins",
-            )
+            logger.error("Failed to get installed plugins")
             raise
 
     def does_mysql_user_exist(self, username: str, hostname: str) -> bool:
         """Checks if a mysql user already exists."""
         user_existence_commands = (
-            f"select if((select count(*) from mysql.user where user = '{username}' and host = '{hostname}'), 'USER_EXISTS', 'USER_DOES_NOT_EXIST') as ''",
+            f"select user from mysql.user where user = '{username}' and host = '{hostname}'",
         )
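+        # A single returned row means the user@host pair exists; an empty result means it does not.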
 
         try:
             output = self._run_mysqlcli_script(
-                "; ".join(user_existence_commands),
+                user_existence_commands,
                 user=self.server_config_user,
                 password=self.server_config_password,
             )
-            return "USER_EXISTS" in output
-        except MySQLClientError as e:
-            logger.exception(
-                f"Failed to check for existence of mysql user {username}@{hostname}",
-                exc_info=e,
-            )
-            raise MySQLCheckUserExistenceError(e.message)
+            return len(output) == 1
+        except MySQLClientError:
+            logger.error(f"Failed to check for existence of mysql user {username}@{hostname}")
+            raise MySQLCheckUserExistenceError()
 
     def configure_mysqlrouter_user(
         self, username: str, password: str, hostname: str, unit_name: str
@@ -1151,13 +1208,13 @@ def configure_mysqlrouter_user(
             )
             # Using server_config_user as we are sure it has create user grants
             create_mysqlrouter_user_commands = (
-                f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+                "shell.connect_to_primary()",
                 f"session.run_sql(\"CREATE USER '{username}'@'{hostname}' IDENTIFIED BY '{password}' ATTRIBUTE '{escaped_mysqlrouter_user_attributes}';\")",
             )
 
             # Using server_config_user as we are sure it has create user grants
             mysqlrouter_user_grant_commands = (
-                f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+                "shell.connect_to_primary()",
                 f"session.run_sql(\"GRANT CREATE USER ON *.* TO '{username}'@'{hostname}' WITH GRANT OPTION;\")",
                 f"session.run_sql(\"GRANT SELECT, INSERT, UPDATE, DELETE, EXECUTE ON mysql_innodb_cluster_metadata.* TO '{username}'@'{hostname}';\")",
                 f"session.run_sql(\"GRANT SELECT ON mysql.user TO '{username}'@'{hostname}';\")",
@@ -1167,11 +1224,21 @@ def configure_mysqlrouter_user(
             )
 
             logger.debug(f"Configuring MySQLRouter {username=}")
-            self._run_mysqlsh_script("\n".join(create_mysqlrouter_user_commands))
+            self._run_mysqlsh_script(
+                "\n".join(create_mysqlrouter_user_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
             # grant permissions to the newly created mysqlrouter user
-            self._run_mysqlsh_script("\n".join(mysqlrouter_user_grant_commands))
+            self._run_mysqlsh_script(
+                "\n".join(mysqlrouter_user_grant_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError:
-            logger.exception(f"Failed to configure mysqlrouter {username=}")
+            logger.error(f"Failed to configure mysqlrouter {username=}")
             raise MySQLConfigureRouterUserError
 
     def create_application_database_and_scoped_user(
@@ -1190,9 +1257,7 @@ def create_application_database_and_scoped_user(
             attributes["unit_name"] = unit_name
         try:
             # Using server_config_user as we are sure it has create database grants
-            connect_command = (
-                f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
-            )
+            connect_command = ("shell.connect_to_primary()",)
             create_database_commands = (
                 f'session.run_sql("CREATE DATABASE IF NOT EXISTS `{database_name}`;")',
             )
@@ -1210,11 +1275,15 @@ def create_application_database_and_scoped_user(
             else:
                 commands = connect_command + create_scoped_user_commands
 
-            self._run_mysqlsh_script("\n".join(commands))
+            self._run_mysqlsh_script(
+                "\n".join(commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError as e:
-            logger.exception(
-                f"Failed to create application database {database_name} and scoped user {username}@{hostname}",
-                exc_info=e,
+            logger.error(
+                f"Failed to create application database {database_name} and scoped user {username}@{hostname}"
             )
             raise MySQLCreateApplicationDatabaseAndScopedUserError(e.message)
 
@@ -1228,7 +1297,11 @@ def _get_statements_to_delete_users_with_attribute(
         (e.g. "'bar'")
         """
         return [
-            f"session.run_sql(\"SELECT IFNULL(CONCAT('DROP USER ', GROUP_CONCAT(QUOTE(USER), '@', QUOTE(HOST))), 'SELECT 1') INTO @sql FROM INFORMATION_SCHEMA.USER_ATTRIBUTES WHERE ATTRIBUTE->'$.{attribute_name}'={attribute_value}\")",
+            (
+                "session.run_sql(\"SELECT IFNULL(CONCAT('DROP USER ', GROUP_CONCAT(QUOTE(USER),"
+                " '@', QUOTE(HOST))), 'SELECT 1') INTO @sql FROM INFORMATION_SCHEMA.USER_ATTRIBUTES"
+                f" WHERE ATTRIBUTE->'$.{attribute_name}'={attribute_value}\")"
+            ),
             'session.run_sql("PREPARE stmt FROM @sql")',
             'session.run_sql("EXECUTE stmt")',
             'session.run_sql("DEALLOCATE PREPARE stmt")',
@@ -1240,14 +1313,22 @@ def get_mysql_router_users_for_unit(
         """Get users for related MySQL Router unit."""
         relation_user = f"relation-{relation_id}"
         command = [
-            f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
-            f"result = session.run_sql(\"SELECT USER, ATTRIBUTE->>'$.router_id' FROM INFORMATION_SCHEMA.USER_ATTRIBUTES WHERE ATTRIBUTE->'$.created_by_user'='{relation_user}' AND ATTRIBUTE->'$.created_by_juju_unit'='{mysql_router_unit_name}'\")",
+            (
+                "result = session.run_sql(\"SELECT USER, ATTRIBUTE->>'$.router_id' FROM "
+                f"INFORMATION_SCHEMA.USER_ATTRIBUTES WHERE ATTRIBUTE->'$.created_by_user'='{relation_user}' "
+                f"AND ATTRIBUTE->'$.created_by_juju_unit'='{mysql_router_unit_name}'\")"
+            ),
             "print(result.fetch_all())",
         ]
         try:
-            output = self._run_mysqlsh_script("\n".join(command))
+            output = self._run_mysqlsh_script(
+                "\n".join(command),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError as e:
-            logger.exception(
+            logger.error(
                 f"Failed to get MySQL Router users for relation {relation_id} and unit {mysql_router_unit_name}"
             )
             raise MySQLGetRouterUsersError(e.message)
@@ -1257,21 +1338,26 @@ def get_mysql_router_users_for_unit(
     def delete_users_for_unit(self, unit_name: str) -> None:
         """Delete users for a unit."""
         drop_users_command = [
-            f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+            "shell.connect_to_primary()",
         ]
         drop_users_command.extend(
             self._get_statements_to_delete_users_with_attribute("unit_name", f"'{unit_name}'")
         )
         try:
-            self._run_mysqlsh_script("\n".join(drop_users_command))
+            self._run_mysqlsh_script(
+                "\n".join(drop_users_command),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError as e:
-            logger.exception(f"Failed to query and delete users for unit {unit_name}")
+            logger.error(f"Failed to query and delete users for unit {unit_name}")
             raise MySQLDeleteUsersForUnitError(e.message)
 
     def delete_users_for_relation(self, username: str) -> None:
         """Delete users for a relation."""
         drop_users_command = [
-            f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+            "shell.connect_to_primary()",
             f"session.run_sql(\"DROP USER IF EXISTS '{username}'@'%';\")",
         ]
         # If the relation is with a MySQL Router charm application, delete any users
@@ -1280,34 +1366,48 @@ def delete_users_for_relation(self, username: str) -> None:
             self._get_statements_to_delete_users_with_attribute("created_by_user", f"'{username}'")
         )
         try:
-            self._run_mysqlsh_script("\n".join(drop_users_command))
+            self._run_mysqlsh_script(
+                "\n".join(drop_users_command),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError as e:
-            logger.exception(f"Failed to delete {username=}")
+            logger.error(f"Failed to delete {username=}")
             raise MySQLDeleteUsersForRelationError(e.message)
 
     def delete_user(self, username: str) -> None:
         """Delete user."""
         drop_user_command = [
-            f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+            "shell.connect_to_primary()",
             f"session.run_sql(\"DROP USER `{username}`@'%'\")",
         ]
         try:
-            self._run_mysqlsh_script("\n".join(drop_user_command))
+            self._run_mysqlsh_script(
+                "\n".join(drop_user_command),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError as e:
-            logger.exception(f"Failed to delete user {username}")
+            logger.error(f"Failed to delete user {username}")
             raise MySQLDeleteUserError(e.message)
 
     def remove_router_from_cluster_metadata(self, router_id: str) -> None:
         """Remove MySQL Router from InnoDB Cluster metadata."""
         command = [
-            f"shell.connect_to_primary('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.instance_address}')",
             "cluster = dba.get_cluster()",
             f'cluster.remove_router_metadata("{router_id}")',
         ]
         try:
-            self._run_mysqlsh_script("\n".join(command))
+            self._run_mysqlsh_script(
+                "\n".join(command),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError as e:
-            logger.exception(f"Failed to remove router from metadata with ID {router_id}")
+            logger.error(f"Failed to remove router from metadata with ID {router_id}")
             raise MySQLRemoveRouterFromMetadataError(e.message)
 
     def set_dynamic_variable(
@@ -1318,44 +1418,53 @@ def set_dynamic_variable(
         instance_address: Optional[str] = None,
     ) -> None:
         """Set a dynamic variable value for the instance."""
-        if not instance_address:
-            instance_address = self.socket_uri
-
         # escape variable values when needed
         if not re.match(r"^[0-9,a-z,A-Z$_]+$", value):
             value = f"`{value}`"
 
         logger.debug(f"Setting {variable=} to {value=}")
-        set_var_command = [
-            f"shell.connect('{self.server_config_user}:{self.server_config_password}@{instance_address}')",
-            f"session.run_sql(\"SET {'PERSIST' if persist else 'GLOBAL'} {variable}={value}\")",
-        ]
+        set_var_command = (
+            f'session.run_sql("SET {"PERSIST" if persist else "GLOBAL"} {variable}={value}")'
+        )
 
         try:
-            self._run_mysqlsh_script("\n".join(set_var_command))
+            self._run_mysqlsh_script(
+                set_var_command,
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user, instance_address),
+            )
         except MySQLClientError:
-            logger.exception(f"Failed to set {variable=} to {value=}")
+            logger.error(f"Failed to set {variable=} to {value=}")
             raise MySQLSetVariableError
 
     def get_variable_value(self, variable: str) -> str:
         """Get the value of a variable."""
         get_var_command = [
-            f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
             f"result = session.run_sql(\"SHOW VARIABLES LIKE '{variable}'\")",
             "print(result.fetch_all())",
         ]
 
         try:
-            output = self._run_mysqlsh_script("\n".join(get_var_command))
+            output = self._run_mysqlsh_script(
+                "\n".join(get_var_command),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError:
-            logger.exception(f"Failed to get variable {variable}")
+            logger.error(f"Failed to get value for {variable=}")
             raise MySQLGetVariableError
 
         rows = json.loads(output)
         return rows[0][1]
 
     def configure_instance(self, create_cluster_admin: bool = True) -> None:
-        """Configure the instance to be used in an InnoDB cluster."""
+        """Configure the instance to be used in an InnoDB cluster.
+
+        Args:
+            create_cluster_admin: Whether to create the cluster admin user.
+        """
         options = {
             "restart": "true",
         }
@@ -1366,16 +1475,19 @@ def configure_instance(self, create_cluster_admin: bool = True) -> None:
                 "clusterAdminPassword": self.cluster_admin_password,
             })
 
-        configure_instance_command = (
-            f"dba.configure_instance('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}', {json.dumps(options)})",
-        )
+        configure_instance_command = f"dba.configure_instance(options={options})"
 
         try:
             logger.debug(f"Configuring instance for InnoDB on {self.instance_address}")
-            self._run_mysqlsh_script("\n".join(configure_instance_command))
+            self._run_mysqlsh_script(
+                configure_instance_command,
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
             self.wait_until_mysql_connection()
         except MySQLClientError:
-            logger.exception(f"Failed to configure instance {self.instance_address}")
+            logger.error(f"Failed to configure instance {self.instance_address}")
             raise MySQLConfigureInstanceError
 
     def create_cluster(self, unit_label: str) -> None:
@@ -1387,31 +1499,40 @@ def create_cluster(self, unit_label: str) -> None:
         }
 
         commands = (
-            f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
-            f"cluster = dba.create_cluster('{self.cluster_name}', {json.dumps(options)})",
+            f"cluster = dba.create_cluster('{self.cluster_name}', {options})",
             f"cluster.set_instance_option('{self.instance_address}', 'label', '{unit_label}')",
         )
 
         try:
             logger.debug(f"Creating a MySQL InnoDB cluster on {self.instance_address}")
-            self._run_mysqlsh_script("\n".join(commands))
+            self._run_mysqlsh_script(
+                "\n".join(commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError:
-            logger.exception(f"Failed to create cluster on instance: {self.instance_address}")
+            logger.error(f"Failed to create cluster on instance: {self.instance_address}")
             raise MySQLCreateClusterError
 
     def create_cluster_set(self) -> None:
         """Create a cluster set for the cluster on cluster primary."""
         commands = (
-            f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+            "shell.connect_to_primary()",
             f"cluster = dba.get_cluster('{self.cluster_name}')",
             f"cluster.create_cluster_set('{self.cluster_set_name}')",
         )
 
         try:
             logger.debug(f"Creating cluster set name {self.cluster_set_name}")
-            self._run_mysqlsh_script("\n".join(commands))
+            self._run_mysqlsh_script(
+                "\n".join(commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError:
-            logger.exception("Failed to create cluster-set")
+            logger.error("Failed to create cluster-set")
             raise MySQLCreateClusterSetError from None
 
     def create_replica_cluster(
@@ -1434,7 +1555,7 @@ def create_replica_cluster(
             options["cloneDonor"] = donor
 
         commands = (
-            f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+            "shell.connect_to_primary()",
             "cs = dba.get_cluster_set()",
             f"repl_cluster = cs.create_replica_cluster('{endpoint}','{replica_cluster_name}', {options})",
             f"repl_cluster.set_instance_option('{endpoint}', 'label', '{instance_label}')",
@@ -1442,7 +1563,16 @@ def create_replica_cluster(
 
         try:
             logger.debug(f"Creating replica cluster {replica_cluster_name}")
-            self._run_mysqlsh_script("\n".join(commands))
+
+            # hide exception logging on auto try
+            log_exception = method == "auto"
+            self._run_mysqlsh_script(
+                "\n".join(commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+                exception_as_warning=log_exception,
+            )
         except MySQLClientError:
             if method == "auto":
                 logger.warning(
@@ -1456,13 +1586,13 @@ def create_replica_cluster(
                     method="clone",
                 )
             else:
-                logger.exception("Failed to create replica cluster")
+                logger.error("Failed to create replica cluster")
                 raise MySQLCreateReplicaClusterError
 
     def promote_cluster_to_primary(self, cluster_name: str, force: bool = False) -> None:
         """Promote a cluster to become the primary cluster on the cluster set."""
         commands = (
-            f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+            "shell.connect_to_primary()",
             "cs = dba.get_cluster_set()",
             (
                 f"cs.force_primary_cluster('{cluster_name}')"
@@ -1471,40 +1601,57 @@ def promote_cluster_to_primary(self, cluster_name: str, force: bool = False) ->
             ),
         )
 
-        try:
+        if force:
+            logger.warning(f"Promoting {cluster_name=} to primary with {force=}")
+        else:
             logger.debug(f"Promoting {cluster_name=} to primary with {force=}")
-            self._run_mysqlsh_script("\n".join(commands))
+
+        try:
+            self._run_mysqlsh_script(
+                "\n".join(commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError:
-            logger.exception("Failed to promote cluster to primary")
+            logger.error("Failed to promote cluster to primary")
             raise MySQLPromoteClusterToPrimaryError
 
     def fence_writes(self) -> None:
         """Fence writes on the primary cluster."""
         commands = (
-            f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
             "c = dba.get_cluster()",
             "c.fence_writes()",
         )
 
         try:
-            self._run_mysqlsh_script("\n".join(commands))
+            self._run_mysqlsh_script(
+                "\n".join(commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError:
-            logger.exception("Failed to fence writes on cluster")
+            logger.error("Failed to fence writes on cluster")
             raise MySQLFencingWritesError
 
     def unfence_writes(self) -> None:
         """Unfence writes on the primary cluster and reset read_only flag."""
         commands = (
-            f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
             "c = dba.get_cluster()",
             "c.unfence_writes()",
             "session.run_sql('SET GLOBAL read_only=OFF')",
         )
 
         try:
-            self._run_mysqlsh_script("\n".join(commands))
+            self._run_mysqlsh_script(
+                "\n".join(commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError:
-            logger.exception("Failed to resume writes on primary cluster")
+            logger.error("Failed to resume writes on primary cluster")
             raise MySQLFencingWritesError
 
     def is_cluster_writes_fenced(self) -> Optional[bool]:
@@ -1527,17 +1674,26 @@ def is_cluster_in_cluster_set(self, cluster_name: str) -> Optional[bool]:
     def cluster_metadata_exists(self, from_instance: str) -> bool:
         """Check if this cluster metadata exists on database."""
         check_cluster_metadata_commands = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{from_instance}')",
+            "result = session.run_sql(\"SHOW DATABASES LIKE 'mysql_innodb_cluster_metadata'\")",
+            "content = result.fetch_all()",
+            "if content:",
             (
-                'result = session.run_sql("SELECT cluster_name FROM mysql_innodb_cluster_metadata'
+                '  result = session.run_sql("SELECT cluster_name FROM mysql_innodb_cluster_metadata'
                 f".clusters where cluster_name = '{self.cluster_name}';\")"
             ),
-            "print(bool(result.fetch_one()))",
+            "  print(bool(result.fetch_one()))",
+            "else:",
+            "  print(False)",
         )
 
         try:
             output = self._run_mysqlsh_script(
-                "\n".join(check_cluster_metadata_commands), timeout=10
+                "\n".join(check_cluster_metadata_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user, from_instance),
+                timeout=60,
+                exception_as_warning=True,
             )
         except MySQLClientError:
             logger.warning(f"Failed to check if cluster metadata exists {from_instance=}")
@@ -1548,23 +1704,29 @@ def cluster_metadata_exists(self, from_instance: str) -> bool:
     def rejoin_cluster(self, cluster_name) -> None:
         """Try to rejoin a cluster to the cluster set."""
         commands = (
-            f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+            "shell.connect_to_primary()",
             "cs = dba.get_cluster_set()",
             f"cs.rejoin_cluster('{cluster_name}')",
         )
 
         try:
             logger.debug(f"Rejoining {cluster_name=}")
-            self._run_mysqlsh_script("\n".join(commands))
+            self._run_mysqlsh_script(
+                "\n".join(commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
+
             logger.info(f"Rejoined {cluster_name=}")
         except MySQLClientError:
-            logger.exception("Failed to rejoin cluster")
+            logger.error("Failed to rejoin cluster")
             raise MySQLRejoinClusterError
 
     def remove_replica_cluster(self, replica_cluster_name: str, force: bool = False) -> None:
         """Remove a replica cluster from the cluster-set."""
         commands = [
-            f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+            "shell.connect_to_primary()",
             "cs = dba.get_cluster_set()",
         ]
         if force:
@@ -1574,9 +1736,14 @@ def remove_replica_cluster(self, replica_cluster_name: str, force: bool = False)
 
         try:
             logger.debug(f"Removing replica cluster {replica_cluster_name}")
-            self._run_mysqlsh_script("\n".join(commands))
+            self._run_mysqlsh_script(
+                "\n".join(commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError:
-            logger.exception("Failed to remove replica cluster")
+            logger.error("Failed to remove replica cluster")
             raise MySQLRemoveReplicaClusterError
 
     def initialize_juju_units_operations_table(self) -> None:
@@ -1597,16 +1764,13 @@ def initialize_juju_units_operations_table(self) -> None:
             )
 
             self._run_mysqlcli_script(
-                "; ".join(initialize_table_commands),
+                initialize_table_commands,
                 user=self.server_config_user,
                 password=self.server_config_password,
             )
-        except MySQLClientError as e:
-            logger.exception(
-                f"Failed to initialize mysql.juju_units_operations table with error {e.message}",
-                exc_info=e,
-            )
-            raise MySQLInitializeJujuOperationsTableError(e.message)
+        except MySQLClientError:
+            logger.error("Failed to initialize mysql.juju_units_operations table with error")
+            raise MySQLInitializeJujuOperationsTableError
 
     def add_instance_to_cluster(
         self,
@@ -1632,11 +1796,8 @@ def add_instance_to_cluster(
         ):
             raise MySQLLockAcquisitionError("Lock not acquired")
 
+        connect_instance = from_instance or self.instance_address
         connect_commands = (
-            (
-                f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}"
-                f"@{from_instance or self.instance_address}')"
-            ),
             f"cluster = dba.get_cluster('{self.cluster_name}')",
             "shell.options['dba.restartWaitTimeout'] = 3600",
         )
@@ -1652,11 +1813,19 @@ def add_instance_to_cluster(
                 f"Adding instance {instance_address}/{instance_unit_label} to {self.cluster_name=}"
                 f"with recovery {method=}"
             )
-            self._run_mysqlsh_script("\n".join(connect_commands + add_instance_command))
+            # hide exception logging on auto try
+            log_exception = method == "auto"
+            self._run_mysqlsh_script(
+                "\n".join(connect_commands + add_instance_command),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user, connect_instance),
+                exception_as_warning=log_exception,
+            )
 
         except MySQLClientError:
             if method == "clone":
-                logger.exception(
+                logger.error(
                     f"Failed to add {instance_address=} to {self.cluster_name=} on {self.instance_address=}",
                 )
                 raise MySQLAddInstanceToClusterError
@@ -1680,7 +1849,6 @@ def is_instance_configured_for_innodb(
     ) -> bool:
         """Confirm if instance is configured for use in an InnoDB cluster."""
         commands = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{instance_address}')",
             "instance_configured = dba.check_instance_configuration()['status'] == 'ok'",
             'print("INSTANCE_CONFIGURED" if instance_configured else "INSTANCE_NOT_CONFIGURED")',
         )
@@ -1690,7 +1858,12 @@ def is_instance_configured_for_innodb(
                 f"Confirming instance {instance_address}/{instance_unit_label} configuration for InnoDB"
             )
 
-            output = self._run_mysqlsh_script("\n".join(commands))
+            output = self._run_mysqlsh_script(
+                "\n".join(commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user, instance_address),
+            )
             return "INSTANCE_CONFIGURED" in output
         except MySQLClientError as e:
             # confirmation can fail if the clusteradmin user does not yet exist on the instance
@@ -1699,21 +1872,36 @@ def is_instance_configured_for_innodb(
             )
             return False
 
+    def drop_group_replication_metadata_schema(self) -> None:
+        """Drop the group replication metadata schema from current unit."""
+        commands = "dba.drop_metadata_schema()"
+
+        try:
+            self._run_mysqlsh_script(
+                commands,
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
+        except MySQLClientError:
+            logger.error("Failed to drop group replication metadata schema")
+
     def are_locks_acquired(self, from_instance: Optional[str] = None) -> bool:
         """Report if any topology change is being executed."""
         commands = (
-            (
-                f"shell.connect('{self.server_config_user}:{self.server_config_password}"
-                f"@{from_instance or self.socket_uri}')"
-            ),
             "result = session.run_sql(\"SELECT COUNT(*) FROM mysql.juju_units_operations WHERE status='in-progress';\")",
             "print(f'<LOCKS>{result.fetch_one()[0]}</LOCKS>')",
         )
         try:
-            output = self._run_mysqlsh_script("\n".join(commands))
+            output = self._run_mysqlsh_script(
+                "\n".join(commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user, from_instance),
+            )
         except MySQLClientError:
             # log error and fallback to assuming topology is changing
-            logger.exception("Failed to get locks count")
+            logger.error("Failed to get locks count")
             return True
 
         matches = re.search(r"<LOCKS>(\d)</LOCKS>", output)
@@ -1734,24 +1922,28 @@ def rescan_cluster(
             options["addInstances"] = "auto"
 
         rescan_cluster_commands = (
-            (
-                f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@"
-                f"{from_instance or self.socket_uri}')"
-            ),
             f"cluster = dba.get_cluster('{self.cluster_name}')",
-            f"cluster.rescan({json.dumps(options)})",
+            f"cluster.rescan({options})",
         )
         try:
             logger.debug("Rescanning cluster")
-            self._run_mysqlsh_script("\n".join(rescan_cluster_commands))
+            self._run_mysqlsh_script(
+                "\n".join(rescan_cluster_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user, from_instance),
+            )
         except MySQLClientError as e:
-            logger.exception("Error rescanning the cluster")
+            logger.error("Error rescanning the cluster")
             raise MySQLRescanClusterError(e.message)
 
     def is_instance_in_cluster(self, unit_label: str) -> bool:
         """Confirm if instance is in the cluster."""
+        if not self.cluster_metadata_exists(self.instance_address):
+            # early return if instance has no cluster metadata
+            return False
+
         commands = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.socket_uri}')",
             f"cluster = dba.get_cluster('{self.cluster_name}')",
             f"print(cluster.status()['defaultReplicaSet']['topology'].get('{unit_label}', {{}}).get('status', 'NOT_A_MEMBER'))",
         )
@@ -1759,7 +1951,13 @@ def is_instance_in_cluster(self, unit_label: str) -> bool:
         try:
             logger.debug(f"Checking existence of unit {unit_label} in cluster {self.cluster_name}")
 
-            output = self._run_mysqlsh_script("\n".join(commands))
+            output = self._run_mysqlsh_script(
+                "\n".join(commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+                exception_as_warning=True,
+            )
             return (
                 MySQLMemberState.ONLINE in output.lower()
                 or MySQLMemberState.RECOVERING in output.lower()
@@ -1782,13 +1980,18 @@ def get_cluster_status(
         """Get the cluster status dictionary."""
         options = {"extended": extended}
         status_commands = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{from_instance or self.socket_uri}')",
             f"cluster = dba.get_cluster('{self.cluster_name}')",
             f"print(cluster.status({options}))",
         )
 
         try:
-            output = self._run_mysqlsh_script("\n".join(status_commands), timeout=30)
+            output = self._run_mysqlsh_script(
+                "\n".join(status_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user, from_instance),
+                timeout=30,
+            )
             output_dict = json.loads(output.lower())
             return output_dict
         except MySQLClientError:
@@ -1800,13 +2003,19 @@ def get_cluster_set_status(
         """Get the cluster-set status dictionary."""
         options = {"extended": extended}
         status_commands = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{from_instance or self.socket_uri}')",
             "cs = dba.get_cluster_set()",
             f"print(cs.status({options}))",
         )
 
         try:
-            output = self._run_mysqlsh_script("\n".join(status_commands), timeout=150)
+            output = self._run_mysqlsh_script(
+                "\n".join(status_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user, from_instance),
+                timeout=150,
+                exception_as_warning=True,
+            )
             output_dict = json.loads(output.lower())
             return output_dict
         except MySQLClientError:
@@ -1824,13 +2033,19 @@ def get_replica_cluster_status(self, replica_cluster_name: Optional[str] = None)
         if not replica_cluster_name:
             replica_cluster_name = self.cluster_name
         status_commands = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.socket_uri}')",
             "cs = dba.get_cluster_set()",
             f"print(cs.status(extended=1)['clusters']['{replica_cluster_name}']['globalStatus'])",
         )
 
         try:
-            output = self._run_mysqlsh_script("\n".join(status_commands), timeout=30)
+            output = self._run_mysqlsh_script(
+                "\n".join(status_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+                timeout=150,
+                exception_as_warning=True,
+            )
             return output.lower().strip()
         except MySQLClientError:
             logger.warning(f"Failed to get replica cluster status for {replica_cluster_name}")
@@ -1850,14 +2065,19 @@ def get_cluster_node_count(
                 f" WHERE member_state = '{node_status.value.upper()}'"
             )
         size_commands = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}"
-            f"@{from_instance or self.socket_uri}')",
             f'result = session.run_sql("{query}")',
             'print(f"<NODES>{result.fetch_one()[0]}</NODES>")',
         )
 
         try:
-            output = self._run_mysqlsh_script("\n".join(size_commands))
+            output = self._run_mysqlsh_script(
+                "\n".join(size_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user, from_instance),
+                timeout=30,
+                exception_as_warning=True,
+            )
         except MySQLClientError:
             logger.warning("Failed to get node count")
             return 0
@@ -1895,6 +2115,8 @@ def _get_host_ip(host: str) -> str:
         if self.is_cluster_replica():
             # replica return global primary address
             global_primary = self.get_cluster_set_global_primary_address()
+            if not global_primary:
+                raise MySQLGetClusterEndpointsError("Failed to get global primary address")
             rw_endpoints = {_get_host_ip(global_primary) if get_ips else global_primary}
         else:
             rw_endpoints = {
@@ -1923,12 +2145,16 @@ def execute_remove_instance(
             "force": "true" if force else "false",
         }
         remove_instance_commands = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{connect_instance or self.instance_address}')",
             f"cluster = dba.get_cluster('{self.cluster_name}')",
             "cluster.remove_instance("
             f"'{self.cluster_admin_user}@{self.instance_address}', {remove_instance_options})",
         )
-        self._run_mysqlsh_script("\n".join(remove_instance_commands))
+        self._run_mysqlsh_script(
+            "\n".join(remove_instance_commands),
+            user=self.server_config_user,
+            password=self.server_config_password,
+            host=self.instance_def(self.server_config_user, connect_instance),
+        )
 
     @retry(
         retry=retry_if_exception_type(MySQLRemoveInstanceRetryError),
@@ -1999,8 +2225,7 @@ def remove_instance(  # noqa: C901
         except MySQLClientError as e:
             # In case of an error, raise an error and retry
             logger.warning(
-                f"Failed to acquire lock and remove instance {self.instance_address} with error {e.message}",
-                exc_info=e,
+                f"Failed to acquire lock and remove instance {self.instance_address} with error {e.message}"
             )
             raise MySQLRemoveInstanceRetryError(e.message)
         finally:
@@ -2029,18 +2254,22 @@ def remove_instance(  # noqa: C901
                 self._release_lock(lock_instance, unit_label, UNIT_TEARDOWN_LOCKNAME)
             except MySQLClientError as e:
                 # Raise an error that does not lead to a retry of this method
-                logger.exception(f"Failed to release lock on {unit_label}")
+                logger.error(f"Failed to release lock on {unit_label}")
                 raise MySQLRemoveInstanceError(e.message)
 
     def dissolve_cluster(self) -> None:
         """Dissolve the cluster independently of the unit teardown process."""
         logger.debug(f"Dissolving cluster {self.cluster_name}")
         dissolve_cluster_commands = (
-            f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
             f"cluster = dba.get_cluster('{self.cluster_name}')",
             "cluster.dissolve({'force': 'true'})",
         )
-        self._run_mysqlsh_script("\n".join(dissolve_cluster_commands))
+        self._run_mysqlsh_script(
+            "\n".join(dissolve_cluster_commands),
+            user=self.server_config_user,
+            password=self.server_config_password,
+            host=self.instance_def(self.server_config_user),
+        )
 
     def _acquire_lock(self, primary_address: str, unit_label: str, lock_name: str) -> bool:
         """Attempts to acquire a lock by using the mysql.juju_units_operations table."""
@@ -2049,14 +2278,24 @@ def _acquire_lock(self, primary_address: str, unit_label: str, lock_name: str) -
         )
 
         acquire_lock_commands = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{primary_address}')",
-            f"session.run_sql(\"UPDATE mysql.juju_units_operations SET executor='{unit_label}', status='in-progress' WHERE task='{lock_name}' AND executor='';\")",
-            f"acquired_lock = session.run_sql(\"SELECT count(*) FROM mysql.juju_units_operations WHERE task='{lock_name}' AND executor='{unit_label}';\").fetch_one()[0]",
+            (
+                f"session.run_sql(\"UPDATE mysql.juju_units_operations SET executor='{unit_label}',"
+                f" status='in-progress' WHERE task='{lock_name}' AND executor='';\")"
+            ),
+            (
+                'acquired_lock = session.run_sql("SELECT count(*) FROM mysql.juju_units_operations'
+                f" WHERE task='{lock_name}' AND executor='{unit_label}';\").fetch_one()[0]"
+            ),
             "print(f'<ACQUIRED_LOCK>{acquired_lock}</ACQUIRED_LOCK>')",
         )
 
         try:
-            output = self._run_mysqlsh_script("\n".join(acquire_lock_commands))
+            output = self._run_mysqlsh_script(
+                "\n".join(acquire_lock_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user, primary_address),
+            )
         except MySQLClientError:
             logger.debug(f"Failed to acquire lock {lock_name}")
             return False
@@ -2068,27 +2307,44 @@ def _acquire_lock(self, primary_address: str, unit_label: str, lock_name: str) -
 
     def _release_lock(self, primary_address: str, unit_label: str, lock_name: str) -> None:
         """Releases a lock in the mysql.juju_units_operations table."""
-        logger.debug(f"Releasing lock {lock_name} on {primary_address} for unit {unit_label}")
+        logger.debug(f"Releasing {lock_name=} @{primary_address=} for {unit_label=}")
 
         release_lock_commands = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{primary_address}')",
-            "session.run_sql(\"UPDATE mysql.juju_units_operations SET executor='', status='not-started'"
+            "r = session.run_sql(\"UPDATE mysql.juju_units_operations SET executor='', status='not-started'"
             f" WHERE task='{lock_name}' AND executor='{unit_label}';\")",
+            "print(r.get_affected_items_count())",
         )
-        self._run_mysqlsh_script("\n".join(release_lock_commands))
+        affected_rows = self._run_mysqlsh_script(
+            "\n".join(release_lock_commands),
+            user=self.server_config_user,
+            password=self.server_config_password,
+            host=self.instance_def(self.server_config_user, primary_address),
+        )
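+        # the UPDATE affects zero rows when this unit does not hold the lock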
+        if affected_rows:
+            if int(affected_rows) == 0:
+                logger.warning("No lock to release")
+            else:
+                logger.debug(f"{lock_name=} released for {unit_label=}")
 
     def _get_cluster_member_addresses(self, exclude_unit_labels: List = []) -> Tuple[List, bool]:
         """Get the addresses of the cluster's members."""
         logger.debug(f"Getting cluster member addresses, excluding units {exclude_unit_labels}")
 
         get_cluster_members_commands = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.socket_uri}')",
             f"cluster = dba.get_cluster('{self.cluster_name}')",
-            f"member_addresses = ','.join([member['address'] for label, member in cluster.status()['defaultReplicaSet']['topology'].items() if label not in {exclude_unit_labels}])",
+            (
+                "member_addresses = ','.join([member['address'] for label, member in "
+                f"cluster.status()['defaultReplicaSet']['topology'].items() if label not in {exclude_unit_labels}])"
+            ),
             "print(f'<MEMBER_ADDRESSES>{member_addresses}</MEMBER_ADDRESSES>')",
         )
 
-        output = self._run_mysqlsh_script("\n".join(get_cluster_members_commands))
+        output = self._run_mysqlsh_script(
+            "\n".join(get_cluster_members_commands),
+            user=self.server_config_user,
+            password=self.server_config_password,
+            host=self.instance_def(self.server_config_user),
+        )
         matches = re.search(r"<MEMBER_ADDRESSES>(.*)</MEMBER_ADDRESSES>", output)
 
         if not matches:
@@ -2105,20 +2361,23 @@ def get_cluster_primary_address(
         self, connect_instance_address: Optional[str] = None
     ) -> Optional[str]:
         """Get the cluster primary's address."""
-        if not connect_instance_address:
-            connect_instance_address = self.instance_address
-        logger.debug(f"Getting cluster primary member's address from {connect_instance_address}")
+        logger.debug("Getting cluster primary member's address")
 
         get_cluster_primary_commands = (
-            f"shell.connect_to_primary('{self.cluster_admin_user}:{self.cluster_admin_password}@{connect_instance_address}')",
+            "shell.connect_to_primary()",
             "primary_address = shell.parse_uri(session.uri)['host']",
             "print(f'<PRIMARY_ADDRESS>{primary_address}</PRIMARY_ADDRESS>')",
         )
 
         try:
-            output = self._run_mysqlsh_script("\n".join(get_cluster_primary_commands))
+            output = self._run_mysqlsh_script(
+                "\n".join(get_cluster_primary_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user, connect_instance_address),
+            )
         except MySQLClientError as e:
-            logger.warning("Failed to get cluster primary addresses", exc_info=e)
+            logger.warning("Failed to get cluster primary addresses")
             raise MySQLGetClusterPrimaryAddressError(e.message)
         matches = re.search(r"<PRIMARY_ADDRESS>(.+)</PRIMARY_ADDRESS>", output)
 
@@ -2131,30 +2390,35 @@ def get_cluster_set_global_primary_address(
         self, connect_instance_address: Optional[str] = None
     ) -> Optional[str]:
         """Get the cluster set global primary's address."""
-        if not connect_instance_address:
-            connect_instance_address = self.instance_address
-        logger.debug(
-            f"Getting cluster set global primary member's address from {connect_instance_address}"
-        )
+        logger.debug("Getting cluster set global primary member's address")
 
         get_cluster_set_global_primary_commands = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{connect_instance_address}')",
             "cs = dba.get_cluster_set()",
             "global_primary = cs.status()['globalPrimaryInstance']",
             "print(f'<PRIMARY_ADDRESS>{global_primary}</PRIMARY_ADDRESS>')",
         )
 
         try:
-            output = self._run_mysqlsh_script("\n".join(get_cluster_set_global_primary_commands))
+            output = self._run_mysqlsh_script(
+                "\n".join(get_cluster_set_global_primary_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user, connect_instance_address),
+            )
         except MySQLClientError as e:
-            logger.warning("Failed to get cluster set global primary addresses", exc_info=e)
+            logger.warning("Failed to get cluster set global primary addresses")
             raise MySQLGetClusterPrimaryAddressError(e.message)
         matches = re.search(r"<PRIMARY_ADDRESS>(.+)</PRIMARY_ADDRESS>", output)
 
         if not matches:
             return None
 
-        return matches.group(1)
+        address = matches.group(1)
+        if ":" in address:
+            # strip port from address
+            address = address.split(":")[0]
+
+        return address
 
     def get_primary_label(self) -> Optional[str]:
         """Get the label of the cluster's primary."""
@@ -2175,47 +2439,29 @@ def set_cluster_primary(self, new_primary_address: str) -> None:
         logger.debug(f"Setting cluster primary to {new_primary_address}")
 
         set_cluster_primary_commands = (
-            f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+            "shell.connect_to_primary()",
             f"cluster = dba.get_cluster('{self.cluster_name}')",
             f"cluster.set_primary_instance('{new_primary_address}')",
         )
         try:
-            self._run_mysqlsh_script("\n".join(set_cluster_primary_commands))
+            self._run_mysqlsh_script(
+                "\n".join(set_cluster_primary_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError as e:
-            logger.exception("Failed to set cluster primary")
+            logger.error("Failed to set cluster primary")
             raise MySQLSetClusterPrimaryError(e.message)
 
-    def get_cluster_members_addresses(self) -> Optional[Iterable[str]]:
-        """Get the addresses of the cluster's members."""
-        get_cluster_members_commands = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.socket_uri}')",
-            f"cluster = dba.get_cluster('{self.cluster_name}')",
-            "members = ','.join((member['address'] for member in cluster.describe()['defaultReplicaSet']['topology']))",
-            "print(f'<MEMBERS>{members}</MEMBERS>')",
-        )
-
-        try:
-            output = self._run_mysqlsh_script("\n".join(get_cluster_members_commands))
-        except MySQLClientError as e:
-            logger.warning("Failed to get cluster members addresses", exc_info=e)
-            raise MySQLGetClusterMembersAddressesError(e.message)
-
-        matches = re.search(r"<MEMBERS>(.+)</MEMBERS>", output)
-
-        if not matches:
-            return None
-
-        return set(matches.group(1).split(","))
-
     def verify_server_upgradable(self, instance: Optional[str] = None) -> None:
         """Wrapper for API check_for_server_upgrade."""
+        # connect using instance_def to enforce standard port usage
         check_command = [
-            f"shell.connect('{self.server_config_user}"
-            f":{self.server_config_password}@{instance or self.socket_uri}')",
             "try:",
             "    util.check_for_server_upgrade(options={'outputFormat': 'JSON'})",
             "except ValueError:",  # ValueError is raised for same version check
-            "    if session.run_sql('select @@version').fetch_all()[0][0].split('-')[0] == shell.version.split()[1]:",
+            "    if session.run_sql('select @@version').fetch_all()[0][0].split('-')[0] in shell.version:",
             "        print('SAME_VERSION')",
             "    else:",
             "        raise",
@@ -2229,7 +2475,12 @@ def _strip_output(output: str):
             return output
 
         try:
-            output = self._run_mysqlsh_script("\n".join(check_command))
+            output = self._run_mysqlsh_script(
+                "\n".join(check_command),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user, instance),
+            )
             if "SAME_VERSION" in output:
                 return
             result = json.loads(_strip_output(output))
@@ -2244,15 +2495,19 @@ def get_mysql_version(self) -> Optional[str]:
         logger.debug("Getting InnoDB version")
 
         get_version_commands = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.socket_uri}')",
             'result = session.run_sql("SELECT version()")',
             'print(f"<VERSION>{result.fetch_one()[0]}</VERSION>")',
         )
 
         try:
-            output = self._run_mysqlsh_script("\n".join(get_version_commands))
+            output = self._run_mysqlsh_script(
+                "\n".join(get_version_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError as e:
-            logger.warning("Failed to get workload version", exc_info=e)
+            logger.warning("Failed to get workload version")
             raise MySQLGetMySQLVersionError(e.message)
 
         matches = re.search(r"<VERSION>(.+)</VERSION>", output)
@@ -2267,45 +2522,53 @@ def grant_privileges_to_user(
     ) -> None:
         """Grants specified privileges to the provided database user."""
         grant_privileges_commands = (
-            f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
-            f"session.run_sql(\"GRANT {', '.join(privileges)} ON *.* TO '{username}'@'{hostname}'{' WITH GRANT OPTION' if with_grant_option else ''}\")",
+            "shell.connect_to_primary()",
+            (
+                f"session.run_sql(\"GRANT {', '.join(privileges)} ON *.* TO '{username}'@'{hostname}'"
+                f'{" WITH GRANT OPTION" if with_grant_option else ""}")'
+            ),
         )
 
         try:
-            self._run_mysqlsh_script("\n".join(grant_privileges_commands))
+            self._run_mysqlsh_script(
+                "\n".join(grant_privileges_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError as e:
-            logger.warning(f"Failed to grant privileges to user {username}@{hostname}", exc_info=e)
+            logger.warning(f"Failed to grant privileges to user {username}@{hostname}")
             raise MySQLGrantPrivilegesToUserError(e.message)
 
     def update_user_password(self, username: str, new_password: str, host: str = "%") -> None:
         """Updates user password in MySQL database."""
-        logger.debug(f"Updating password for {username}.")
-
         # password is set on the global primary
         if not (instance_address := self.get_cluster_set_global_primary_address()):
             raise MySQLCheckUserExistenceError("No primary found")
 
         update_user_password_commands = (
-            f"shell.connect('{self.server_config_user}:{self.server_config_password}@{instance_address}')",
             f"session.run_sql(\"ALTER USER '{username}'@'{host}' IDENTIFIED BY '{new_password}';\")",
             'session.run_sql("FLUSH PRIVILEGES;")',
         )
 
+        logger.debug(f"Updating password for {username}.")
         try:
-            self._run_mysqlsh_script("\n".join(update_user_password_commands))
-        except MySQLClientError as e:
-            logger.exception(
-                f"Failed to update user password for user {username}",
-                exc_info=e,
+            self._run_mysqlsh_script(
+                "\n".join(update_user_password_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user, instance_address),
             )
-            raise MySQLCheckUserExistenceError(e.message)
+        except MySQLClientError:
+            logger.error(f"Failed to update user password for user {username}")
+            raise MySQLCheckUserExistenceError
 
     @retry(reraise=True, stop=stop_after_attempt(3), wait=wait_fixed(GET_MEMBER_STATE_TIME))
     def get_member_state(self) -> Tuple[str, str]:
         """Get member status (MEMBER_STATE, MEMBER_ROLE) in the cluster."""
         member_state_query = (
             "SELECT MEMBER_STATE, MEMBER_ROLE, MEMBER_ID, @@server_uuid"
-            " FROM performance_schema.replication_group_members"
+            " FROM performance_schema.replication_group_members",
         )
 
         try:
@@ -2315,33 +2578,59 @@ def get_member_state(self) -> Tuple[str, str]:
                 password=self.cluster_admin_password,
                 timeout=10,
             )
-        except MySQLClientError as e:
-            logger.error(
-                "Failed to get member state: mysqld daemon is down",
-            )
-            raise MySQLGetMemberStateError(e.message)
+        except MySQLClientError:
+            logger.error("Failed to get member state: mysqld daemon is down")
+            raise MySQLUnableToGetMemberStateError
 
         # output is like:
-        # 'MEMBER_STATE\tMEMBER_ROLE\tMEMBER_ID\t@@server_uuid\nONLINE\tPRIMARY\t<uuid>\t<uuid>\n'
-        lines = output.strip().lower().split("\n")
-        if len(lines) < 2:
-            raise MySQLGetMemberStateError("No member state retrieved")
+        # [('ONLINE',
+        #  'PRIMARY',
+        #  '1de30105-ce16-11ef-bb27-00163e3cb985',
+        #  '1de30105-ce16-11ef-bb27-00163e3cb985'), (...)]
+        if len(output) == 0:
+            raise MySQLNoMemberStateError("No member state retrieved")
+
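+        # lowercase values, falling back to "unknown" for empty fields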
+        def lower_or_unknown(value) -> str:
+            return value.lower() if value else "unknown"
 
-        if len(lines) == 2:
+        if len(output) == 1:
             # Instance only knows its own state
             # sometimes member_id is not populated
-            results = lines[1].split("\t")
-            return results[0], results[1] or "unknown"
+            return lower_or_unknown(output[0][0]), lower_or_unknown(output[0][1])
 
-        for line in lines[1:]:
+        for row in output:
             # results will be like:
             # ['online', 'primary', 'a6c00302-1c07-11ee-bca1-...', 'a6c00302-1c07-11ee-bca1-...']
-            results = line.split("\t")
-            if results[2] == results[3]:
+            if row[2] == row[3]:
                 # filter server uuid
-                return results[0], results[1] or "unknown"
+                return lower_or_unknown(row[0]), lower_or_unknown(row[1])
 
-        raise MySQLGetMemberStateError("No member state retrieved")
+        raise MySQLNoMemberStateError("No member state retrieved")
+
+    def is_cluster_auto_rejoin_ongoing(self) -> bool:
+        """Check if the instance is performing a cluster auto rejoin operation."""
+        cluster_auto_rejoin_command = (
+            "cursor = session.run_sql(\"SELECT work_completed, work_estimated FROM performance_schema.events_stages_current WHERE event_name LIKE '%auto-rejoin%'\")",
+            "result = cursor.fetch_one() or [0,0]",
+            "print(f'<COMPLETED_ATTEMPTS>{result[0]}</COMPLETED_ATTEMPTS>')",
+            "print(f'<ESTIMATED_ATTEMPTS>{result[1]}</ESTIMATED_ATTEMPTS>')",
+        )
+
+        try:
+            output = self._run_mysqlsh_script(
+                "\n".join(cluster_auto_rejoin_command),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
+        except MySQLClientError as e:
+            logger.error("Failed to get cluster auto-rejoin information", exc_info=e)
+            raise
+
+        completed_matches = re.search(r"<COMPLETED_ATTEMPTS>(\d)</COMPLETED_ATTEMPTS>", output)
+        estimated_matches = re.search(r"<ESTIMATED_ATTEMPTS>(\d)</ESTIMATED_ATTEMPTS>", output)
+
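+        # rejoin is still in progress while completed attempts are fewer than estimated attempts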
+        return int(completed_matches.group(1)) < int(estimated_matches.group(1))
 
     def is_cluster_replica(self, from_instance: Optional[str] = None) -> Optional[bool]:
         """Check if this cluster is a replica in a cluster set."""
@@ -2351,14 +2640,6 @@ def is_cluster_replica(self, from_instance: Optional[str] = None) -> Optional[bo
 
         return cs_status["clusters"][self.cluster_name.lower()]["clusterrole"] == "replica"
 
-    def cluster_set_cluster_count(self, from_instance: Optional[str] = None) -> int:
-        """Get the number of clusters in the cluster set."""
-        cs_status = self.get_cluster_set_status(extended=0, from_instance=from_instance)
-        if not cs_status:
-            return 0
-
-        return len(cs_status["clusters"])
-
     def get_cluster_set_name(self, from_instance: Optional[str] = None) -> Optional[str]:
         """Get cluster set name."""
         cs_status = self.get_cluster_set_status(extended=0, from_instance=from_instance)
@@ -2370,27 +2651,51 @@ def get_cluster_set_name(self, from_instance: Optional[str] = None) -> Optional[
     def stop_group_replication(self) -> None:
         """Stop Group replication if enabled on the instance."""
         stop_gr_command = (
-            f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
             "data = session.run_sql('SELECT 1 FROM performance_schema.replication_group_members')",
             "if len(data.fetch_all()) > 0:",
             "    session.run_sql('STOP GROUP_REPLICATION')",
         )
         try:
-            self._run_mysqlsh_script("\n".join(stop_gr_command))
+            logger.debug("Stopping Group Replication for unit")
+            self._run_mysqlsh_script(
+                "\n".join(stop_gr_command),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
+        except MySQLClientError:
+            logger.warning("Failed to stop Group Replication for unit")
+
+    def start_group_replication(self) -> None:
+        """Start Group replication on the instance."""
+        start_gr_command = "session.run_sql('START GROUP_REPLICATION')"
+
+        try:
+            logger.debug("Starting Group Replication for unit")
+            self._run_mysqlsh_script(
+                start_gr_command,
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError:
-            logger.debug("Failed to stop Group Replication for unit")
+            logger.warning("Failed to start Group Replication for unit")
 
     def reboot_from_complete_outage(self) -> None:
         """Wrapper for reboot_cluster_from_complete_outage command."""
         reboot_from_outage_command = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.socket_uri}')",
-            f"dba.reboot_cluster_from_complete_outage('{self.cluster_name}')",
+            f"dba.reboot_cluster_from_complete_outage('{self.cluster_name}')"
         )
 
         try:
-            self._run_mysqlsh_script("\n".join(reboot_from_outage_command))
+            self._run_mysqlsh_script(
+                reboot_from_outage_command,
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError as e:
-            logger.exception("Failed to reboot cluster")
+            logger.error("Failed to reboot cluster")
             raise MySQLRebootFromCompleteOutageError(e.message)
 
     def hold_if_recovering(self) -> None:
@@ -2398,7 +2703,7 @@ def hold_if_recovering(self) -> None:
         while True:
             try:
                 member_state, _ = self.get_member_state()
-            except MySQLGetMemberStateError:
+            except (MySQLNoMemberStateError, MySQLUnableToGetMemberStateError):
                 break
             if member_state == MySQLMemberState.RECOVERING:
                 logger.debug("Unit is recovering")
@@ -2413,43 +2718,51 @@ def set_instance_offline_mode(self, offline_mode: bool = False) -> None:
 
         try:
             self._run_mysqlcli_script(
-                "; ".join(set_instance_offline_mode_commands),
-                user=self.cluster_admin_user,
-                password=self.cluster_admin_password,
+                set_instance_offline_mode_commands,
+                user=self.server_config_user,
+                password=self.server_config_password,
             )
-        except MySQLClientError as e:
-            logger.exception(f"Failed to set instance state to offline_mode {mode}")
-            raise MySQLSetInstanceOfflineModeError(e.message)
+        except MySQLClientError:
+            logger.error(f"Failed to set instance state to offline_mode {mode}")
+            raise MySQLSetInstanceOfflineModeError
 
     def set_instance_option(self, option: str, value: Any) -> None:
         """Sets an instance option."""
         set_instance_option_commands = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.socket_uri}')",
             f"cluster = dba.get_cluster('{self.cluster_name}')",
             f"cluster.set_instance_option('{self.instance_address}', '{option}', '{value}')",
         )
 
         try:
-            self._run_mysqlsh_script("\n".join(set_instance_option_commands))
+            self._run_mysqlsh_script(
+                "\n".join(set_instance_option_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError:
-            logger.exception(f"Failed to set option {option} with value {value}")
+            logger.error(f"Failed to set option {option} with value {value}")
             raise MySQLSetInstanceOptionError
 
     def offline_mode_and_hidden_instance_exists(self) -> bool:
         """Indicates whether an instance exists in offline_mode and hidden from router."""
         offline_mode_message = "Instance has offline_mode enabled"
         commands = (
-            f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.instance_address}')",
             f"cluster_topology = dba.get_cluster('{self.cluster_name}').status()['defaultReplicaSet']['topology']",
             f"selected_instances = [label for label, member in cluster_topology.items() if '{offline_mode_message}' in member.get('instanceErrors', '') and member.get('hiddenFromRouter')]",
             "print(f'<OFFLINE_MODE_INSTANCES>{len(selected_instances)}</OFFLINE_MODE_INSTANCES>')",
         )
 
         try:
-            output = self._run_mysqlsh_script("\n".join(commands))
-        except MySQLClientError as e:
-            logger.exception("Failed to query offline mode instances")
-            raise MySQLOfflineModeAndHiddenInstanceExistsError(e.message)
+            output = self._run_mysqlsh_script(
+                "\n".join(commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
+        except MySQLClientError:
+            logger.error("Failed to query offline mode instances")
+            raise MySQLOfflineModeAndHiddenInstanceExistsError
 
         matches = re.search(r"<OFFLINE_MODE_INSTANCES>(.*)</OFFLINE_MODE_INSTANCES>", output)
 
@@ -2500,7 +2813,7 @@ def get_innodb_buffer_pool_parameters(
                 group_replication_message_cache,
             )
         except Exception:
-            logger.exception("Failed to compute innodb buffer pool parameters")
+            logger.error("Failed to compute innodb buffer pool parameters")
             raise MySQLGetAutoTuningParametersError("Error computing buffer pool parameters")
 
     def get_max_connections(self, available_memory: int) -> int:
@@ -2542,12 +2855,12 @@ def execute_backup_commands(
             nproc, _ = self._execute_commands(nproc_command)
             tmp_dir, _ = self._execute_commands(make_temp_dir_command, user=user, group=group)
         except MySQLExecError:
-            logger.exception("Failed to execute commands prior to running backup")
+            logger.error("Failed to execute commands prior to running backup")
             raise MySQLExecuteBackupCommandsError
         except Exception:
             # Catch all other exceptions to prevent the database being stuck in
             # a bad state due to pre-backup operations
-            logger.exception("Failed unexpectedly to execute commands prior to running backup")
+            logger.error("Failed unexpectedly to execute commands prior to running backup")
             raise MySQLExecuteBackupCommandsError
 
         # TODO: remove flags --no-server-version-check
@@ -2598,12 +2911,12 @@ def execute_backup_commands(
                 stream_output="stderr",
             )
         except MySQLExecError:
-            logger.exception("Failed to execute backup commands")
+            logger.error("Failed to execute backup commands")
             raise MySQLExecuteBackupCommandsError
         except Exception:
             # Catch all other exceptions to prevent the database being stuck in
             # a bad state due to pre-backup operations
-            logger.exception("Failed unexpectedly to execute backup commands")
+            logger.error("Failed unexpectedly to execute backup commands")
             raise MySQLExecuteBackupCommandsError
 
     def delete_temp_backup_directory(
@@ -2626,11 +2939,11 @@ def delete_temp_backup_directory(
                 group=group,
             )
         except MySQLExecError as e:
-            logger.exception("Failed to delete temp backup directory")
+            logger.error("Failed to delete temp backup directory")
             raise MySQLDeleteTempBackupDirectoryError(e.message)
-        except Exception as e:
-            logger.exception("Failed to delete temp backup directory")
-            raise MySQLDeleteTempBackupDirectoryError(e)
+        except Exception:
+            logger.error("Failed to delete temp backup directory")
+            raise MySQLDeleteTempBackupDirectoryError
 
     def retrieve_backup_with_xbcloud(
         self,
@@ -2639,8 +2952,8 @@ def retrieve_backup_with_xbcloud(
         temp_restore_directory: str,
         xbcloud_location: str,
         xbstream_location: str,
-        user=None,
-        group=None,
+        user: Optional[str] = None,
+        group: Optional[str] = None,
     ) -> Tuple[str, str, str]:
         """Retrieve the specified backup from S3."""
         nproc_command = ["nproc"]
@@ -2657,7 +2970,7 @@ def retrieve_backup_with_xbcloud(
                 group=group,
             )
         except MySQLExecError as e:
-            logger.exception("Failed to execute commands prior to running xbcloud get")
+            logger.error("Failed to execute commands prior to running xbcloud get")
             raise MySQLRetrieveBackupWithXBCloudError(e.message)
 
         retrieve_backup_command = [
@@ -2695,10 +3008,10 @@ def retrieve_backup_with_xbcloud(
             )
             return (stdout, stderr, tmp_dir)
         except MySQLExecError as e:
-            logger.exception("Failed to retrieve backup")
+            logger.error("Failed to retrieve backup")
             raise MySQLRetrieveBackupWithXBCloudError(e.message)
         except Exception:
-            logger.exception("Failed to retrieve backup")
+            logger.error("Failed to retrieve backup")
             raise MySQLRetrieveBackupWithXBCloudError
 
     def prepare_backup_for_restore(
@@ -2706,8 +3019,8 @@ def prepare_backup_for_restore(
         backup_location: str,
         xtrabackup_location: str,
         xtrabackup_plugin_dir: str,
-        user=None,
-        group=None,
+        user: Optional[str] = None,
+        group: Optional[str] = None,
     ) -> Tuple[str, str]:
         """Prepare the backup in the provided dir for restore."""
         try:
@@ -2738,17 +3051,17 @@ def prepare_backup_for_restore(
                 group=group,
             )
         except MySQLExecError as e:
-            logger.exception("Failed to prepare backup for restore")
+            logger.error("Failed to prepare backup for restore")
             raise MySQLPrepareBackupForRestoreError(e.message)
         except Exception:
-            logger.exception("Failed to prepare backup for restore")
+            logger.error("Failed to prepare backup for restore")
             raise MySQLPrepareBackupForRestoreError
 
     def empty_data_files(
         self,
         mysql_data_directory: str,
-        user=None,
-        group=None,
+        user: Optional[str] = None,
+        group: Optional[str] = None,
     ) -> None:
         """Empty the mysql data directory in preparation of backup restore."""
         empty_data_files_command = [
@@ -2771,10 +3084,10 @@ def empty_data_files(
                 group=group,
             )
         except MySQLExecError as e:
-            logger.exception("Failed to empty data directory in prep for backup restore")
+            logger.error("Failed to empty data directory in prep for backup restore")
             raise MySQLEmptyDataDirectoryError(e.message)
         except Exception:
-            logger.exception("Failed to empty data directory in prep for backup restore")
+            logger.error("Failed to empty data directory in prep for backup restore")
             raise MySQLEmptyDataDirectoryError
 
     def restore_backup(
@@ -2784,8 +3097,8 @@ def restore_backup(
         defaults_config_file: str,
         mysql_data_directory: str,
         xtrabackup_plugin_directory: str,
-        user=None,
-        group=None,
+        user: Optional[str] = None,
+        group: Optional[str] = None,
     ) -> Tuple[str, str]:
         """Restore the provided prepared backup."""
         restore_backup_command = [
@@ -2809,17 +3122,17 @@ def restore_backup(
                 group=group,
             )
         except MySQLExecError as e:
-            logger.exception("Failed to restore backup")
+            logger.error("Failed to restore backup")
             raise MySQLRestoreBackupError(e.message)
         except Exception:
-            logger.exception("Failed to restore backup")
+            logger.error("Failed to restore backup")
             raise MySQLRestoreBackupError
 
     def delete_temp_restore_directory(
         self,
         temp_restore_directory: str,
-        user=None,
-        group=None,
+        user: Optional[str] = None,
+        group: Optional[str] = None,
     ) -> None:
         """Delete the temp restore directory from the mysql data directory."""
         logger.info(f"Deleting temp restore directory in {temp_restore_directory}")
@@ -2841,7 +3154,7 @@ def delete_temp_restore_directory(
                 group=group,
             )
         except MySQLExecError as e:
-            logger.exception("Failed to remove temp backup directory")
+            logger.error("Failed to remove temp backup directory")
             raise MySQLDeleteTempRestoreDirectoryError(e.message)
 
     @abstractmethod
@@ -2866,11 +3179,11 @@ def tls_setup(
     ) -> None:
         """Setup TLS files and requirement mode."""
         enable_commands = (
-            f"SET PERSIST ssl_ca='{ca_path}';"
-            f"SET PERSIST ssl_key='{key_path}';"
-            f"SET PERSIST ssl_cert='{cert_path}';"
-            f"SET PERSIST require_secure_transport={'on' if require_tls else 'off'};"
-            "ALTER INSTANCE RELOAD TLS;"
+            f"SET PERSIST ssl_ca='{ca_path}'",
+            f"SET PERSIST ssl_key='{key_path}'",
+            f"SET PERSIST ssl_cert='{cert_path}'",
+            f"SET PERSIST require_secure_transport={'on' if require_tls else 'off'}",
+            "ALTER INSTANCE RELOAD TLS",
         )
 
         try:
@@ -2880,13 +3193,12 @@ def tls_setup(
                 password=self.server_config_password,
             )
         except MySQLClientError:
-            logger.exception("Failed to set custom TLS configuration")
+            logger.error("Failed to set custom TLS configuration")
             raise MySQLTLSSetupError("Failed to set custom TLS configuration")
 
     def kill_unencrypted_sessions(self) -> None:
         """Kill non local, non system open unencrypted connections."""
         kill_connections_command = (
-            f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
             (
                 'processes = session.run_sql("'
                 "SELECT processlist_id FROM performance_schema.threads WHERE "
@@ -2898,15 +3210,19 @@ def kill_unencrypted_sessions(self) -> None:
         )
 
         try:
-            self._run_mysqlsh_script("\n".join(kill_connections_command))
+            self._run_mysqlsh_script(
+                "\n".join(kill_connections_command),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError:
-            logger.exception("Failed to kill external sessions")
+            logger.error("Failed to kill external sessions")
             raise MySQLKillSessionError
 
     def kill_client_sessions(self) -> None:
         """Kill non local, non system open unencrypted connections."""
         kill_connections_command = (
-            f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
             (
                 'processes = session.run_sql("'
                 "SELECT processlist_id FROM performance_schema.threads WHERE "
@@ -2918,23 +3234,30 @@ def kill_client_sessions(self) -> None:
         )
 
         try:
-            self._run_mysqlsh_script("\n".join(kill_connections_command))
+            self._run_mysqlsh_script(
+                "\n".join(kill_connections_command),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
         except MySQLClientError:
-            logger.exception("Failed to kill external sessions")
+            logger.error("Failed to kill external sessions")
             raise MySQLKillSessionError
 
     def check_mysqlsh_connection(self) -> bool:
         """Checks if it is possible to connect to the server with mysqlsh."""
-        connect_commands = (
-            f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
-            'session.run_sql("SELECT 1")',
-        )
+        connect_commands = 'session.run_sql("SELECT 1")'
 
         try:
-            self._run_mysqlsh_script("\n".join(connect_commands))
+            self._run_mysqlsh_script(
+                connect_commands,
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+            )
             return True
         except MySQLClientError:
-            logger.exception("Failed to connect to MySQL with mysqlsh")
+            logger.error("Failed to connect to MySQL with mysqlsh")
             return False
 
     def get_pid_of_port_3306(self) -> Optional[str]:
@@ -2950,7 +3273,6 @@ def get_pid_of_port_3306(self) -> Optional[str]:
     def flush_mysql_logs(self, logs_type: Union[MySQLTextLogs, list[MySQLTextLogs]]) -> None:
         """Flushes the specified logs_type logs."""
         flush_logs_commands = [
-            f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
             'session.run_sql("SET sql_log_bin = 0")',
         ]
 
@@ -2968,19 +3290,30 @@ def flush_mysql_logs(self, logs_type: Union[MySQLTextLogs, list[MySQLTextLogs]])
             flush_logs_commands.append("session.run_sql(\"set global audit_log_flush='ON'\")")
 
         try:
-            self._run_mysqlsh_script("\n".join(flush_logs_commands), timeout=50)
+            self._run_mysqlsh_script(
+                "\n".join(flush_logs_commands),
+                user=self.server_config_user,
+                password=self.server_config_password,
+                host=self.instance_def(self.server_config_user),
+                timeout=50,
+                exception_as_warning=True,
+            )
         except MySQLClientError:
             logger.warning(f"Failed to flush {logs_type} logs.")
 
     def get_databases(self) -> set[str]:
         """Return a set with all databases on the server."""
         list_databases_commands = (
-            f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
             'result = session.run_sql("SHOW DATABASES")',
             "for db in result.fetch_all():\n  print(db[0])",
         )
 
-        output = self._run_mysqlsh_script("\n".join(list_databases_commands))
+        output = self._run_mysqlsh_script(
+            "\n".join(list_databases_commands),
+            user=self.server_config_user,
+            password=self.server_config_password,
+            host=self.instance_def(self.server_config_user),
+        )
         return set(output.split())
 
     def get_non_system_databases(self) -> set[str]:
@@ -2993,13 +3326,34 @@ def get_non_system_databases(self) -> set[str]:
             "sys",
         }
 
-    def strip_off_passwords(self, input_string: str) -> str:
+    def strip_off_passwords(self, input_string: Optional[str]) -> str:
         """Strips off passwords from the input string."""
+        if not input_string:
+            return ""
         stripped_input = input_string
+        hidden_pass = "*****"
         for password in self.passwords:
-            stripped_input = stripped_input.replace(password, "xxxxxxxxxxxx")
+            stripped_input = stripped_input.replace(password, hidden_pass)
+        if "IDENTIFIED" in input_string:
+            # when failure occurs for password setting (user creation, password rotation)
+            pattern = r"(?<=IDENTIFIED BY\ \')[^\']+(?=\')"
+            stripped_input = re.sub(pattern, hidden_pass, stripped_input)
         return stripped_input
 
+    def strip_off_passwords_from_exception(self, e: Exception) -> None:
+        """Remove password from execution exceptions.
+
+        Checks from known exceptions for password. Known exceptions are:
+        * ops.pebble: ExecError
+        * subprocess: CalledProcessError, TimeoutExpired
+        """
+        if hasattr(e, "cmd"):
+            for i, v in enumerate(e.cmd):  # type: ignore
+                e.cmd[i] = self.strip_off_passwords(v)  # type: ignore
+        if hasattr(e, "command"):
+            for i, v in enumerate(e.command):  # type: ignore
+                e.command[i] = self.strip_off_passwords(v)  # type: ignore
+
     @abstractmethod
     def is_mysqld_running(self) -> bool:
         """Returns whether mysqld is running."""
@@ -3039,14 +3393,26 @@ def reset_data_dir(self) -> None:
         raise NotImplementedError
 
     @abstractmethod
-    def _run_mysqlsh_script(self, script: str, timeout: Optional[int] = None) -> str:
+    def _run_mysqlsh_script(
+        self,
+        script: str,
+        user: str,
+        host: str,
+        password: str,
+        timeout: Optional[int] = None,
+        exception_as_warning: bool = False,
+    ) -> str:
         """Execute a MySQL shell script.
 
         Raises MySQLClientError if script execution fails.
 
         Args:
             script: Mysqlsh script string
+            user: User to invoke the mysqlsh script with
+            host: Host to run the script on
+            password: Password to invoke the mysqlsh script with
             timeout: Optional timeout for script execution
+            exception_as_warning: (optional) whether the exception should be treated as a warning
 
         Returns:
             String representing the output of the mysqlsh command
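For orientation, this is the call shape the rest of this patch uses for the reworked `_run_mysqlsh_script` signature, as it would appear inside one of the MySQL helper-class methods (the statement and timeout are illustrative, mirroring `check_mysqlsh_connection` above):

```python
# Connection details are now passed explicitly instead of being embedded in a
# shell.connect(...) line inside the script itself.
output = self._run_mysqlsh_script(
    'session.run_sql("SELECT 1")',
    user=self.server_config_user,
    password=self.server_config_password,
    host=self.instance_def(self.server_config_user),
    timeout=30,  # illustrative value
)
```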
@@ -3056,21 +3422,34 @@ def _run_mysqlsh_script(self, script: str, timeout: Optional[int] = None) -> str
     @abstractmethod
     def _run_mysqlcli_script(
         self,
-        script: str,
+        script: Union[Tuple[Any, ...], List[Any]],
         user: str = "root",
         password: Optional[str] = None,
         timeout: Optional[int] = None,
-    ) -> str:
+        exception_as_warning: bool = False,
+    ) -> list:
         """Execute a MySQL CLI script.
 
         Execute SQL script as instance with given user.
 
-        Raises MySQLClientError if script execution fails.
+        Raises:
+            MySQLClientError if script execution fails.
+            TimeoutError if script execution times out.
 
         Args:
             script: raw SQL script string
             user: (optional) user to invoke the mysql cli script with (default is "root")
             password: (optional) password to invoke the mysql cli script with
             timeout: (optional) time before the query should timeout
+            exception_as_warning: (optional) whether the exception should be treated as a warning
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def _file_exists(self, path: str) -> bool:
+        """Check if a file exists.
+
+        Args:
+            path: Path to the file to check
         """
         raise NotImplementedError
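And a hedged sketch of a call to the reshaped `_run_mysqlcli_script`, following the tuple-of-statements convention adopted by the `tls_setup` change earlier in this patch (the statements, timeout, and row handling are illustrative assumptions, not taken from the patch):

```python
# Inside a MySQL helper-class method: statements are passed as a sequence and
# rows come back as a list (the exact row format is implementation-specific).
rows = self._run_mysqlcli_script(
    (
        "SET sql_log_bin = 0",
        "SELECT user, host FROM mysql.user",
    ),
    user=self.server_config_user,
    password=self.server_config_password,
    timeout=30,  # illustrative value
)
for row in rows:
    logger.debug("row: %s", row)
```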
diff --git a/lib/charms/mysql/v0/s3_helpers.py b/lib/charms/mysql/v0/s3_helpers.py
index 2dce393d1..c0c9ddd72 100644
--- a/lib/charms/mysql/v0/s3_helpers.py
+++ b/lib/charms/mysql/v0/s3_helpers.py
@@ -21,6 +21,7 @@
 from typing import Dict, List, Tuple
 
 import boto3
+import botocore
 
 logger = logging.getLogger(__name__)
 
@@ -32,13 +33,36 @@
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 9
+LIBPATCH = 10
 
 # botocore/urllib3 clutter the logs when on debug
 logging.getLogger("botocore").setLevel(logging.WARNING)
 logging.getLogger("urllib3").setLevel(logging.WARNING)
 
 
+def _construct_endpoint(s3_parameters: dict) -> str:
+    """Construct the S3 service endpoint using the region.
+
+    This is needed when the provided endpoint is from AWS, and it doesn't contain the region.
+    """
+    # Start from the provided endpoint; it is only rewritten below for AWS endpoints.
+    endpoint = s3_parameters["endpoint"]
+
+    # Load endpoints data.
+    loader = botocore.loaders.create_loader()
+    data = loader.load_data("endpoints")
+
+    # Construct the endpoint using the region.
+    resolver = botocore.regions.EndpointResolver(data)
+    endpoint_data = resolver.construct_endpoint("s3", s3_parameters["region"])
+
+    # Use the built endpoint if it is an AWS endpoint.
+    if endpoint_data and endpoint.endswith(endpoint_data["dnsSuffix"]):
+        endpoint = f"{endpoint.split('://')[0]}://{endpoint_data['hostname']}"
+
+    return endpoint
+
+
 def upload_content_to_s3(content: str, content_path: str, s3_parameters: Dict) -> bool:
     """Uploads the provided contents to the provided S3 bucket.
 
@@ -67,11 +91,7 @@ def upload_content_to_s3(content: str, content_path: str, s3_parameters: Dict) -
             ca_file.flush()
             verif = ca_file.name
 
-        s3 = session.resource(
-            "s3",
-            endpoint_url=s3_parameters["endpoint"],
-            verify=verif,
-        )
+        s3 = session.resource("s3", endpoint_url=_construct_endpoint(s3_parameters), verify=verif)
 
         bucket = s3.Bucket(s3_parameters["bucket"])
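To illustrate the endpoint resolution added above, a hedged sketch of what `_construct_endpoint` is expected to return (the bucket, region, and hostnames are made up; non-AWS endpoints pass through unchanged):

```python
# AWS endpoint given without a region: rewritten to the regional hostname.
aws_params = {"endpoint": "https://s3.amazonaws.com", "region": "eu-west-1"}
print(_construct_endpoint(aws_params))  # e.g. https://s3.eu-west-1.amazonaws.com

# Non-AWS endpoint (e.g. a Ceph RadosGW): returned unchanged.
ceph_params = {"endpoint": "https://radosgw.example.com", "region": "default"}
print(_construct_endpoint(ceph_params))  # https://radosgw.example.com
```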
 
diff --git a/lib/charms/mysql/v0/tls.py b/lib/charms/mysql/v0/tls.py
index 85586d561..be550291f 100644
--- a/lib/charms/mysql/v0/tls.py
+++ b/lib/charms/mysql/v0/tls.py
@@ -51,7 +51,7 @@
 
 LIBID = "eb73947deedd4380a3a90d527e0878eb"
 LIBAPI = 0
-LIBPATCH = 7
+LIBPATCH = 8
 
 SCOPE = "unit"
 
@@ -166,6 +166,9 @@ def _on_certificate_expiring(self, event: CertificateExpiringEvent) -> None:
 
     def _on_tls_relation_broken(self, _) -> None:
         """Disable TLS when TLS relation broken."""
+        if self.charm.removing_unit:
+            logger.debug("Unit is being removed, skipping TLS cleanup.")
+            return
         try:
             if not ops.jujuversion.JujuVersion.from_environ().has_secrets:
                 self.charm.set_secret(SCOPE, "certificate-authority", None)
diff --git a/lib/charms/prometheus_k8s/v0/prometheus_scrape.py b/lib/charms/prometheus_k8s/v0/prometheus_scrape.py
index e3d35c6f3..d1169ef3d 100644
--- a/lib/charms/prometheus_k8s/v0/prometheus_scrape.py
+++ b/lib/charms/prometheus_k8s/v0/prometheus_scrape.py
@@ -340,8 +340,8 @@ def _on_scrape_targets_changed(self, event):
 
 import yaml
 from cosl import JujuTopology
-from cosl.rules import AlertRules
-from ops.charm import CharmBase, RelationRole
+from cosl.rules import AlertRules, generic_alert_groups
+from ops.charm import CharmBase, RelationJoinedEvent, RelationRole
 from ops.framework import (
     BoundEvent,
     EventBase,
@@ -362,7 +362,7 @@ def _on_scrape_targets_changed(self, event):
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 47
+LIBPATCH = 50
 
 PYDEPS = ["cosl"]
 
@@ -1309,6 +1309,8 @@ def __init__(
         refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None,
         external_url: str = "",
         lookaside_jobs_callable: Optional[Callable] = None,
+        *,
+        forward_alert_rules: bool = True,
     ):
         """Construct a metrics provider for a Prometheus charm.
 
@@ -1411,6 +1413,7 @@ def __init__(
                 files.  Defaults to "./prometheus_alert_rules",
                 resolved relative to the directory hosting the charm entry file.
                 The alert rules are automatically updated on charm upgrade.
+            forward_alert_rules: a boolean flag to toggle forwarding of charmed alert rules.
             refresh_event: an optional bound event or list of bound events which
                 will be observed to re-set scrape job data (IP address and others)
             external_url: an optional argument that represents an external url that
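A hedged usage sketch of the new keyword-only flag on `MetricsEndpointProvider` (the job definition and port are hypothetical; only `forward_alert_rules` comes from this patch):

```python
self.metrics_endpoint = MetricsEndpointProvider(
    self,
    jobs=[{"static_configs": [{"targets": ["*:9104"]}]}],  # hypothetical scrape target
    forward_alert_rules=False,  # keyword-only; skip bundled and generic alert rules
)
```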
@@ -1449,6 +1452,7 @@ def __init__(
 
         self._charm = charm
         self._alert_rules_path = alert_rules_path
+        self._forward_alert_rules = forward_alert_rules
         self._relation_name = relation_name
         # sanitize job configurations to the supported subset of parameters
         jobs = [] if jobs is None else jobs
@@ -1530,7 +1534,11 @@ def set_scrape_job_spec(self, _=None):
             return
 
         alert_rules = AlertRules(query_type="promql", topology=self.topology)
-        alert_rules.add_path(self._alert_rules_path, recursive=True)
+        if self._forward_alert_rules:
+            alert_rules.add_path(self._alert_rules_path, recursive=True)
+            alert_rules.add(
+                generic_alert_groups.application_rules, group_name_prefix=self.topology.identifier
+            )
         alert_rules_as_dict = alert_rules.as_dict()
 
         for relation in self._charm.model.relations[self._relation_name]:
@@ -1776,6 +1784,9 @@ def __init__(
         relation_names: Optional[dict] = None,
         relabel_instance=True,
         resolve_addresses=False,
+        path_to_own_alert_rules: Optional[str] = None,
+        *,
+        forward_alert_rules: bool = True,
     ):
         """Construct a `MetricsEndpointAggregator`.
 
@@ -1795,6 +1806,8 @@ def __init__(
             resolve_addresses: A boolean flag indicating if the aggregator
                 should attempt to perform DNS lookups of targets and append
                 a `dns_name` label
+            path_to_own_alert_rules: Optionally supply a path for alert rule files
+            forward_alert_rules: a boolean flag to toggle forwarding of charmed alert rules
         """
         self._charm = charm
 
@@ -1807,15 +1820,21 @@ def __init__(
         self._alert_rules_relation = relation_names.get("alert_rules", "prometheus-rules")
 
         super().__init__(charm, self._prometheus_relation)
+        self.topology = JujuTopology.from_charm(charm)
+
         self._stored.set_default(jobs=[], alert_rules=[])
 
         self._relabel_instance = relabel_instance
         self._resolve_addresses = resolve_addresses
 
+        self._forward_alert_rules = forward_alert_rules
+
         # manage Prometheus charm relation events
         prometheus_events = self._charm.on[self._prometheus_relation]
         self.framework.observe(prometheus_events.relation_joined, self._set_prometheus_data)
 
+        self.path_to_own_alert_rules = path_to_own_alert_rules
+
         # manage list of Prometheus scrape jobs from related scrape targets
         target_events = self._charm.on[self._target_relation]
         self.framework.observe(target_events.relation_changed, self._on_prometheus_targets_changed)
@@ -1828,7 +1847,7 @@ def __init__(
         self.framework.observe(alert_rule_events.relation_changed, self._on_alert_rules_changed)
         self.framework.observe(alert_rule_events.relation_departed, self._on_alert_rules_departed)
 
-    def _set_prometheus_data(self, event):
+    def _set_prometheus_data(self, event: Optional[RelationJoinedEvent] = None):
         """Ensure every new Prometheus instances is updated.
 
         Any time a new Prometheus unit joins the relation with
@@ -1838,6 +1857,7 @@ def _set_prometheus_data(self, event):
         if not self._charm.unit.is_leader():
             return
 
+        # Gather the scrape jobs
         jobs = [] + _type_convert_stored(
             self._stored.jobs  # pyright: ignore
         )  # list of scrape jobs, one per relation
@@ -1846,6 +1866,7 @@ def _set_prometheus_data(self, event):
             if targets and relation.app:
                 jobs.append(self._static_scrape_job(targets, relation.app.name))
 
+        # Gather the alert rules
         groups = [] + _type_convert_stored(
             self._stored.alert_rules  # pyright: ignore
         )  # list of alert rule groups
@@ -1856,9 +1877,23 @@ def _set_prometheus_data(self, event):
                 rules = self._label_alert_rules(unit_rules, appname)
                 group = {"name": self.group_name(appname), "rules": rules}
                 groups.append(group)
-
-        event.relation.data[self._charm.app]["scrape_jobs"] = json.dumps(jobs)
-        event.relation.data[self._charm.app]["alert_rules"] = json.dumps({"groups": groups})
+        alert_rules = AlertRules(query_type="promql", topology=self.topology)
+        # Add alert rules from file
+        if self.path_to_own_alert_rules:
+            alert_rules.add_path(self.path_to_own_alert_rules, recursive=True)
+        # Add generic alert rules
+        alert_rules.add(
+            generic_alert_groups.application_rules, group_name_prefix=self.topology.identifier
+        )
+        groups.extend(alert_rules.as_dict()["groups"])
+
+        # Set scrape jobs and alert rules in relation data
+        relations = [event.relation] if event else self.model.relations[self._prometheus_relation]
+        for rel in relations:
+            rel.data[self._charm.app]["scrape_jobs"] = json.dumps(jobs)  # type: ignore
+            rel.data[self._charm.app]["alert_rules"] = json.dumps(  # type: ignore
+                {"groups": groups if self._forward_alert_rules else []}
+            )
 
     def _on_prometheus_targets_changed(self, event):
         """Update scrape jobs in response to scrape target changes.
@@ -2129,7 +2164,9 @@ def set_alert_rule_data(self, name: str, unit_rules: dict, label_rules: bool = T
 
             if updated_group["name"] not in [g["name"] for g in groups]:
                 groups.append(updated_group)
-            relation.data[self._charm.app]["alert_rules"] = json.dumps({"groups": groups})
+            relation.data[self._charm.app]["alert_rules"] = json.dumps(
+                {"groups": groups if self._forward_alert_rules else []}
+            )
 
             if not _type_convert_stored(self._stored.alert_rules) == groups:  # pyright: ignore
                 self._stored.alert_rules = groups
@@ -2177,8 +2214,8 @@ def remove_alert_rules(self, group_name: str, unit_name: str) -> None:
                 changed_group["rules"] = rules_kept  # type: ignore
                 groups.append(changed_group)
 
-            relation.data[self._charm.app]["alert_rules"] = (
-                json.dumps({"groups": groups}) if groups else "{}"
+            relation.data[self._charm.app]["alert_rules"] = json.dumps(
+                {"groups": groups if self._forward_alert_rules else []}
             )
 
             if not _type_convert_stored(self._stored.alert_rules) == groups:  # pyright: ignore
@@ -2364,12 +2401,9 @@ def _get_tool_path(self) -> Optional[Path]:
         arch = "amd64" if arch == "x86_64" else arch
         res = "cos-tool-{}".format(arch)
         try:
-            path = Path(res).resolve()
-            path.chmod(0o777)
+            path = Path(res).resolve(strict=True)
             return path
-        except NotImplementedError:
-            logger.debug("System lacks support for chmod")
-        except FileNotFoundError:
+        except (FileNotFoundError, OSError):
             logger.debug('Could not locate cos-tool at: "{}"'.format(res))
         return None
 
diff --git a/lib/charms/tempo_k8s/v1/charm_tracing.py b/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py
similarity index 60%
rename from lib/charms/tempo_k8s/v1/charm_tracing.py
rename to lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py
index 2dbdddd68..a9b6deeb6 100644
--- a/lib/charms/tempo_k8s/v1/charm_tracing.py
+++ b/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py
@@ -10,17 +10,17 @@
 in real time from the Grafana dashboard the execution flow of your charm.
 
 # Quickstart
-Fetch the following charm libs (and ensure the minimum version/revision numbers are satisfied):
+Fetch the following charm libs:
 
-    charmcraft fetch-lib charms.tempo_k8s.v2.tracing  # >= 1.10
-    charmcraft fetch-lib charms.tempo_k8s.v1.charm_tracing  # >= 2.7
+    charmcraft fetch-lib charms.tempo_coordinator_k8s.v0.tracing
+    charmcraft fetch-lib charms.tempo_coordinator_k8s.v0.charm_tracing
 
 Then edit your charm code to include:
 
 ```python
 # import the necessary charm libs
-from charms.tempo_k8s.v2.tracing import TracingEndpointRequirer, charm_tracing_config
-from charms.tempo_k8s.v1.charm_tracing import charm_tracing
+from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer, charm_tracing_config
+from charms.tempo_coordinator_k8s.v0.charm_tracing import charm_tracing
 
 # decorate your charm class with charm_tracing:
 @charm_tracing(
@@ -51,7 +51,7 @@ def __init__(self, ...):
 
 2) add to your charm a "my_tracing_endpoint" (you can name this attribute whatever you like)
 **property**, **method** or **instance attribute** that returns an otlp http/https endpoint url.
-If you are using the ``charms.tempo_k8s.v2.tracing.TracingEndpointRequirer`` as
+If you are using the ``charms.tempo_coordinator_k8s.v0.tracing.TracingEndpointRequirer`` as
 ``self.tracing = TracingEndpointRequirer(self)``, the implementation could be:
 
 ```
@@ -69,6 +69,9 @@ def my_tracing_endpoint(self) -> Optional[str]:
     - every event as a span (including custom events)
     - every charm method call (except dunders) as a span
 
+We recommend that you scale up your tracing provider and relate it to an ingress so that your tracing requests
+go through the ingress and get load balanced across all units. Otherwise, if the provider's leader goes down, your tracing goes down.
+
 
 ## TLS support
 If your charm integrates with a TLS provider which is also trusted by the tracing provider (the Tempo charm),
@@ -80,7 +83,7 @@ def my_tracing_endpoint(self) -> Optional[str]:
 
 For example:
 ```
-from charms.tempo_k8s.v1.charm_tracing import trace_charm
+from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm
 @trace_charm(
     tracing_endpoint="my_tracing_endpoint",
     server_cert="_server_cert"
@@ -114,9 +117,61 @@ def get_tracer(self) -> opentelemetry.trace.Tracer:
 See the official opentelemetry Python SDK documentation for usage:
 https://opentelemetry-python.readthedocs.io/en/latest/
 
-## Upgrading from `v0`
 
-If you are upgrading from `charm_tracing` v0, you need to take the following steps (assuming you already
+## Caching traces
+The `trace_charm` machinery will buffer any traces collected during charm execution and store them
+to a file on the charm container until a tracing backend becomes available. At that point, it will
+flush them to the tracing receiver.
+
+By default, the buffer is configured to start dropping old traces if any of these conditions apply:
+
+- the storage size exceeds 10 MiB
+- the number of buffered events exceeds 100
+
+You can configure this by, for example:
+
+```python
+@trace_charm(
+    tracing_endpoint="my_tracing_endpoint",
+    server_cert="_server_cert",
+    # only cache up to 42 events
+    buffer_max_events=42,
+    # only cache up to 42 MiB
+    buffer_max_size_mib=42,  # minimum 10!
+)
+class MyCharm(CharmBase):
+    ...
+```
+
+Note that setting `buffer_max_events` to 0 will effectively disable the buffer.
+
+By default, the buffer file lives in the charm's execution root, which for k8s charms means
+that the cache will be lost on pod churn. The recommended solution is to use an existing storage
+(or add a new one) such as:
+
+```yaml
+storage:
+  data:
+    type: filesystem
+    location: /charm-traces
+```
+
+and then configure the `@trace_charm` decorator to use it as path for storing the buffer:
+```python
+@trace_charm(
+    tracing_endpoint="my_tracing_endpoint",
+    server_cert="_server_cert",
+    # store traces to a PVC so they're not lost on pod restart.
+    buffer_path="/charm-traces/buffer.file",
+)
+class MyCharm(CharmBase):
+    ...
+```
+
+## Upgrading from `tempo_k8s.v0`
+
+If you are upgrading from `tempo_k8s.v0.charm_tracing` (note that since then, the charm library moved to
+`tempo_coordinator_k8s.v0.charm_tracing`), you need to take the following steps (assuming you already
 have the newest version of the library in your charm):
 1) If you need the dependency for your tests, add the following dependency to your charm project
 (or, if your project had a dependency on `opentelemetry-exporter-otlp-proto-grpc` only because
@@ -150,7 +205,7 @@ def my_tracing_endpoint(self) -> Optional[str]:
 needs to be replaced with:
 
 ```
-    from charms.tempo_k8s.v1.charm_tracing import trace_charm
+    from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm
 
     @trace_charm(
         tracing_endpoint="my_tracing_endpoint",
@@ -171,6 +226,12 @@ def my_tracing_endpoint(self) -> Optional[str]:
 3) If you were passing a certificate (str) using `server_cert`, you need to change it to
 provide an *absolute* path to the certificate file instead.
 """
+import typing
+
+from opentelemetry.exporter.otlp.proto.common._internal.trace_encoder import (
+    encode_spans,
+)
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
 
 
 def _remove_stale_otel_sdk_packages():
@@ -222,6 +283,9 @@ def _remove_stale_otel_sdk_packages():
     otel_logger.debug("Successfully applied _remove_stale_otel_sdk_packages patch. ")
 
 
+# apply hacky patch to remove stale opentelemetry sdk packages on upgrade-charm.
+# it could be trouble if someone ever decides to implement their own tracer parallel to
+# ours and before the charm has inited. We assume they won't.
 _remove_stale_otel_sdk_packages()
 
 import functools
@@ -235,6 +299,7 @@ def _remove_stale_otel_sdk_packages():
     Any,
     Callable,
     Generator,
+    List,
     Optional,
     Sequence,
     Type,
@@ -247,30 +312,33 @@ def _remove_stale_otel_sdk_packages():
 import ops
 from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
 from opentelemetry.sdk.resources import Resource
-from opentelemetry.sdk.trace import Span, TracerProvider
-from opentelemetry.sdk.trace.export import BatchSpanProcessor
+from opentelemetry.sdk.trace import ReadableSpan, Span, TracerProvider
+from opentelemetry.sdk.trace.export import (
+    BatchSpanProcessor,
+    SpanExporter,
+    SpanExportResult,
+)
+from opentelemetry.trace import INVALID_SPAN, Tracer
+from opentelemetry.trace import get_current_span as otlp_get_current_span
 from opentelemetry.trace import (
-    INVALID_SPAN,
-    Tracer,
     get_tracer,
     get_tracer_provider,
     set_span_in_context,
     set_tracer_provider,
 )
-from opentelemetry.trace import get_current_span as otlp_get_current_span
 from ops.charm import CharmBase
 from ops.framework import Framework
 
 # The unique Charmhub library identifier, never change it
-LIBID = "cb1705dcd1a14ca09b2e60187d1215c7"
+LIBID = "01780f1e588c42c3976d26780fdf9b89"
 
 # Increment this major API version when introducing breaking changes
-LIBAPI = 1
+LIBAPI = 0
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
 
-LIBPATCH = 15
+LIBPATCH = 5
 
 PYDEPS = ["opentelemetry-exporter-otlp-proto-http==1.21.0"]
 
@@ -278,7 +346,7 @@ def _remove_stale_otel_sdk_packages():
 dev_logger = logging.getLogger("tracing-dev")
 
 # set this to 0 if you are debugging/developing this library source
-dev_logger.setLevel(logging.CRITICAL)
+dev_logger.setLevel(logging.ERROR)
 
 _CharmType = Type[CharmBase]  # the type CharmBase and any subclass thereof
 _C = TypeVar("_C", bound=_CharmType)
@@ -288,6 +356,186 @@ def _remove_stale_otel_sdk_packages():
 _GetterType = Union[Callable[[_CharmType], Optional[str]], property]
 
 CHARM_TRACING_ENABLED = "CHARM_TRACING_ENABLED"
+BUFFER_DEFAULT_CACHE_FILE_NAME = ".charm_tracing_buffer.raw"
+# we store the buffer as raw otlp-native protobuf (bytes) since it's hard to serialize/deserialize it in
+# any portable format. Json dumping is supported, but loading isn't.
+# cfr: https://github.com/open-telemetry/opentelemetry-python/issues/1003
+
+BUFFER_DEFAULT_CACHE_FILE_SIZE_LIMIT_MiB = 10
+_BUFFER_CACHE_FILE_SIZE_LIMIT_MiB_MIN = 10
+BUFFER_DEFAULT_MAX_EVENT_HISTORY_LENGTH = 100
+_MiB_TO_B = 2**20  # megabyte to byte conversion rate
+_OTLP_SPAN_EXPORTER_TIMEOUT = 1
+"""Timeout in seconds that the OTLP span exporter has to push traces to the backend."""
+
+
+class _Buffer:
+    """Handles buffering for spans emitted while no tracing backend is configured or available.
+
+    Use the buffer_max_events and buffer_max_size_mib params of @trace_charm to tune
+    how many spans are kept and how much disk space the buffer file may use on your units.
+
+    The buffer is formatted as a bespoke byte dump (protobuf limitation).
+    We cannot store them as json because that is not well-supported by the sdk
+    (see https://github.com/open-telemetry/opentelemetry-python/issues/3364).
+    """
+
+    _SPANSEP = b"__CHARM_TRACING_BUFFER_SPAN_SEP__"
+
+    def __init__(self, db_file: Path, max_event_history_length: int, max_buffer_size_mib: int):
+        self._db_file = db_file
+        self._max_event_history_length = max_event_history_length
+        self._max_buffer_size_mib = max(max_buffer_size_mib, _BUFFER_CACHE_FILE_SIZE_LIMIT_MiB_MIN)
+
+        # set by caller
+        self.exporter: Optional[OTLPSpanExporter] = None
+
+    def save(self, spans: typing.Sequence[ReadableSpan]):
+        """Save the spans collected by this exporter to the cache file.
+
+        This method should be as fail-safe as possible.
+        """
+        if self._max_event_history_length < 1:
+            dev_logger.debug("buffer disabled: max history length < 1")
+            return
+
+        current_history_length = len(self.load())
+        new_history_length = current_history_length + len(spans)
+        if (diff := self._max_event_history_length - new_history_length) < 0:
+            self.drop(diff)
+        self._save(spans)
+
+    def _serialize(self, spans: Sequence[ReadableSpan]) -> bytes:
+        # serialize to otlp-native protobuf bytes; the sdk can json-dump spans but not load them back
+        return encode_spans(spans).SerializeToString()
+
+    def _save(self, spans: Sequence[ReadableSpan], replace: bool = False):
+        dev_logger.debug(f"saving {len(spans)} new spans to buffer")
+        old = [] if replace else self.load()
+        new = self._serialize(spans)
+
+        try:
+            # if the buffer exceeds the size limit, we start dropping old spans until it no longer does
+
+            while len((new + self._SPANSEP.join(old))) > (self._max_buffer_size_mib * _MiB_TO_B):
+                if not old:
+                    # if we've already dropped all spans and still we can't get under the
+                    # size limit, we can't save this span
+                    logger.error(
+                        f"span exceeds total buffer size limit ({self._max_buffer_size_mib}MiB); "
+                        f"buffering FAILED"
+                    )
+                    return
+
+                old = old[1:]
+                logger.warning(
+                    f"buffer size exceeds {self._max_buffer_size_mib}MiB; dropping older spans... "
+                    f"Please increase the buffer size, disable buffering, or ensure the spans can be flushed."
+                )
+
+            self._db_file.write_bytes(new + self._SPANSEP.join(old))
+        except Exception:
+            logger.exception("error buffering spans")
+
+    def load(self) -> List[bytes]:
+        """Load currently buffered spans from the cache file.
+
+        This method should be as fail-safe as possible.
+        """
+        if not self._db_file.exists():
+            dev_logger.debug("buffer file not found. buffer empty.")
+            return []
+        try:
+            spans = self._db_file.read_bytes().split(self._SPANSEP)
+        except Exception:
+            logger.exception(f"error parsing {self._db_file}")
+            return []
+        return spans
+
+    def drop(self, n_spans: Optional[int] = None):
+        """Drop some currently buffered spans from the cache file."""
+        current = self.load()
+        if n_spans:
+            dev_logger.debug(f"dropping {n_spans} spans from buffer")
+            new = current[n_spans:]
+        else:
+            dev_logger.debug("emptying buffer")
+            new = []
+
+        self._db_file.write_bytes(self._SPANSEP.join(new))
+
+    def flush(self) -> Optional[bool]:
+        """Export all buffered spans to the given exporter, then clear the buffer.
+
+        Returns whether the flush was successful, and None if there was nothing to flush.
+        """
+        if not self.exporter:
+            dev_logger.debug("no exporter set; skipping buffer flush")
+            return False
+
+        buffered_spans = self.load()
+        if not buffered_spans:
+            dev_logger.debug("nothing to flush; buffer empty")
+            return None
+
+        errors = False
+        for span in buffered_spans:
+            try:
+                out = self.exporter._export(span)  # type: ignore
+                if not (200 <= out.status_code < 300):
+                    # take any 2xx status code as a success
+                    errors = True
+            except ConnectionError:
+                dev_logger.debug(
+                    "failed exporting buffered span; backend might be down or still starting"
+                )
+                errors = True
+            except Exception:
+                logger.exception("unexpected error while flushing span batch from buffer")
+                errors = True
+
+        if not errors:
+            self.drop()
+        else:
+            logger.error("failed flushing spans; buffer preserved")
+        return not errors
+
+    @property
+    def is_empty(self):
+        """Utility to check whether the buffer has any stored spans.
+
+        This is more efficient than attempting a load() given how large the buffer might be.
+        """
+        return (not self._db_file.exists()) or (self._db_file.stat().st_size == 0)
+
+
+class _OTLPSpanExporter(OTLPSpanExporter):
+    """Subclass of OTLPSpanExporter to configure the max retry timeout, so that it fails a bit faster."""
+
+    # The issue we're trying to solve is that the model takes AGES to settle if e.g. TLS is misconfigured:
+    # every hook of a charm_tracing-instrumented charm takes about a minute to exit, because the charm can't
+    # flush the traces and keeps retrying for too long.
+
+    _MAX_RETRY_TIMEOUT = 4
+    # we give the exporter 4 seconds in total to succeed pushing the traces to tempo
+    # if it fails, we'll be caching the data in the buffer and flush it the next time, so there's no data loss risk.
+    # this means 2-3 retries (hard to tell from the implementation) and up to ~7 seconds of total wait
+
+
+class _BufferedExporter(InMemorySpanExporter):
+    def __init__(self, buffer: _Buffer) -> None:
+        super().__init__()
+        self._buffer = buffer
+
+    def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult:
+        self._buffer.save(spans)
+        return super().export(spans)
+
+    def force_flush(self, timeout_millis: int = 0) -> bool:
+        # parent implementation is fake, so the timeout_millis arg is not doing anything.
+        result = super().force_flush(timeout_millis)
+        self._buffer.save(self.get_finished_spans())
+        return result
 
 
 def is_enabled() -> bool:
@@ -332,7 +580,7 @@ def _get_tracer() -> Optional[Tracer]:
         return tracer.get()
     except LookupError:
         # fallback: this course-corrects for a user error where charm_tracing symbols are imported
-        # from different paths (typically charms.tempo_k8s... and lib.charms.tempo_k8s...)
+        # from different paths (typically charms.tempo_coordinator_k8s... and lib.charms.tempo_coordinator_k8s...)
         try:
             ctx: Context = copy_context()
             if context_tracer := _get_tracer_from_context(ctx):
@@ -372,10 +620,6 @@ class UntraceableObjectError(TracingError):
     """Raised when an object you're attempting to instrument cannot be autoinstrumented."""
 
 
-class TLSError(TracingError):
-    """Raised when the tracing endpoint is https but we don't have a cert yet."""
-
-
 def _get_tracing_endpoint(
     tracing_endpoint_attr: str,
     charm_instance: object,
@@ -428,7 +672,10 @@ def _setup_root_span_initializer(
     charm_type: _CharmType,
     tracing_endpoint_attr: str,
     server_cert_attr: Optional[str],
-    service_name: Optional[str] = None,
+    service_name: Optional[str],
+    buffer_path: Optional[Path],
+    buffer_max_events: int,
+    buffer_max_size_mib: int,
 ):
     """Patch the charm's initializer."""
     original_init = charm_type.__init__
@@ -447,18 +694,11 @@ def wrap_init(self: CharmBase, framework: Framework, *args, **kwargs):
             logger.info("Tracing DISABLED: skipping root span initialization")
             return
 
-        # already init some attrs that will be reinited later by calling original_init:
-        # self.framework = framework
-        # self.handle = Handle(None, self.handle_kind, None)
-
         original_event_context = framework._event_context
         # default service name isn't just app name because it could conflict with the workload service name
         _service_name = service_name or f"{self.app.name}-charm"
 
         unit_name = self.unit.name
-        # apply hacky patch to remove stale opentelemetry sdk packages on upgrade-charm.
-        # it could be trouble if someone ever decides to implement their own tracer parallel to
-        # ours and before the charm has inited. We assume they won't.
         resource = Resource.create(
             attributes={
                 "service.name": _service_name,
@@ -476,28 +716,60 @@ def wrap_init(self: CharmBase, framework: Framework, *args, **kwargs):
         # if anything goes wrong with retrieving the endpoint, we let the exception bubble up.
         tracing_endpoint = _get_tracing_endpoint(tracing_endpoint_attr, self, charm_type)
 
+        buffer_only = False
+        # whether we're only exporting to buffer, or also to the otlp exporter.
+
         if not tracing_endpoint:
             # tracing is off if tracing_endpoint is None
-            return
+            # however we can buffer things until tracing comes online
+            buffer_only = True
 
         server_cert: Optional[Union[str, Path]] = (
             _get_server_cert(server_cert_attr, self, charm_type) if server_cert_attr else None
         )
 
-        if tracing_endpoint.startswith("https://") and not server_cert:
-            raise TLSError(
+        if (tracing_endpoint and tracing_endpoint.startswith("https://")) and not server_cert:
+            logger.error(
                 "Tracing endpoint is https, but no server_cert has been passed."
-                "Please point @trace_charm to a `server_cert` attr."
+                "Please point @trace_charm to a `server_cert` attr. "
+                "This might also mean that the tracing provider is related to a "
+                "certificates provider, but this application is not (yet). "
+                "In that case, you might just have to wait a bit for the certificates "
+                "integration to settle. This span will be buffered."
             )
+            buffer_only = True
 
-        exporter = OTLPSpanExporter(
-            endpoint=tracing_endpoint,
-            certificate_file=str(Path(server_cert).absolute()) if server_cert else None,
-            timeout=2,
+        buffer = _Buffer(
+            db_file=buffer_path or Path() / BUFFER_DEFAULT_CACHE_FILE_NAME,
+            max_event_history_length=buffer_max_events,
+            max_buffer_size_mib=buffer_max_size_mib,
         )
+        previous_spans_buffered = not buffer.is_empty
+
+        exporters: List[SpanExporter] = []
+        if buffer_only:
+            # we have to buffer because we're missing necessary backend configuration
+            dev_logger.debug("buffering mode: ON")
+            exporters.append(_BufferedExporter(buffer))
+
+        else:
+            dev_logger.debug("buffering mode: FALLBACK")
+            # in principle, we have the right configuration to be pushing traces,
+            # but if we fail for whatever reason, we will put everything in the buffer
+            # and retry the next time
+            otlp_exporter = _OTLPSpanExporter(
+                endpoint=tracing_endpoint,
+                certificate_file=str(Path(server_cert).absolute()) if server_cert else None,
+                timeout=_OTLP_SPAN_EXPORTER_TIMEOUT,  # give individual requests 1 second to succeed
+            )
+            exporters.append(otlp_exporter)
+            exporters.append(_BufferedExporter(buffer))
+            buffer.exporter = otlp_exporter
+
+        for exporter in exporters:
+            processor = BatchSpanProcessor(exporter)
+            provider.add_span_processor(processor)
 
-        processor = BatchSpanProcessor(exporter)
-        provider.add_span_processor(processor)
         set_tracer_provider(provider)
         _tracer = get_tracer(_service_name)  # type: ignore
         _tracer_token = tracer.set(_tracer)
@@ -521,7 +793,7 @@ def wrap_init(self: CharmBase, framework: Framework, *args, **kwargs):
 
         @contextmanager
         def wrap_event_context(event_name: str):
-            dev_logger.info(f"entering event context: {event_name}")
+            dev_logger.debug(f"entering event context: {event_name}")
             # when the framework enters an event context, we create a span.
             with _span("event: " + event_name) as event_context_span:
                 if event_context_span:
@@ -535,12 +807,50 @@ def wrap_event_context(event_name: str):
 
         @functools.wraps(original_close)
         def wrap_close():
-            dev_logger.info("tearing down tracer and flushing traces")
+            dev_logger.debug("tearing down tracer and flushing traces")
             span.end()
             opentelemetry.context.detach(span_token)  # type: ignore
             tracer.reset(_tracer_token)
             tp = cast(TracerProvider, get_tracer_provider())
-            tp.force_flush(timeout_millis=1000)  # don't block for too long
+            flush_successful = tp.force_flush(timeout_millis=1000)  # don't block for too long
+
+            if buffer_only:
+                # if we're in buffer_only mode, it means we couldn't even set up the exporter for
+                # tempo as we're missing some data.
+                # so attempting to flush the buffer doesn't make sense
+                dev_logger.debug("tracing backend unavailable: all spans pushed to buffer")
+
+            else:
+                dev_logger.debug("tracing backend found: attempting to flush buffer...")
+
+                # if we do have an exporter for tempo, and we could send traces to it,
+                # we can attempt to flush the buffer as well.
+                if not flush_successful:
+                    logger.error("flushing FAILED: unable to push traces to backend.")
+                else:
+                    dev_logger.debug("flush succeeded.")
+
+                    # the backend has accepted the spans generated during this event,
+                    if not previous_spans_buffered:
+                        # if the buffer was empty to begin with, any spans we collected now can be discarded
+                        buffer.drop()
+                        dev_logger.debug("buffer dropped: this trace has been sent already")
+                    else:
+                        # if the buffer was nonempty, we can attempt to flush it
+                        dev_logger.debug("attempting buffer flush...")
+                        buffer_flush_successful = buffer.flush()
+                        if buffer_flush_successful:
+                            dev_logger.debug("buffer flush OK")
+                        elif buffer_flush_successful is None:
+                            # TODO is this even possible?
+                            dev_logger.debug("buffer flush OK; empty: nothing to flush")
+                        else:
+                            # this situation is pretty weird, I'm not even sure it can happen,
+                            # because it would mean that we did manage
+                            # to push traces directly to the tempo exporter (flush_successful),
+                            # but the buffer flush failed to push to the same exporter!
+                            logger.error("buffer flush FAILED")
+
             tp.shutdown()
             original_close()
 
@@ -555,6 +865,9 @@ def trace_charm(
     server_cert: Optional[str] = None,
     service_name: Optional[str] = None,
     extra_types: Sequence[type] = (),
+    buffer_max_events: int = BUFFER_DEFAULT_MAX_EVENT_HISTORY_LENGTH,
+    buffer_max_size_mib: int = BUFFER_DEFAULT_CACHE_FILE_SIZE_LIMIT_MiB,
+    buffer_path: Optional[Union[str, Path]] = None,
 ) -> Callable[[_T], _T]:
     """Autoinstrument the decorated charm with tracing telemetry.
 
@@ -562,8 +875,8 @@ def trace_charm(
     method calls on instances of this class.
 
     Usage:
-    >>> from charms.tempo_k8s.v1.charm_tracing import trace_charm
-    >>> from charms.tempo_k8s.v1.tracing import TracingEndpointRequirer
+    >>> from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm
+    >>> from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer
     >>> from ops import CharmBase
     >>>
     >>> @trace_charm(
@@ -596,6 +909,10 @@ def trace_charm(
         Defaults to the juju application name this charm is deployed under.
     :param extra_types: pass any number of types that you also wish to autoinstrument.
         For example, charm libs, relation endpoint wrappers, workload abstractions, ...
+    :param buffer_max_events: max number of events to save in the buffer. Set to 0 to disable buffering.
+    :param buffer_max_size_mib: max size of the buffer file. When exceeded, spans will be dropped.
+        Minimum 10MiB.
+    :param buffer_path: path to buffer file to use for saving buffered spans.
     """
 
     def _decorator(charm_type: _T) -> _T:
@@ -606,6 +923,9 @@ def _decorator(charm_type: _T) -> _T:
             server_cert_attr=server_cert,
             service_name=service_name,
             extra_types=extra_types,
+            buffer_path=Path(buffer_path) if buffer_path else None,
+            buffer_max_size_mib=buffer_max_size_mib,
+            buffer_max_events=buffer_max_events,
         )
         return charm_type
 
@@ -618,6 +938,9 @@ def _autoinstrument(
     server_cert_attr: Optional[str] = None,
     service_name: Optional[str] = None,
     extra_types: Sequence[type] = (),
+    buffer_max_events: int = BUFFER_DEFAULT_MAX_EVENT_HISTORY_LENGTH,
+    buffer_max_size_mib: int = BUFFER_DEFAULT_CACHE_FILE_SIZE_LIMIT_MiB,
+    buffer_path: Optional[Path] = None,
 ) -> _T:
     """Set up tracing on this charm class.
 
@@ -626,7 +949,7 @@ def _autoinstrument(
 
     Usage:
 
-    >>> from charms.tempo_k8s.v1.charm_tracing import _autoinstrument
+    >>> from charms.tempo_coordinator_k8s.v0.charm_tracing import _autoinstrument
     >>> from ops.main import main
     >>> _autoinstrument(
     >>>         MyCharm,
@@ -650,13 +973,20 @@ def _autoinstrument(
         Defaults to the juju application name this charm is deployed under.
     :param extra_types: pass any number of types that you also wish to autoinstrument.
         For example, charm libs, relation endpoint wrappers, workload abstractions, ...
+    :param buffer_max_events: max number of events to save in the buffer. Set to 0 to disable buffering.
+    :param buffer_max_size_mib: max size of the buffer file. When exceeded, spans will be dropped.
+        Minimum 10MiB.
+    :param buffer_path: path to buffer file to use for saving buffered spans.
     """
-    dev_logger.info(f"instrumenting {charm_type}")
+    dev_logger.debug(f"instrumenting {charm_type}")
     _setup_root_span_initializer(
         charm_type,
         tracing_endpoint_attr,
         server_cert_attr=server_cert_attr,
         service_name=service_name,
+        buffer_path=buffer_path,
+        buffer_max_events=buffer_max_events,
+        buffer_max_size_mib=buffer_max_size_mib,
     )
     trace_type(charm_type)
     for type_ in extra_types:
@@ -672,12 +1002,12 @@ def trace_type(cls: _T) -> _T:
     It assumes that this class is only instantiated after a charm type decorated with `@trace_charm`
     has been instantiated.
     """
-    dev_logger.info(f"instrumenting {cls}")
+    dev_logger.debug(f"instrumenting {cls}")
     for name, method in inspect.getmembers(cls, predicate=inspect.isfunction):
-        dev_logger.info(f"discovered {method}")
+        dev_logger.debug(f"discovered {method}")
 
         if method.__name__.startswith("__"):
-            dev_logger.info(f"skipping {method} (dunder)")
+            dev_logger.debug(f"skipping {method} (dunder)")
             continue
 
         # the span title in the general case should be:
@@ -723,7 +1053,7 @@ def trace_function(function: _F, name: Optional[str] = None) -> _F:
 
 
 def _trace_callable(callable: _F, qualifier: str, name: Optional[str] = None) -> _F:
-    dev_logger.info(f"instrumenting {callable}")
+    dev_logger.debug(f"instrumenting {callable}")
 
     # sig = inspect.signature(callable)
     @functools.wraps(callable)
diff --git a/lib/charms/tempo_k8s/v2/tracing.py b/lib/charms/tempo_coordinator_k8s/v0/tracing.py
similarity index 93%
rename from lib/charms/tempo_k8s/v2/tracing.py
rename to lib/charms/tempo_coordinator_k8s/v0/tracing.py
index dfb23365f..27144fa62 100644
--- a/lib/charms/tempo_k8s/v2/tracing.py
+++ b/lib/charms/tempo_coordinator_k8s/v0/tracing.py
@@ -16,7 +16,7 @@
  This relation must use the `tracing` interface.
  The `TracingEndpointRequirer` object may be instantiated as follows
 
-    from charms.tempo_k8s.v2.tracing import TracingEndpointRequirer
+    from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer
 
     def __init__(self, *args):
         super().__init__(*args)
@@ -34,7 +34,7 @@ def __init__(self, *args):
 `TracingEndpointRequirer.request_protocols(*protocol:str, relation:Optional[Relation])` method.
 Using this method also allows you to use per-relation protocols.
 
-Units of provider charms obtain the tempo endpoint to which they will push their traces by calling
+Units of requirer charms obtain the tempo endpoint to which they will push their traces by calling
 `TracingEndpointRequirer.get_endpoint(protocol: str)`, where `protocol` is, for example:
 - `otlp_grpc`
 - `otlp_http`
@@ -44,7 +44,10 @@ def __init__(self, *args):
 If the `protocol` is not in the list of protocols that the charm requested at endpoint set-up time,
 the library will raise an error.
 
-## Requirer Library Usage
+We recommend that you scale up your tracing provider and relate it to an ingress so that your tracing requests
+go through the ingress and get load balanced across all units. Otherwise, if the provider's leader goes down, your tracing goes down.
+
+## Provider Library Usage
 
 The `TracingEndpointProvider` object may be used by charms to manage relations with their
 trace sources. For this purpose, a Tempo-like charm needs to do two things
@@ -58,7 +61,7 @@ def __init__(self, *args):
 For example a Tempo charm may instantiate the `TracingEndpointProvider` in its constructor as
 follows
 
-    from charms.tempo_k8s.v2.tracing import TracingEndpointProvider
+    from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointProvider
 
     def __init__(self, *args):
         super().__init__(*args)
@@ -97,17 +100,17 @@ def __init__(self, *args):
 )
 from ops.framework import EventSource, Object
 from ops.model import ModelError, Relation
-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import BaseModel, Field
 
 # The unique Charmhub library identifier, never change it
-LIBID = "12977e9aa0b34367903d8afeb8c3d85d"
+LIBID = "d2f02b1f8d1244b5989fd55bc3a28943"
 
 # Increment this major API version when introducing breaking changes
-LIBAPI = 2
+LIBAPI = 0
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 8
+LIBPATCH = 5
 
 PYDEPS = ["pydantic"]
 
@@ -338,7 +341,7 @@ class Config:
     class ProtocolType(BaseModel):
         """Protocol Type."""
 
-        model_config = ConfigDict(
+        model_config = ConfigDict(  # type: ignore
             # Allow serializing enum values.
             use_enum_values=True
         )
@@ -888,13 +891,15 @@ def _get_endpoint(
             filter(lambda i: i.protocol.name == protocol, app_data.receivers)
         )
         if not receivers:
-            logger.error(f"no receiver found with protocol={protocol!r}")
+            # This can happen if the charm requests tracing protocols, but the relay (such as grafana-agent) isn't yet
+            # connected to the tracing backend. In this case, it's not an error the charm author can do anything about.
+            logger.warning(f"no receiver found with protocol={protocol!r}.")
             return
         if len(receivers) > 1:
-            logger.error(
+            # If more than one receiver matches, it shouldn't matter which one we use.
+            logger.warning(
                 f"too many receivers with protocol={protocol!r}; using first one. Found: {receivers}"
             )
-            return
 
         receiver = receivers[0]
         return receiver.url
@@ -902,7 +907,16 @@ def _get_endpoint(
     def get_endpoint(
         self, protocol: ReceiverProtocol, relation: Optional[Relation] = None
     ) -> Optional[str]:
-        """Receiver endpoint for the given protocol."""
+        """Receiver endpoint for the given protocol.
+
+        It could happen that this function gets called before the provider publishes the endpoints.
+        In such a scenario, if a non-leader unit calls this function, a permission denied exception will be raised due to
+        restricted access. To prevent this, this function needs to be guarded by the `is_ready` check.
+
+        Raises:
+        ProtocolNotRequestedError:
+            If the charm unit is the leader unit and attempts to obtain an endpoint for a protocol it did not request.
+        """
         endpoint = self._get_endpoint(relation or self._relation, protocol=protocol)
         if not endpoint:
             requested_protocols = set()
@@ -925,7 +939,7 @@ def get_endpoint(
 def charm_tracing_config(
     endpoint_requirer: TracingEndpointRequirer, cert_path: Optional[Union[Path, str]]
 ) -> Tuple[Optional[str], Optional[str]]:
-    """Utility function to determine the charm_tracing config you will likely want.
+    """Return the charm_tracing config you likely want.
 
     If no endpoint is provided:
      disable charm tracing.
@@ -937,9 +951,8 @@ def charm_tracing_config(
      proceed with charm tracing (with or without tls, as appropriate)
 
     Usage:
-      If you are using charm_tracing >= v1.9:
-    >>> from lib.charms.tempo_k8s.v1.charm_tracing import trace_charm
-    >>> from lib.charms.tempo_k8s.v2.tracing import charm_tracing_config
+    >>> from lib.charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm
+    >>> from lib.charms.tempo_coordinator_k8s.v0.tracing import charm_tracing_config
     >>> @trace_charm(tracing_endpoint="my_endpoint", cert_path="cert_path")
     >>> class MyCharm(...):
     >>>     _cert_path = "/path/to/cert/on/charm/container.crt"
@@ -947,24 +960,6 @@ def charm_tracing_config(
     >>>         self.tracing = TracingEndpointRequirer(...)
     >>>         self.my_endpoint, self.cert_path = charm_tracing_config(
     ...             self.tracing, self._cert_path)
-
-      If you are using charm_tracing < v1.9:
-    >>> from lib.charms.tempo_k8s.v1.charm_tracing import trace_charm
-    >>> from lib.charms.tempo_k8s.v2.tracing import charm_tracing_config
-    >>> @trace_charm(tracing_endpoint="my_endpoint", cert_path="cert_path")
-    >>> class MyCharm(...):
-    >>>     _cert_path = "/path/to/cert/on/charm/container.crt"
-    >>>     def __init__(self, ...):
-    >>>         self.tracing = TracingEndpointRequirer(...)
-    >>>         self._my_endpoint, self._cert_path = charm_tracing_config(
-    ...             self.tracing, self._cert_path)
-    >>>     @property
-    >>>     def my_endpoint(self):
-    >>>         return self._my_endpoint
-    >>>     @property
-    >>>     def cert_path(self):
-    >>>         return self._cert_path
-
     """
     if not endpoint_requirer.is_ready():
         return None, None
@@ -976,11 +971,16 @@ def charm_tracing_config(
     is_https = endpoint.startswith("https://")
 
     if is_https:
-        if cert_path is None:
-            raise TracingError("Cannot send traces to an https endpoint without a certificate.")
-        elif not Path(cert_path).exists():
-            # if endpoint is https BUT we don't have a server_cert yet:
-            # disable charm tracing until we do to prevent tls errors
+        if cert_path is None or not Path(cert_path).exists():
+            # disable charm tracing until we obtain a cert to prevent tls errors
+            logger.error(
+                "Tracing endpoint is https, but no server_cert has been passed."
+                "Please point @trace_charm to a `server_cert` attr. "
+                "This might also mean that the tracing provider is related to a "
+                "certificates provider, but this application is not (yet). "
+                "In that case, you might just have to wait a bit for the certificates "
+                "integration to settle. "
+            )
             return None, None
         return endpoint, str(cert_path)
     else:
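Illustrative only, not part of the patch: a minimal sketch of the `is_ready` guard recommended in the `get_endpoint` docstring above. `MyCharm` is hypothetical, and the requirer is assumed here to accept a `protocols` list at construction time.

    from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer
    from ops.charm import CharmBase


    class MyCharm(CharmBase):
        def __init__(self, *args):
            super().__init__(*args)
            self.tracing = TracingEndpointRequirer(self, protocols=["otlp_http"])

        @property
        def tempo_otlp_http_endpoint(self):
            # Guard with is_ready() so that calls made before the provider has
            # published its endpoints return None instead of erroring out.
            if self.tracing.is_ready():
                return self.tracing.get_endpoint("otlp_http")
            return None
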
diff --git a/lib/charms/tls_certificates_interface/v2/tls_certificates.py b/lib/charms/tls_certificates_interface/v2/tls_certificates.py
index 9f67833ba..c232362fe 100644
--- a/lib/charms/tls_certificates_interface/v2/tls_certificates.py
+++ b/lib/charms/tls_certificates_interface/v2/tls_certificates.py
@@ -307,7 +307,7 @@ def _on_all_certificates_invalidated(self, event: AllCertificatesInvalidatedEven
 
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 28
+LIBPATCH = 29
 
 PYDEPS = ["cryptography", "jsonschema"]
 
@@ -459,7 +459,7 @@ def restore(self, snapshot: dict):
 class CertificateExpiringEvent(EventBase):
     """Charm Event triggered when a TLS certificate is almost expired."""
 
-    def __init__(self, handle, certificate: str, expiry: str):
+    def __init__(self, handle: Handle, certificate: str, expiry: str):
         """CertificateExpiringEvent.
 
         Args:
diff --git a/metadata.yaml b/metadata.yaml
index 27c00d419..8deed8513 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -32,7 +32,7 @@ resources:
   mysql-image:
     type: oci-image
     description: Ubuntu LTS Docker image for MySQL
-    upstream-source: ghcr.io/canonical/charmed-mysql@sha256:704af773157fa6c36e5123cf04cac5375971c691b1411ab76cded596e8131dd3
+    upstream-source: ghcr.io/canonical/charmed-mysql@sha256:089fc04dd2d6f1559161ddf4720c1e06559aeb731ecae57b050c9c816e9833e9
 
 peers:
   database-peers:
diff --git a/poetry.lock b/poetry.lock
index 6ac66e7c6..d93577bc6 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand.
 
 [[package]]
 name = "allure-pytest"
@@ -6,6 +6,7 @@ version = "2.13.5"
 description = "Allure pytest integration"
 optional = false
 python-versions = "*"
+groups = ["integration"]
 files = [
     {file = "allure-pytest-2.13.5.tar.gz", hash = "sha256:0ef8e1790c44a988db6b83c4d4f5e91451e2c4c8ea10601dfa88528d23afcf6e"},
     {file = "allure_pytest-2.13.5-py3-none-any.whl", hash = "sha256:94130bac32964b78058e62cf4b815ad97a5ac82a065e6dd2d43abac2be7640fc"},
@@ -16,31 +17,28 @@ allure-python-commons = "2.13.5"
 pytest = ">=4.5.0"
 
 [[package]]
-name = "allure-pytest-collection-report"
-version = "0.1.0"
-description = ""
+name = "allure-pytest-default-results"
+version = "0.1.2"
+description = "Generate default \"unknown\" results to show in Allure Report if test case does not run"
 optional = false
-python-versions = "^3.8"
-files = []
-develop = false
+python-versions = ">=3.8"
+groups = ["integration"]
+files = [
+    {file = "allure_pytest_default_results-0.1.2-py3-none-any.whl", hash = "sha256:8dc6c5a5d548661c38111a2890509e794204586fa81cefbe61315fb63996e50c"},
+    {file = "allure_pytest_default_results-0.1.2.tar.gz", hash = "sha256:eb6c16aa1c2ede69e653a0ee38094791685eaacb0ac6b2cae5c6da1379dbdbfd"},
+]
 
 [package.dependencies]
 allure-pytest = ">=2.13.5"
 pytest = "*"
 
-[package.source]
-type = "git"
-url = "https://github.com/canonical/data-platform-workflows"
-reference = "v21.0.1"
-resolved_reference = "06f252ea079edfd055cee236ede28c237467f9b0"
-subdirectory = "python/pytest_plugins/allure_pytest_collection_report"
-
 [[package]]
 name = "allure-python-commons"
 version = "2.13.5"
 description = "('Contains the API for end users as well as helper functions and classes to build Allure adapters for Python test frameworks',)"
 optional = false
 python-versions = ">=3.6"
+groups = ["integration"]
 files = [
     {file = "allure-python-commons-2.13.5.tar.gz", hash = "sha256:a232e7955811f988e49a4c1dd6c16cce7e9b81d0ea0422b1e5654d3254e2caf3"},
     {file = "allure_python_commons-2.13.5-py3-none-any.whl", hash = "sha256:8b0e837b6e32d810adec563f49e1d04127a5b6770e0232065b7cb09b9953980d"},
@@ -56,6 +54,7 @@ version = "4.4.0"
 description = "High level compatibility layer for multiple asynchronous event loop implementations"
 optional = false
 python-versions = ">=3.8"
+groups = ["main", "charm-libs", "integration"]
 files = [
     {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"},
     {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"},
@@ -78,6 +77,7 @@ version = "2.4.1"
 description = "Annotate AST trees with source code positions"
 optional = false
 python-versions = "*"
+groups = ["integration"]
 files = [
     {file = "asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24"},
     {file = "asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0"},
@@ -96,6 +96,7 @@ version = "23.2.0"
 description = "Classes Without Boilerplate"
 optional = false
 python-versions = ">=3.7"
+groups = ["charm-libs", "integration"]
 files = [
     {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"},
     {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"},
@@ -115,17 +116,32 @@ version = "2.2.1"
 description = "Function decoration for backoff and retry"
 optional = false
 python-versions = ">=3.7,<4.0"
+groups = ["charm-libs"]
 files = [
     {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"},
     {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"},
 ]
 
+[[package]]
+name = "backports-strenum"
+version = "1.3.1"
+description = "Base class for creating enumerated constants that are also subclasses of str"
+optional = false
+python-versions = ">=3.8.6,<3.11"
+groups = ["integration"]
+markers = "python_version < \"3.11\""
+files = [
+    {file = "backports_strenum-1.3.1-py3-none-any.whl", hash = "sha256:cdcfe36dc897e2615dc793b7d3097f54d359918fc448754a517e6f23044ccf83"},
+    {file = "backports_strenum-1.3.1.tar.gz", hash = "sha256:77c52407342898497714f0596e86188bb7084f89063226f4ba66863482f42414"},
+]
+
 [[package]]
 name = "bcrypt"
 version = "4.1.3"
 description = "Modern password hashing for your software and your servers"
 optional = false
 python-versions = ">=3.7"
+groups = ["integration"]
 files = [
     {file = "bcrypt-4.1.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:48429c83292b57bf4af6ab75809f8f4daf52aa5d480632e53707805cc1ce9b74"},
     {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a8bea4c152b91fd8319fef4c6a790da5c07840421c2b785084989bf8bbb7455"},
@@ -166,6 +182,7 @@ version = "1.34.118"
 description = "The AWS SDK for Python"
 optional = false
 python-versions = ">=3.8"
+groups = ["main", "integration"]
 files = [
     {file = "boto3-1.34.118-py3-none-any.whl", hash = "sha256:e9edaf979fbe59737e158f2f0f3f0861ff1d61233f18f6be8ebb483905f24587"},
     {file = "boto3-1.34.118.tar.gz", hash = "sha256:4eb8019421cb664a6fcbbee6152aa95a28ce8bbc1c4ee263871c09cdd58bf8ee"},
@@ -185,6 +202,7 @@ version = "1.34.118"
 description = "Low-level, data-driven core of boto 3."
 optional = false
 python-versions = ">=3.8"
+groups = ["main", "integration"]
 files = [
     {file = "botocore-1.34.118-py3-none-any.whl", hash = "sha256:e3f6c5636a4394768e81e33a16f5c6ae7f364f512415d423f9b9dc67fc638df4"},
     {file = "botocore-1.34.118.tar.gz", hash = "sha256:0a3d1ec0186f8b516deb39474de3d226d531f77f92a0f56ad79b80219db3ae9e"},
@@ -204,6 +222,7 @@ version = "5.3.3"
 description = "Extensible memoizing collections and decorators"
 optional = false
 python-versions = ">=3.7"
+groups = ["integration"]
 files = [
     {file = "cachetools-5.3.3-py3-none-any.whl", hash = "sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945"},
     {file = "cachetools-5.3.3.tar.gz", hash = "sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105"},
@@ -215,6 +234,7 @@ version = "2024.6.2"
 description = "Python package for providing Mozilla's CA Bundle."
 optional = false
 python-versions = ">=3.6"
+groups = ["main", "charm-libs", "integration"]
 files = [
     {file = "certifi-2024.6.2-py3-none-any.whl", hash = "sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56"},
     {file = "certifi-2024.6.2.tar.gz", hash = "sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516"},
@@ -226,6 +246,7 @@ version = "1.16.0"
 description = "Foreign Function Interface for Python calling C code."
 optional = false
 python-versions = ">=3.8"
+groups = ["charm-libs", "integration"]
 files = [
     {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"},
     {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"},
@@ -280,6 +301,7 @@ files = [
     {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"},
     {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"},
 ]
+markers = {charm-libs = "platform_python_implementation != \"PyPy\""}
 
 [package.dependencies]
 pycparser = "*"
@@ -290,6 +312,7 @@ version = "3.3.2"
 description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
 optional = false
 python-versions = ">=3.7.0"
+groups = ["charm-libs", "integration"]
 files = [
     {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"},
     {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"},
@@ -389,6 +412,7 @@ version = "2.3.0"
 description = "Codespell"
 optional = false
 python-versions = ">=3.8"
+groups = ["lint"]
 files = [
     {file = "codespell-2.3.0-py3-none-any.whl", hash = "sha256:a9c7cef2501c9cfede2110fd6d4e5e62296920efe9abfb84648df866e47f58d1"},
     {file = "codespell-2.3.0.tar.gz", hash = "sha256:360c7d10f75e65f67bad720af7007e1060a5d395670ec11a7ed1fed9dd17471f"},
@@ -406,6 +430,8 @@ version = "0.4.6"
 description = "Cross-platform colored terminal text."
 optional = false
 python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+groups = ["integration", "unit"]
+markers = "sys_platform == \"win32\""
 files = [
     {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
     {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
@@ -413,18 +439,22 @@ files = [
 
 [[package]]
 name = "cosl"
-version = "0.0.11"
+version = "0.0.54"
 description = "Utils for COS Lite charms"
 optional = false
 python-versions = ">=3.8"
+groups = ["charm-libs"]
 files = [
-    {file = "cosl-0.0.11-py3-none-any.whl", hash = "sha256:46d78d6441ba628bae386cd8c10b8144558ab208115522020e7858f97837988d"},
-    {file = "cosl-0.0.11.tar.gz", hash = "sha256:15cac6ed20b65e9d33cda3c3da32e299c82f9feea64e393448cd3d3cf2bef32a"},
+    {file = "cosl-0.0.54-py3-none-any.whl", hash = "sha256:b16520d73c72ac83cb42f0abe997d36510732d4f8499f70e9068cfa05f0d02fa"},
+    {file = "cosl-0.0.54.tar.gz", hash = "sha256:6baa889cc4468b0c0f746cc6319892a30ea8fbe38cbf5c49c6885f6fdf89d6a9"},
 ]
 
 [package.dependencies]
+lightkube = ">=v0.15.4"
 ops = "*"
+pydantic = "*"
 pyyaml = "*"
+tenacity = "*"
 typing-extensions = "*"
 
 [[package]]
@@ -433,6 +463,7 @@ version = "7.5.3"
 description = "Code coverage measurement for Python"
 optional = false
 python-versions = ">=3.8"
+groups = ["unit"]
 files = [
     {file = "coverage-7.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a6519d917abb15e12380406d721e37613e2a67d166f9fb7e5a8ce0375744cd45"},
     {file = "coverage-7.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aea7da970f1feccf48be7335f8b2ca64baf9b589d79e05b9397a06696ce1a1ec"},
@@ -500,6 +531,7 @@ version = "43.0.1"
 description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
 optional = false
 python-versions = ">=3.7"
+groups = ["charm-libs", "integration"]
 files = [
     {file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"},
     {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"},
@@ -549,6 +581,7 @@ version = "5.1.1"
 description = "Decorators for Humans"
 optional = false
 python-versions = ">=3.5"
+groups = ["integration"]
 files = [
     {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"},
     {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"},
@@ -560,6 +593,7 @@ version = "1.2.14"
 description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+groups = ["charm-libs"]
 files = [
     {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"},
     {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"},
@@ -577,6 +611,8 @@ version = "1.2.1"
 description = "Backport of PEP 654 (exception groups)"
 optional = false
 python-versions = ">=3.7"
+groups = ["main", "charm-libs", "integration", "unit"]
+markers = "python_version < \"3.11\""
 files = [
     {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"},
     {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"},
@@ -591,6 +627,7 @@ version = "2.0.1"
 description = "Get the currently executing AST node of a frame, and other information"
 optional = false
 python-versions = ">=3.5"
+groups = ["integration"]
 files = [
     {file = "executing-2.0.1-py2.py3-none-any.whl", hash = "sha256:eac49ca94516ccc753f9fb5ce82603156e590b27525a8bc32cce8ae302eb61bc"},
     {file = "executing-2.0.1.tar.gz", hash = "sha256:35afe2ce3affba8ee97f2d69927fa823b08b472b7b994e36a52a964b93d16147"},
@@ -605,6 +642,7 @@ version = "2.29.0"
 description = "Google Authentication Library"
 optional = false
 python-versions = ">=3.7"
+groups = ["integration"]
 files = [
     {file = "google-auth-2.29.0.tar.gz", hash = "sha256:672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360"},
     {file = "google_auth-2.29.0-py2.py3-none-any.whl", hash = "sha256:d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415"},
@@ -628,6 +666,7 @@ version = "1.63.1"
 description = "Common protobufs used in Google APIs"
 optional = false
 python-versions = ">=3.7"
+groups = ["charm-libs"]
 files = [
     {file = "googleapis-common-protos-1.63.1.tar.gz", hash = "sha256:c6442f7a0a6b2a80369457d79e6672bb7dcbaab88e0848302497e3ec80780a6a"},
     {file = "googleapis_common_protos-1.63.1-py2.py3-none-any.whl", hash = "sha256:0e1c2cdfcbc354b76e4a211a35ea35d6926a835cba1377073c4861db904a1877"},
@@ -645,6 +684,7 @@ version = "0.14.0"
 description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
 optional = false
 python-versions = ">=3.7"
+groups = ["main", "charm-libs", "integration"]
 files = [
     {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
     {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
@@ -656,6 +696,7 @@ version = "1.0.5"
 description = "A minimal low-level HTTP client."
 optional = false
 python-versions = ">=3.8"
+groups = ["main", "charm-libs", "integration"]
 files = [
     {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"},
     {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"},
@@ -677,6 +718,7 @@ version = "0.27.0"
 description = "The next generation HTTP client."
 optional = false
 python-versions = ">=3.8"
+groups = ["main", "charm-libs", "integration"]
 files = [
     {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"},
     {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"},
@@ -701,6 +743,7 @@ version = "2.2.0"
 description = "HashiCorp Vault API client"
 optional = false
 python-versions = "<4.0,>=3.8"
+groups = ["integration"]
 files = [
     {file = "hvac-2.2.0-py3-none-any.whl", hash = "sha256:f287a19940c6fc518c723f8276cc9927f7400734303ee5872ac2e84539466d8d"},
     {file = "hvac-2.2.0.tar.gz", hash = "sha256:e4b0248c5672cb9a6f5974e7c8f5271a09c6c663cbf8ab11733a227f3d2db2c2"},
@@ -718,6 +761,7 @@ version = "3.7"
 description = "Internationalized Domain Names in Applications (IDNA)"
 optional = false
 python-versions = ">=3.5"
+groups = ["main", "charm-libs", "integration"]
 files = [
     {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"},
     {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"},
@@ -729,6 +773,7 @@ version = "6.11.0"
 description = "Read metadata from Python packages"
 optional = false
 python-versions = ">=3.8"
+groups = ["charm-libs"]
 files = [
     {file = "importlib_metadata-6.11.0-py3-none-any.whl", hash = "sha256:f0afba6205ad8f8947c7d338b5342d5db2afbfd82f9cbef7879a9539cc12eb9b"},
     {file = "importlib_metadata-6.11.0.tar.gz", hash = "sha256:1231cf92d825c9e03cfc4da076a16de6422c863558229ea0b22b675657463443"},
@@ -748,6 +793,7 @@ version = "2.0.0"
 description = "brain-dead simple config-ini parsing"
 optional = false
 python-versions = ">=3.7"
+groups = ["integration", "unit"]
 files = [
     {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
     {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
@@ -759,6 +805,7 @@ version = "0.13.13"
 description = "IPython-enabled pdb"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+groups = ["integration"]
 files = [
     {file = "ipdb-0.13.13-py3-none-any.whl", hash = "sha256:45529994741c4ab6d2388bfa5d7b725c2cf7fe9deffabdb8a6113aa5ed449ed4"},
     {file = "ipdb-0.13.13.tar.gz", hash = "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726"},
@@ -775,6 +822,7 @@ version = "8.25.0"
 description = "IPython: Productive Interactive Computing"
 optional = false
 python-versions = ">=3.10"
+groups = ["integration"]
 files = [
     {file = "ipython-8.25.0-py3-none-any.whl", hash = "sha256:53eee7ad44df903a06655871cbab66d156a051fd86f3ec6750470ac9604ac1ab"},
     {file = "ipython-8.25.0.tar.gz", hash = "sha256:c6ed726a140b6e725b911528f80439c534fac915246af3efc39440a6b0f9d716"},
@@ -813,6 +861,7 @@ version = "0.19.1"
 description = "An autocompletion tool for Python that can be used for text editors."
 optional = false
 python-versions = ">=3.6"
+groups = ["integration"]
 files = [
     {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"},
     {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"},
@@ -832,6 +881,7 @@ version = "3.1.4"
 description = "A very fast and expressive template engine."
 optional = false
 python-versions = ">=3.7"
+groups = ["main", "integration"]
 files = [
     {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"},
     {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"},
@@ -849,6 +899,7 @@ version = "1.0.1"
 description = "JSON Matching Expressions"
 optional = false
 python-versions = ">=3.7"
+groups = ["main", "integration"]
 files = [
     {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"},
     {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"},
@@ -860,6 +911,7 @@ version = "4.22.0"
 description = "An implementation of JSON Schema validation for Python"
 optional = false
 python-versions = ">=3.8"
+groups = ["charm-libs"]
 files = [
     {file = "jsonschema-4.22.0-py3-none-any.whl", hash = "sha256:ff4cfd6b1367a40e7bc6411caec72effadd3db0bbe5017de188f2d6108335802"},
     {file = "jsonschema-4.22.0.tar.gz", hash = "sha256:5b22d434a45935119af990552c862e5d6d564e8f6601206b305a61fdf661a2b7"},
@@ -881,6 +933,7 @@ version = "2023.12.1"
 description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
 optional = false
 python-versions = ">=3.8"
+groups = ["charm-libs"]
 files = [
     {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"},
     {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"},
@@ -891,17 +944,20 @@ referencing = ">=0.31.0"
 
 [[package]]
 name = "juju"
-version = "3.5.2.0"
+version = "3.6.0.0"
 description = "Python library for Juju"
 optional = false
-python-versions = "*"
+python-versions = ">=3.8.6"
+groups = ["integration"]
 files = [
-    {file = "juju-3.5.2.0.tar.gz", hash = "sha256:dd9a36330e63acd8f62bf478fd7e385e51f44dc3918e7a67d0593fd054e1e80a"},
+    {file = "juju-3.6.0.0-py3-none-any.whl", hash = "sha256:0e925183c62de6ad46316c0ff1814d67ff4bd7374e3dde297f457d443e8a9651"},
+    {file = "juju-3.6.0.0.tar.gz", hash = "sha256:72d76fe69fce5ce59b2ad0461911c42f4f41760893cf241709f4f118fa33751d"},
 ]
 
 [package.dependencies]
+"backports.strenum" = {version = ">=1.3.1", markers = "python_version < \"3.11\""}
 hvac = "*"
-kubernetes = ">=12.0.1"
+kubernetes = ">=12.0.1,<31.0.0"
 macaroonbakery = ">=1.1,<2.0"
 packaging = "*"
 paramiko = ">=2.4.0"
@@ -909,8 +965,13 @@ pyasn1 = ">=0.4.4"
 pyRFC3339 = ">=1.0,<2.0"
 pyyaml = ">=5.1.2"
 toposort = ">=1.5,<2"
+typing-extensions = ">=4.5.0"
 typing_inspect = ">=0.6.0"
-websockets = ">=8.1"
+websockets = ">=13.0.1"
+
+[package.extras]
+dev = ["Twine", "pytest", "pytest-asyncio", "typing-inspect"]
+docs = ["sphinx (==5.3.0)", "sphinx_rtd_theme", "sphinxcontrib-asyncio"]
 
 [[package]]
 name = "kubernetes"
@@ -918,6 +979,7 @@ version = "27.2.0"
 description = "Kubernetes python client"
 optional = false
 python-versions = ">=3.6"
+groups = ["integration"]
 files = [
     {file = "kubernetes-27.2.0-py2.py3-none-any.whl", hash = "sha256:0f9376329c85cf07615ed6886bf9bf21eb1cbfc05e14ec7b0f74ed8153cd2815"},
     {file = "kubernetes-27.2.0.tar.gz", hash = "sha256:d479931c6f37561dbfdf28fc5f46384b1cb8b28f9db344ed4a232ce91990825a"},
@@ -940,26 +1002,31 @@ adal = ["adal (>=1.0.2)"]
 
 [[package]]
 name = "lightkube"
-version = "0.14.0"
+version = "0.15.8"
 description = "Lightweight kubernetes client library"
 optional = false
 python-versions = "*"
+groups = ["main", "charm-libs", "integration"]
 files = [
-    {file = "lightkube-0.14.0-py3-none-any.whl", hash = "sha256:448926ce667d18f9e07e9a600c7db0d0424447883f1873be31e60d9e86176d71"},
-    {file = "lightkube-0.14.0.tar.gz", hash = "sha256:9c0a8e7543debded4494ae42606d73b775561eb0950504bd735f565b25ef8dc2"},
+    {file = "lightkube-0.15.8-py3-none-any.whl", hash = "sha256:236f6d11e9281764a8ae896ab2c28a4bc943dc0576822445064577eaa90677ba"},
+    {file = "lightkube-0.15.8.tar.gz", hash = "sha256:ac950d24ddbb59904708730f13ce254b05b6255a471dfab027cbe44c4123bfc6"},
 ]
 
 [package.dependencies]
-httpx = ">=0.24.0"
+httpx = ">=0.24.0,<0.28.0"
 lightkube-models = ">=1.15.12.0"
 PyYAML = "*"
 
+[package.extras]
+dev = ["pytest", "pytest-asyncio (<0.17.0)", "respx"]
+
 [[package]]
 name = "lightkube-models"
 version = "1.30.0.8"
 description = "Models and Resources for lightkube module"
 optional = false
 python-versions = "*"
+groups = ["main", "charm-libs", "integration"]
 files = [
     {file = "lightkube-models-1.30.0.8.tar.gz", hash = "sha256:d1fe87b6680a04d27440bb746f2bf2fb665e1515bab12efc3ace65118ecb7eac"},
     {file = "lightkube_models-1.30.0.8-py3-none-any.whl", hash = "sha256:34c43ae4824214eb8bc5e219da57c319ba411a51f503f79510d8c1ae6966ecb5"},
@@ -971,6 +1038,7 @@ version = "1.3.4"
 description = "A Python library port for bakery, higher level operation to work with macaroons"
 optional = false
 python-versions = "*"
+groups = ["integration"]
 files = [
     {file = "macaroonbakery-1.3.4-py2.py3-none-any.whl", hash = "sha256:1e952a189f5c1e96ef82b081b2852c770d7daa20987e2088e762dd5689fb253b"},
     {file = "macaroonbakery-1.3.4.tar.gz", hash = "sha256:41ca993a23e4f8ef2fe7723b5cd4a30c759735f1d5021e990770c8a0e0f33970"},
@@ -990,6 +1058,7 @@ version = "2.1.5"
 description = "Safely add untrusted strings to HTML/XML markup."
 optional = false
 python-versions = ">=3.7"
+groups = ["main", "integration"]
 files = [
     {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"},
     {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"},
@@ -1059,6 +1128,7 @@ version = "0.1.7"
 description = "Inline Matplotlib backend for Jupyter"
 optional = false
 python-versions = ">=3.8"
+groups = ["integration"]
 files = [
     {file = "matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"},
     {file = "matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90"},
@@ -1073,6 +1143,7 @@ version = "1.0.0"
 description = "Type system extensions for programs checked with the mypy type checker."
 optional = false
 python-versions = ">=3.5"
+groups = ["integration"]
 files = [
     {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
     {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
@@ -1084,6 +1155,7 @@ version = "8.0.33"
 description = "MySQL driver written in Python"
 optional = false
 python-versions = "*"
+groups = ["integration"]
 files = [
     {file = "mysql-connector-python-8.0.33.tar.gz", hash = "sha256:9775331fa60b5d5a6925781d77eee4384e2b54a12dea694ffdefd1cf1a9c0fdb"},
     {file = "mysql_connector_python-8.0.33-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:241483065ad062256985e082e3cbb3e7d1d6d2275cee17c66d22525b09096201"},
@@ -1126,6 +1198,7 @@ version = "3.2.2"
 description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic"
 optional = false
 python-versions = ">=3.6"
+groups = ["integration"]
 files = [
     {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"},
     {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"},
@@ -1142,6 +1215,7 @@ version = "1.21.0"
 description = "OpenTelemetry Python API"
 optional = false
 python-versions = ">=3.7"
+groups = ["charm-libs"]
 files = [
     {file = "opentelemetry_api-1.21.0-py3-none-any.whl", hash = "sha256:4bb86b28627b7e41098f0e93280fe4892a1abed1b79a19aec6f928f39b17dffb"},
     {file = "opentelemetry_api-1.21.0.tar.gz", hash = "sha256:d6185fd5043e000075d921822fd2d26b953eba8ca21b1e2fa360dd46a7686316"},
@@ -1157,6 +1231,7 @@ version = "1.21.0"
 description = "OpenTelemetry Protobuf encoding"
 optional = false
 python-versions = ">=3.7"
+groups = ["charm-libs"]
 files = [
     {file = "opentelemetry_exporter_otlp_proto_common-1.21.0-py3-none-any.whl", hash = "sha256:97b1022b38270ec65d11fbfa348e0cd49d12006485c2321ea3b1b7037d42b6ec"},
     {file = "opentelemetry_exporter_otlp_proto_common-1.21.0.tar.gz", hash = "sha256:61db274d8a68d636fb2ec2a0f281922949361cdd8236e25ff5539edf942b3226"},
@@ -1172,6 +1247,7 @@ version = "1.21.0"
 description = "OpenTelemetry Collector Protobuf over HTTP Exporter"
 optional = false
 python-versions = ">=3.7"
+groups = ["charm-libs"]
 files = [
     {file = "opentelemetry_exporter_otlp_proto_http-1.21.0-py3-none-any.whl", hash = "sha256:56837773de6fb2714c01fc4895caebe876f6397bbc4d16afddf89e1299a55ee2"},
     {file = "opentelemetry_exporter_otlp_proto_http-1.21.0.tar.gz", hash = "sha256:19d60afa4ae8597f7ef61ad75c8b6c6b7ef8cb73a33fb4aed4dbc86d5c8d3301"},
@@ -1196,6 +1272,7 @@ version = "1.21.0"
 description = "OpenTelemetry Python Proto"
 optional = false
 python-versions = ">=3.7"
+groups = ["charm-libs"]
 files = [
     {file = "opentelemetry_proto-1.21.0-py3-none-any.whl", hash = "sha256:32fc4248e83eebd80994e13963e683f25f3b443226336bb12b5b6d53638f50ba"},
     {file = "opentelemetry_proto-1.21.0.tar.gz", hash = "sha256:7d5172c29ed1b525b5ecf4ebe758c7138a9224441b3cfe683d0a237c33b1941f"},
@@ -1210,6 +1287,7 @@ version = "1.21.0"
 description = "OpenTelemetry Python SDK"
 optional = false
 python-versions = ">=3.7"
+groups = ["charm-libs"]
 files = [
     {file = "opentelemetry_sdk-1.21.0-py3-none-any.whl", hash = "sha256:9fe633243a8c655fedace3a0b89ccdfc654c0290ea2d8e839bd5db3131186f73"},
     {file = "opentelemetry_sdk-1.21.0.tar.gz", hash = "sha256:3ec8cd3020328d6bc5c9991ccaf9ae820ccb6395a5648d9a95d3ec88275b8879"},
@@ -1226,6 +1304,7 @@ version = "0.42b0"
 description = "OpenTelemetry Semantic Conventions"
 optional = false
 python-versions = ">=3.7"
+groups = ["charm-libs"]
 files = [
     {file = "opentelemetry_semantic_conventions-0.42b0-py3-none-any.whl", hash = "sha256:5cd719cbfec448af658860796c5d0fcea2fdf0945a2bed2363f42cb1ee39f526"},
     {file = "opentelemetry_semantic_conventions-0.42b0.tar.gz", hash = "sha256:44ae67a0a3252a05072877857e5cc1242c98d4cf12870159f1a94bec800d38ec"},
@@ -1237,6 +1316,7 @@ version = "2.15.0"
 description = "The Python library behind great charms"
 optional = false
 python-versions = ">=3.8"
+groups = ["main", "charm-libs", "integration"]
 files = [
     {file = "ops-2.15.0-py3-none-any.whl", hash = "sha256:8e47ab8a814301776b0ff42b32544ebdece7f1639168d2c86dc7a25930d2e493"},
     {file = "ops-2.15.0.tar.gz", hash = "sha256:f3bad7417e98e8f390523fad097702eed16e99b38a25e9fe856aad226474b057"},
@@ -1255,6 +1335,7 @@ version = "24.0"
 description = "Core utilities for Python packages"
 optional = false
 python-versions = ">=3.7"
+groups = ["integration", "unit"]
 files = [
     {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"},
     {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"},
@@ -1266,6 +1347,7 @@ version = "0.9.0"
 description = "Parameterized testing with any Python test framework"
 optional = false
 python-versions = ">=3.7"
+groups = ["unit"]
 files = [
     {file = "parameterized-0.9.0-py2.py3-none-any.whl", hash = "sha256:4e0758e3d41bea3bbd05ec14fc2c24736723f243b28d702081aef438c9372b1b"},
     {file = "parameterized-0.9.0.tar.gz", hash = "sha256:7fc905272cefa4f364c1a3429cbbe9c0f98b793988efb5bf90aac80f08db09b1"},
@@ -1280,6 +1362,7 @@ version = "3.4.0"
 description = "SSH2 protocol library"
 optional = false
 python-versions = ">=3.6"
+groups = ["integration"]
 files = [
     {file = "paramiko-3.4.0-py3-none-any.whl", hash = "sha256:43f0b51115a896f9c00f59618023484cb3a14b98bbceab43394a39c6739b7ee7"},
     {file = "paramiko-3.4.0.tar.gz", hash = "sha256:aac08f26a31dc4dffd92821527d1682d99d52f9ef6851968114a8728f3c274d3"},
@@ -1301,6 +1384,7 @@ version = "0.8.4"
 description = "A Python Parser"
 optional = false
 python-versions = ">=3.6"
+groups = ["integration"]
 files = [
     {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"},
     {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"},
@@ -1316,6 +1400,8 @@ version = "4.9.0"
 description = "Pexpect allows easy control of interactive console applications."
 optional = false
 python-versions = "*"
+groups = ["integration"]
+markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\""
 files = [
     {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"},
     {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"},
@@ -1330,6 +1416,7 @@ version = "1.5.0"
 description = "plugin and hook calling mechanisms for python"
 optional = false
 python-versions = ">=3.8"
+groups = ["integration", "unit"]
 files = [
     {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
     {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
@@ -1345,6 +1432,7 @@ version = "1.9.0"
 description = "Poetry PEP 517 Build Backend"
 optional = false
 python-versions = ">=3.8,<4.0"
+groups = ["charm-libs"]
 files = [
     {file = "poetry_core-1.9.0-py3-none-any.whl", hash = "sha256:4e0c9c6ad8cf89956f03b308736d84ea6ddb44089d16f2adc94050108ec1f5a1"},
     {file = "poetry_core-1.9.0.tar.gz", hash = "sha256:fa7a4001eae8aa572ee84f35feb510b321bd652e5cf9293249d62853e1f935a2"},
@@ -1356,6 +1444,7 @@ version = "3.0.46"
 description = "Library for building powerful interactive command lines in Python"
 optional = false
 python-versions = ">=3.7.0"
+groups = ["integration"]
 files = [
     {file = "prompt_toolkit-3.0.46-py3-none-any.whl", hash = "sha256:45abe60a8300f3c618b23c16c4bb98c6fc80af8ce8b17c7ae92db48db3ee63c1"},
     {file = "prompt_toolkit-3.0.46.tar.gz", hash = "sha256:869c50d682152336e23c4db7f74667639b5047494202ffe7670817053fd57795"},
@@ -1370,6 +1459,7 @@ version = "3.20.3"
 description = "Protocol Buffers"
 optional = false
 python-versions = ">=3.7"
+groups = ["charm-libs", "integration"]
 files = [
     {file = "protobuf-3.20.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99"},
     {file = "protobuf-3.20.3-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e"},
@@ -1401,6 +1491,8 @@ version = "0.7.0"
 description = "Run a subprocess in a pseudo terminal"
 optional = false
 python-versions = "*"
+groups = ["integration"]
+markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\""
 files = [
     {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"},
     {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"},
@@ -1412,6 +1504,7 @@ version = "0.2.2"
 description = "Safely evaluate AST nodes without side effects"
 optional = false
 python-versions = "*"
+groups = ["integration"]
 files = [
     {file = "pure_eval-0.2.2-py3-none-any.whl", hash = "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"},
     {file = "pure_eval-0.2.2.tar.gz", hash = "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3"},
@@ -1426,6 +1519,7 @@ version = "0.6.0"
 description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)"
 optional = false
 python-versions = ">=3.8"
+groups = ["integration"]
 files = [
     {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"},
     {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"},
@@ -1437,6 +1531,7 @@ version = "0.4.0"
 description = "A collection of ASN.1-based protocols modules"
 optional = false
 python-versions = ">=3.8"
+groups = ["integration"]
 files = [
     {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"},
     {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"},
@@ -1451,10 +1546,12 @@ version = "2.22"
 description = "C parser in Python"
 optional = false
 python-versions = ">=3.8"
+groups = ["charm-libs", "integration"]
 files = [
     {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"},
     {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"},
 ]
+markers = {charm-libs = "platform_python_implementation != \"PyPy\""}
 
 [[package]]
 name = "pydantic"
@@ -1462,6 +1559,7 @@ version = "1.10.15"
 description = "Data validation and settings management using python type hints"
 optional = false
 python-versions = ">=3.7"
+groups = ["charm-libs"]
 files = [
     {file = "pydantic-1.10.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:22ed12ee588b1df028a2aa5d66f07bf8f8b4c8579c2e96d5a9c1f96b77f3bb55"},
     {file = "pydantic-1.10.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:75279d3cac98186b6ebc2597b06bcbc7244744f6b0b44a23e4ef01e5683cc0d2"},
@@ -1514,6 +1612,7 @@ version = "2.18.0"
 description = "Pygments is a syntax highlighting package written in Python."
 optional = false
 python-versions = ">=3.8"
+groups = ["integration"]
 files = [
     {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"},
     {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"},
@@ -1528,6 +1627,7 @@ version = "0.13.0"
 description = "Macaroon library for Python"
 optional = false
 python-versions = "*"
+groups = ["integration"]
 files = [
     {file = "pymacaroons-0.13.0-py2.py3-none-any.whl", hash = "sha256:3e14dff6a262fdbf1a15e769ce635a8aea72e6f8f91e408f9a97166c53b91907"},
     {file = "pymacaroons-0.13.0.tar.gz", hash = "sha256:1e6bba42a5f66c245adf38a5a4006a99dcc06a0703786ea636098667d42903b8"},
@@ -1543,6 +1643,7 @@ version = "1.5.0"
 description = "Python binding to the Networking and Cryptography (NaCl) library"
 optional = false
 python-versions = ">=3.6"
+groups = ["integration"]
 files = [
     {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"},
     {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"},
@@ -1569,6 +1670,7 @@ version = "1.1"
 description = "Generate and parse RFC 3339 timestamps"
 optional = false
 python-versions = "*"
+groups = ["integration"]
 files = [
     {file = "pyRFC3339-1.1-py2.py3-none-any.whl", hash = "sha256:67196cb83b470709c580bb4738b83165e67c6cc60e1f2e4f286cfcb402a926f4"},
     {file = "pyRFC3339-1.1.tar.gz", hash = "sha256:81b8cbe1519cdb79bed04910dd6fa4e181faf8c88dff1e1b987b5f7ab23a5b1a"},
@@ -1583,6 +1685,7 @@ version = "7.4.4"
 description = "pytest: simple powerful testing with Python"
 optional = false
 python-versions = ">=3.7"
+groups = ["integration", "unit"]
 files = [
     {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
     {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
@@ -1605,6 +1708,7 @@ version = "0.21.2"
 description = "Pytest support for asyncio"
 optional = false
 python-versions = ">=3.7"
+groups = ["integration"]
 files = [
     {file = "pytest_asyncio-0.21.2-py3-none-any.whl", hash = "sha256:ab664c88bb7998f711d8039cacd4884da6430886ae8bbd4eded552ed2004f16b"},
     {file = "pytest_asyncio-0.21.2.tar.gz", hash = "sha256:d67738fc232b94b326b9d060750beb16e0074210b98dd8b58a5239fa2a154f45"},
@@ -1617,48 +1721,13 @@ pytest = ">=7.0.0"
 docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
 testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"]
 
-[[package]]
-name = "pytest-github-secrets"
-version = "0.1.0"
-description = ""
-optional = false
-python-versions = "^3.8"
-files = []
-develop = false
-
-[package.source]
-type = "git"
-url = "https://github.com/canonical/data-platform-workflows"
-reference = "v21.0.1"
-resolved_reference = "06f252ea079edfd055cee236ede28c237467f9b0"
-subdirectory = "python/pytest_plugins/github_secrets"
-
-[[package]]
-name = "pytest-microceph"
-version = "0.1.0"
-description = ""
-optional = false
-python-versions = "^3.8"
-files = []
-develop = false
-
-[package.dependencies]
-boto3 = "*"
-pytest = "*"
-
-[package.source]
-type = "git"
-url = "https://github.com/canonical/data-platform-workflows"
-reference = "v21.0.1"
-resolved_reference = "06f252ea079edfd055cee236ede28c237467f9b0"
-subdirectory = "python/pytest_plugins/microceph"
-
 [[package]]
 name = "pytest-mock"
 version = "3.14.0"
 description = "Thin-wrapper around the mock package for easier use with pytest"
 optional = false
 python-versions = ">=3.8"
+groups = ["unit"]
 files = [
     {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"},
     {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"},
@@ -1676,6 +1745,7 @@ version = "0.28.0"
 description = "Fixtures for Operators"
 optional = false
 python-versions = "*"
+groups = ["integration"]
 files = [
     {file = "pytest-operator-0.28.0.tar.gz", hash = "sha256:efac98697da71558790eb5d4c9d42f11f3d5fb43dff22a802aee69e1801edce8"},
     {file = "pytest_operator-0.28.0-py3-none-any.whl", hash = "sha256:b3cb5a8ebf838f890133a25ee520c25c8be259b54341e42e39f64a6d97735d9f"},
@@ -1689,50 +1759,13 @@ pytest = "*"
 pytest-asyncio = "*"
 pyyaml = "*"
 
-[[package]]
-name = "pytest-operator-cache"
-version = "0.1.0"
-description = ""
-optional = false
-python-versions = "^3.8"
-files = []
-develop = false
-
-[package.dependencies]
-pyyaml = "*"
-
-[package.source]
-type = "git"
-url = "https://github.com/canonical/data-platform-workflows"
-reference = "v21.0.1"
-resolved_reference = "06f252ea079edfd055cee236ede28c237467f9b0"
-subdirectory = "python/pytest_plugins/pytest_operator_cache"
-
-[[package]]
-name = "pytest-operator-groups"
-version = "0.1.0"
-description = ""
-optional = false
-python-versions = "^3.8"
-files = []
-develop = false
-
-[package.dependencies]
-pytest = "*"
-
-[package.source]
-type = "git"
-url = "https://github.com/canonical/data-platform-workflows"
-reference = "v21.0.1"
-resolved_reference = "06f252ea079edfd055cee236ede28c237467f9b0"
-subdirectory = "python/pytest_plugins/pytest_operator_groups"
-
 [[package]]
 name = "python-dateutil"
 version = "2.9.0.post0"
 description = "Extensions to the standard Python datetime module"
 optional = false
 python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["main", "integration"]
 files = [
     {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
     {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
@@ -1747,6 +1780,7 @@ version = "2024.1"
 description = "World timezone definitions, modern and historical"
 optional = false
 python-versions = "*"
+groups = ["integration"]
 files = [
     {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"},
     {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"},
@@ -1754,62 +1788,65 @@ files = [
 
 [[package]]
 name = "pyyaml"
-version = "6.0.1"
+version = "6.0.2"
 description = "YAML parser and emitter for Python"
 optional = false
-python-versions = ">=3.6"
-files = [
-    {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
-    {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
-    {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
-    {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
-    {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
-    {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
-    {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
-    {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
-    {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
-    {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"},
-    {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
-    {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
-    {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
-    {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
-    {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
-    {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
-    {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
-    {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
-    {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
-    {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
-    {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
-    {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
-    {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
-    {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
-    {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
-    {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
-    {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"},
-    {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"},
-    {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"},
-    {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"},
-    {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"},
-    {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"},
-    {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"},
-    {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"},
-    {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"},
-    {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"},
-    {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
-    {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
-    {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
-    {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
-    {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
-    {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
-    {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
-    {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"},
-    {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
-    {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
-    {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
-    {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
-    {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
-    {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
-    {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
+python-versions = ">=3.8"
+groups = ["main", "charm-libs", "integration"]
+files = [
+    {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
+    {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
+    {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"},
+    {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"},
+    {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"},
+    {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"},
+    {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"},
+    {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"},
+    {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"},
+    {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
+    {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
+    {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
+    {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
+    {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
+    {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
+    {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
+    {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
+    {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
+    {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"},
+    {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"},
+    {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"},
+    {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"},
+    {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"},
+    {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"},
+    {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"},
+    {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"},
+    {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"},
+    {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"},
+    {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"},
+    {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"},
+    {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"},
+    {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"},
+    {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"},
+    {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"},
+    {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"},
+    {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"},
+    {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"},
+    {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"},
+    {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"},
+    {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"},
+    {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"},
+    {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"},
+    {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"},
+    {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"},
+    {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"},
+    {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"},
+    {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"},
+    {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"},
+    {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"},
+    {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"},
+    {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"},
+    {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"},
+    {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
 ]
 
 [[package]]
@@ -1818,6 +1855,7 @@ version = "0.35.1"
 description = "JSON Referencing + Python"
 optional = false
 python-versions = ">=3.8"
+groups = ["charm-libs"]
 files = [
     {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"},
     {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"},
@@ -1833,6 +1871,7 @@ version = "2.32.3"
 description = "Python HTTP for Humans."
 optional = false
 python-versions = ">=3.8"
+groups = ["charm-libs", "integration"]
 files = [
     {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
     {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
@@ -1854,6 +1893,7 @@ version = "2.0.0"
 description = "OAuthlib authentication support for Requests."
 optional = false
 python-versions = ">=3.4"
+groups = ["integration"]
 files = [
     {file = "requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9"},
     {file = "requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36"},
@@ -1872,6 +1912,7 @@ version = "0.18.1"
 description = "Python bindings to Rust's persistent data structures (rpds)"
 optional = false
 python-versions = ">=3.8"
+groups = ["charm-libs"]
 files = [
     {file = "rpds_py-0.18.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d31dea506d718693b6b2cffc0648a8929bdc51c70a311b2770f09611caa10d53"},
     {file = "rpds_py-0.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:732672fbc449bab754e0b15356c077cc31566df874964d4801ab14f71951ea80"},
@@ -1980,6 +2021,7 @@ version = "4.9"
 description = "Pure-Python RSA implementation"
 optional = false
 python-versions = ">=3.6,<4"
+groups = ["integration"]
 files = [
     {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"},
     {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"},
@@ -1994,6 +2036,7 @@ version = "0.4.8"
 description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
+groups = ["format", "lint"]
 files = [
     {file = "ruff-0.4.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:7663a6d78f6adb0eab270fa9cf1ff2d28618ca3a652b60f2a234d92b9ec89066"},
     {file = "ruff-0.4.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eeceb78da8afb6de0ddada93112869852d04f1cd0f6b80fe464fd4e35c330913"},
@@ -2020,6 +2063,7 @@ version = "0.10.1"
 description = "An Amazon S3 Transfer Manager"
 optional = false
 python-versions = ">= 3.8"
+groups = ["main", "integration"]
 files = [
     {file = "s3transfer-0.10.1-py3-none-any.whl", hash = "sha256:ceb252b11bcf87080fb7850a224fb6e05c8a776bab8f2b64b7f25b969464839d"},
     {file = "s3transfer-0.10.1.tar.gz", hash = "sha256:5683916b4c724f799e600f41dd9e10a9ff19871bf87623cc8f491cb4f5fa0a19"},
@@ -2037,6 +2081,7 @@ version = "0.9.0.6"
 description = "Python wrapper around invoking shellcheck (https://www.shellcheck.net/)"
 optional = false
 python-versions = ">=3.8"
+groups = ["lint"]
 files = [
     {file = "shellcheck_py-0.9.0.6-py2.py3-none-macosx_11_0_x86_64.whl", hash = "sha256:38d48a4e2279f5deac374574e7625cd53b7f615301f36b1b1fffd22105dc066d"},
     {file = "shellcheck_py-0.9.0.6-py2.py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:730235c4f92657884f8b343d5426e4dc28e9a6ba9ad54d469cd038e340ea5be0"},
@@ -2050,6 +2095,7 @@ version = "1.16.0"
 description = "Python 2 and 3 compatibility utilities"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+groups = ["main", "integration"]
 files = [
     {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
     {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
@@ -2061,6 +2107,7 @@ version = "1.3.1"
 description = "Sniff out which async library your code is running under"
 optional = false
 python-versions = ">=3.7"
+groups = ["main", "charm-libs", "integration"]
 files = [
     {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
     {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
@@ -2072,6 +2119,7 @@ version = "0.6.3"
 description = "Extract data from python stack frames and tracebacks for informative displays"
 optional = false
 python-versions = "*"
+groups = ["integration"]
 files = [
     {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"},
     {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"},
@@ -2091,6 +2139,7 @@ version = "8.3.0"
 description = "Retry code until it succeeds"
 optional = false
 python-versions = ">=3.8"
+groups = ["main", "charm-libs", "integration"]
 files = [
     {file = "tenacity-8.3.0-py3-none-any.whl", hash = "sha256:3649f6443dbc0d9b01b9d8020a9c4ec7a1ff5f6f3c6c8a036ef371f573fe9185"},
     {file = "tenacity-8.3.0.tar.gz", hash = "sha256:953d4e6ad24357bceffbc9707bc74349aca9d245f68eb65419cf0c249a1949a2"},
@@ -2106,10 +2155,12 @@ version = "2.0.1"
 description = "A lil' TOML parser"
 optional = false
 python-versions = ">=3.7"
+groups = ["integration", "unit"]
 files = [
     {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
     {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
 ]
+markers = {integration = "python_version < \"3.11\"", unit = "python_full_version <= \"3.11.0a6\""}
 
 [[package]]
 name = "toposort"
@@ -2117,6 +2168,7 @@ version = "1.10"
 description = "Implements a topological sort algorithm."
 optional = false
 python-versions = "*"
+groups = ["integration"]
 files = [
     {file = "toposort-1.10-py3-none-any.whl", hash = "sha256:cbdbc0d0bee4d2695ab2ceec97fe0679e9c10eab4b2a87a9372b929e70563a87"},
     {file = "toposort-1.10.tar.gz", hash = "sha256:bfbb479c53d0a696ea7402601f4e693c97b0367837c8898bc6471adfca37a6bd"},
@@ -2128,6 +2180,7 @@ version = "5.14.3"
 description = "Traitlets Python configuration system"
 optional = false
 python-versions = ">=3.8"
+groups = ["integration"]
 files = [
     {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"},
     {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"},
@@ -2143,10 +2196,12 @@ version = "4.12.1"
 description = "Backported and Experimental Type Hints for Python 3.8+"
 optional = false
 python-versions = ">=3.8"
+groups = ["main", "charm-libs", "integration"]
 files = [
     {file = "typing_extensions-4.12.1-py3-none-any.whl", hash = "sha256:6024b58b69089e5a89c347397254e35f1bf02a907728ec7fee9bf0fe837d203a"},
     {file = "typing_extensions-4.12.1.tar.gz", hash = "sha256:915f5e35ff76f56588223f15fdd5938f9a1cf9195c0de25130c627e4d597f6d1"},
 ]
+markers = {main = "python_version < \"3.11\""}
 
 [[package]]
 name = "typing-inspect"
@@ -2154,6 +2209,7 @@ version = "0.9.0"
 description = "Runtime inspection utilities for typing module."
 optional = false
 python-versions = "*"
+groups = ["integration"]
 files = [
     {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"},
     {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"},
@@ -2169,6 +2225,7 @@ version = "1.26.19"
 description = "HTTP library with thread-safe connection pooling, file post, and more."
 optional = false
 python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
+groups = ["main", "charm-libs", "integration"]
 files = [
     {file = "urllib3-1.26.19-py2.py3-none-any.whl", hash = "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3"},
     {file = "urllib3-1.26.19.tar.gz", hash = "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429"},
@@ -2185,6 +2242,7 @@ version = "0.2.13"
 description = "Measures the displayed width of unicode strings in a terminal"
 optional = false
 python-versions = "*"
+groups = ["integration"]
 files = [
     {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"},
     {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"},
@@ -2196,6 +2254,7 @@ version = "1.8.0"
 description = "WebSocket client for Python with low level API options"
 optional = false
 python-versions = ">=3.8"
+groups = ["main", "charm-libs", "integration"]
 files = [
     {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"},
     {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"},
@@ -2208,83 +2267,81 @@ test = ["websockets"]
 
 [[package]]
 name = "websockets"
-version = "12.0"
+version = "14.1"
 description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)"
 optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "websockets-12.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d554236b2a2006e0ce16315c16eaa0d628dab009c33b63ea03f41c6107958374"},
-    {file = "websockets-12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2d225bb6886591b1746b17c0573e29804619c8f755b5598d875bb4235ea639be"},
-    {file = "websockets-12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eb809e816916a3b210bed3c82fb88eaf16e8afcf9c115ebb2bacede1797d2547"},
-    {file = "websockets-12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c588f6abc13f78a67044c6b1273a99e1cf31038ad51815b3b016ce699f0d75c2"},
-    {file = "websockets-12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5aa9348186d79a5f232115ed3fa9020eab66d6c3437d72f9d2c8ac0c6858c558"},
-    {file = "websockets-12.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6350b14a40c95ddd53e775dbdbbbc59b124a5c8ecd6fbb09c2e52029f7a9f480"},
-    {file = "websockets-12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:70ec754cc2a769bcd218ed8d7209055667b30860ffecb8633a834dde27d6307c"},
-    {file = "websockets-12.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6e96f5ed1b83a8ddb07909b45bd94833b0710f738115751cdaa9da1fb0cb66e8"},
-    {file = "websockets-12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4d87be612cbef86f994178d5186add3d94e9f31cc3cb499a0482b866ec477603"},
-    {file = "websockets-12.0-cp310-cp310-win32.whl", hash = "sha256:befe90632d66caaf72e8b2ed4d7f02b348913813c8b0a32fae1cc5fe3730902f"},
-    {file = "websockets-12.0-cp310-cp310-win_amd64.whl", hash = "sha256:363f57ca8bc8576195d0540c648aa58ac18cf85b76ad5202b9f976918f4219cf"},
-    {file = "websockets-12.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5d873c7de42dea355d73f170be0f23788cf3fa9f7bed718fd2830eefedce01b4"},
-    {file = "websockets-12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3f61726cae9f65b872502ff3c1496abc93ffbe31b278455c418492016e2afc8f"},
-    {file = "websockets-12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed2fcf7a07334c77fc8a230755c2209223a7cc44fc27597729b8ef5425aa61a3"},
-    {file = "websockets-12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e332c210b14b57904869ca9f9bf4ca32f5427a03eeb625da9b616c85a3a506c"},
-    {file = "websockets-12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5693ef74233122f8ebab026817b1b37fe25c411ecfca084b29bc7d6efc548f45"},
-    {file = "websockets-12.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e9e7db18b4539a29cc5ad8c8b252738a30e2b13f033c2d6e9d0549b45841c04"},
-    {file = "websockets-12.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6e2df67b8014767d0f785baa98393725739287684b9f8d8a1001eb2839031447"},
-    {file = "websockets-12.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bea88d71630c5900690fcb03161ab18f8f244805c59e2e0dc4ffadae0a7ee0ca"},
-    {file = "websockets-12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dff6cdf35e31d1315790149fee351f9e52978130cef6c87c4b6c9b3baf78bc53"},
-    {file = "websockets-12.0-cp311-cp311-win32.whl", hash = "sha256:3e3aa8c468af01d70332a382350ee95f6986db479ce7af14d5e81ec52aa2b402"},
-    {file = "websockets-12.0-cp311-cp311-win_amd64.whl", hash = "sha256:25eb766c8ad27da0f79420b2af4b85d29914ba0edf69f547cc4f06ca6f1d403b"},
-    {file = "websockets-12.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0e6e2711d5a8e6e482cacb927a49a3d432345dfe7dea8ace7b5790df5932e4df"},
-    {file = "websockets-12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dbcf72a37f0b3316e993e13ecf32f10c0e1259c28ffd0a85cee26e8549595fbc"},
-    {file = "websockets-12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12743ab88ab2af1d17dd4acb4645677cb7063ef4db93abffbf164218a5d54c6b"},
-    {file = "websockets-12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b645f491f3c48d3f8a00d1fce07445fab7347fec54a3e65f0725d730d5b99cb"},
-    {file = "websockets-12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9893d1aa45a7f8b3bc4510f6ccf8db8c3b62120917af15e3de247f0780294b92"},
-    {file = "websockets-12.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f38a7b376117ef7aff996e737583172bdf535932c9ca021746573bce40165ed"},
-    {file = "websockets-12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:f764ba54e33daf20e167915edc443b6f88956f37fb606449b4a5b10ba42235a5"},
-    {file = "websockets-12.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:1e4b3f8ea6a9cfa8be8484c9221ec0257508e3a1ec43c36acdefb2a9c3b00aa2"},
-    {file = "websockets-12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9fdf06fd06c32205a07e47328ab49c40fc1407cdec801d698a7c41167ea45113"},
-    {file = "websockets-12.0-cp312-cp312-win32.whl", hash = "sha256:baa386875b70cbd81798fa9f71be689c1bf484f65fd6fb08d051a0ee4e79924d"},
-    {file = "websockets-12.0-cp312-cp312-win_amd64.whl", hash = "sha256:ae0a5da8f35a5be197f328d4727dbcfafa53d1824fac3d96cdd3a642fe09394f"},
-    {file = "websockets-12.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5f6ffe2c6598f7f7207eef9a1228b6f5c818f9f4d53ee920aacd35cec8110438"},
-    {file = "websockets-12.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9edf3fc590cc2ec20dc9d7a45108b5bbaf21c0d89f9fd3fd1685e223771dc0b2"},
-    {file = "websockets-12.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8572132c7be52632201a35f5e08348137f658e5ffd21f51f94572ca6c05ea81d"},
-    {file = "websockets-12.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:604428d1b87edbf02b233e2c207d7d528460fa978f9e391bd8aaf9c8311de137"},
-    {file = "websockets-12.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a9d160fd080c6285e202327aba140fc9a0d910b09e423afff4ae5cbbf1c7205"},
-    {file = "websockets-12.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87b4aafed34653e465eb77b7c93ef058516cb5acf3eb21e42f33928616172def"},
-    {file = "websockets-12.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b2ee7288b85959797970114deae81ab41b731f19ebcd3bd499ae9ca0e3f1d2c8"},
-    {file = "websockets-12.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7fa3d25e81bfe6a89718e9791128398a50dec6d57faf23770787ff441d851967"},
-    {file = "websockets-12.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a571f035a47212288e3b3519944f6bf4ac7bc7553243e41eac50dd48552b6df7"},
-    {file = "websockets-12.0-cp38-cp38-win32.whl", hash = "sha256:3c6cc1360c10c17463aadd29dd3af332d4a1adaa8796f6b0e9f9df1fdb0bad62"},
-    {file = "websockets-12.0-cp38-cp38-win_amd64.whl", hash = "sha256:1bf386089178ea69d720f8db6199a0504a406209a0fc23e603b27b300fdd6892"},
-    {file = "websockets-12.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ab3d732ad50a4fbd04a4490ef08acd0517b6ae6b77eb967251f4c263011a990d"},
-    {file = "websockets-12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a1d9697f3337a89691e3bd8dc56dea45a6f6d975f92e7d5f773bc715c15dde28"},
-    {file = "websockets-12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1df2fbd2c8a98d38a66f5238484405b8d1d16f929bb7a33ed73e4801222a6f53"},
-    {file = "websockets-12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23509452b3bc38e3a057382c2e941d5ac2e01e251acce7adc74011d7d8de434c"},
-    {file = "websockets-12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e5fc14ec6ea568200ea4ef46545073da81900a2b67b3e666f04adf53ad452ec"},
-    {file = "websockets-12.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46e71dbbd12850224243f5d2aeec90f0aaa0f2dde5aeeb8fc8df21e04d99eff9"},
-    {file = "websockets-12.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b81f90dcc6c85a9b7f29873beb56c94c85d6f0dac2ea8b60d995bd18bf3e2aae"},
-    {file = "websockets-12.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a02413bc474feda2849c59ed2dfb2cddb4cd3d2f03a2fedec51d6e959d9b608b"},
-    {file = "websockets-12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bbe6013f9f791944ed31ca08b077e26249309639313fff132bfbf3ba105673b9"},
-    {file = "websockets-12.0-cp39-cp39-win32.whl", hash = "sha256:cbe83a6bbdf207ff0541de01e11904827540aa069293696dd528a6640bd6a5f6"},
-    {file = "websockets-12.0-cp39-cp39-win_amd64.whl", hash = "sha256:fc4e7fa5414512b481a2483775a8e8be7803a35b30ca805afa4998a84f9fd9e8"},
-    {file = "websockets-12.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:248d8e2446e13c1d4326e0a6a4e9629cb13a11195051a73acf414812700badbd"},
-    {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f44069528d45a933997a6fef143030d8ca8042f0dfaad753e2906398290e2870"},
-    {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c4e37d36f0d19f0a4413d3e18c0d03d0c268ada2061868c1e6f5ab1a6d575077"},
-    {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d829f975fc2e527a3ef2f9c8f25e553eb7bc779c6665e8e1d52aa22800bb38b"},
-    {file = "websockets-12.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2c71bd45a777433dd9113847af751aae36e448bc6b8c361a566cb043eda6ec30"},
-    {file = "websockets-12.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0bee75f400895aef54157b36ed6d3b308fcab62e5260703add87f44cee9c82a6"},
-    {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:423fc1ed29f7512fceb727e2d2aecb952c46aa34895e9ed96071821309951123"},
-    {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27a5e9964ef509016759f2ef3f2c1e13f403725a5e6a1775555994966a66e931"},
-    {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3181df4583c4d3994d31fb235dc681d2aaad744fbdbf94c4802485ececdecf2"},
-    {file = "websockets-12.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b067cb952ce8bf40115f6c19f478dc71c5e719b7fbaa511359795dfd9d1a6468"},
-    {file = "websockets-12.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:00700340c6c7ab788f176d118775202aadea7602c5cc6be6ae127761c16d6b0b"},
-    {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e469d01137942849cff40517c97a30a93ae79917752b34029f0ec72df6b46399"},
-    {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffefa1374cd508d633646d51a8e9277763a9b78ae71324183693959cf94635a7"},
-    {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba0cab91b3956dfa9f512147860783a1829a8d905ee218a9837c18f683239611"},
-    {file = "websockets-12.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2cb388a5bfb56df4d9a406783b7f9dbefb888c09b71629351cc6b036e9259370"},
-    {file = "websockets-12.0-py3-none-any.whl", hash = "sha256:dc284bbc8d7c78a6c69e0c7325ab46ee5e40bb4d50e494d8131a07ef47500e9e"},
-    {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"},
+python-versions = ">=3.9"
+groups = ["integration"]
+files = [
+    {file = "websockets-14.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a0adf84bc2e7c86e8a202537b4fd50e6f7f0e4a6b6bf64d7ccb96c4cd3330b29"},
+    {file = "websockets-14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90b5d9dfbb6d07a84ed3e696012610b6da074d97453bd01e0e30744b472c8179"},
+    {file = "websockets-14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2177ee3901075167f01c5e335a6685e71b162a54a89a56001f1c3e9e3d2ad250"},
+    {file = "websockets-14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f14a96a0034a27f9d47fd9788913924c89612225878f8078bb9d55f859272b0"},
+    {file = "websockets-14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f874ba705deea77bcf64a9da42c1f5fc2466d8f14daf410bc7d4ceae0a9fcb0"},
+    {file = "websockets-14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9607b9a442392e690a57909c362811184ea429585a71061cd5d3c2b98065c199"},
+    {file = "websockets-14.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bea45f19b7ca000380fbd4e02552be86343080120d074b87f25593ce1700ad58"},
+    {file = "websockets-14.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:219c8187b3ceeadbf2afcf0f25a4918d02da7b944d703b97d12fb01510869078"},
+    {file = "websockets-14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ad2ab2547761d79926effe63de21479dfaf29834c50f98c4bf5b5480b5838434"},
+    {file = "websockets-14.1-cp310-cp310-win32.whl", hash = "sha256:1288369a6a84e81b90da5dbed48610cd7e5d60af62df9851ed1d1d23a9069f10"},
+    {file = "websockets-14.1-cp310-cp310-win_amd64.whl", hash = "sha256:e0744623852f1497d825a49a99bfbec9bea4f3f946df6eb9d8a2f0c37a2fec2e"},
+    {file = "websockets-14.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:449d77d636f8d9c17952628cc7e3b8faf6e92a17ec581ec0c0256300717e1512"},
+    {file = "websockets-14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a35f704be14768cea9790d921c2c1cc4fc52700410b1c10948511039be824aac"},
+    {file = "websockets-14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b1f3628a0510bd58968c0f60447e7a692933589b791a6b572fcef374053ca280"},
+    {file = "websockets-14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c3deac3748ec73ef24fc7be0b68220d14d47d6647d2f85b2771cb35ea847aa1"},
+    {file = "websockets-14.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7048eb4415d46368ef29d32133134c513f507fff7d953c18c91104738a68c3b3"},
+    {file = "websockets-14.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6cf0ad281c979306a6a34242b371e90e891bce504509fb6bb5246bbbf31e7b6"},
+    {file = "websockets-14.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cc1fc87428c1d18b643479caa7b15db7d544652e5bf610513d4a3478dbe823d0"},
+    {file = "websockets-14.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f95ba34d71e2fa0c5d225bde3b3bdb152e957150100e75c86bc7f3964c450d89"},
+    {file = "websockets-14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9481a6de29105d73cf4515f2bef8eb71e17ac184c19d0b9918a3701c6c9c4f23"},
+    {file = "websockets-14.1-cp311-cp311-win32.whl", hash = "sha256:368a05465f49c5949e27afd6fbe0a77ce53082185bbb2ac096a3a8afaf4de52e"},
+    {file = "websockets-14.1-cp311-cp311-win_amd64.whl", hash = "sha256:6d24fc337fc055c9e83414c94e1ee0dee902a486d19d2a7f0929e49d7d604b09"},
+    {file = "websockets-14.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ed907449fe5e021933e46a3e65d651f641975a768d0649fee59f10c2985529ed"},
+    {file = "websockets-14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:87e31011b5c14a33b29f17eb48932e63e1dcd3fa31d72209848652310d3d1f0d"},
+    {file = "websockets-14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bc6ccf7d54c02ae47a48ddf9414c54d48af9c01076a2e1023e3b486b6e72c707"},
+    {file = "websockets-14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9777564c0a72a1d457f0848977a1cbe15cfa75fa2f67ce267441e465717dcf1a"},
+    {file = "websockets-14.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a655bde548ca98f55b43711b0ceefd2a88a71af6350b0c168aa77562104f3f45"},
+    {file = "websockets-14.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3dfff83ca578cada2d19e665e9c8368e1598d4e787422a460ec70e531dbdd58"},
+    {file = "websockets-14.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6a6c9bcf7cdc0fd41cc7b7944447982e8acfd9f0d560ea6d6845428ed0562058"},
+    {file = "websockets-14.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4b6caec8576e760f2c7dd878ba817653144d5f369200b6ddf9771d64385b84d4"},
+    {file = "websockets-14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eb6d38971c800ff02e4a6afd791bbe3b923a9a57ca9aeab7314c21c84bf9ff05"},
+    {file = "websockets-14.1-cp312-cp312-win32.whl", hash = "sha256:1d045cbe1358d76b24d5e20e7b1878efe578d9897a25c24e6006eef788c0fdf0"},
+    {file = "websockets-14.1-cp312-cp312-win_amd64.whl", hash = "sha256:90f4c7a069c733d95c308380aae314f2cb45bd8a904fb03eb36d1a4983a4993f"},
+    {file = "websockets-14.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:3630b670d5057cd9e08b9c4dab6493670e8e762a24c2c94ef312783870736ab9"},
+    {file = "websockets-14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:36ebd71db3b89e1f7b1a5deaa341a654852c3518ea7a8ddfdf69cc66acc2db1b"},
+    {file = "websockets-14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5b918d288958dc3fa1c5a0b9aa3256cb2b2b84c54407f4813c45d52267600cd3"},
+    {file = "websockets-14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00fe5da3f037041da1ee0cf8e308374e236883f9842c7c465aa65098b1c9af59"},
+    {file = "websockets-14.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8149a0f5a72ca36720981418eeffeb5c2729ea55fa179091c81a0910a114a5d2"},
+    {file = "websockets-14.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77569d19a13015e840b81550922056acabc25e3f52782625bc6843cfa034e1da"},
+    {file = "websockets-14.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cf5201a04550136ef870aa60ad3d29d2a59e452a7f96b94193bee6d73b8ad9a9"},
+    {file = "websockets-14.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:88cf9163ef674b5be5736a584c999e98daf3aabac6e536e43286eb74c126b9c7"},
+    {file = "websockets-14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:836bef7ae338a072e9d1863502026f01b14027250a4545672673057997d5c05a"},
+    {file = "websockets-14.1-cp313-cp313-win32.whl", hash = "sha256:0d4290d559d68288da9f444089fd82490c8d2744309113fc26e2da6e48b65da6"},
+    {file = "websockets-14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8621a07991add373c3c5c2cf89e1d277e49dc82ed72c75e3afc74bd0acc446f0"},
+    {file = "websockets-14.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:01bb2d4f0a6d04538d3c5dfd27c0643269656c28045a53439cbf1c004f90897a"},
+    {file = "websockets-14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:414ffe86f4d6f434a8c3b7913655a1a5383b617f9bf38720e7c0799fac3ab1c6"},
+    {file = "websockets-14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8fda642151d5affdee8a430bd85496f2e2517be3a2b9d2484d633d5712b15c56"},
+    {file = "websockets-14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd7c11968bc3860d5c78577f0dbc535257ccec41750675d58d8dc66aa47fe52c"},
+    {file = "websockets-14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a032855dc7db987dff813583d04f4950d14326665d7e714d584560b140ae6b8b"},
+    {file = "websockets-14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7e7ea2f782408c32d86b87a0d2c1fd8871b0399dd762364c731d86c86069a78"},
+    {file = "websockets-14.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:39450e6215f7d9f6f7bc2a6da21d79374729f5d052333da4d5825af8a97e6735"},
+    {file = "websockets-14.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ceada5be22fa5a5a4cdeec74e761c2ee7db287208f54c718f2df4b7e200b8d4a"},
+    {file = "websockets-14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3fc753451d471cff90b8f467a1fc0ae64031cf2d81b7b34e1811b7e2691bc4bc"},
+    {file = "websockets-14.1-cp39-cp39-win32.whl", hash = "sha256:14839f54786987ccd9d03ed7f334baec0f02272e7ec4f6e9d427ff584aeea8b4"},
+    {file = "websockets-14.1-cp39-cp39-win_amd64.whl", hash = "sha256:d9fd19ecc3a4d5ae82ddbfb30962cf6d874ff943e56e0c81f5169be2fda62979"},
+    {file = "websockets-14.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:e5dc25a9dbd1a7f61eca4b7cb04e74ae4b963d658f9e4f9aad9cd00b688692c8"},
+    {file = "websockets-14.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:04a97aca96ca2acedf0d1f332c861c5a4486fdcba7bcef35873820f940c4231e"},
+    {file = "websockets-14.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df174ece723b228d3e8734a6f2a6febbd413ddec39b3dc592f5a4aa0aff28098"},
+    {file = "websockets-14.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:034feb9f4286476f273b9a245fb15f02c34d9586a5bc936aff108c3ba1b21beb"},
+    {file = "websockets-14.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:660c308dabd2b380807ab64b62985eaccf923a78ebc572bd485375b9ca2b7dc7"},
+    {file = "websockets-14.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5a42d3ecbb2db5080fc578314439b1d79eef71d323dc661aa616fb492436af5d"},
+    {file = "websockets-14.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ddaa4a390af911da6f680be8be4ff5aaf31c4c834c1a9147bc21cbcbca2d4370"},
+    {file = "websockets-14.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a4c805c6034206143fbabd2d259ec5e757f8b29d0a2f0bf3d2fe5d1f60147a4a"},
+    {file = "websockets-14.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:205f672a6c2c671a86d33f6d47c9b35781a998728d2c7c2a3e1cf3333fcb62b7"},
+    {file = "websockets-14.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef440054124728cc49b01c33469de06755e5a7a4e83ef61934ad95fc327fbb0"},
+    {file = "websockets-14.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7591d6f440af7f73c4bd9404f3772bfee064e639d2b6cc8c94076e71b2471c1"},
+    {file = "websockets-14.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:25225cc79cfebc95ba1d24cd3ab86aaa35bcd315d12fa4358939bd55e9bd74a5"},
+    {file = "websockets-14.1-py3-none-any.whl", hash = "sha256:4d4fc827a20abe6d544a119896f6b78ee13fe81cbfef416f3f2ddf09a03f0e2e"},
+    {file = "websockets-14.1.tar.gz", hash = "sha256:398b10c77d471c0aab20a845e7a60076b6390bfdaac7a6d2edb0d2c59d75e8d8"},
 ]
 
 [[package]]
@@ -2293,6 +2350,7 @@ version = "1.16.0"
 description = "Module for decorators, wrappers and monkey patching."
 optional = false
 python-versions = ">=3.6"
+groups = ["charm-libs"]
 files = [
     {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"},
     {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"},
@@ -2372,6 +2430,7 @@ version = "3.19.2"
 description = "Backport of pathlib-compatible object wrapper for zip files"
 optional = false
 python-versions = ">=3.8"
+groups = ["charm-libs"]
 files = [
     {file = "zipp-3.19.2-py3-none-any.whl", hash = "sha256:f091755f667055f2d02b32c53771a7a6c8b47e1fdbc4b72a8b9072b3eef8015c"},
     {file = "zipp-3.19.2.tar.gz", hash = "sha256:bf1dcf6450f873a13e952a29504887c89e6de7506209e5b1bcc3460135d4de19"},
@@ -2382,6 +2441,6 @@ doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linke
 test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"]
 
 [metadata]
-lock-version = "2.0"
+lock-version = "2.1"
 python-versions = "^3.10"
-content-hash = "4e15707ef6a14da7f861bde89a613729dde0a11cd177e6279b795ed3c468c716"
+content-hash = "ac9febc02ab990dafb783af9ccee29d5f54ea209420a72feb1de6d5d5f4a54fe"
diff --git a/pyproject.toml b/pyproject.toml
index cd2c435f6..1c50358c6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,29 +3,33 @@
 
 [tool.poetry]
 package-mode = false
+requires-poetry = ">=2.0.0"
 
 [tool.poetry.dependencies]
 python = "^3.10"
 ops = "^2.15.0"
-lightkube = "^0.14.0"
+lightkube = "^0.15.0"
 tenacity = "^8.2.2"
 boto3 = "^1.28.22"
 jinja2 = "^3.1.2"
+pyyaml = "^6.0.2"
 
 [tool.poetry.group.charm-libs.dependencies]
 # data_platform_libs/v0/data_interfaces.py
 ops = ">=2.0.0"
 # data_platform_libs/v0/upgrade.py
-# grafana_agent/v0/cos_agent.py requires pydantic <2
 poetry-core = "*"
-pydantic = "^1.10, <2"
+# data_platform_libs/v0/upgrade.py requires pydantic ^1.10
+# data_platform_libs/v0/data_models.py requires pydantic ^1.10
+# tempo_coordinator_k8s/v0/charm_tracing.py requires pydantic
+pydantic = "^1.10"
 # tls_certificates_interface/v1/tls_certificates.py
 # tls_certificates lib uses a feature only available in cryptography >=42.0.5
 cryptography = ">=42.0.5"
 jsonschema = "*"
 # loki_k8s/v0/loki_push_api.py and prometheus_k8s/v0/prometheus_scrape.py
-cosl = "*"
-# tempo_k8s/v1/charm_tracing.py
+cosl = ">=0.0.50"
+# tempo_coordinator_k8s/v0/charm_tracing.py
 opentelemetry-exporter-otlp-proto-http = "1.21.0"
 
 [tool.poetry.group.format]
@@ -50,22 +54,18 @@ parameterized = "^0.9.0"
 
 [tool.poetry.group.integration.dependencies]
 pytest = "^7.4.0"
-pytest-github-secrets = {git = "https://github.com/canonical/data-platform-workflows", tag = "v21.0.1", subdirectory = "python/pytest_plugins/github_secrets"}
-pytest-microceph = {git = "https://github.com/canonical/data-platform-workflows", tag = "v21.0.1", subdirectory = "python/pytest_plugins/microceph"}
 pytest-operator = "^0.28.0"
-pytest-operator-cache = {git = "https://github.com/canonical/data-platform-workflows", tag = "v21.0.1", subdirectory = "python/pytest_plugins/pytest_operator_cache"}
-pytest-operator-groups = {git = "https://github.com/canonical/data-platform-workflows", tag = "v21.0.1", subdirectory = "python/pytest_plugins/pytest_operator_groups"}
-juju = "^3.5.2.0"
+juju = "^3.6.0.0"
 ops = "^2.15.0"
 mysql-connector-python = "~8.0.33"
 tenacity = "^8.2.2"
 boto3 = "^1.28.22"
 pyyaml = "^6.0.1"
 urllib3 = "^1.26.16"
-lightkube = "^0.14.0"
+lightkube = "^0.15.0"
 kubernetes = "^27.2.0"
 allure-pytest = "^2.13.2"
-allure-pytest-collection-report = {git = "https://github.com/canonical/data-platform-workflows", tag = "v21.0.1", subdirectory = "python/pytest_plugins/allure_pytest_collection_report"}
+allure-pytest-default-results = "^0.1.2"
 pytest-asyncio = "^0.21.1"
 
 [tool.coverage.run]
@@ -77,7 +77,7 @@ show_missing = true
 [tool.pytest.ini_options]
 minversion = "6.0"
 log_cli_level = "INFO"
-markers = ["unstable", "juju3", "only_with_juju_secrets", "only_without_juju_secrets"]
+markers = ["juju3", "only_with_juju_secrets", "only_without_juju_secrets"]
 asyncio_mode = "auto"
 
 # Linting tools configuration
diff --git a/spread.yaml b/spread.yaml
new file mode 100644
index 000000000..9cde86db4
--- /dev/null
+++ b/spread.yaml
@@ -0,0 +1,134 @@
+project: mysql-k8s-operator
+
+backends:
+  # Derived from https://github.com/jnsgruk/zinc-k8s-operator/blob/a21eae8399eb3b9df4ddb934b837af25ef831976/spread.yaml#L11
+  lxd-vm:
+    # TODO: remove after https://github.com/canonical/spread/pull/185 is merged & in charmcraft
+    type: adhoc
+    allocate: |
+      hash=$(python3 -c "import hashlib; print(hashlib.sha256('$SPREAD_PASSWORD'.encode()).hexdigest()[:6])")
+      VM_NAME="${VM_NAME:-${SPREAD_SYSTEM//./-}-${hash}}"
+      DISK="${DISK:-20}"
+      CPU="${CPU:-4}"
+      MEM="${MEM:-8}"
+
+      cloud_config="#cloud-config
+      ssh_pwauth: true
+      users:
+        - default
+        - name: runner
+          plain_text_passwd: $SPREAD_PASSWORD
+          lock_passwd: false
+          sudo: ALL=(ALL) NOPASSWD:ALL
+      "
+
+      lxc launch --vm \
+        "${SPREAD_SYSTEM//-/:}" \
+        "${VM_NAME}" \
+        -c user.user-data="${cloud_config}" \
+        -c limits.cpu="${CPU}" \
+        -c limits.memory="${MEM}GiB" \
+        -d root,size="${DISK}GiB"
+
+      # Wait for the runner user
+      while ! lxc exec "${VM_NAME}" -- id -u runner &>/dev/null; do sleep 0.5; done
+
+      # Set the instance address for spread
+      ADDRESS "$(lxc ls -f csv | grep "${VM_NAME}" | cut -d"," -f3 | cut -d" " -f1)"
+    discard: |
+      hash=$(python3 -c "import hashlib; print(hashlib.sha256('$SPREAD_PASSWORD'.encode()).hexdigest()[:6])")
+      VM_NAME="${VM_NAME:-${SPREAD_SYSTEM//./-}-${hash}}"
+      lxc delete --force "${VM_NAME}"
+    environment:
+      CONCIERGE_EXTRA_SNAPS: charmcraft
+      CONCIERGE_EXTRA_DEBS: pipx
+    systems:
+      - ubuntu-24.04:
+          username: runner
+    prepare: |
+      systemctl disable --now unattended-upgrades.service
+      systemctl mask unattended-upgrades.service
+      pipx install charmcraftcache
+      cd "$SPREAD_PATH"
+      charmcraftcache pack -v
+    restore-each: |
+      cd "$SPREAD_PATH"
+      # Revert python-libjuju version override
+      git restore pyproject.toml poetry.lock
+
+      # Used instead of `concierge restore` to save time between tests
+      # For example, with microk8s, using `concierge restore` takes twice as long as this (e.g. 6
+      # min instead of 3 min between every spread job)
+      juju destroy-model --force --no-wait --destroy-storage --no-prompt testing
+      juju kill-controller --no-prompt concierge-microk8s
+    restore: |
+      rm -rf "$SPREAD_PATH"
+
+  github-ci:
+    type: adhoc
+    # Only run on CI
+    manual: true
+    # HACK: spread requires runners to be accessible via SSH
+    # Configure local sshd & instruct spread to connect to the same machine spread is running on
+    # (spread cannot provision GitHub Actions runners, so we provision a GitHub Actions runner for
+    # each spread job & select a single job when running spread)
+    # Derived from https://github.com/jnsgruk/zinc-k8s-operator/blob/a21eae8399eb3b9df4ddb934b837af25ef831976/spread.yaml#L47
+    allocate: |
+      sudo tee /etc/ssh/sshd_config.d/10-spread-github-ci.conf << 'EOF'
+      PasswordAuthentication yes
+      PermitEmptyPasswords yes
+      EOF
+
+      ADDRESS localhost
+    # HACK: spread does not pass environment variables set on runner
+    # Manually pass specific environment variables
+    environment:
+      CI: '$(HOST: echo $CI)'
+      AWS_ACCESS_KEY: '$(HOST: echo $AWS_ACCESS_KEY)'
+      AWS_SECRET_KEY: '$(HOST: echo $AWS_SECRET_KEY)'
+      GCP_ACCESS_KEY: '$(HOST: echo $GCP_ACCESS_KEY)'
+      GCP_SECRET_KEY: '$(HOST: echo $GCP_SECRET_KEY)'
+    systems:
+      - ubuntu-24.04:
+          username: runner
+      - ubuntu-24.04-arm:
+          username: runner
+          variants:
+            - -juju29
+
+suites:
+  tests/spread/:
+    summary: Spread tests
+
+path: /root/spread_project
+
+kill-timeout: 3h
+environment:
+  PATH: $PATH:$(pipx environment --value PIPX_BIN_DIR)
+  CONCIERGE_JUJU_CHANNEL/juju36: 3.6/stable
+  CONCIERGE_JUJU_CHANNEL/juju29: 2.9/stable
+prepare: |
+  snap refresh --hold
+  chown -R root:root "$SPREAD_PATH"
+  cd "$SPREAD_PATH"
+  snap install --classic concierge
+
+  # Install charmcraft & pipx (on lxd-vm backend)
+  concierge prepare --trace
+
+  pipx install tox poetry
+prepare-each: |
+  cd "$SPREAD_PATH"
+  if [[ $SPREAD_VARIANT == *"juju29"* ]]
+  then
+    # Each version of python-libjuju is only compatible with one major Juju version
+    # Override python-libjuju version pinned in poetry.lock
+    poetry add --lock --group integration juju@^2
+  fi
+  # `concierge prepare` needs to be run for each spread job in case Juju version changed
+  concierge prepare --trace
+
+  # Unable to set constraint on all models because of Juju bug:
+  # https://bugs.launchpad.net/juju/+bug/2065050
+  juju set-model-constraints arch="$(dpkg --print-architecture)"
+# Only restore on lxd backend—no need to restore on CI
diff --git a/src/charm.py b/src/charm.py
index 1a0368cef..ddc5d37e1 100755
--- a/src/charm.py
+++ b/src/charm.py
@@ -4,6 +4,14 @@
 
 """Charm for MySQL."""
 
+from charms.mysql.v0.architecture import WrongArchitectureWarningCharm, is_wrong_architecture
+from ops.main import main
+
+from log_rotation_setup import LogRotationSetup
+
+if is_wrong_architecture() and __name__ == "__main__":
+    main(WrongArchitectureWarningCharm)
+
 import logging
 import random
 from socket import getfqdn
@@ -28,22 +36,22 @@
     MySQLConfigureMySQLUsersError,
     MySQLCreateClusterError,
     MySQLGetClusterPrimaryAddressError,
-    MySQLGetMemberStateError,
     MySQLGetMySQLVersionError,
     MySQLInitializeJujuOperationsTableError,
     MySQLLockAcquisitionError,
+    MySQLNoMemberStateError,
     MySQLRebootFromCompleteOutageError,
     MySQLServiceNotRunningError,
     MySQLSetClusterPrimaryError,
+    MySQLUnableToGetMemberStateError,
 )
 from charms.mysql.v0.tls import MySQLTLS
 from charms.prometheus_k8s.v0.prometheus_scrape import MetricsEndpointProvider
 from charms.rolling_ops.v0.rollingops import RollingOpsManager
-from charms.tempo_k8s.v1.charm_tracing import trace_charm
-from charms.tempo_k8s.v2.tracing import TracingEndpointRequirer
-from ops import EventBase, RelationBrokenEvent, RelationCreatedEvent
+from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm
+from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer
+from ops import EventBase, ModelError, RelationBrokenEvent, RelationCreatedEvent
 from ops.charm import RelationChangedEvent, UpdateStatusEvent
-from ops.main import main
 from ops.model import (
     ActiveStatus,
     BlockedStatus,
@@ -52,7 +60,8 @@
     Unit,
     WaitingStatus,
 )
-from ops.pebble import Layer
+from ops.pebble import ChangeError, Layer
+from tenacity import RetryError, Retrying, stop_after_attempt, wait_fixed
 
 from config import CharmConfig, MySQLConfig
 from constants import (
@@ -71,7 +80,8 @@
     MYSQLD_CONFIG_FILE,
     MYSQLD_EXPORTER_PORT,
     MYSQLD_EXPORTER_SERVICE,
-    MYSQLD_SAFE_SERVICE,
+    MYSQLD_LOCATION,
+    MYSQLD_SERVICE,
     PASSWORD_LENGTH,
     PEER,
     ROOT_PASSWORD_KEY,
@@ -178,6 +188,7 @@ def __init__(self, *args):
         self.log_rotate_manager = LogRotateManager(self)
         self.log_rotate_manager.start_log_rotate_manager()
 
+        self.log_rotate_setup = LogRotationSetup(self)
         self.rotate_mysql_logs = RotateMySQLLogs(self)
         self.replication_offer = MySQLAsyncReplicationOffer(self)
         self.replication_consumer = MySQLAsyncReplicationConsumer(self)
@@ -216,18 +227,30 @@ def _mysql(self) -> MySQL:
     @property
     def _pebble_layer(self) -> Layer:
         """Return a layer for the mysqld pebble service."""
+        mysqld_cmd = [
+            MYSQLD_LOCATION,
+            "--basedir=/usr",
+            "--datadir=/var/lib/mysql",
+            "--plugin-dir=/usr/lib/mysql/plugin",
+            "--log-error=/var/log/mysql/error.log",
+            f"--pid-file={self.unit_label}.pid",
+        ]
+
         layer = {
             "summary": "mysqld services layer",
             "description": "pebble config layer for mysqld safe and exporter",
             "services": {
-                MYSQLD_SAFE_SERVICE: {
+                MYSQLD_SERVICE: {
                     "override": "replace",
                     "summary": "mysqld safe",
-                    "command": MYSQLD_SAFE_SERVICE,
+                    "command": " ".join(mysqld_cmd),
                     "startup": "enabled",
                     "user": MYSQL_SYSTEM_USER,
                     "group": MYSQL_SYSTEM_GROUP,
                     "kill-delay": "24h",
+                    "environment": {
+                        "MYSQLD_PARENT_PID": 1,
+                    },
                 },
                 MYSQLD_EXPORTER_SERVICE: {
                     "override": "replace",
@@ -255,6 +278,42 @@ def unit_address(self) -> str:
         """Return the address of this unit."""
         return self.get_unit_address()
 
+    @property
+    def is_new_unit(self) -> bool:
+        """Return whether the unit is a clean state.
+
+        e.g. scaling from zero units
+        """
+        _default_unit_data_keys = {
+            "egress-subnets",
+            "ingress-address",
+            "private-address",
+        }
+        return self.unit_peer_data.keys() == _default_unit_data_keys
+
+    @property
+    def text_logs(self) -> list:
+        """Enabled text logs."""
+        # the slow query log isn't enabled by default
+        text_logs = ["error"]
+
+        if self.config.plugin_audit_enabled:
+            text_logs.append("audit")
+
+        return text_logs
+
+    @property
+    def unit_initialized(self) -> bool:
+        """Return whether a unit is started.
+
+        Override parent class method to include container accessibility check.
+        """
+        container = self.unit.get_container(CONTAINER_NAME)
+        if container.can_connect():
+            return super().unit_initialized
+        else:
+            return False
+
     def get_unit_hostname(self, unit_name: Optional[str] = None) -> str:
         """Get the hostname.localdomain for a unit.
 
@@ -328,7 +387,7 @@ def join_unit_to_cluster(self) -> None:
                 cluster_primary = self._get_primary_from_online_peer()
                 if not cluster_primary:
                     self.unit.status = WaitingStatus("waiting to get cluster primary from peers")
-                    logger.debug("waiting: unable to retrieve the cluster primary from peers")
+                    logger.info("waiting: unable to retrieve the cluster primary from peers")
                     return
 
                 if (
@@ -357,7 +416,7 @@ def join_unit_to_cluster(self) -> None:
 
                 if self._mysql.are_locks_acquired(from_instance=lock_instance or cluster_primary):
                     self.unit.status = WaitingStatus("waiting to join the cluster")
-                    logger.debug("waiting: cluster lock is held")
+                    logger.info("waiting: cluster lock is held")
                     return
 
                 self.unit.status = MaintenanceStatus("joining the cluster")
@@ -386,18 +445,17 @@ def join_unit_to_cluster(self) -> None:
                     from_instance=cluster_primary,
                     lock_instance=lock_instance,
                 )
-                logger.debug(f"Added instance {instance_address} to cluster")
             except MySQLAddInstanceToClusterError:
-                logger.debug(f"Unable to add instance {instance_address} to cluster.")
+                logger.info(f"Unable to add instance {instance_address} to cluster.")
                 return
             except MySQLLockAcquisitionError:
                 self.unit.status = WaitingStatus("waiting to join the cluster")
-                logger.debug("waiting: failed to acquire lock when adding instance to cluster")
+                logger.info("waiting: failed to acquire lock when adding instance to cluster")
                 return
 
         self.unit_peer_data["member-state"] = "online"
         self.unit.status = ActiveStatus(self.active_status_message)
-        logger.debug(f"Instance {instance_label} is cluster member")
+        logger.info(f"Instance {instance_label} added to cluster")
 
     def _reconcile_pebble_layer(self, container: Container) -> None:
         """Reconcile pebble layer."""
@@ -407,7 +465,7 @@ def _reconcile_pebble_layer(self, container: Container) -> None:
         if new_layer.services != current_layer.services:
             logger.info("Reconciling the pebble layer")
 
-            container.add_layer(MYSQLD_SAFE_SERVICE, new_layer, combine=True)
+            container.add_layer(MYSQLD_SERVICE, new_layer, combine=True)
             container.replan()
             self._mysql.wait_until_mysql_connection()
 
@@ -419,36 +477,55 @@ def _reconcile_pebble_layer(self, container: Container) -> None:
             ):
                 container.stop(MYSQLD_EXPORTER_SERVICE)
 
-            self._on_update_status(None)
-
-    def _restart(self, event: EventBase) -> None:
+    def recover_unit_after_restart(self) -> None:
+        """Wait for unit recovery/rejoin after restart."""
+        recovery_attempts = 30
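+        # 30 attempts x 15s wait allows roughly 7.5 minutes for the unit to rejoin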
+        logger.info("Recovering unit")
+        if self.app.planned_units() == 1:
+            self._mysql.reboot_from_complete_outage()
+        else:
+            try:
+                for attempt in Retrying(
+                    stop=stop_after_attempt(recovery_attempts), wait=wait_fixed(15)
+                ):
+                    with attempt:
+                        self._mysql.hold_if_recovering()
+                        if not self._mysql.is_instance_in_cluster(self.unit_label):
+                            logger.debug(
+                                "Instance not yet back in the cluster."
+                                f" Retry {attempt.retry_state.attempt_number}/{recovery_attempts}"
+                            )
+                            raise Exception
+            except RetryError:
+                raise
+
+    def _restart(self, _: EventBase) -> None:
         """Restart the service."""
-        if self.peers.units != self.restart_peers.units:
-            # defer restart until all units are in the relation
-            logger.debug("Deferring restart until all units are in the relation")
-            event.defer()
+        container = self.unit.get_container(CONTAINER_NAME)
+        if not container.can_connect():
             return
-        if self.peers.units and self._mysql.is_unit_primary(self.unit_label):
-            # delay primary on multi units
-            restart_states = {
-                self.restart_peers.data[unit].get("state", "unset") for unit in self.peers.units
-            }
-            if restart_states == {"unset"}:
-                logger.info("Restarting primary")
-            elif restart_states != {"release"}:
-                # Wait other units restart first to minimize primary switchover
-                message = "Primary restart deferred after other units"
-                logger.info(message)
-                self.unit.status = WaitingStatus(message)
-                event.defer()
-                return
+
+        if not self.unit_initialized:
+            logger.debug("Restarting standalone mysqld")
+            container.restart(MYSQLD_SERVICE)
+            return
+
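+        # when this unit is the primary, hand the primary role to a peer first
+        # so the restart does not take the write endpoint down with it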
+        if self.app.planned_units() > 1 and self._mysql.is_unit_primary(self.unit_label):
+            try:
+                new_primary = self.get_unit_address(self.peers.units.pop())
+                logger.debug(f"Switching primary to {new_primary}")
+                self._mysql.set_cluster_primary(new_primary)
+            except MySQLSetClusterPrimaryError:
+                logger.warning("Changing primary failed")
+
+        logger.debug("Restarting mysqld")
         self.unit.status = MaintenanceStatus("restarting MySQL")
-        container = self.unit.get_container(CONTAINER_NAME)
-        if container.can_connect():
-            logger.debug("Restarting mysqld")
-            container.pebble.restart_services([MYSQLD_SAFE_SERVICE], timeout=3600)
-            sleep(10)
-            self._on_update_status(None)
+        container.pebble.restart_services([MYSQLD_SERVICE], timeout=3600)
+        self.unit.status = MaintenanceStatus("recovering unit after restart")
+        sleep(10)
+        self.recover_unit_after_restart()
+
+        self._on_update_status(None)
 
     # =========================================================================
     # Charm event handlers
@@ -458,6 +535,10 @@ def _reconcile_mysqld_exporter(
         self, event: RelationCreatedEvent | RelationBrokenEvent
     ) -> None:
         """Handle a COS relation created or broken event."""
+        if not self._is_peer_data_set:
+            logger.debug("Unit not yet ready to reconcile mysqld exporter. Waiting...")
+            return
+
         container = self.unit.get_container(CONTAINER_NAME)
         if not container.can_connect():
             # reconciliation is done on pebble ready
@@ -504,34 +585,24 @@ def _on_config_changed(self, _: EventBase) -> None:  # noqa: C901
 
         previous_config_dict = self.mysql_config.custom_config(config_content)
 
-        # render the new config
-        memory_limit_bytes = (self.config.profile_limit_memory or 0) * BYTES_1MB
-        new_config_content, new_config_dict = self._mysql.render_mysqld_configuration(
-            profile=self.config.profile,
-            audit_log_enabled=self.config.plugin_audit_enabled,
-            audit_log_strategy=self.config.plugin_audit_strategy,
-            memory_limit=memory_limit_bytes,
-            experimental_max_connections=self.config.experimental_max_connections,
-            binlog_retention_days=self.config.binlog_retention_days,
-        )
+        # always setup log rotation
+        self.log_rotate_setup.setup()
 
+        logger.info("Persisting configuration changes to file")
+        new_config_dict = self._write_mysqld_configuration()
         changed_config = compare_dictionaries(previous_config_dict, new_config_dict)
 
         if self.mysql_config.keys_requires_restart(changed_config):
             # there are static configurations in changed keys
-            logger.info("Persisting configuration changes to file")
-
-            # persist config to file
-            self._mysql.write_content_to_file(path=MYSQLD_CONFIG_FILE, content=new_config_content)
 
             if self._mysql.is_mysqld_running():
                 logger.info("Configuration change requires restart")
                 if "loose-audit_log_format" in changed_config:
                     # plugins are manipulated running daemon
                     if self.config.plugin_audit_enabled:
-                        self._mysql.install_plugins(["audit_log", "audit_log_filter"])
+                        self._mysql.install_plugins(["audit_log"])
                     else:
-                        self._mysql.uninstall_plugins(["audit_log", "audit_log_filter"])
+                        self._mysql.uninstall_plugins(["audit_log"])
                 # restart the service
                 self.on[f"{self.restart.name}"].acquire_lock.emit()
                 return
@@ -540,7 +611,9 @@ def _on_config_changed(self, _: EventBase) -> None:  # noqa: C901
             # if only dynamic config changed, apply it
             logger.info("Configuration does not requires restart")
             for config in dynamic_config:
-                self._mysql.set_dynamic_variable(config, new_config_dict[config])
+                self._mysql.set_dynamic_variable(
+                    config.removeprefix("loose-"), new_config_dict[config]
+                )
 
     def _on_leader_elected(self, _) -> None:
         """Handle the leader elected event.
@@ -556,9 +629,9 @@ def _on_leader_elected(self, _) -> None:
             BACKUPS_PASSWORD_KEY,
         ]
 
+        logger.info("Generating internal user credentials")
         for required_password in required_passwords:
             if not self.get_secret("app", required_password):
-                logger.debug(f"Setting {required_password}")
                 self.set_secret(
                     "app", required_password, generate_random_password(PASSWORD_LENGTH)
                 )
@@ -583,43 +656,48 @@ def _open_ports(self) -> None:
             except ops.ModelError:
                 logger.exception("failed to open port")
 
-    def _write_mysqld_configuration(self):
+    def _write_mysqld_configuration(self) -> dict:
         """Write the mysqld configuration to the file."""
         memory_limit_bytes = (self.config.profile_limit_memory or 0) * BYTES_1MB
-        new_config_content, _ = self._mysql.render_mysqld_configuration(
+        new_config_content, new_config_dict = self._mysql.render_mysqld_configuration(
             profile=self.config.profile,
             audit_log_enabled=self.config.plugin_audit_enabled,
             audit_log_strategy=self.config.plugin_audit_strategy,
+            audit_log_policy=self.config.logs_audit_policy,
             memory_limit=memory_limit_bytes,
             experimental_max_connections=self.config.experimental_max_connections,
             binlog_retention_days=self.config.binlog_retention_days,
         )
         self._mysql.write_content_to_file(path=MYSQLD_CONFIG_FILE, content=new_config_content)
+        return new_config_dict
 
     def _configure_instance(self, container) -> None:
         """Configure the instance for use in Group Replication."""
         # Run mysqld for the first time to
         # bootstrap the data directory and users
-        logger.debug("Initializing instance")
+        logger.info("Initializing mysqld")
         try:
             self._mysql.fix_data_dir(container)
             self._mysql.initialise_mysqld()
 
             # Add the pebble layer
-            logger.debug("Adding pebble layer")
-            container.add_layer(MYSQLD_SAFE_SERVICE, self._pebble_layer, combine=True)
-            container.restart(MYSQLD_SAFE_SERVICE)
+            logger.info("Adding pebble layer")
+            container.add_layer(MYSQLD_SERVICE, self._pebble_layer, combine=True)
+            container.restart(MYSQLD_SERVICE)
 
-            logger.debug("Waiting for instance to be ready")
+            logger.info("Waiting for instance to be ready")
             self._mysql.wait_until_mysql_connection(check_port=False)
 
-            logger.info("Configuring instance")
+            logger.info("Resetting root password and starting mysqld")
+            self._mysql.reset_root_password_and_start_mysqld()
+
+            logger.info("Configuring initialized mysqld")
             # Configure all base users and revoke privileges from the root users
-            self._mysql.configure_mysql_users(password_needed=False)
+            self._mysql.configure_mysql_users()
 
             if self.config.plugin_audit_enabled:
                 # Enable the audit plugin
-                self._mysql.install_plugins(["audit_log", "audit_log_filter"])
+                self._mysql.install_plugins(["audit_log"])
 
             # Configure instance as a cluster node
             self._mysql.configure_instance()
@@ -628,6 +706,9 @@ def _configure_instance(self, container) -> None:
             MySQLServiceNotRunningError,
             MySQLConfigureMySQLUsersError,
             MySQLConfigureInstanceError,
+            ChangeError,
+            TimeoutError,
+            ModelError,
         ):
             # On any error, reset the data directory so hook is retried
             # on empty data directory
@@ -685,14 +766,28 @@ def _on_mysql_pebble_ready(self, event) -> None:
         container = event.workload
         self._write_mysqld_configuration()
 
-        logger.info("Setting up the logrotate configurations")
-        self._mysql.setup_logrotate_config()
+        self.log_rotate_setup.setup()
 
         if self._mysql.is_data_dir_initialised():
             # Data directory is already initialised, skip configuration
             self.unit.status = MaintenanceStatus("Starting mysqld")
-            logger.debug("Data directory is already initialised, skipping configuration")
+            logger.info("Data directory is already initialised, skipping configuration")
             self._reconcile_pebble_layer(container)
+            if self.is_new_unit:
+                # when unit is new and has data, it means the app is scaling out
+                # from zero units
+                logger.info("Scaling out from zero units")
+                if self.unit.is_leader():
+                    # re-create the cluster since it was dissolved on scale-down
+                    self.create_cluster()
+                    self._on_update_status(None)
+                else:
+                    # Non-leader units try to join cluster
+                    self.unit.status = WaitingStatus("Waiting for instance to join the cluster")
+                    self.unit_peer_data.update({
+                        "member-role": "secondary",
+                        "member-state": "waiting",
+                    })
             return
 
         self.unit.status = MaintenanceStatus("Initialising mysqld")
@@ -700,7 +795,12 @@ def _on_mysql_pebble_ready(self, event) -> None:
         # First run setup
         self._configure_instance(container)
 
-        if not self.unit.is_leader() or self.cluster_initialized:
+        # We consider cluster initialized only if a primary already exists
+        # (as there can be metadata in the database but no primary if the pod
+        # crashed while the cluster was being created)
+        if not self.unit.is_leader() or (
+            self.cluster_initialized and self._get_primary_from_online_peer()
+        ):
             # Non-leader units try to join cluster
             self.unit.status = WaitingStatus("Waiting for instance to join the cluster")
             self.unit_peer_data.update({"member-role": "secondary", "member-state": "waiting"})
@@ -710,29 +810,36 @@ def _on_mysql_pebble_ready(self, event) -> None:
         try:
             # Create the cluster when is the leader unit
             logger.info(f"Creating cluster {self.app_peer_data['cluster-name']}")
+            self.unit.status = MaintenanceStatus("Creating cluster")
             self.create_cluster()
             self.unit.status = ops.ActiveStatus(self.active_status_message)
 
         except (
             MySQLCreateClusterError,
-            MySQLGetMemberStateError,
+            MySQLUnableToGetMemberStateError,
+            MySQLNoMemberStateError,
             MySQLInitializeJujuOperationsTableError,
             MySQLCreateClusterError,
         ):
             logger.exception("Failed to initialize primary")
             raise
 
-    def _handle_potential_cluster_crash_scenario(self) -> bool:
+    def _handle_potential_cluster_crash_scenario(self) -> bool:  # noqa: C901
         """Handle potential full cluster crash scenarios.
 
         Returns:
             bool indicating whether the caller should return
         """
-        if not self.cluster_initialized or not self.unit_peer_data.get("member-role"):
-            # health checks are only after cluster and members are initialized
+        if not self._mysql.is_mysqld_running():
             return True
 
-        if not self._mysql.is_mysqld_running():
+        only_single_uninitialized_node_across_cluster = (
+            self.only_one_cluster_node_thats_uninitialized
+        )
+
+        if (
+            not self.cluster_initialized and not only_single_uninitialized_node_across_cluster
+        ) or not self.unit_peer_data.get("member-role"):
             return True
 
         # retrieve and persist state for every unit
@@ -740,7 +847,7 @@ def _handle_potential_cluster_crash_scenario(self) -> bool:
             state, role = self._mysql.get_member_state()
             self.unit_peer_data["member-state"] = state
             self.unit_peer_data["member-role"] = role
-        except MySQLGetMemberStateError:
+        except (MySQLNoMemberStateError, MySQLUnableToGetMemberStateError):
             logger.error("Error getting member state. Avoiding potential cluster crash recovery")
             self.unit.status = MaintenanceStatus("Unable to get member state")
             return True
@@ -757,38 +864,91 @@ def _handle_potential_cluster_crash_scenario(self) -> bool:
         if state == "recovering":
             return True
 
-        if state in ["offline"]:
+        if state == "offline":
             # Group Replication is active but the member does not belong to any group
             all_states = {
                 self.peers.data[unit].get("member-state", "unknown") for unit in self.peers.units
             }
-            # Add state for this unit (self.peers.units does not include this unit)
-            all_states.add("offline")
 
-            if all_states == {"offline"} and self.unit.is_leader():
+            # Add state 'offline' for this unit (self.peers.units does not
+            # include this unit)
+            if (all_states | {"offline"} == {"offline"} and self.unit.is_leader()) or (
+                only_single_uninitialized_node_across_cluster and all_states == {"waiting"}
+            ):
                 # All instance are off, reboot cluster from outage from the leader unit
 
                 logger.info("Attempting reboot from complete outage.")
                 try:
-                    self._mysql.reboot_from_complete_outage()
+                    # Guard so only one unit of the application attempts the reboot
+                    if self.unit.is_leader() or only_single_uninitialized_node_across_cluster:
+                        self._mysql.reboot_from_complete_outage()
                 except MySQLRebootFromCompleteOutageError:
                     logger.error("Failed to reboot cluster from complete outage.")
-                    self.unit.status = BlockedStatus("failed to recover cluster.")
+
+                    if only_single_uninitialized_node_across_cluster and all_states == {"waiting"}:
+                        self._mysql.drop_group_replication_metadata_schema()
+                        self.create_cluster()
+                        self.unit.status = ActiveStatus(self.active_status_message)
+                    else:
+                        self.unit.status = BlockedStatus("failed to recover cluster.")
+                finally:
+                    return True
+
+            if self._mysql.is_cluster_auto_rejoin_ongoing():
+                logger.info("Cluster auto-rejoin attempts are still ongoing.")
+            else:
+                logger.info("Cluster auto-rejoin attempts are exhausted. Attempting manual rejoin")
+                self._execute_manual_rejoin()
 
             return True
 
         return False
 
+    def _execute_manual_rejoin(self) -> None:
+        """Executes an instance manual rejoin.
+
+        It is supposed to be called when the MySQL 8.0.21+ auto-rejoin attempts have been exhausted,
+        on an OFFLINE replica that still belongs to the cluster
+        """
+        if not self._mysql.is_instance_in_cluster(self.unit_label):
+            logger.warning("Instance does not belong to the cluster. Cannot perform manual rejoin")
+            return
+
+        cluster_primary = self._get_primary_from_online_peer()
+        if not cluster_primary:
+            logger.warning("Instance does not have ONLINE peers. Cannot perform manual rejoin")
+            return
+
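+        # rejoin by removing this unit from the cluster metadata and adding the
+        # instance back from the current primary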
+        self._mysql.remove_instance(
+            unit_label=self.unit_label,
+        )
+        self._mysql.add_instance_to_cluster(
+            instance_address=self.unit_address,
+            instance_unit_label=self.unit_label,
+            from_instance=cluster_primary,
+        )
+
     def _is_cluster_blocked(self) -> bool:
         """Performs cluster state checks for the update-status handler.
 
         Returns: a boolean indicating whether the update-status (caller) should
             no-op and return.
         """
-        unit_member_state = self.unit_peer_data.get("member-state")
-        if unit_member_state in ["waiting", "restarting"]:
+        # We need to query member state from the server since it would be
+        # 'offline' if the pod was rescheduled during cluster creation, whereas
+        # member-state in the unit peer databag would still be 'waiting'
+        try:
+            member_state, _ = self._mysql.get_member_state()
+        except MySQLUnableToGetMemberStateError:
+            logger.error("Error getting member state while checking if cluster is blocked")
+            self.unit.status = MaintenanceStatus("Unable to get member state")
+            return True
+        except MySQLNoMemberStateError:
+            member_state = None
+
+        if not member_state or member_state == "restarting":
             # avoid changing status while tls is being set up or charm is being initialized
-            logger.info(f"Unit state is {unit_member_state}")
+            logger.info(f"Unit {member_state=}")
             return True
 
         # avoid changing status while async replication is setting up
@@ -798,20 +958,23 @@ def _on_update_status(self, _: Optional[UpdateStatusEvent]) -> None:
         """Handle the update status event."""
         if not self.upgrade.idle:
             # avoid changing status while upgrade is in progress
-            logger.debug("Application is upgrading. Skipping.")
+            logger.info("Application is upgrading. Skipping.")
             return
         if not self.unit.is_leader() and self._is_unit_waiting_to_join_cluster():
             # join cluster test takes precedence over blocked test
             # due to matching criteria
+            logger.info("Attempting to join cluster")
             self.join_unit_to_cluster()
             return
 
         if self._is_cluster_blocked():
+            logger.info("Cluster is blocked. Skipping.")
             return
         del self.restart_peers.data[self.unit]["state"]
 
         container = self.unit.get_container(CONTAINER_NAME)
         if not container.can_connect():
+            logger.info("Cannot connect to pebble in the mysql container")
             return
 
         if self._handle_potential_cluster_crash_scenario():
@@ -883,6 +1046,11 @@ def _on_database_storage_detaching(self, _) -> None:
         # Inform other hooks of current status
         self.unit_peer_data["unit-status"] = "removing"
 
+        if self.unit.is_leader():
+            # Update 'units-added-to-cluster' counter in the peer relation databag
+            units = int(self.app_peer_data.get("units-added-to-cluster", 1))
+            self.app_peer_data["units-added-to-cluster"] = str(units - 1)
+
 
 if __name__ == "__main__":
     main(MySQLOperatorCharm)
diff --git a/src/config.py b/src/config.py
index a075bff4d..d110076ec 100644
--- a/src/config.py
+++ b/src/config.py
@@ -62,6 +62,8 @@ class CharmConfig(BaseConfigModel):
     binlog_retention_days: int
     plugin_audit_enabled: bool
     plugin_audit_strategy: str
+    logs_audit_policy: str
+    logs_retention_period: str
 
     @validator("profile")
     @classmethod
@@ -132,7 +134,8 @@ def experimental_max_connections_validator(cls, value: int) -> Optional[int]:
         """Check experimental max connections."""
         if value < MAX_CONNECTIONS_FLOOR:
             raise ValueError(
-                f"experimental-max-connections must be greater than {MAX_CONNECTIONS_FLOOR}"
+                f"experimental-max-connections ({value=}) must be equal or greater "
+                + f" than {MAX_CONNECTIONS_FLOOR}"
             )
 
         return value
@@ -151,6 +154,25 @@ def binlog_retention_days_validator(cls, value: int) -> int:
     def plugin_audit_strategy_validator(cls, value: str) -> Optional[str]:
         """Check profile config option is one of `testing` or `production`."""
         if value not in ["async", "semi-async"]:
-            raise ValueError("Value not one of 'async' or 'semi-async'")
+            raise ValueError("plugin_audit_strategy not one of 'async' or 'semi-async'")
+
+        return value
+
+    @validator("logs_audit_policy")
+    @classmethod
+    def logs_audit_policy_validator(cls, value: str) -> Optional[str]:
+        """Check values for audit log policy."""
+        valid_values = ["all", "logins", "queries"]
+        if value not in valid_values:
+            raise ValueError(f"logs_audit_policy not one of {', '.join(valid_values)}")
+
+        return value
+
+    @validator("logs_retention_period")
+    @classmethod
+    def logs_retention_period_validator(cls, value: str) -> str:
+        """Check logs retention period."""
+        if not re.fullmatch(r"auto|\d{1,3}", value) or value == "0":
+            raise ValueError("logs_retention_period must be an integer greater than 0 or `auto`")
 
         return value
diff --git a/src/constants.py b/src/constants.py
index ffa9100f7..4185b6ba8 100644
--- a/src/constants.py
+++ b/src/constants.py
@@ -6,8 +6,8 @@
 PASSWORD_LENGTH = 24
 PEER = "database-peers"
 CONTAINER_NAME = "mysql"
-MYSQLD_LOCATION = "mysqld"
-MYSQLD_SAFE_SERVICE = "mysqld_safe"
+MYSQLD_SERVICE = "mysqld"
+MYSQLD_LOCATION = f"/usr/sbin/{MYSQLD_SERVICE}"
 ROOT_USERNAME = "root"
 CLUSTER_ADMIN_USERNAME = "clusteradmin"
 SERVER_CONFIG_USERNAME = "serverconfig"
@@ -33,10 +33,12 @@
 MYSQLD_SOCK_FILE = "/var/run/mysqld/mysqld.sock"
 MYSQLSH_SCRIPT_FILE = "/tmp/script.py"
 MYSQLD_CONFIG_FILE = "/etc/mysql/mysql.conf.d/z-custom.cnf"
+MYSQLD_INIT_CONFIG_FILE = "/etc/mysql/mysql.conf.d/z-custom-init-file.cnf"
+MYSQL_LOG_DIR = "/var/log/mysql"
 MYSQL_LOG_FILES = [
-    "/var/log/mysql/error.log",
-    "/var/log/mysql/audit.log",
-    "/var/log/mysql/general.log",
+    f"{MYSQL_LOG_DIR}/error.log",
+    f"{MYSQL_LOG_DIR}/audit.log",
+    f"{MYSQL_LOG_DIR}/general.log",
 ]
 MYSQL_SYSTEM_USER = "mysql"
 MYSQL_SYSTEM_GROUP = "mysql"
@@ -50,6 +52,7 @@
 GR_MAX_MEMBERS = 9
 # TODO: should be changed when adopting cos-agent
 COS_AGENT_RELATION_NAME = "metrics-endpoint"
+COS_LOGGING_RELATION_NAME = "logging"
 LOG_ROTATE_CONFIG_FILE = "/etc/logrotate.d/flush_mysql_logs"
 ROOT_SYSTEM_USER = "root"
 SECRET_KEY_FALLBACKS = {
diff --git a/src/dependency.json b/src/dependency.json
index 8299a5aa8..350d90028 100644
--- a/src/dependency.json
+++ b/src/dependency.json
@@ -9,6 +9,6 @@
     "dependencies": {},
     "name": "charmed-mysql",
     "upgrade_supported": ">8.0.31",
-    "version": "8.0.37"
+    "version": "8.0.41"
   }
 }
diff --git a/src/log_rotation_setup.py b/src/log_rotation_setup.py
new file mode 100644
index 000000000..89ba7a26a
--- /dev/null
+++ b/src/log_rotation_setup.py
@@ -0,0 +1,111 @@
+# Copyright 2025 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""Handler for log rotation setup in relation to COS."""
+
+import logging
+import typing
+
+import yaml
+from ops.framework import Object
+
+from constants import CONTAINER_NAME, COS_LOGGING_RELATION_NAME
+
+if typing.TYPE_CHECKING:
+    from charm import MySQLOperatorCharm
+
+logger = logging.getLogger(__name__)
+
+_POSITIONS_FILE = "/opt/promtail/positions.yaml"
+_LOGS_SYNCED = "logs_synced"
+
+
+class LogRotationSetup(Object):
+    """Configure logrotation settings in relation to COS integration."""
+
+    def __init__(self, charm: "MySQLOperatorCharm"):
+        super().__init__(charm, "log-rotation-setup")
+
+        self.charm = charm
+
+        self.framework.observe(self.charm.on.update_status, self._update_logs_rotation)
+        self.framework.observe(
+            self.charm.on[COS_LOGGING_RELATION_NAME].relation_created, self._cos_relation_created
+        )
+        self.framework.observe(
+            self.charm.on[COS_LOGGING_RELATION_NAME].relation_broken, self._cos_relation_broken
+        )
+
+    @property
+    def _logs_are_syncing(self):
+        return self.charm.unit_peer_data.get(_LOGS_SYNCED) == "true"
+
+    def setup(self):
+        """Setup log rotation."""
+        # retention setting
+        if self.charm.config.logs_retention_period == "auto":
+            retention_period = 1 if self._logs_are_syncing else 3
+        else:
+            retention_period = int(self.charm.config.logs_retention_period)
+
+        # compression setting
+        compress = self._logs_are_syncing or not self.charm.has_cos_relation
+
+        self.charm._mysql.setup_logrotate_config(retention_period, self.charm.text_logs, compress)
+
+    def _update_logs_rotation(self, _):
+        """Check for log rotation auto configuration handler.
+
+        Reconfigure log rotation if promtail/gagent start sync.
+        """
+        if not self.model.get_relation(COS_LOGGING_RELATION_NAME):
+            return
+
+        container = self.charm.unit.get_container(CONTAINER_NAME)
+        if not container.can_connect():
+            return
+
+        if self._logs_are_syncing:
+            # reconfiguration done
+            return
+
+        not_started_msg = "Log syncing not yet started."
+        if not container.exists(_POSITIONS_FILE):
+            logger.debug(not_started_msg)
+            return
+
+        positions_file = container.pull(_POSITIONS_FILE, encoding="utf-8")
+        positions = yaml.safe_load(positions_file.read())
+
+        if sync_files := positions.get("positions"):
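+            # promtail's positions file maps each tailed file to the last read
+            # offset; a non-zero value for a mysql log means syncing has started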
+            for log_file, line in sync_files.items():
+                if "mysql" in log_file and int(line) > 0:
+                    break
+            else:
+                logger.debug(not_started_msg)
+                return
+        else:
+            logger.debug(not_started_msg)
+            return
+
+        logger.info("Reconfigure log rotation after logs upload started")
+        self.charm.unit_peer_data[_LOGS_SYNCED] = "true"
+        self.setup()
+
+    def _cos_relation_created(self, _):
+        """Handle relation created."""
+        container = self.charm.unit.get_container(CONTAINER_NAME)
+        if not container.can_connect():
+            return
+        logger.info("Reconfigure log rotation on cos relation created")
+        self.setup()
+
+    def _cos_relation_broken(self, _):
+        """Unset auto value for log retention."""
+        container = self.charm.unit.get_container(CONTAINER_NAME)
+        if not container.can_connect():
+            return
+        logger.info("Reconfigure log rotation after logs upload stops")
+
+        del self.charm.unit_peer_data["logs_synced"]
+        self.setup()
diff --git a/src/mysql_k8s_helpers.py b/src/mysql_k8s_helpers.py
index bcf054f18..b3d505bbe 100644
--- a/src/mysql_k8s_helpers.py
+++ b/src/mysql_k8s_helpers.py
@@ -6,7 +6,7 @@
 
 import json
 import logging
-from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union
 
 import jinja2
 from charms.mysql.v0.mysql import (
@@ -37,14 +37,15 @@
     LOG_ROTATE_CONFIG_FILE,
     MYSQL_CLI_LOCATION,
     MYSQL_DATA_DIR,
+    MYSQL_LOG_DIR,
     MYSQL_SYSTEM_GROUP,
     MYSQL_SYSTEM_USER,
     MYSQLD_DEFAULTS_CONFIG_FILE,
+    MYSQLD_INIT_CONFIG_FILE,
     MYSQLD_LOCATION,
-    MYSQLD_SAFE_SERVICE,
+    MYSQLD_SERVICE,
     MYSQLD_SOCK_FILE,
     MYSQLSH_LOCATION,
-    MYSQLSH_SCRIPT_FILE,
     ROOT_SYSTEM_USER,
     XTRABACKUP_PLUGIN_DIR,
 )
@@ -57,6 +58,10 @@
     from charm import MySQLOperatorCharm
 
 
+class MySQLResetRootPasswordAndStartMySQLDError(Error):
+    """Exception raised when there's an error resetting root password and starting mysqld."""
+
+
 class MySQLInitialiseMySQLDError(Error):
     """Exception raised when there is an issue initialising an instance."""
 
@@ -187,12 +192,13 @@ def fix_data_dir(self, container: Container) -> None:
         if paths[0].user != MYSQL_SYSTEM_USER or paths[0].group != MYSQL_SYSTEM_GROUP:
             logger.debug(f"Changing ownership to {MYSQL_SYSTEM_USER}:{MYSQL_SYSTEM_GROUP}")
             try:
-                container.exec([
+                process = container.exec([
                     "chown",
                     "-R",
                     f"{MYSQL_SYSTEM_USER}:{MYSQL_SYSTEM_GROUP}",
                     MYSQL_DATA_DIR,
                 ])
+                process.wait()
             except ExecError as e:
                 logger.error(f"Exited with code {e.exit_code}. Stderr:\n{e.stderr}")
                 raise MySQLInitialiseMySQLDError(e.stderr or "")
@@ -204,7 +210,12 @@ def initialise_mysqld(self) -> None:
         Initialise mysql data directory and create blank password root@localhost user.
         Raises MySQLInitialiseMySQLDError if the instance bootstrap fails.
         """
-        bootstrap_command = [MYSQLD_LOCATION, "--initialize-insecure", "-u", MYSQL_SYSTEM_USER]
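+        # `--initialize` (unlike the previous `--initialize-insecure`) creates
+        # root@localhost with a random expired password, which is reset right
+        # after in reset_root_password_and_start_mysqld()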
+        bootstrap_command = [
+            MYSQLD_LOCATION,
+            "--initialize",
+            "-u",
+            MYSQL_SYSTEM_USER,
+        ]
 
         try:
             process = self.container.exec(
@@ -218,6 +229,48 @@ def initialise_mysqld(self) -> None:
             self.reset_data_dir()
             raise MySQLInitialiseMySQLDError
 
+    def reset_root_password_and_start_mysqld(self) -> None:
+        """Reset the root user password and start mysqld."""
+        logger.debug("Resetting root user password and starting mysqld")
+        alter_user_queries = [
+            f"ALTER USER 'root'@'localhost' IDENTIFIED BY '{self.root_password}';",
+            "FLUSH PRIVILEGES;",
+        ]
+
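+        # mysqld executes the statements from `init_file` once at startup, so
+        # the restart below applies the new root password; both helper files
+        # are removed again afterwards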
+        self.container.push(
+            "/alter-root-user.sql",
+            "\n".join(alter_user_queries),
+            encoding="utf-8",
+            permissions=0o600,
+            user=MYSQL_SYSTEM_USER,
+            group=MYSQL_SYSTEM_GROUP,
+        )
+
+        try:
+            self.container.push(
+                MYSQLD_INIT_CONFIG_FILE,
+                "[mysqld]\ninit_file = /alter-root-user.sql",
+                encoding="utf-8",
+                permissions=0o600,
+                user=MYSQL_SYSTEM_USER,
+                group=MYSQL_SYSTEM_GROUP,
+            )
+        except PathError:
+            self.container.remove_path("/alter-root-user.sql")
+
+            logger.exception("Failed to write the custom config file for init-file")
+            raise
+
+        try:
+            self.container.restart(MYSQLD_SERVICE)
+            self.wait_until_mysql_connection(check_port=False)
+        except (TypeError, MySQLServiceNotRunningError):
+            logger.exception("Failed to run init-file and wait for connection")
+            raise
+        finally:
+            self.container.remove_path("/alter-root-user.sql")
+            self.container.remove_path(MYSQLD_INIT_CONFIG_FILE)
+
     @retry(reraise=True, stop=stop_after_delay(120), wait=wait_fixed(2))
     def wait_until_mysql_connection(self, check_port: bool = True) -> None:
         """Wait until a connection to MySQL daemon is possible.
@@ -235,16 +288,29 @@ def wait_until_mysql_connection(self, check_port: bool = True) -> None:
 
         logger.debug("MySQL connection possible")
 
-    def setup_logrotate_config(self) -> None:
+    def setup_logrotate_config(
+        self,
+        logs_retention_period: int,
+        enabled_log_files: Iterable,
+        logs_compression: bool,
+    ) -> None:
         """Set up logrotate config in the workload container."""
         logger.debug("Creating the logrotate config file")
 
+        # logs rotate every minute: retention days * 1440 minutes/day = number
+        # of rotated files to keep
+        logs_rotations = logs_retention_period * 1440
+
         with open("templates/logrotate.j2", "r") as file:
             template = jinja2.Template(file.read())
 
         rendered = template.render(
             system_user=MYSQL_SYSTEM_USER,
             system_group=MYSQL_SYSTEM_GROUP,
+            log_dir=MYSQL_LOG_DIR,
+            logs_retention_period=logs_retention_period,
+            logs_rotations=logs_rotations,
+            logs_compression_enabled=logs_compression,
+            enabled_log_files=enabled_log_files,
         )
 
         logger.debug("Writing the logrotate config file to the workload container")
@@ -257,24 +323,35 @@ def setup_logrotate_config(self) -> None:
 
     def execute_backup_commands(
         self,
-        s3_directory: str,
+        s3_path: str,
         s3_parameters: Dict[str, str],
+        xtrabackup_location: str = CHARMED_MYSQL_XTRABACKUP_LOCATION,
+        xbcloud_location: str = CHARMED_MYSQL_XBCLOUD_LOCATION,
+        xtrabackup_plugin_dir: str = XTRABACKUP_PLUGIN_DIR,
+        mysqld_socket_file: str = MYSQLD_SOCK_FILE,
+        tmp_base_directory: str = MYSQL_DATA_DIR,
+        defaults_config_file: str = MYSQLD_DEFAULTS_CONFIG_FILE,
+        user: Optional[str] = MYSQL_SYSTEM_USER,
+        group: Optional[str] = MYSQL_SYSTEM_GROUP,
     ) -> Tuple[str, str]:
         """Executes commands to create a backup."""
         return super().execute_backup_commands(
-            s3_directory,
+            s3_path,
             s3_parameters,
-            CHARMED_MYSQL_XTRABACKUP_LOCATION,
-            CHARMED_MYSQL_XBCLOUD_LOCATION,
-            XTRABACKUP_PLUGIN_DIR,
-            MYSQLD_SOCK_FILE,
-            MYSQL_DATA_DIR,
-            MYSQLD_DEFAULTS_CONFIG_FILE,
-            user=MYSQL_SYSTEM_USER,
-            group=MYSQL_SYSTEM_GROUP,
+            xtrabackup_location,
+            xbcloud_location,
+            xtrabackup_plugin_dir,
+            mysqld_socket_file,
+            tmp_base_directory,
+            defaults_config_file,
+            user,
+            group,
         )
 
-    def delete_temp_backup_directory(self, from_directory: str = MYSQL_DATA_DIR) -> None:
+    def delete_temp_backup_directory(
+        self,
+        from_directory: str = MYSQL_DATA_DIR,
+    ) -> None:
         """Delete the temp backup directory in the data directory."""
         super().delete_temp_backup_directory(
             from_directory,
@@ -286,6 +363,11 @@ def retrieve_backup_with_xbcloud(
         self,
         backup_id: str,
         s3_parameters: Dict[str, str],
+        temp_restore_directory: str = MYSQL_DATA_DIR,
+        xbcloud_location: str = CHARMED_MYSQL_XBCLOUD_LOCATION,
+        xbstream_location: str = CHARMED_MYSQL_XBSTREAM_LOCATION,
+        user: str = MYSQL_SYSTEM_USER,
+        group: str = MYSQL_SYSTEM_GROUP,
     ) -> Tuple[str, str, str]:
         """Retrieve the specified backup from S3.
 
@@ -295,11 +377,11 @@ def retrieve_backup_with_xbcloud(
         return super().retrieve_backup_with_xbcloud(
             backup_id,
             s3_parameters,
-            MYSQL_DATA_DIR,
-            CHARMED_MYSQL_XBCLOUD_LOCATION,
-            CHARMED_MYSQL_XBSTREAM_LOCATION,
-            user=MYSQL_SYSTEM_USER,
-            group=MYSQL_SYSTEM_GROUP,
+            temp_restore_directory,
+            xbcloud_location,
+            xbstream_location,
+            user,
+            group,
         )
 
     def prepare_backup_for_restore(self, backup_location: str) -> Tuple[str, str]:
@@ -377,14 +459,16 @@ def create_database(self, database_name: str) -> None:
         """
         try:
             create_database_commands = (
-                (
-                    f"shell.connect_to_primary('{self.server_config_user}:"
-                    f"{self.server_config_password}@{self.instance_address}')"
-                ),
+                "shell.connect_to_primary()",
                 f'session.run_sql("CREATE DATABASE IF NOT EXISTS `{database_name}`;")',
             )
 
-            self._run_mysqlsh_script("\n".join(create_database_commands))
+            self._run_mysqlsh_script(
+                "\n".join(create_database_commands),
+                user=self.server_config_user,
+                host=self.instance_address,
+                password=self.server_config_password,
+            )
         except MySQLClientError as e:
             logger.exception(f"Failed to create database {database_name}", exc_info=e)
             raise MySQLCreateDatabaseError(e.message)
@@ -404,19 +488,21 @@ def create_user(self, username: str, password: str, label: str, hostname: str =
         try:
             escaped_user_attributes = json.dumps({"label": label}).replace('"', r"\"")
             create_user_commands = (
-                (
-                    f"shell.connect_to_primary('{self.server_config_user}:"
-                    f"{self.server_config_password}@{self.instance_address}')"
-                ),
+                "shell.connect_to_primary()",
                 (
                     f'session.run_sql("CREATE USER `{username}`@`{hostname}` IDENTIFIED'
                     f" BY '{password}' ATTRIBUTE '{escaped_user_attributes}';\")"
                 ),
             )
 
-            self._run_mysqlsh_script("\n".join(create_user_commands))
+            self._run_mysqlsh_script(
+                "\n".join(create_user_commands),
+                user=self.server_config_user,
+                host=self.instance_address,
+                password=self.server_config_password,
+            )
         except MySQLClientError as e:
-            logger.exception(f"Failed to create user {username}@{hostname}", exc_info=e)
+            logger.exception(f"Failed to create user {username}@{hostname}")
             raise MySQLCreateUserError(e.message)
 
     def escalate_user_privileges(self, username: str, hostname: str = "%") -> None:
@@ -444,19 +530,22 @@ def escalate_user_privileges(self, username: str, hostname: str = "%") -> None:
             )
 
             escalate_user_privileges_commands = (
-                (
-                    f"shell.connect_to_primary('{self.server_config_user}:"
-                    f"{self.server_config_password}@{self.instance_address}')"
-                ),
+                "shell.connect_to_primary()",
                 f'session.run_sql("GRANT ALL ON *.* TO `{username}`@`{hostname}` WITH GRANT OPTION;")',
-                f"session.run_sql(\"REVOKE {', '.join(super_privileges_to_revoke)} ON *.* FROM `{username}`@`{hostname}`;\")",
+                f'session.run_sql("REVOKE {", ".join(super_privileges_to_revoke)} ON *.* FROM `{username}`@`{hostname}`;")',
                 'session.run_sql("FLUSH PRIVILEGES;")',
             )
 
-            self._run_mysqlsh_script("\n".join(escalate_user_privileges_commands))
+            self._run_mysqlsh_script(
+                "\n".join(escalate_user_privileges_commands),
+                user=self.server_config_user,
+                host=self.instance_address,
+                password=self.server_config_password,
+            )
         except MySQLClientError as e:
             logger.exception(
-                f"Failed to escalate user privileges for {username}@{hostname}", exc_info=e
+                f"Failed to escalate user privileges for {username}@{hostname}",
+                exc_info=e,
             )
             raise MySQLEscalateUserPrivilegesError(e.message)
 
@@ -479,12 +568,11 @@ def delete_users_with_label(self, label_name: str, label_value: str) -> None:
 
         try:
             output = self._run_mysqlcli_script(
-                "; ".join(get_label_users),
+                get_label_users,
                 user=self.server_config_user,
                 password=self.server_config_password,
             )
-            users = [line.strip() for line in output.split("\n") if line.strip()][1:]
-            users = [f"'{user.split('@')[0]}'@'{user.split('@')[1]}'" for user in users]
+            users = [f"'{user[0].split('@')[0]}'@'{user[0].split('@')[1]}'" for user in output]
 
             if len(users) == 0:
                 logger.debug(f"There are no users to drop for label {label_name}={label_value}")
@@ -492,13 +580,15 @@ def delete_users_with_label(self, label_name: str, label_value: str) -> None:
 
             # Using server_config_user as we are sure it has drop user grants
             drop_users_command = (
-                (
-                    f"shell.connect_to_primary('{self.server_config_user}:"
-                    f"{self.server_config_password}@{self.instance_address}')"
-                ),
-                f"session.run_sql(\"DROP USER IF EXISTS {', '.join(users)};\")",
+                "shell.connect_to_primary()",
+                f'session.run_sql("DROP USER IF EXISTS {", ".join(users)};")',
+            )
+            self._run_mysqlsh_script(
+                "\n".join(drop_users_command),
+                user=self.server_config_user,
+                host=self.instance_address,
+                password=self.server_config_password,
             )
-            self._run_mysqlsh_script("\n".join(drop_users_command))
         except MySQLClientError as e:
             logger.exception(
                 f"Failed to query and delete users for label {label_name}={label_value}",
@@ -518,22 +608,22 @@ def stop_mysqld(self) -> None:
         """Stops the mysqld process."""
         try:
             # call low-level pebble API to access timeout parameter
-            self.container.pebble.stop_services([MYSQLD_SAFE_SERVICE], timeout=5 * 60)
+            self.container.pebble.stop_services([MYSQLD_SERVICE], timeout=5 * 60)
         except ChangeError:
-            error_message = f"Failed to stop service {MYSQLD_SAFE_SERVICE}"
+            error_message = f"Failed to stop service {MYSQLD_SERVICE}"
             logger.exception(error_message)
             raise MySQLStopMySQLDError(error_message)
 
     def start_mysqld(self) -> None:
         """Starts the mysqld process."""
         try:
-            self.container.start(MYSQLD_SAFE_SERVICE)
+            self.container.start(MYSQLD_SERVICE)
             self.wait_until_mysql_connection()
         except (
             ChangeError,
             MySQLServiceNotRunningError,
         ):
-            error_message = f"Failed to start service {MYSQLD_SAFE_SERVICE}"
+            error_message = f"Failed to start service {MYSQLD_SERVICE}"
             logger.exception(error_message)
             raise MySQLStartMySQLDError(error_message)
 
@@ -581,7 +671,14 @@ def _execute_commands(
             raise MySQLExecError from None
 
     def _run_mysqlsh_script(
-        self, script: str, timeout: Optional[int] = None, verbose: int = 0
+        self,
+        script: str,
+        user: str,
+        host: str,
+        password: str,
+        timeout: Optional[int] = None,
+        exception_as_warning: bool = False,
+        verbose: int = 0,
     ) -> str:
         """Execute a MySQL shell script.
 
@@ -589,26 +686,29 @@ def _run_mysqlsh_script(
 
         Args:
             script: mysql-shell python script string
+            user: User to invoke the mysqlsh script with
+            host: Host to run the script on
+            password: Password used to invoke the mysqlsh script
             verbose: mysqlsh verbosity level
             timeout: timeout to wait for the script
+            exception_as_warning: (optional) whether the exception should be treated as a warning
 
         Returns:
             stdout of the script
         """
         # TODO: remove timeout from _run_mysqlsh_script contract/signature in the mysql lib
-        self.container.push(path=MYSQLSH_SCRIPT_FILE, source=script)
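+        # disable interactive wizards and print a '###' marker so the script's
+        # own output can be separated from mysqlsh's preamble further below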
+        prepend_cmd = "shell.options.set('useWizards', False)\nprint('###')\n"
+        script = prepend_cmd + script
 
         # render command with remove file after run
         cmd = [
             MYSQLSH_LOCATION,
-            "--no-wizard",
+            "--passwords-from-stdin",
+            f"--uri={user}@{host}",
             "--python",
             f"--verbose={verbose}",
-            "-f",
-            MYSQLSH_SCRIPT_FILE,
-            ";",
-            "rm",
-            MYSQLSH_SCRIPT_FILE,
+            "-c",
+            script,
         ]
 
         # workaround for timeout not working on pebble exec
@@ -618,21 +718,25 @@ def _run_mysqlsh_script(
             cmd.insert(0, "timeout")
 
         try:
-            process = self.container.exec(cmd)
+            process = self.container.exec(cmd, stdin=password)
             stdout, _ = process.wait_output()
-            return stdout
-        except ExecError:
-            raise MySQLClientError
-        except ChangeError:
+            return stdout.split("###")[1].strip()
+        except (ExecError, ChangeError) as e:
+            if exception_as_warning:
+                logger.warning("Failed to execute mysql-shell command")
+            else:
+                self.strip_off_passwords_from_exception(e)
+                logger.exception("Failed to execute mysql-shell command")
             raise MySQLClientError
 
     def _run_mysqlcli_script(
         self,
-        script: str,
+        script: Union[Tuple[Any, ...], List[Any]],
         user: str = "root",
         password: Optional[str] = None,
         timeout: Optional[int] = None,
-    ) -> str:
+        exception_as_warning: bool = False,
+    ) -> list:
         """Execute a MySQL CLI script.
 
         Execute SQL script as instance root user.
@@ -640,31 +744,38 @@ def _run_mysqlcli_script(
 
         Args:
             script: raw SQL script string
-            password: root password to use for the script when needed
             user: user to run the script
+            password: root password to use for the script when needed
             timeout: a timeout to execute the mysqlcli script
+            exception_as_warning: (optional) whether the exception should be treated as a warning
         """
         command = [
             MYSQL_CLI_LOCATION,
             "-u",
             user,
-            "--protocol=SOCKET",
+            "-N",
             f"--socket={MYSQLD_SOCK_FILE}",
             "-e",
-            script,
+            ";".join(script),
         ]
-        if password:
-            # password is needed after user
-            command.append(f"--password={password}")
 
         try:
-            process = self.container.exec(command, timeout=timeout)
+            if password:
+                # password is needed after user
+                command.insert(3, "-p")
+                process = self.container.exec(command, timeout=timeout, stdin=password)
+            else:
+                process = self.container.exec(command, timeout=timeout)
+
             stdout, _ = process.wait_output()
-            return stdout
-        except ExecError as e:
-            raise MySQLClientError(self.strip_off_passwords(e.stderr))
-        except ChangeError as e:
-            raise MySQLClientError(self.strip_off_passwords(e.err))
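+            # with `-N` (no column headers) stdout is plain tab-separated rows;
+            # return them as a list of per-row column lists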
+            return [line.split("\t") for line in stdout.strip().split("\n")] if stdout else []
+        except (ExecError, ChangeError) as e:
+            if exception_as_warning:
+                logger.warning("Failed to execute MySQL cli command")
+            else:
+                self.strip_off_passwords_from_exception(e)
+                logger.exception("Failed to execute MySQL cli command")
+            raise MySQLClientError
 
     def write_content_to_file(
         self,
@@ -728,12 +839,12 @@ def check_if_mysqld_process_stopped(self) -> bool:
             for line in stdout.strip().split("\n"):
                 [comm, stat] = line.split()
 
-                if comm == MYSQLD_SAFE_SERVICE:
+                if comm == MYSQLD_SERVICE:
                     return "T" in stat
 
             return True
         except ExecError as e:
-            raise MySQLClientError(e.stderr)
+            raise MySQLClientError(e.stderr or "")
 
     def get_available_memory(self) -> int:
         """Get available memory for the container in bytes."""
@@ -812,3 +923,11 @@ def set_cluster_primary(self, new_primary_address: str) -> None:
     def fetch_error_log(self) -> Optional[str]:
         """Fetch the MySQL error log."""
         return self.read_file_content("/var/log/mysql/error.log")
+
+    def _file_exists(self, path: str) -> bool:
+        """Check if a file exists.
+
+        Args:
+            path: Path to the file to check
+        """
+        return self.container.exists(path)
diff --git a/src/prometheus_alert_rules/metrics_alert_rules.yaml b/src/prometheus_alert_rules/metrics_alert_rules.yaml
index ded822a5a..ff0a604c1 100644
--- a/src/prometheus_alert_rules/metrics_alert_rules.yaml
+++ b/src/prometheus_alert_rules/metrics_alert_rules.yaml
@@ -1,5 +1,5 @@
 groups:
-  - name: MySQLExporter
+  - name: MySQLExporterK8s
 
     rules:
       # 2.1.1
@@ -9,8 +9,9 @@ groups:
         labels:
           severity: critical
         annotations:
-          summary: MySQL Down (instance {{ $labels.instance }})
-          description: "MySQL instance is down\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+          summary: MySQL instance {{ $labels.instance }} is down.
+          description: |
+            LABELS = {{ $labels }}.
 
       # 2.1.2
       # customized: 80% -> 90%
@@ -20,18 +21,10 @@ groups:
         labels:
           severity: warning
         annotations:
-          summary: MySQL too many connections (> 90%) (instance {{ $labels.instance }})
-          description: "More than 90% of MySQL connections are in use on {{ $labels.instance }}\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
-
-      # 2.1.3
-      - alert: MySQLHighPreparedStatementsUtilization(>80%)
-        expr: max_over_time(mysql_global_status_prepared_stmt_count[1m]) / mysql_global_variables_max_prepared_stmt_count * 100 > 80
-        for: 2m
-        labels:
-          severity: warning
-        annotations:
-          summary: MySQL high prepared statements utilization (> 80%) (instance {{ $labels.instance }})
-          description: "High utilization of prepared statements (>80%) on {{ $labels.instance }}\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+          summary: MySQL instance {{ $labels.instance }} is using > 90% of `max_connections`.
+          description: |
+            Consider checking the client application responsible for generating those additional connections.
+            LABELS = {{ $labels }}.
 
       # 2.1.4
       # customized: 60% -> 80%
@@ -41,8 +34,22 @@ groups:
         labels:
           severity: warning
         annotations:
-          summary: MySQL high threads running (instance {{ $labels.instance }})
-          description: "More than 80% of MySQL connections are in running state on {{ $labels.instance }}\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+          summary: MySQL instance {{ $labels.instance }} is actively using > 80% of `max_connections`.
+          description: |
+            Consider reviewing the value of the `max-connections` config parameter or allocating more resources to your database server.
+            LABELS = {{ $labels }}.
+
+      # 2.1.3
+      - alert: MySQLHighPreparedStatementsUtilization(>80%)
+        expr: max_over_time(mysql_global_status_prepared_stmt_count[1m]) / mysql_global_variables_max_prepared_stmt_count * 100 > 80
+        for: 2m
+        labels:
+          severity: warning
+        annotations:
+          summary: MySQL instance {{ $labels.instance }} is using > 80% of `max_prepared_stmt_count`.
+          description: |
+            Too many prepared statements might consume a lot of memory.
+            LABELS = {{ $labels }}.
 
       # 2.1.8
       # customized: warning -> info
@@ -52,8 +59,10 @@ groups:
         labels:
           severity: info
         annotations:
-          summary: MySQL slow queries (instance {{ $labels.instance }})
-          description: "MySQL server mysql has some new slow query.\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+          summary: MySQL instance {{ $labels.instance }} has a slow query.
+          description: |
+            Consider optimizing the query by reviewing its execution plan, then rewriting the query and adding any relevant indexes.
+            LABELS = {{ $labels }}.
 
       # 2.1.9
       - alert: MySQLInnoDBLogWaits
@@ -62,8 +71,11 @@ groups:
         labels:
           severity: warning
         annotations:
-          summary: MySQL InnoDB log waits (instance {{ $labels.instance }})
-          description: "MySQL innodb log writes stalling\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+          summary: MySQL instance {{ $labels.instance }} has long InnoDB log waits.
+          description: |
+            MySQL InnoDB log writes might be stalling.
+            Check I/O activity on your nodes to find the responsible process or query. Consider using `iotop` and the `performance_schema`.
+            LABELS = {{ $labels }}.
 
       # 2.1.10
       - alert: MySQLRestarted
@@ -72,5 +84,8 @@ groups:
         labels:
           severity: info
         annotations:
-          summary: MySQL restarted (instance {{ $labels.instance }})
-          description: "MySQL has just been restarted, less than one minute ago on {{ $labels.instance }}.\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+          summary: MySQL instance {{ $labels.instance }} restarted.
+          description: |
+            MySQL restarted less than one minute ago.
+            If the restart was unplanned or frequent, check Loki logs (e.g. `error.log`).
+            LABELS = {{ $labels }}.
diff --git a/src/upgrade.py b/src/upgrade.py
index 287e8ea46..9ccddb860 100644
--- a/src/upgrade.py
+++ b/src/upgrade.py
@@ -28,13 +28,11 @@
 from ops.model import BlockedStatus, MaintenanceStatus, RelationDataContent
 from ops.pebble import ChangeError
 from pydantic import BaseModel
-from tenacity import RetryError, Retrying
-from tenacity.stop import stop_after_attempt
-from tenacity.wait import wait_fixed
+from tenacity import RetryError
 from typing_extensions import override
 
 import k8s_helpers
-from constants import CONTAINER_NAME, MYSQLD_SAFE_SERVICE
+from constants import CONTAINER_NAME, MYSQLD_SERVICE
 
 if TYPE_CHECKING:
     from charm import MySQLOperatorCharm
@@ -42,9 +40,6 @@
 logger = logging.getLogger(__name__)
 
 
-RECOVER_ATTEMPTS = 10
-
-
 class MySQLK8sDependenciesModel(BaseModel):
     """MySQL dependencies model."""
 
@@ -225,18 +220,15 @@ def _on_pebble_ready(self, event) -> None:
         self.charm._write_mysqld_configuration()
 
         logger.info("Setting up the logrotate configurations")
-        self.charm._mysql.setup_logrotate_config()
+        self.charm.log_rotate_setup.setup()
 
         try:
             self.charm._reconcile_pebble_layer(container)
             self._check_server_upgradeability()
             self.charm.unit.status = MaintenanceStatus("recovering unit after upgrade")
-            if self.charm.app.planned_units() > 1:
-                self._recover_multi_unit_cluster()
-            else:
-                self._recover_single_unit_cluster()
+            self.charm.recover_unit_after_restart()
             if self.charm.config.plugin_audit_enabled:
-                self.charm._mysql.install_plugins(["audit_log", "audit_log_filter"])
+                self.charm._mysql.install_plugins(["audit_log"])
             self._complete_upgrade()
         except MySQLRebootFromCompleteOutageError:
             logger.error("Failed to reboot single unit from outage after upgrade")
@@ -269,28 +261,6 @@ def _on_pebble_ready(self, event) -> None:
             self._reset_on_unsupported_downgrade(container)
             self._complete_upgrade()
 
-    def _recover_multi_unit_cluster(self) -> None:
-        logger.debug("Recovering unit")
-        try:
-            for attempt in Retrying(
-                stop=stop_after_attempt(RECOVER_ATTEMPTS), wait=wait_fixed(10)
-            ):
-                with attempt:
-                    self.charm._mysql.hold_if_recovering()
-                    if not self.charm._mysql.is_instance_in_cluster(self.charm.unit_label):
-                        logger.debug(
-                            "Instance not yet back in the cluster."
-                            f" Retry {attempt.retry_state.attempt_number}/{RECOVER_ATTEMPTS}"
-                        )
-                        raise Exception
-        except RetryError:
-            raise
-
-    def _recover_single_unit_cluster(self) -> None:
-        """Recover single unit cluster."""
-        logger.debug("Recovering single unit cluster")
-        self.charm._mysql.reboot_from_complete_outage()
-
     def _complete_upgrade(self):
         # complete upgrade for the unit
         logger.debug("Upgraded unit is healthy. Set upgrade state to `completed`")
@@ -332,7 +302,7 @@ def _check_server_upgradeability(self) -> None:
             return
         instance = getfqdn(self.charm.get_unit_hostname(f"{self.charm.app.name}/0"))
         self.charm._mysql.verify_server_upgradable(instance=instance)
-        logger.debug("MySQL server is upgradeable")
+        logger.info("Check MySQL server upgradeability passed")
 
     def _check_server_unsupported_downgrade(self) -> bool:
         """Check error log for unsupported downgrade.
@@ -346,7 +316,7 @@ def _check_server_unsupported_downgrade(self) -> bool:
 
     def _reset_on_unsupported_downgrade(self, container: Container) -> None:
         """Reset the cluster on unsupported downgrade."""
-        container.stop(MYSQLD_SAFE_SERVICE)
+        container.stop(MYSQLD_SERVICE)
         self.charm._mysql.reset_data_dir()
         self.charm._write_mysqld_configuration()
         self.charm._configure_instance(container)
diff --git a/templates/logrotate.j2 b/templates/logrotate.j2
index 29b92fe31..5104fce1c 100644
--- a/templates/logrotate.j2
+++ b/templates/logrotate.j2
@@ -6,8 +6,15 @@ createolddir 770 {{ system_user }} {{ system_group }}
 
 # Frequency of logs rotation
 hourly
-maxage 7
-rotate 10800
+maxage {{ logs_retention_period }}
+rotate {{ logs_rotations }}
+
+# Compression settings
+{% if logs_compression_enabled %}
+compress
+{% else %}
+nocompress
+{% endif %}
 
 # Naming of rotated files should be in the format:
 dateext
@@ -16,23 +23,13 @@ dateformat -%Y%m%d_%H%M
 # Settings to prevent misconfigurations and unwanted behaviours
 ifempty
 missingok
-nocompress
 nomail
 nosharedscripts
 nocopytruncate
 
-/var/log/mysql/error.log {
-    olddir archive_error
-}
-
-/var/log/mysql/general.log {
-    olddir archive_general
-}
-
-/var/log/mysql/slowquery.log {
-    olddir archive_slowquery
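+# One rotation stanza is rendered per enabled log file; rotated files go to the matching archive_<log> directory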
+{% for log in enabled_log_files %}
+{{ log_dir }}/{{ log }}.log {
+    olddir archive_{{ log }}
 }
+{% endfor %}
 
-/var/log/mysql/audit.log {
-    olddir archive_audit
-}
diff --git a/terraform/main.tf b/terraform/main.tf
new file mode 100644
index 000000000..1879af188
--- /dev/null
+++ b/terraform/main.tf
@@ -0,0 +1,21 @@
+resource "juju_application" "k8s_mysql" {
+  name  = var.app_name
+  model = var.juju_model_name
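+  # trust is required so the charm can manage Kubernetes resources (e.g. the per-cluster endpoints)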
+  trust = true
+
+  charm {
+    name     = "mysql-k8s"
+    channel  = var.channel
+    revision = var.revision
+    base     = var.base
+  }
+
+  storage_directives = {
+    database = var.storage_size
+  }
+
+  units       = var.units
+  constraints = var.constraints
+  config      = var.config
+  resources   = var.resources
+}
diff --git a/terraform/outputs.tf b/terraform/outputs.tf
new file mode 100644
index 000000000..e2ce666aa
--- /dev/null
+++ b/terraform/outputs.tf
@@ -0,0 +1,20 @@
+output "application_name" {
+  value = juju_application.k8s_mysql.name
+}
+
+
+output "provides" {
+  value = {
+    database          = "database",
+    metrics_endpoint  = "metrics-endpoint",
+    grafana_dashboard = "grafana-dashboard"
+  }
+}
+
+output "requires" {
+  value = {
+    logging       = "logging"
+    certificates  = "certificates"
+    s3_parameters = "s3-parameters"
+  }
+}
diff --git a/terraform/variables.tf b/terraform/variables.tf
new file mode 100644
index 000000000..6fdabfe80
--- /dev/null
+++ b/terraform/variables.tf
@@ -0,0 +1,58 @@
+variable "juju_model_name" {
+  description = "Juju model name"
+  type        = string
+}
+
+variable "app_name" {
+  description = "Name of the application in the Juju model."
+  type        = string
+  default     = "mysql-k8s"
+}
+
+variable "channel" {
+  description = "Charm channel to use when deploying"
+  type        = string
+  default     = "8.0/stable"
+}
+
+variable "revision" {
+  description = "Revision number to deploy charm"
+  type        = number
+  default     = null
+}
+
+variable "base" {
+  description = "Application base"
+  type        = string
+  default     = "ubuntu@22.04"
+}
+
+variable "units" {
+  description = "Number of units to deploy"
+  type        = number
+  default     = 1
+}
+
+variable "constraints" {
+  description = "Juju constraints to apply for this application."
+  type        = string
+  default     = "arch=amd64"
+}
+
+variable "storage_size" {
+  description = "Storage size"
+  type        = string
+  default     = "10G"
+}
+
+variable "config" {
+  description = "Application configuration. Details at https://charmhub.io/mysql-k8s/configurations"
+  type        = map(string)
+  default     = {}
+}
+
+variable "resources" {
+  description = "Resources to use with the application"
+  type        = map(string)
+  default     = {}
+}
diff --git a/terraform/versions.tf b/terraform/versions.tf
new file mode 100644
index 000000000..358626157
--- /dev/null
+++ b/terraform/versions.tf
@@ -0,0 +1,9 @@
+terraform {
+  required_version = ">= 1.6.6"
+  required_providers {
+    juju = {
+      source  = "juju/juju"
+      version = ">= 0.14.0"
+    }
+  }
+}
diff --git a/tests/integration/bundle_templates/grafana_agent_integration.j2 b/tests/integration/bundle_templates/grafana_agent_integration.j2
new file mode 100644
index 000000000..08df77d44
--- /dev/null
+++ b/tests/integration/bundle_templates/grafana_agent_integration.j2
@@ -0,0 +1,16 @@
+bundle: kubernetes
+name: testing
+applications:
+  mysql-k8s:
+    charm: {{ mysql_charm_path }}
+    trust: true
+    scale: 1
+    constraints: mem=2G
+    resources:
+      mysql-image: {{ mysql_image_source }}
+  grafana-agent-k8s:
+    charm: grafana-agent-k8s
+    channel: latest/stable
+    scale: 1
+relations:
+  - [mysql-k8s:metrics-endpoint, grafana-agent-k8s:metrics-endpoint]
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index 77c30572a..cadd883b2 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -8,12 +8,20 @@
 
 from constants import SERVER_CONFIG_USERNAME
 
-from . import juju_
+from . import architecture, juju_
 from .high_availability.high_availability_helpers import get_application_name
 
 logger = logging.getLogger(__name__)
 
 
+@pytest.fixture(scope="session")
+def charm():
+    # Return str instead of pathlib.Path since python-libjuju's model.deploy(), juju deploy, and
+    # juju bundle files expect local charms to begin with `./` or `/` to distinguish them from
+    # Charmhub charms.
+    return f"./mysql-k8s_ubuntu@22.04-{architecture.architecture}.charm"
+
+
 @pytest.fixture(scope="function")
 async def credentials(ops_test: OpsTest):
     """Return the credentials for the MySQL cluster."""
diff --git a/tests/integration/connector.py b/tests/integration/connector.py
index 733694eaa..a71c06359 100644
--- a/tests/integration/connector.py
+++ b/tests/integration/connector.py
@@ -37,3 +37,29 @@ def __exit__(self, exc_type, exc_val, exc_tb):
             self.connection.commit()
         self.cursor.close()
         self.connection.close()
+
+
+def create_db_connections(
+    num_connections: int, host: str, username: str, password: str, database: str
+) -> list[mysql.connector.MySQLConnection]:
+    """Create a list of database connections.
+
+    Args:
+        num_connections: Number of connections to create.
+        host: Hostname of the database.
+        username: Username to connect to the database.
+        password: Password to connect to the database.
+        database: Database to connect to.
+    """
+    connections = []
+    for _ in range(num_connections):
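+        # use_pure=True selects the connector's pure-Python implementation (no C extension needed)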
+        conn = mysql.connector.connect(
+            host=host,
+            user=username,
+            password=password,
+            database=database,
+            use_pure=True,
+        )
+        if conn.is_connected():
+            connections.append(conn)
+    return connections
diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py
index 056616056..77558c71c 100644
--- a/tests/integration/helpers.py
+++ b/tests/integration/helpers.py
@@ -23,7 +23,7 @@
 from pytest_operator.plugin import OpsTest
 from tenacity import RetryError, Retrying, retry, stop_after_attempt, wait_fixed
 
-from constants import CONTAINER_NAME, MYSQLD_SAFE_SERVICE, SERVER_CONFIG_USERNAME
+from constants import CONTAINER_NAME, MYSQLD_SERVICE, SERVER_CONFIG_USERNAME
 
 from . import juju_
 from .connector import MySQLConnector
@@ -272,18 +272,36 @@ async def scale_application(
             )
 
 
-def is_relation_joined(ops_test: OpsTest, endpoint_one: str, endpoint_two: str) -> bool:
+def is_relation_joined(
+    ops_test: OpsTest,
+    endpoint_one: str,
+    endpoint_two: str,
+    application_one: Optional[str] = None,
+    application_two: Optional[str] = None,
+) -> bool:
     """Check if a relation is joined.
 
     Args:
         ops_test: The ops test object passed into every test case
         endpoint_one: The first endpoint of the relation
         endpoint_two: The second endpoint of the relation
+        application_one: The name of the first application
+        application_two: The name of the second application
     """
     for rel in ops_test.model.relations:
-        endpoints = [endpoint.name for endpoint in rel.endpoints]
-        if endpoint_one in endpoints and endpoint_two in endpoints:
-            return True
+        if application_one and application_two:
+            endpoints = [
+                f"{endpoint.application_name}:{endpoint.name}" for endpoint in rel.endpoints
+            ]
+            if (
+                f"{application_one}:{endpoint_one}" in endpoints
+                and f"{application_two}:{endpoint_two}" in endpoints
+            ):
+                return True
+        else:
+            endpoints = [endpoint.name for endpoint in rel.endpoints]
+            if endpoint_one in endpoints and endpoint_two in endpoints:
+                return True
     return False
 
 
@@ -346,6 +364,25 @@ def is_connection_possible(credentials: Dict, **extra_opts) -> bool:
         return False
 
 
+async def get_model_logs(ops_test: OpsTest, log_level: str, log_lines: int = 100) -> str:
+    """Return the juju logs from a specific model.
+
+    Args:
+        ops_test: The ops test object passed into every test case
+        log_level: The logging level to return messages from
+        log_lines: The maximum lines to return at once
+    """
+    _, output, _ = await ops_test.juju(
+        "debug-log",
+        f"--model={ops_test.model.info.name}",
+        f"--level={log_level}",
+        f"--limit={log_lines}",
+        "--no-tail",
+    )
+
+    return output
+
+
 async def get_process_pid(
     ops_test: OpsTest, unit_name: str, container_name: str, process: str, full_match: bool = False
 ) -> Optional[int]:
@@ -442,7 +479,7 @@ async def stop_mysqld_service(ops_test: OpsTest, unit_name: str) -> None:
         unit_name: The name of the unit
     """
     await ops_test.juju(
-        "ssh", "--container", CONTAINER_NAME, unit_name, "pebble", "stop", MYSQLD_SAFE_SERVICE
+        "ssh", "--container", CONTAINER_NAME, unit_name, "pebble", "stop", MYSQLD_SERVICE
     )
 
 
@@ -454,7 +491,7 @@ async def start_mysqld_service(ops_test: OpsTest, unit_name: str) -> None:
         unit_name: The name of the unit
     """
     await ops_test.juju(
-        "ssh", "--container", CONTAINER_NAME, unit_name, "pebble", "start", MYSQLD_SAFE_SERVICE
+        "ssh", "--container", CONTAINER_NAME, unit_name, "pebble", "start", MYSQLD_SERVICE
     )
 
 
@@ -581,7 +618,7 @@ async def write_content_to_file_in_unit(
         )
 
 
-async def read_contents_from_file_in_unit(
+def read_contents_from_file_in_unit(
     ops_test: OpsTest, unit: Unit, path: str, container_name: str = CONTAINER_NAME
 ) -> str:
     """Read contents from file in the provided unit.
@@ -669,7 +706,7 @@ async def stop_running_log_rotate_dispatcher(ops_test: OpsTest, unit_name: str):
         "pkill",
         "-9",
         "-f",
-        "/usr/bin/python3 scripts/log_rotate_dispatcher.py",
+        "log_rotate_dispatcher.py",
     )
 
     # hold execution until process is stopped
diff --git a/tests/integration/high_availability/conftest.py b/tests/integration/high_availability/conftest.py
index 59c91fd5c..ccf145768 100644
--- a/tests/integration/high_availability/conftest.py
+++ b/tests/integration/high_availability/conftest.py
@@ -3,8 +3,6 @@
 # See LICENSE file for licensing details.
 
 import logging
-import os
-import pathlib
 
 import pytest
 from pytest_operator.plugin import OpsTest
@@ -48,25 +46,16 @@ def chaos_mesh(ops_test: OpsTest) -> None:
     destroy_chaos_mesh(ops_test.model.info.name)
 
 
-@pytest.fixture()
-def built_charm(ops_test: OpsTest) -> pathlib.Path:
-    """Return the path of a previously built charm."""
-    if os.environ.get("CI") == "true":
-        return
-    charms_dst_dir = ops_test.tmp_path / "charms"
-    packed_charm = list(charms_dst_dir.glob("*.charm"))
-    return packed_charm[0].resolve(strict=True)
-
-
 @pytest.fixture(scope="module")
-async def highly_available_cluster(ops_test: OpsTest):
+async def highly_available_cluster(ops_test: OpsTest, charm):
     """Run the set up for high availability tests.
 
     Args:
         ops_test: The ops test framework
+        charm: `charm` fixture
     """
     logger.info("Deploying mysql-k8s and scaling to 3 units")
-    mysql_application_name = await deploy_and_scale_mysql(ops_test)
+    mysql_application_name = await deploy_and_scale_mysql(ops_test, charm)
 
     logger.info("Deploying mysql-test-app")
     application_name = await deploy_and_scale_application(ops_test)
diff --git a/tests/integration/high_availability/high_availability_helpers.py b/tests/integration/high_availability/high_availability_helpers.py
index e527682d6..d7f45d6ea 100644
--- a/tests/integration/high_availability/high_availability_helpers.py
+++ b/tests/integration/high_availability/high_availability_helpers.py
@@ -17,7 +17,7 @@
 from lightkube import Client
 from lightkube.models.meta_v1 import ObjectMeta
 from lightkube.resources.apps_v1 import StatefulSet
-from lightkube.resources.core_v1 import PersistentVolume, PersistentVolumeClaim, Pod
+from lightkube.resources.core_v1 import Endpoints, PersistentVolume, PersistentVolumeClaim, Pod
 from pytest_operator.plugin import OpsTest
 from tenacity import RetryError, Retrying, retry, stop_after_attempt, stop_after_delay, wait_fixed
 
@@ -41,8 +41,6 @@
 APPLICATION_DEFAULT_APP_NAME = "mysql-test-app"
 TIMEOUT = 15 * 60
 
-mysql_charm, application_charm = None, None
-
 logger = logging.getLogger(__name__)
 
 
@@ -118,20 +116,24 @@ async def ensure_n_online_mysql_members(
 
 async def deploy_and_scale_mysql(
     ops_test: OpsTest,
+    charm,
     check_for_existing_application: bool = True,
     mysql_application_name: str = MYSQL_DEFAULT_APP_NAME,
     num_units: int = 3,
     model: Optional[Model] = None,
+    cluster_name: str = CLUSTER_NAME,
 ) -> str:
     """Deploys and scales the mysql application charm.
 
     Args:
         ops_test: The ops test framework
+        charm: `charm` fixture
         check_for_existing_application: Whether to check for existing mysql applications
             in the model
         mysql_application_name: The name of the mysql application if it is to be deployed
         num_units: The number of units to deploy
         model: The model to deploy the mysql application to
+        cluster_name: The name of the mysql cluster
     """
     application_name = get_application_name(ops_test, "mysql")
     if not model:
@@ -144,18 +146,12 @@ async def deploy_and_scale_mysql(
 
         return application_name
 
-    global mysql_charm
-    if not mysql_charm:
-        charm = await ops_test.build_charm(".")
-        # Cache the built charm to avoid rebuilding it between tests
-        mysql_charm = charm
-
-    config = {"cluster-name": CLUSTER_NAME, "profile": "testing"}
+    config = {"cluster-name": cluster_name, "profile": "testing"}
     resources = {"mysql-image": METADATA["resources"]["mysql-image"]["upstream-source"]}
 
     async with ops_test.fast_forward("60s"):
         await ops_test.model.deploy(
-            mysql_charm,
+            charm,
             application_name=mysql_application_name,
             config=config,
             resources=resources,
@@ -177,15 +173,21 @@ async def deploy_and_scale_mysql(
     return mysql_application_name
 
 
-async def deploy_and_scale_application(ops_test: OpsTest) -> str:
+async def deploy_and_scale_application(
+    ops_test: OpsTest,
+    check_for_existing_application: bool = True,
+    test_application_name: str = APPLICATION_DEFAULT_APP_NAME,
+) -> str:
     """Deploys and scales the test application charm.
 
     Args:
         ops_test: The ops test framework
+        check_for_existing_application: Whether to check for existing test applications
+        test_application_name: Name of test application to be deployed
     """
-    application_name = get_application_name(ops_test, APPLICATION_DEFAULT_APP_NAME)
+    application_name = get_application_name(ops_test, test_application_name)
 
-    if application_name:
+    if check_for_existing_application and application_name:
         if len(ops_test.model.applications[application_name].units) != 1:
             async with ops_test.fast_forward("60s"):
                 await scale_application(ops_test, application_name, 1)
@@ -195,22 +197,23 @@ async def deploy_and_scale_application(ops_test: OpsTest) -> str:
     async with ops_test.fast_forward("60s"):
         await ops_test.model.deploy(
             APPLICATION_DEFAULT_APP_NAME,
-            application_name=APPLICATION_DEFAULT_APP_NAME,
+            application_name=test_application_name,
             num_units=1,
             channel="latest/edge",
             base="ubuntu@22.04",
+            config={"sleep_interval": 300},
         )
 
         await ops_test.model.wait_for_idle(
-            apps=[APPLICATION_DEFAULT_APP_NAME],
+            apps=[test_application_name],
             status="waiting",
             raise_on_blocked=True,
             timeout=TIMEOUT,
         )
 
-        assert len(ops_test.model.applications[APPLICATION_DEFAULT_APP_NAME].units) == 1
+        assert len(ops_test.model.applications[test_application_name].units) == 1
 
-    return APPLICATION_DEFAULT_APP_NAME
+    return test_application_name
 
 
 async def relate_mysql_and_application(
@@ -223,13 +226,27 @@ async def relate_mysql_and_application(
         mysql_application_name: The mysql charm application name
         application_name: The continuous writes test charm application name
     """
-    if is_relation_joined(ops_test, "database", "database"):
+    if is_relation_joined(
+        ops_test,
+        "database",
+        "database",
+        application_one=mysql_application_name,
+        application_two=application_name,
+    ):
         return
 
     await ops_test.model.relate(
         f"{application_name}:database", f"{mysql_application_name}:database"
     )
-    await ops_test.model.block_until(lambda: is_relation_joined(ops_test, "database", "database"))
+    await ops_test.model.block_until(
+        lambda: is_relation_joined(
+            ops_test,
+            "database",
+            "database",
+            application_one=mysql_application_name,
+            application_two=application_name,
+        )
+    )
 
     await ops_test.model.wait_for_idle(
         apps=[mysql_application_name, application_name],
@@ -473,10 +490,9 @@ async def ensure_all_units_continuous_writes_incrementing(
     )
 
     async with ops_test.fast_forward(fast_interval="15s"):
-        for attempt in Retrying(stop=stop_after_delay(15 * 60), wait=wait_fixed(10)):
-            with attempt:
-                # ensure that all units are up to date (including the previous primary)
-                for unit in mysql_units:
+        for unit in mysql_units:
+            for attempt in Retrying(stop=stop_after_delay(15 * 60), wait=wait_fixed(10)):
+                with attempt:
                     # ensure the max written value is incrementing (continuous writes is active)
                     max_written_value = await get_max_written_value_in_database(
                         ops_test, unit, credentials
@@ -653,3 +669,31 @@ def delete_pvcs(pvcs: list[PersistentVolumeClaim]) -> None:
             namespace=pvc.metadata.namespace,
             grace_period=0,
         )
+
+
+def delete_pod(ops_test: OpsTest, unit: Unit) -> None:
+    """Delete the provided pod."""
+    pod_name = unit.name.replace("/", "-")
+    subprocess.run(
+        [
+            "microk8s.kubectl",
+            "-n",
+            ops_test.model.info.name,
+            "delete",
+            "pod",
+            pod_name,
+        ],
+        check=True,
+    )
+
+
+def get_endpoint_addresses(ops_test: OpsTest, endpoint_name: str) -> list[str]:
+    """Retrieve the addresses selected by a K8s endpoint."""
+    client = lightkube.Client()
+    endpoint = client.get(
+        Endpoints,
+        namespace=ops_test.model.info.name,
+        name=endpoint_name,
+    )
+
+    return [address.ip for subset in endpoint.subsets for address in subset.addresses]
diff --git a/tests/integration/high_availability/manifests/extend_pebble_restart_delay.yml b/tests/integration/high_availability/manifests/extend_pebble_restart_delay.yml
index 45ad2ff67..3f50c1fc3 100644
--- a/tests/integration/high_availability/manifests/extend_pebble_restart_delay.yml
+++ b/tests/integration/high_availability/manifests/extend_pebble_restart_delay.yml
@@ -1,5 +1,5 @@
 services:
-  mysqld_safe:
+  mysqld:
     override: merge
     backoff-delay: 300s
     backoff-limit: 300s
diff --git a/tests/integration/high_availability/manifests/reduce_pebble_restart_delay.yml b/tests/integration/high_availability/manifests/reduce_pebble_restart_delay.yml
index 80ad31b08..97398c7a6 100644
--- a/tests/integration/high_availability/manifests/reduce_pebble_restart_delay.yml
+++ b/tests/integration/high_availability/manifests/reduce_pebble_restart_delay.yml
@@ -1,5 +1,5 @@
 services:
-  mysqld_safe:
+  mysqld:
     override: merge
     backoff-delay: 500ms
     backoff-limit: 30s
diff --git a/tests/integration/high_availability/scripts/deploy_chaos_mesh.sh b/tests/integration/high_availability/scripts/deploy_chaos_mesh.sh
index f7ecfed08..a015d1fea 100755
--- a/tests/integration/high_availability/scripts/deploy_chaos_mesh.sh
+++ b/tests/integration/high_availability/scripts/deploy_chaos_mesh.sh
@@ -12,18 +12,17 @@ fi
 
 deploy_chaos_mesh() {
 	echo "adding chaos-mesh helm repo"
-	sg snap_microk8s -c "microk8s.helm3 repo add chaos-mesh https://charts.chaos-mesh.org"
+	microk8s.helm3 repo add chaos-mesh https://charts.chaos-mesh.org
 	
 	echo "installing chaos-mesh"
-        sg snap_microk8s -c "microk8s.helm3 install chaos-mesh chaos-mesh/chaos-mesh \
-          --namespace=\"${chaos_mesh_ns}\" \
+        microk8s.helm3 install chaos-mesh chaos-mesh/chaos-mesh \
+          --namespace="${chaos_mesh_ns}" \
           --set chaosDaemon.runtime=containerd \
           --set chaosDaemon.socketPath=/var/snap/microk8s/common/run/containerd.sock \
           --set dashboard.create=false \
-          --version \"${chaos_mesh_version}\" \
+          --version "${chaos_mesh_version}" \
           --set clusterScoped=false \
-          --set controllerManager.targetNamespace=\"${chaos_mesh_ns}\" \
-          "
+          --set controllerManager.targetNamespace="${chaos_mesh_ns}"
 	sleep 10
 }
 
diff --git a/tests/integration/high_availability/scripts/destroy_chaos_mesh.sh b/tests/integration/high_availability/scripts/destroy_chaos_mesh.sh
index 4efd2816a..6b414ffc2 100755
--- a/tests/integration/high_availability/scripts/destroy_chaos_mesh.sh
+++ b/tests/integration/high_availability/scripts/destroy_chaos_mesh.sh
@@ -45,9 +45,9 @@ destroy_chaos_mesh() {
 		timeout 30 microk8s.kubectl delete crd "${args[@]}" || true
 	fi
 
-	if [ -n "${chaos_mesh_ns}" ] && sg snap_microk8s -c "microk8s.helm3 repo list --namespace=${chaos_mesh_ns}" | grep -q 'chaos-mesh'; then
+	if [ -n "${chaos_mesh_ns}" ] && microk8s.helm3 repo list --namespace="${chaos_mesh_ns}" | grep -q 'chaos-mesh'; then
 		echo "uninstalling chaos-mesh helm repo"
-		sg snap_microk8s -c "microk8s.helm3 uninstall chaos-mesh --namespace=\"${chaos_mesh_ns}\"" || true
+		microk8s.helm3 uninstall chaos-mesh --namespace="${chaos_mesh_ns}" || true
 	fi
 }
 
diff --git a/tests/integration/high_availability/test_async_replication.py b/tests/integration/high_availability/test_async_replication.py
index 0a0c29b91..03e90d49c 100644
--- a/tests/integration/high_availability/test_async_replication.py
+++ b/tests/integration/high_availability/test_async_replication.py
@@ -68,17 +68,12 @@ async def second_model(ops_test: OpsTest, first_model, request) -> Model:  # pyr
     await ops_test._controller.destroy_model(second_model_name, destroy_storage=True)
 
 
-@pytest.mark.group(1)
 @markers.juju3
-@markers.amd64_only  # TODO: remove after mysql-router-k8s arm64 stable release
 @pytest.mark.abort_on_fail
 async def test_build_and_deploy(
-    ops_test: OpsTest, first_model: Model, second_model: Model
+    ops_test: OpsTest, charm, first_model: Model, second_model: Model
 ) -> None:
     """Simple test to ensure that the mysql and application charms get deployed."""
-    logger.info("Build mysql charm")
-    charm = await ops_test.build_charm(".")
-
     config = {"cluster-name": "lima", "profile": "testing"}
     resources = {"mysql-image": METADATA["resources"]["mysql-image"]["upstream-source"]}
 
@@ -120,17 +115,19 @@ async def test_build_and_deploy(
     )
 
 
-@pytest.mark.group(1)
 @markers.juju3
-@markers.amd64_only  # TODO: remove after mysql-router-k8s arm64 stable release
 @pytest.mark.abort_on_fail
-async def test_async_relate(first_model: Model, second_model: Model) -> None:
+async def test_async_relate(ops_test: OpsTest, first_model: Model, second_model: Model) -> None:
     """Relate the two mysql clusters."""
     logger.info("Creating offers in first model")
-    await first_model.create_offer(f"{MYSQL_APP1}:replication-offer")
+    offer_command = f"offer {MYSQL_APP1}:replication-offer"
+    await ops_test.juju(*offer_command.split())
 
     logger.info("Consume offer in second model")
-    await second_model.consume(endpoint=f"admin/{first_model.info.name}.{MYSQL_APP1}")
+    consume_command = (
+        f"consume -m {second_model.info.name} admin/{first_model.info.name}.{MYSQL_APP1}"
+    )
+    await ops_test.juju(*consume_command.split())
 
     logger.info("Relating the two mysql clusters")
     await second_model.integrate(f"{MYSQL_APP1}", f"{MYSQL_APP2}:replication")
@@ -154,40 +151,7 @@ async def test_async_relate(first_model: Model, second_model: Model) -> None:
     )
 
 
-@pytest.mark.group(1)
 @markers.juju3
-@markers.amd64_only  # TODO: remove after mysql-router-k8s arm64 stable release
-@pytest.mark.abort_on_fail
-async def test_create_replication(first_model: Model, second_model: Model) -> None:
-    """Run the create replication and wait for the applications to settle."""
-    logger.info("Running create replication action")
-    leader_unit = await get_leader_unit(None, MYSQL_APP1, first_model)
-    assert leader_unit is not None, "No leader unit found"
-
-    await juju_.run_action(
-        leader_unit,
-        "create-replication",
-        **{"--wait": "5m"},
-    )
-
-    logger.info("Waiting for the applications to settle")
-    await gather(
-        first_model.wait_for_idle(
-            apps=[MYSQL_APP1],
-            status="active",
-            timeout=5 * MINUTE,
-        ),
-        second_model.wait_for_idle(
-            apps=[MYSQL_APP2],
-            status="active",
-            timeout=5 * MINUTE,
-        ),
-    )
-
-
-@pytest.mark.group(1)
-@markers.juju3
-@markers.amd64_only  # TODO: remove after mysql-router-k8s arm64 stable release
 @pytest.mark.abort_on_fail
 async def test_deploy_router_and_app(first_model: Model) -> None:
     """Deploy the router and the test application."""
@@ -223,9 +187,36 @@ async def test_deploy_router_and_app(first_model: Model) -> None:
     )
 
 
-@pytest.mark.group(1)
 @markers.juju3
-@markers.amd64_only  # TODO: remove after mysql-router-k8s arm64 stable release
+@pytest.mark.abort_on_fail
+async def test_create_replication(first_model: Model, second_model: Model) -> None:
+    """Run the create replication and wait for the applications to settle."""
+    logger.info("Running create replication action")
+    leader_unit = await get_leader_unit(None, MYSQL_APP1, first_model)
+    assert leader_unit is not None, "No leader unit found"
+
+    await juju_.run_action(
+        leader_unit,
+        "create-replication",
+        **{"--wait": "5m"},
+    )
+
+    logger.info("Waiting for the applications to settle")
+    await gather(
+        first_model.wait_for_idle(
+            apps=[MYSQL_APP1],
+            status="active",
+            timeout=5 * MINUTE,
+        ),
+        second_model.wait_for_idle(
+            apps=[MYSQL_APP2],
+            status="active",
+            timeout=5 * MINUTE,
+        ),
+    )
+
+
+@markers.juju3
 @pytest.mark.abort_on_fail
 async def test_data_replication(
     first_model: Model, second_model: Model, continuous_writes
@@ -237,9 +228,7 @@ async def test_data_replication(
     assert results[0] > 1, "No data was written to the database"
 
 
-@pytest.mark.group(1)
 @markers.juju3
-@markers.amd64_only  # TODO: remove after mysql-router-k8s arm64 stable release
 @pytest.mark.abort_on_fail
 async def test_standby_promotion(
     ops_test: OpsTest, first_model: Model, second_model: Model, continuous_writes
@@ -266,9 +255,7 @@ async def test_standby_promotion(
     ), "standby not promoted to primary"
 
 
-@pytest.mark.group(1)
 @markers.juju3
-@markers.amd64_only  # TODO: remove after mysql-router-k8s arm64 stable release
 @pytest.mark.abort_on_fail
 async def test_failover(ops_test: OpsTest, first_model: Model, second_model: Model) -> None:
     """Test switchover on primary cluster fail."""
@@ -305,9 +292,7 @@ async def test_failover(ops_test: OpsTest, first_model: Model, second_model: Mod
         )
 
 
-@pytest.mark.group(1)
 @markers.juju3
-@markers.amd64_only  # TODO: remove after mysql-router-k8s arm64 stable release
 @pytest.mark.abort_on_fail
 async def test_rejoin_invalidated_cluster(
     first_model: Model, second_model: Model, continuous_writes
@@ -326,22 +311,12 @@ async def test_rejoin_invalidated_cluster(
     assert results[0] > 1, "No data was written to the database"
 
 
-@pytest.mark.group(1)
 @markers.juju3
-@markers.amd64_only  # TODO: remove after mysql-router-k8s arm64 stable release
 @pytest.mark.abort_on_fail
 async def test_remove_relation_and_relate(
     first_model: Model, second_model: Model, continuous_writes
 ) -> None:
     """Test removing and re-relating the two mysql clusters."""
-    logger.info("Stopping continuous writes after 5s")
-    # part 1/2 of workaround for https://github.com/canonical/mysql-k8s-operator/issues/399
-    # sleep is need to ensure there is enough time for the `continuous_writes` database be
-    # created/populated (by the fixture) before stopping the continuous writes
-    sleep(5)
-    application_unit = first_model.applications[APPLICATION_APP_NAME].units[0]
-    await juju_.run_action(application_unit, "stop-continuous-writes")
-
     logger.info("Remove async relation")
     await second_model.applications[MYSQL_APP2].remove_relation(
         f"{MYSQL_APP2}:replication", MYSQL_APP1
@@ -404,9 +379,6 @@ async def test_remove_relation_and_relate(
         ),
     )
 
-    # part 2/2 of workaround for https://github.com/canonical/mysql-k8s-operator/issues/399
-    await juju_.run_action(application_unit, "start-continuous-writes")
-
     results = await get_max_written_value(first_model, second_model)
     assert len(results) == 6, f"Expected 6 results, got {len(results)}"
     assert all(x == results[0] for x in results), "Data is not consistent across units"
diff --git a/tests/integration/high_availability/test_crash_during_setup.py b/tests/integration/high_availability/test_crash_during_setup.py
new file mode 100644
index 000000000..461bd5949
--- /dev/null
+++ b/tests/integration/high_availability/test_crash_during_setup.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+from pathlib import Path
+
+import pytest
+import yaml
+
+from .high_availability_helpers import CLUSTER_NAME, delete_pod, scale_application
+
+logger = logging.getLogger(__name__)
+
+METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
+APP_NAME = METADATA["name"]
+TIMEOUT = 15 * 60
+
+
+@pytest.mark.abort_on_fail
+async def test_crash_during_cluster_setup(ops_test, charm) -> None:
+    """Test primary crash during startup.
+
+    The cluster must recover and complete its setup after the primary goes offline.
+    """
+    config = {"cluster-name": CLUSTER_NAME, "profile": "testing"}
+    resources = {"mysql-image": METADATA["resources"]["mysql-image"]["upstream-source"]}
+
+    logger.info("Deploying 1 units of mysql-k8s")
+    mysql_application = await ops_test.model.deploy(
+        charm,
+        application_name=APP_NAME,
+        config=config,
+        resources=resources,
+        num_units=1,
+        base="ubuntu@22.04",
+        trust=True,
+    )
+
+    logger.info("Waiting for single unit to be ready")
+    await ops_test.model.block_until(lambda: mysql_application.status == "active", timeout=TIMEOUT)
+
+    # leader unit is the 1st unit
+    leader_unit = mysql_application.units[0]
+
+    logger.info("Scale to 3 units")
+    await scale_application(ops_test, APP_NAME, 3, False)
+
+    logger.info("Waiting until application enters waiting status")
+    await ops_test.model.block_until(
+        lambda: mysql_application.status == "waiting", timeout=TIMEOUT
+    )
+
+    logger.info("Deleting pod")
+    delete_pod(ops_test, leader_unit)
+
+    async with ops_test.fast_forward("60s"):
+        logger.info("Waiting until cluster is fully active")
+        await ops_test.model.wait_for_idle(
+            apps=[APP_NAME],
+            status="active",
+            raise_on_blocked=False,
+            timeout=TIMEOUT,
+            wait_for_exact_units=3,
+        )
diff --git a/tests/integration/high_availability/test_k8s_endpoints.py b/tests/integration/high_availability/test_k8s_endpoints.py
new file mode 100644
index 000000000..a98026c5b
--- /dev/null
+++ b/tests/integration/high_availability/test_k8s_endpoints.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+
+import pytest
+from pytest_operator.plugin import OpsTest
+
+from ..helpers import get_unit_address
+from .high_availability_helpers import (
+    deploy_and_scale_application,
+    deploy_and_scale_mysql,
+    get_endpoint_addresses,
+    relate_mysql_and_application,
+)
+
+logger = logging.getLogger(__name__)
+
+MYSQL_CLUSTER_ONE = "mysql1"
+MYSQL_CLUSTER_TWO = "mysql2"
+MYSQL_CLUSTER_NAME = "test_cluster"
+TEST_APP_ONE = "mysql-test-app1"
+TEST_APP_TWO = "mysql-test-app2"
+
+
+@pytest.mark.abort_on_fail
+async def test_labeling_of_k8s_endpoints(ops_test: OpsTest, charm):
+    """Test the labeling of k8s endpoints when apps with same cluster-name deployed."""
+    logger.info("Deploying first mysql cluster")
+    mysql_cluster_one = await deploy_and_scale_mysql(
+        ops_test,
+        charm,
+        check_for_existing_application=False,
+        mysql_application_name=MYSQL_CLUSTER_ONE,
+        cluster_name=MYSQL_CLUSTER_NAME,
+    )
+
+    logger.info("Deploying and relating test app with cluster")
+    await deploy_and_scale_application(
+        ops_test,
+        check_for_existing_application=False,
+        test_application_name=TEST_APP_ONE,
+    )
+
+    await relate_mysql_and_application(
+        ops_test,
+        mysql_application_name=MYSQL_CLUSTER_ONE,
+        application_name=TEST_APP_ONE,
+    )
+
+    logger.info("Deploying second mysql application with same cluster name")
+    mysql_cluster_two = await deploy_and_scale_mysql(
+        ops_test,
+        charm,
+        check_for_existing_application=False,
+        mysql_application_name=MYSQL_CLUSTER_TWO,
+        cluster_name=MYSQL_CLUSTER_NAME,
+    )
+
+    logger.info("Deploying and relating another test app with second cluster")
+    await deploy_and_scale_application(
+        ops_test,
+        check_for_existing_application=False,
+        test_application_name=TEST_APP_TWO,
+    )
+
+    await relate_mysql_and_application(
+        ops_test,
+        mysql_application_name=MYSQL_CLUSTER_TWO,
+        application_name=TEST_APP_TWO,
+    )
+
+    logger.info("Ensuring that the created k8s endpoints have correct addresses")
+    cluster_one_ips = [
+        await get_unit_address(ops_test, unit.name)
+        for unit in ops_test.model.applications[mysql_cluster_one].units
+    ]
+
+    cluster_one_primary_addresses = get_endpoint_addresses(
+        ops_test, f"{mysql_cluster_one}-primary"
+    )
+    cluster_one_replica_addresses = get_endpoint_addresses(
+        ops_test, f"{mysql_cluster_one}-replicas"
+    )
+
+    for primary in cluster_one_primary_addresses:
+        assert (
+            primary in cluster_one_ips
+        ), f"{primary} (not belonging to cluster 1) should not be in cluster one addresses"
+
+    assert set(cluster_one_primary_addresses + cluster_one_replica_addresses) == set(
+        cluster_one_ips
+    ), "IPs not belonging to cluster one in cluster one addresses"
+
+    cluster_two_ips = [
+        await get_unit_address(ops_test, unit.name)
+        for unit in ops_test.model.applications[mysql_cluster_two].units
+    ]
+
+    cluster_two_primary_addresses = get_endpoint_addresses(
+        ops_test, f"{mysql_cluster_two}-primary"
+    )
+    cluster_two_replica_addresses = get_endpoint_addresses(
+        ops_test, f"{mysql_cluster_two}-replicas"
+    )
+
+    for primary in cluster_two_primary_addresses:
+        assert (
+            primary in cluster_two_ips
+        ), f"{primary} (not belonging to cluster w) should not be in cluster two addresses"
+
+    assert set(cluster_two_primary_addresses + cluster_two_replica_addresses) == set(
+        cluster_two_ips
+    ), "IPs not belonging to cluster two in cluster two addresses"
diff --git a/tests/integration/high_availability/test_log_rotation.py b/tests/integration/high_availability/test_log_rotation.py
index 67bfdc2b0..2a1a43d13 100644
--- a/tests/integration/high_availability/test_log_rotation.py
+++ b/tests/integration/high_availability/test_log_rotation.py
@@ -25,7 +25,6 @@
 APP_NAME = METADATA["name"]
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_log_rotation(
     ops_test: OpsTest, highly_available_cluster, continuous_writes
@@ -36,17 +35,16 @@ async def test_log_rotation(
     when mysql-test-app runs start-continuous-writes (by logging into mysql).
     """
     unit = ops_test.model.applications[APP_NAME].units[0]
+    logger.info(f"Using unit {unit.name}")
 
     logger.info("Extending update-status-hook-interval to 60m")
     await ops_test.model.set_config({"update-status-hook-interval": "60m"})
 
-    # Exclude slowquery log files as slowquery logs are not enabled by default
-    log_types = ["error", "general", "audit"]
-    log_files = ["error.log", "general.log", "audit.log"]
+    # Exclude slow log files as slow logs are not enabled by default
+    log_types = ["error", "audit"]
+    log_files = ["error.log", "audit.log"]
     archive_directories = [
         "archive_error",
-        "archive_general",
-        "archive_slowquery",
         "archive_audit",
     ]
 
@@ -95,33 +93,13 @@ async def test_log_rotation(
     logger.info("Dispatching custom event to rotate logs")
     await dispatch_custom_event_for_logrotate(ops_test, unit.name)
 
-    logger.info("Ensuring log files and archive directories exist")
-    ls_output = await ls_in_unit(ops_test, unit.name, "/var/log/mysql/")
-
-    for file in log_files + archive_directories:
-        # audit.log can be rotated and new file not created until access to db
-        assert (
-            file in ls_output or file == "audit.log"
-        ), f"❌ unexpected files/directories in log directory: {ls_output}"
-
     logger.info("Ensuring log files were rotated")
-    # Exclude checking slowquery log rotation as slowquery logs are disabled by default
+    # Exclude checking slow log rotation as slow logs are disabled by default
     for log in set(log_types):
-        file_contents = await read_contents_from_file_in_unit(
+        file_contents = read_contents_from_file_in_unit(
             ops_test, unit, f"/var/log/mysql/{log}.log"
         )
         assert f"test {log} content" not in file_contents, f"❌ log file {log}.log not rotated"
 
         ls_output = await ls_in_unit(ops_test, unit.name, f"/var/log/mysql/archive_{log}/")
         assert len(ls_output) != 0, f"❌ archive directory is empty: {ls_output}"
-
-        rotated_file_content_exists = False
-        for filename in ls_output:
-            file_contents = await read_contents_from_file_in_unit(
-                ops_test,
-                unit,
-                f"/var/log/mysql/archive_{log}/{filename}",
-            )
-            if f"test {log} content" in file_contents:
-                rotated_file_content_exists = True
-        assert rotated_file_content_exists, f"❌ log file {log}.log not rotated"
diff --git a/tests/integration/high_availability/test_node_drain.py b/tests/integration/high_availability/test_node_drain.py
index 66d4f226f..99ca1e58d 100644
--- a/tests/integration/high_availability/test_node_drain.py
+++ b/tests/integration/high_availability/test_node_drain.py
@@ -27,7 +27,6 @@
 TIMEOUT = 30 * 60
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_pod_eviction_and_pvc_deletion(
     ops_test: OpsTest, highly_available_cluster, continuous_writes, credentials
@@ -55,7 +54,7 @@ async def test_pod_eviction_and_pvc_deletion(
     delete_pvcs(primary_pod_pvcs)
     delete_pvs(primary_pod_pvs)
 
-    async with ops_test.fast_forward():
+    async with ops_test.fast_forward("90s"):
         logger.info("Waiting for evicted primary pod to be rescheduled")
         await ops_test.model.wait_for_idle(
             apps=[mysql_application_name],
diff --git a/tests/integration/high_availability/test_replication.py b/tests/integration/high_availability/test_replication.py
deleted file mode 100644
index fd3d14f78..000000000
--- a/tests/integration/high_availability/test_replication.py
+++ /dev/null
@@ -1,249 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-
-import logging
-import time
-
-import lightkube
-import pytest
-from lightkube.resources.core_v1 import Pod
-from pytest_operator.plugin import OpsTest
-from tenacity import Retrying, stop_after_delay, wait_fixed
-
-from ..helpers import (
-    execute_queries_on_unit,
-    get_primary_unit,
-    get_server_config_credentials,
-    get_unit_address,
-    scale_application,
-)
-from .high_availability_helpers import (
-    clean_up_database_and_table,
-    deploy_and_scale_mysql,
-    ensure_all_units_continuous_writes_incrementing,
-    ensure_n_online_mysql_members,
-    get_application_name,
-    insert_data_into_mysql_and_validate_replication,
-)
-
-logger = logging.getLogger(__name__)
-
-TIMEOUT = 15 * 60
-
-
-@pytest.mark.group(1)
-@pytest.mark.abort_on_fail
-async def test_check_consistency(
-    ops_test: OpsTest, highly_available_cluster, continuous_writes, credentials
-) -> None:
-    """Test to write to primary, and read the same data back from replicas."""
-    mysql_application_name = get_application_name(ops_test, "mysql")
-
-    # assert that there are 3 units in the mysql cluster
-    assert len(ops_test.model.applications[mysql_application_name].units) == 3
-
-    database_name, table_name = "test-check-consistency", "data"
-    await insert_data_into_mysql_and_validate_replication(
-        ops_test, database_name, table_name, credentials
-    )
-    await clean_up_database_and_table(ops_test, database_name, table_name, credentials)
-
-    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
-
-
-@pytest.mark.group(2)
-@pytest.mark.abort_on_fail
-async def test_no_replication_across_clusters(
-    ops_test: OpsTest, highly_available_cluster, continuous_writes, credentials
-) -> None:
-    """Test to ensure that writes to one cluster do not replicate to another cluster."""
-    mysql_application_name = get_application_name(ops_test, "mysql")
-
-    # assert that there are 3 units in the mysql cluster
-    assert len(ops_test.model.applications[mysql_application_name].units) == 3
-
-    # deploy another mysql application cluster with the same 'cluster-name'
-    another_mysql_application_name = "another-mysql"
-    await deploy_and_scale_mysql(
-        ops_test,
-        check_for_existing_application=False,
-        mysql_application_name=another_mysql_application_name,
-        num_units=1,
-    )
-
-    # insert some data into the first/original mysql cluster
-    database_name, table_name = "test-no-replication-across-clusters", "data"
-    await insert_data_into_mysql_and_validate_replication(
-        ops_test, database_name, table_name, credentials
-    )
-
-    # ensure that the inserted data DOES NOT get replicated into the another mysql cluster
-    another_mysql_unit = ops_test.model.applications[another_mysql_application_name].units[0]
-    another_mysql_primary = await get_primary_unit(
-        ops_test, another_mysql_unit, another_mysql_application_name
-    )
-    assert another_mysql_primary
-    another_server_config_credentials = await get_server_config_credentials(another_mysql_primary)
-
-    select_databases_sql = [
-        "SELECT schema_name FROM information_schema.schemata",
-    ]
-
-    for unit in ops_test.model.applications[another_mysql_application_name].units:
-        unit_address = await get_unit_address(ops_test, unit.name)
-
-        output = execute_queries_on_unit(
-            unit_address,
-            another_server_config_credentials["username"],
-            another_server_config_credentials["password"],
-            select_databases_sql,
-        )
-
-        assert len(output) > 0
-        assert "information_schema" in output
-        assert database_name not in output
-
-    # remove another mysql application cluster
-    await scale_application(ops_test, another_mysql_application_name, 0, wait=False)
-    await ops_test.model.remove_application(
-        another_mysql_application_name,
-        block_until_done=False,
-    )
-
-    # clean up inserted data, and created tables + databases
-    await clean_up_database_and_table(ops_test, database_name, table_name, credentials)
-
-
-@pytest.mark.group(3)
-@pytest.mark.abort_on_fail
-async def test_scaling_without_data_loss(
-    ops_test: OpsTest, highly_available_cluster, credentials
-) -> None:
-    """Test to ensure that data is preserved when a unit is scaled up and then down.
-
-    Ensures that there are no running continuous writes as the extra data in the
-    database makes scaling up slower.
-    """
-    mysql_application_name = get_application_name(ops_test, "mysql")
-    assert mysql_application_name, "mysql application not found"
-
-    # assert that there are 3 units in the mysql cluster
-    assert len(ops_test.model.applications[mysql_application_name].units) == 3
-
-    mysql_unit = ops_test.model.applications[mysql_application_name].units[0]
-    primary = await get_primary_unit(ops_test, mysql_unit, mysql_application_name)
-    assert primary, "Primary unit not found"
-
-    # insert a value before scale up, and ensure that the value exists in all units
-    database_name, table_name = "test-preserves-data-on-delete", "data"
-    value_before_scale_up = await insert_data_into_mysql_and_validate_replication(
-        ops_test, database_name, table_name, credentials
-    )
-
-    select_value_before_scale_up_sql = [
-        f"SELECT id FROM `{database_name}`.`{table_name}` WHERE id = '{value_before_scale_up}'",
-    ]
-
-    # scale up the mysql application
-    await scale_application(ops_test, mysql_application_name, 4)
-    assert await ensure_n_online_mysql_members(
-        ops_test, 4
-    ), "The cluster is not fully online after scaling up"
-
-    # ensure value inserted before scale exists in all units
-    for attempt in Retrying(stop=stop_after_delay(10), wait=wait_fixed(2)):
-        with attempt:
-            for unit in ops_test.model.applications[mysql_application_name].units:
-                unit_address = await get_unit_address(ops_test, unit.name)
-
-                output = execute_queries_on_unit(
-                    unit_address,
-                    credentials["username"],
-                    credentials["password"],
-                    select_value_before_scale_up_sql,
-                )
-                assert output[0] == value_before_scale_up
-
-    # insert data after scale up
-    value_after_scale_up = await insert_data_into_mysql_and_validate_replication(
-        ops_test, database_name, table_name, credentials
-    )
-
-    # verify inserted data is present on all units
-    select_value_after_scale_up_sql = [
-        f"SELECT id FROM `{database_name}`.`{table_name}` WHERE id = '{value_after_scale_up}'",
-    ]
-
-    # scale down the mysql application
-    await scale_application(ops_test, mysql_application_name, 3)
-    assert await ensure_n_online_mysql_members(
-        ops_test, 3
-    ), "The cluster is not fully online after scaling down"
-
-    # ensure data written before scale down is persisted
-    for unit in ops_test.model.applications[mysql_application_name].units:
-        unit_address = await get_unit_address(ops_test, unit.name)
-
-        output = execute_queries_on_unit(
-            unit_address,
-            credentials["username"],
-            credentials["password"],
-            select_value_after_scale_up_sql,
-        )
-        assert output[0] == value_after_scale_up
-
-    # clean up inserted data, and created tables + databases
-    await clean_up_database_and_table(ops_test, database_name, table_name, credentials)
-
-
-@pytest.mark.group(4)
-@pytest.mark.abort_on_fail
-async def test_kill_primary_check_reelection(
-    ops_test: OpsTest, highly_available_cluster, continuous_writes, credentials
-) -> None:
-    """Test to kill the primary under load and ensure re-election of primary."""
-    mysql_application_name = get_application_name(ops_test, "mysql")
-    assert mysql_application_name, "mysql application not found"
-
-    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
-
-    mysql_unit = ops_test.model.applications[mysql_application_name].units[0]
-    primary = await get_primary_unit(ops_test, mysql_unit, mysql_application_name)
-    primary_name = primary.name
-
-    # kill the primary pod
-    client = lightkube.Client()
-    client.delete(Pod, primary.name.replace("/", "-"), namespace=ops_test.model.info.name)
-
-    time.sleep(60)
-
-    async with ops_test.fast_forward("60s"):
-        # wait for model to stabilize, k8s will re-create the killed pod
-        await ops_test.model.wait_for_idle(
-            apps=[mysql_application_name],
-            status="active",
-            raise_on_blocked=True,
-            timeout=TIMEOUT,
-            idle_period=30,
-        )
-
-        # ensure a new primary was elected
-        mysql_unit = ops_test.model.applications[mysql_application_name].units[0]
-        new_primary = await get_primary_unit(ops_test, mysql_unit, mysql_application_name)
-        new_primary_name = new_primary.name
-
-        assert primary_name != new_primary_name
-
-        # wait (and retry) until the killed pod is back online in the mysql cluster
-        assert await ensure_n_online_mysql_members(
-            ops_test, 3
-        ), "Old primary has not come back online after being killed"
-
-    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
-
-    database_name, table_name = "test-kill-primary-check-reelection", "data"
-    await insert_data_into_mysql_and_validate_replication(
-        ops_test, database_name, table_name, credentials
-    )
-    await clean_up_database_and_table(ops_test, database_name, table_name, credentials)
diff --git a/tests/integration/high_availability/test_replication_data_consistency.py b/tests/integration/high_availability/test_replication_data_consistency.py
new file mode 100644
index 000000000..99f4c6632
--- /dev/null
+++ b/tests/integration/high_availability/test_replication_data_consistency.py
@@ -0,0 +1,35 @@
+# Copyright 2022 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+
+from pytest_operator.plugin import OpsTest
+
+from .high_availability_helpers import (
+    clean_up_database_and_table,
+    ensure_all_units_continuous_writes_incrementing,
+    get_application_name,
+    insert_data_into_mysql_and_validate_replication,
+)
+
+logger = logging.getLogger(__name__)
+
+TIMEOUT = 15 * 60
+
+
+async def test_check_consistency(
+    ops_test: OpsTest, highly_available_cluster, continuous_writes, credentials
+) -> None:
+    """Test to write to primary, and read the same data back from replicas."""
+    mysql_application_name = get_application_name(ops_test, "mysql")
+
+    # assert that there are 3 units in the mysql cluster
+    assert len(ops_test.model.applications[mysql_application_name].units) == 3
+
+    database_name, table_name = "test-check-consistency", "data"
+    await insert_data_into_mysql_and_validate_replication(
+        ops_test, database_name, table_name, credentials
+    )
+    await clean_up_database_and_table(ops_test, database_name, table_name, credentials)
+
+    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
diff --git a/tests/integration/high_availability/test_replication_data_isolation.py b/tests/integration/high_availability/test_replication_data_isolation.py
new file mode 100644
index 000000000..820753df1
--- /dev/null
+++ b/tests/integration/high_availability/test_replication_data_isolation.py
@@ -0,0 +1,86 @@
+# Copyright 2022 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+
+from pytest_operator.plugin import OpsTest
+
+from ..helpers import (
+    execute_queries_on_unit,
+    get_primary_unit,
+    get_server_config_credentials,
+    get_unit_address,
+    scale_application,
+)
+from .high_availability_helpers import (
+    clean_up_database_and_table,
+    deploy_and_scale_mysql,
+    get_application_name,
+    insert_data_into_mysql_and_validate_replication,
+)
+
+logger = logging.getLogger(__name__)
+
+TIMEOUT = 15 * 60
+
+
+async def test_no_replication_across_clusters(
+    ops_test: OpsTest, charm, highly_available_cluster, continuous_writes, credentials
+) -> None:
+    """Test to ensure that writes to one cluster do not replicate to another cluster."""
+    mysql_application_name = get_application_name(ops_test, "mysql")
+
+    # assert that there are 3 units in the mysql cluster
+    assert len(ops_test.model.applications[mysql_application_name].units) == 3
+
+    # deploy another mysql application cluster with the same 'cluster-name'
+    another_mysql_application_name = "another-mysql"
+    await deploy_and_scale_mysql(
+        ops_test,
+        charm,
+        check_for_existing_application=False,
+        mysql_application_name=another_mysql_application_name,
+        num_units=1,
+    )
+
+    # insert some data into the first/original mysql cluster
+    database_name, table_name = "test-no-replication-across-clusters", "data"
+    await insert_data_into_mysql_and_validate_replication(
+        ops_test, database_name, table_name, credentials
+    )
+
+    # ensure that the inserted data DOES NOT get replicated into the other mysql cluster
+    another_mysql_unit = ops_test.model.applications[another_mysql_application_name].units[0]
+    another_mysql_primary = await get_primary_unit(
+        ops_test, another_mysql_unit, another_mysql_application_name
+    )
+    assert another_mysql_primary
+    another_server_config_credentials = await get_server_config_credentials(another_mysql_primary)
+
+    select_databases_sql = [
+        "SELECT schema_name FROM information_schema.schemata",
+    ]
+
+    for unit in ops_test.model.applications[another_mysql_application_name].units:
+        unit_address = await get_unit_address(ops_test, unit.name)
+
+        output = execute_queries_on_unit(
+            unit_address,
+            another_server_config_credentials["username"],
+            another_server_config_credentials["password"],
+            select_databases_sql,
+        )
+
+        assert len(output) > 0
+        assert "information_schema" in output
+        assert database_name not in output
+
+    # remove another mysql application cluster
+    await scale_application(ops_test, another_mysql_application_name, 0, wait=False)
+    await ops_test.model.remove_application(
+        another_mysql_application_name,
+        block_until_done=False,
+    )
+
+    # clean up inserted data, and created tables + databases
+    await clean_up_database_and_table(ops_test, database_name, table_name, credentials)
diff --git a/tests/integration/high_availability/test_replication_reelection.py b/tests/integration/high_availability/test_replication_reelection.py
new file mode 100644
index 000000000..53d194071
--- /dev/null
+++ b/tests/integration/high_availability/test_replication_reelection.py
@@ -0,0 +1,74 @@
+# Copyright 2022 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+import time
+
+import lightkube
+from lightkube.resources.core_v1 import Pod
+from pytest_operator.plugin import OpsTest
+
+from ..helpers import (
+    get_primary_unit,
+)
+from .high_availability_helpers import (
+    clean_up_database_and_table,
+    ensure_all_units_continuous_writes_incrementing,
+    ensure_n_online_mysql_members,
+    get_application_name,
+    insert_data_into_mysql_and_validate_replication,
+)
+
+logger = logging.getLogger(__name__)
+
+TIMEOUT = 15 * 60
+
+
+async def test_kill_primary_check_reelection(
+    ops_test: OpsTest, highly_available_cluster, continuous_writes, credentials
+) -> None:
+    """Test to kill the primary under load and ensure re-election of primary."""
+    mysql_application_name = get_application_name(ops_test, "mysql")
+    assert mysql_application_name, "mysql application not found"
+
+    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
+
+    mysql_unit = ops_test.model.applications[mysql_application_name].units[0]
+    primary = await get_primary_unit(ops_test, mysql_unit, mysql_application_name)
+    primary_name = primary.name
+
+    # kill the primary pod
+    client = lightkube.Client()
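+    # the k8s pod name is the juju unit name with "/" replaced by "-"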
+    client.delete(Pod, primary.name.replace("/", "-"), namespace=ops_test.model.info.name)
+
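+    # give k8s time to delete the pod and the cluster time to detect the missing member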
+    time.sleep(60)
+
+    async with ops_test.fast_forward("60s"):
+        # wait for model to stabilize, k8s will re-create the killed pod
+        await ops_test.model.wait_for_idle(
+            apps=[mysql_application_name],
+            status="active",
+            raise_on_blocked=True,
+            timeout=TIMEOUT,
+            idle_period=30,
+        )
+
+        # ensure a new primary was elected
+        mysql_unit = ops_test.model.applications[mysql_application_name].units[0]
+        new_primary = await get_primary_unit(ops_test, mysql_unit, mysql_application_name)
+        new_primary_name = new_primary.name
+
+        assert primary_name != new_primary_name
+
+        # wait (and retry) until the killed pod is back online in the mysql cluster
+        assert await ensure_n_online_mysql_members(
+            ops_test, 3
+        ), "Old primary has not come back online after being killed"
+
+    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
+
+    database_name, table_name = "test-kill-primary-check-reelection", "data"
+    await insert_data_into_mysql_and_validate_replication(
+        ops_test, database_name, table_name, credentials
+    )
+    await clean_up_database_and_table(ops_test, database_name, table_name, credentials)
diff --git a/tests/integration/high_availability/test_replication_scaling.py b/tests/integration/high_availability/test_replication_scaling.py
new file mode 100644
index 000000000..e25f69b9b
--- /dev/null
+++ b/tests/integration/high_availability/test_replication_scaling.py
@@ -0,0 +1,104 @@
+# Copyright 2022 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+
+from pytest_operator.plugin import OpsTest
+from tenacity import Retrying, stop_after_delay, wait_fixed
+
+from ..helpers import (
+    execute_queries_on_unit,
+    get_primary_unit,
+    get_unit_address,
+    scale_application,
+)
+from .high_availability_helpers import (
+    clean_up_database_and_table,
+    ensure_n_online_mysql_members,
+    get_application_name,
+    insert_data_into_mysql_and_validate_replication,
+)
+
+logger = logging.getLogger(__name__)
+
+TIMEOUT = 15 * 60
+
+
+async def test_scaling_without_data_loss(
+    ops_test: OpsTest, highly_available_cluster, credentials
+) -> None:
+    """Test to ensure that data is preserved when a unit is scaled up and then down.
+
+    Ensures that there are no running continuous writes as the extra data in the
+    database makes scaling up slower.
+    """
+    mysql_application_name = get_application_name(ops_test, "mysql")
+    assert mysql_application_name, "mysql application not found"
+
+    # assert that there are 3 units in the mysql cluster
+    assert len(ops_test.model.applications[mysql_application_name].units) == 3
+
+    mysql_unit = ops_test.model.applications[mysql_application_name].units[0]
+    primary = await get_primary_unit(ops_test, mysql_unit, mysql_application_name)
+    assert primary, "Primary unit not found"
+
+    # insert a value before scale up, and ensure that the value exists in all units
+    database_name, table_name = "test-preserves-data-on-delete", "data"
+    value_before_scale_up = await insert_data_into_mysql_and_validate_replication(
+        ops_test, database_name, table_name, credentials
+    )
+
+    select_value_before_scale_up_sql = [
+        f"SELECT id FROM `{database_name}`.`{table_name}` WHERE id = '{value_before_scale_up}'",
+    ]
+
+    # scale up the mysql application
+    await scale_application(ops_test, mysql_application_name, 4)
+    assert await ensure_n_online_mysql_members(
+        ops_test, 4
+    ), "The cluster is not fully online after scaling up"
+
+    # ensure value inserted before scale exists in all units
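+    # (retry briefly, as the newly added unit may still be catching up on replication)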
+    for attempt in Retrying(stop=stop_after_delay(10), wait=wait_fixed(2)):
+        with attempt:
+            for unit in ops_test.model.applications[mysql_application_name].units:
+                unit_address = await get_unit_address(ops_test, unit.name)
+
+                output = execute_queries_on_unit(
+                    unit_address,
+                    credentials["username"],
+                    credentials["password"],
+                    select_value_before_scale_up_sql,
+                )
+                assert output[0] == value_before_scale_up
+
+    # insert data after scale up
+    value_after_scale_up = await insert_data_into_mysql_and_validate_replication(
+        ops_test, database_name, table_name, credentials
+    )
+
+    # verify inserted data is present on all units
+    select_value_after_scale_up_sql = [
+        f"SELECT id FROM `{database_name}`.`{table_name}` WHERE id = '{value_after_scale_up}'",
+    ]
+
+    # scale down the mysql application
+    await scale_application(ops_test, mysql_application_name, 3)
+    assert await ensure_n_online_mysql_members(
+        ops_test, 3
+    ), "The cluster is not fully online after scaling down"
+
+    # ensure data written before scale down is persisted
+    for unit in ops_test.model.applications[mysql_application_name].units:
+        unit_address = await get_unit_address(ops_test, unit.name)
+
+        output = execute_queries_on_unit(
+            unit_address,
+            credentials["username"],
+            credentials["password"],
+            select_value_after_scale_up_sql,
+        )
+        assert output[0] == value_after_scale_up
+
+    # clean up inserted data, and created tables + databases
+    await clean_up_database_and_table(ops_test, database_name, table_name, credentials)
diff --git a/tests/integration/high_availability/test_self_healing.py b/tests/integration/high_availability/test_self_healing.py
deleted file mode 100644
index d27cd73dd..000000000
--- a/tests/integration/high_availability/test_self_healing.py
+++ /dev/null
@@ -1,501 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-
-import logging
-import time
-
-import lightkube
-import pytest
-from lightkube.resources.core_v1 import Pod
-from pytest_operator.plugin import OpsTest
-from tenacity import Retrying, stop_after_delay, wait_fixed
-
-from ..helpers import (
-    get_cluster_status,
-    get_primary_unit,
-    get_process_pid,
-    scale_application,
-    start_mysqld_service,
-    stop_mysqld_service,
-)
-from .high_availability_helpers import (
-    clean_up_database_and_table,
-    ensure_all_units_continuous_writes_incrementing,
-    ensure_n_online_mysql_members,
-    ensure_process_not_running,
-    get_application_name,
-    get_process_stat,
-    insert_data_into_mysql_and_validate_replication,
-    isolate_instance_from_cluster,
-    remove_instance_isolation,
-    send_signal_to_pod_container_process,
-    wait_until_units_in_status,
-)
-
-logger = logging.getLogger(__name__)
-
-MYSQL_CONTAINER_NAME = "mysql"
-MYSQLD_PROCESS_NAME = "mysqld"
-TIMEOUT = 40 * 60
-
-
-@pytest.mark.group(1)
-@pytest.mark.abort_on_fail
-async def test_kill_db_process(
-    ops_test: OpsTest, highly_available_cluster, continuous_writes, credentials
-) -> None:
-    """Test to send a SIGKILL to the primary db process and ensure that the cluster self heals."""
-    mysql_application_name = get_application_name(ops_test, "mysql")
-
-    logger.info("Waiting until 3 mysql instances are online")
-    # ensure all units in the cluster are online
-    assert await ensure_n_online_mysql_members(
-        ops_test, 3
-    ), "The deployed mysql application is not fully online"
-
-    logger.info("Ensuring all units have continuous writes incrementing")
-    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
-
-    mysql_unit = ops_test.model.applications[mysql_application_name].units[0]
-    primary = await get_primary_unit(ops_test, mysql_unit, mysql_application_name)
-
-    mysql_pid = await get_process_pid(
-        ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
-    )
-
-    logger.info(f"Sending SIGKILL to unit {primary.name}")
-    await send_signal_to_pod_container_process(
-        ops_test.model.info.name,
-        primary.name,
-        MYSQL_CONTAINER_NAME,
-        MYSQLD_PROCESS_NAME,
-        "SIGKILL",
-    )
-
-    # Wait for the SIGKILL above to take effect before continuing with test checks
-    time.sleep(10)
-
-    logger.info("Waiting until 3 mysql instances are online")
-    assert await ensure_n_online_mysql_members(
-        ops_test, 3
-    ), "The mysql application is not fully online after sending SIGKILL to primary"
-
-    # ensure that the mysqld process got restarted and has a new process id
-    new_mysql_pid = await get_process_pid(
-        ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
-    )
-    assert (
-        mysql_pid != new_mysql_pid
-    ), "The mysql process id is the same after sending it a SIGKILL"
-
-    new_primary = await get_primary_unit(ops_test, mysql_unit, mysql_application_name)
-    assert (
-        primary.name != new_primary.name
-    ), "The mysql primary has not been reelected after sending a SIGKILL"
-
-    logger.info("Ensuring all units have continuous writes incrementing")
-    # ensure continuous writes still incrementing for all units
-    async with ops_test.fast_forward():
-        await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
-
-    # ensure that we are able to insert data into the primary and have it replicated to all units
-    database_name, table_name = "test-kill-db-process", "data"
-    await insert_data_into_mysql_and_validate_replication(
-        ops_test, database_name, table_name, credentials
-    )
-    await clean_up_database_and_table(ops_test, database_name, table_name, credentials)
-
-
-@pytest.mark.group(2)
-@pytest.mark.abort_on_fail
-@pytest.mark.unstable
-async def test_freeze_db_process(
-    ops_test: OpsTest, highly_available_cluster, continuous_writes, credentials
-) -> None:
-    """Test to send a SIGSTOP to the primary db process and ensure that the cluster self heals."""
-    mysql_application_name = get_application_name(ops_test, "mysql")
-    assert mysql_application_name, "mysql application name is not set"
-
-    # ensure all units in the cluster are online
-    assert await ensure_n_online_mysql_members(
-        ops_test, 3
-    ), "The deployed mysql application is not fully online"
-
-    logger.info("Ensuring that all units continuous writes incrementing")
-    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
-
-    mysql_unit = ops_test.model.applications[mysql_application_name].units[0]
-    primary = await get_primary_unit(ops_test, mysql_unit, mysql_application_name)
-
-    mysql_pid = await get_process_pid(
-        ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
-    )
-    assert (mysql_pid or -1) > 0, "mysql process id is not positive"
-
-    logger.info(f"Sending SIGSTOP to unit {primary.name}")
-    await send_signal_to_pod_container_process(
-        ops_test.model.info.name,
-        primary.name,
-        MYSQL_CONTAINER_NAME,
-        MYSQLD_PROCESS_NAME,
-        "SIGSTOP",
-    )
-
-    # ensure that the mysqld process is stopped after receiving the sigstop
-    # T = stopped by job control signal
-    # (see https://man7.org/linux/man-pages/man1/ps.1.html under PROCESS STATE CODES)
-    mysql_process_stat_after_sigstop = await get_process_stat(
-        ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
-    )
-    assert "T" in mysql_process_stat_after_sigstop, "mysql process is not stopped after sigstop"
-
-    remaining_online_units = [
-        unit
-        for unit in ops_test.model.applications[mysql_application_name].units
-        if unit.name != primary.name
-    ]
-
-    logger.info("Waiting for new primary to be elected")
-
-    # retring as it may take time for the cluster to recognize that the primary process is stopped
-    for attempt in Retrying(stop=stop_after_delay(15 * 60), wait=wait_fixed(10)):
-        with attempt:
-            assert await ensure_n_online_mysql_members(
-                ops_test, 2, remaining_online_units
-            ), "The deployed mysql application does not have two online nodes"
-
-            new_primary = await get_primary_unit(
-                ops_test, remaining_online_units[0], mysql_application_name
-            )
-            assert primary.name != new_primary.name, "new mysql primary was not elected"
-
-    logger.info("Ensuring all remaining units continuous writes incrementing")
-
-    async with ops_test.fast_forward():
-        for attempt in Retrying(stop=stop_after_delay(15 * 60), wait=wait_fixed(10)):
-            with attempt:
-                await ensure_all_units_continuous_writes_incrementing(
-                    ops_test, credentials=credentials, mysql_units=remaining_online_units
-                )
-
-    logger.info(f"Sending SIGCONT to {primary.name}")
-    await send_signal_to_pod_container_process(
-        ops_test.model.info.name,
-        primary.name,
-        MYSQL_CONTAINER_NAME,
-        MYSQLD_PROCESS_NAME,
-        "SIGCONT",
-    )
-
-    # ensure that the mysqld process has started after receiving the sigstop
-    # T = stopped by job control signal
-    # R = running or runnable
-    # S = interruptible sleep
-    # I = idle kernel thread
-    # (see https://man7.org/linux/man-pages/man1/ps.1.html under PROCESS STATE CODES)
-    mysql_process_stat_after_sigcont = await get_process_stat(
-        ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
-    )
-    assert (
-        "T" not in mysql_process_stat_after_sigcont
-    ), "mysql process is not started after sigcont"
-    assert (
-        "R" in mysql_process_stat_after_sigcont
-        or "S" in mysql_process_stat_after_sigcont
-        or "I" in mysql_process_stat_after_sigcont
-    ), "mysql process not running or sleeping after sigcont"
-
-    new_mysql_pid = await get_process_pid(
-        ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
-    )
-    assert (
-        new_mysql_pid == mysql_pid
-    ), "mysql process id is not the same as it was before process was stopped"
-
-    # wait for possible recovery of the old primary
-    async with ops_test.fast_forward("60s"):
-        await ops_test.model.wait_for_idle(
-            apps=[mysql_application_name],
-            status="active",
-            raise_on_blocked=False,
-            timeout=TIMEOUT,
-        )
-
-    logger.info("Ensuring that there are 3 online mysql members")
-    assert await ensure_n_online_mysql_members(
-        ops_test, 3, remaining_online_units
-    ), "The deployed mysql application does not have three online nodes"
-
-    logger.info("Ensure all units continuous writes incrementing")
-    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
-
-
-@pytest.mark.group(3)
-@pytest.mark.abort_on_fail
-async def test_graceful_crash_of_primary(
-    ops_test: OpsTest, highly_available_cluster, continuous_writes, credentials
-) -> None:
-    """Test to send SIGTERM to primary instance and then verify recovery."""
-    mysql_application_name = get_application_name(ops_test, "mysql")
-
-    assert mysql_application_name, "mysql application name is not set"
-
-    logger.info("Ensuring that there are 3 online mysql members")
-    assert await ensure_n_online_mysql_members(
-        ops_test, 3
-    ), "The deployed mysql application does not have three online nodes"
-
-    logger.info("Ensuring that all units have incrementing continuous writes")
-    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
-
-    mysql_unit = ops_test.model.applications[mysql_application_name].units[0]
-    primary = await get_primary_unit(ops_test, mysql_unit, mysql_application_name)
-
-    mysql_pid = await get_process_pid(
-        ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
-    )
-
-    logger.info(f"Sending SIGTERM to unit {primary.name}")
-    await send_signal_to_pod_container_process(
-        ops_test.model.info.name,
-        primary.name,
-        MYSQL_CONTAINER_NAME,
-        MYSQLD_PROCESS_NAME,
-        "SIGTERM",
-    )
-
-    new_mysql_pid = await get_process_pid(
-        ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
-    )
-    assert (
-        new_mysql_pid == mysql_pid
-    ), "mysql process id is not the same as it was before process was stopped"
-
-    remaining_online_units = [
-        unit
-        for unit in ops_test.model.applications[mysql_application_name].units
-        if unit.name != primary.name
-    ]
-
-    logger.info("Waiting until there are 3 online mysql instances again")
-    # retrying as it may take time for the cluster to recognize that the primary process is stopped
-    for attempt in Retrying(stop=stop_after_delay(2 * 60), wait=wait_fixed(10)):
-        with attempt:
-            assert await ensure_n_online_mysql_members(
-                ops_test, 3
-            ), "The deployed mysql application does not have three online nodes"
-
-            new_primary = await get_primary_unit(
-                ops_test, remaining_online_units[0], mysql_application_name
-            )
-            assert primary.name != new_primary.name, "new mysql primary was not elected"
-
-    logger.info("Ensuring all instances have incrementing continuous writes")
-    async with ops_test.fast_forward():
-        for attempt in Retrying(stop=stop_after_delay(60), wait=wait_fixed(10)):
-            with attempt:
-                await ensure_all_units_continuous_writes_incrementing(
-                    ops_test, credentials=credentials
-                )
-
-
-@pytest.mark.group(4)
-@pytest.mark.abort_on_fail
-async def test_network_cut_affecting_an_instance(
-    ops_test: OpsTest, highly_available_cluster, continuous_writes, chaos_mesh, credentials
-) -> None:
-    """Test for a network cut affecting an instance."""
-    mysql_application_name = get_application_name(ops_test, "mysql")
-    assert mysql_application_name, "mysql application name is not set"
-
-    logger.info("Ensuring that there are 3 online mysql members")
-    assert await ensure_n_online_mysql_members(
-        ops_test, 3
-    ), "The deployed mysql application does not have three online nodes"
-
-    logger.info("Ensuring that all instances have incrementing continuous writes")
-    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
-
-    mysql_units = ops_test.model.applications[mysql_application_name].units
-    primary = await get_primary_unit(ops_test, mysql_units[0], mysql_application_name)
-
-    assert primary is not None, "No primary unit found"
-
-    logger.info(
-        f"Creating networkchaos policy to isolate instance {primary.name} from the cluster"
-    )
-    isolate_instance_from_cluster(ops_test, primary.name)
-
-    remaining_units = [unit for unit in mysql_units if unit.name != primary.name]
-
-    logger.info("Wait until MySQL GR actually detects isolated instance")
-    await wait_until_units_in_status(ops_test, [primary], remaining_units[0], "(missing)")
-    await wait_until_units_in_status(ops_test, remaining_units, remaining_units[0], "online")
-
-    cluster_status = await get_cluster_status(remaining_units[0])
-
-    isolated_primary_status, isolated_primary_memberrole = [
-        (member["status"], member["memberrole"])
-        for label, member in cluster_status["defaultreplicaset"]["topology"].items()
-        if label == primary.name.replace("/", "-")
-    ][0]
-    assert isolated_primary_status == "(missing)"
-    assert isolated_primary_memberrole == "secondary"
-
-    new_primary = await get_primary_unit(ops_test, remaining_units[0], mysql_application_name)
-    assert primary.name != new_primary.name
-
-    logger.info("Ensure all units have incrementing continuous writes")
-    await ensure_all_units_continuous_writes_incrementing(
-        ops_test, credentials=credentials, mysql_units=remaining_units
-    )
-
-    logger.info("Remove networkchaos policy isolating instance from cluster")
-    remove_instance_isolation(ops_test)
-
-    async with ops_test.fast_forward():
-        logger.info("Wait until returning instance enters recovery")
-        await ops_test.model.block_until(
-            lambda: primary.workload_status != "active", timeout=TIMEOUT
-        )
-        logger.info("Wait until returning instance become active")
-        await ops_test.model.block_until(
-            lambda: primary.workload_status == "active", timeout=TIMEOUT
-        )
-
-    logger.info("Wait until all units are online")
-    await wait_until_units_in_status(ops_test, mysql_units, mysql_units[0], "online")
-
-    new_cluster_status = await get_cluster_status(mysql_units[0])
-
-    logger.info("Ensure isolated instance is now secondary")
-    isolated_primary_status, isolated_primary_memberrole = [
-        (member["status"], member["memberrole"])
-        for label, member in new_cluster_status["defaultreplicaset"]["topology"].items()
-        if label == primary.name.replace("/", "-")
-    ][0]
-    assert isolated_primary_status == "online"
-    assert isolated_primary_memberrole == "secondary"
-
-    logger.info("Ensure there are 3 online mysql members")
-    assert await ensure_n_online_mysql_members(
-        ops_test, 3
-    ), "The deployed mysql application does not have three online nodes"
-
-    logger.info("Ensure all units have incrementing continuous writes")
-    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
-
-
-@pytest.mark.group(5)
-@pytest.mark.abort_on_fail
-@pytest.mark.unstable
-async def test_graceful_full_cluster_crash_test(
-    ops_test: OpsTest, highly_available_cluster, continuous_writes, credentials
-) -> None:
-    """Test to send SIGTERM to all units and then ensure that the cluster recovers."""
-    mysql_application_name = get_application_name(ops_test, "mysql")
-    assert mysql_application_name, "mysql application name is not set"
-
-    logger.info("Ensure there are 3 online mysql members")
-    assert await ensure_n_online_mysql_members(
-        ops_test, 3
-    ), "The deployed mysql application does not have three online nodes"
-
-    logger.info("Ensure that all units have incrementing continuous writes")
-    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
-
-    mysql_units = ops_test.model.applications[mysql_application_name].units
-
-    unit_mysqld_pids = {}
-    logger.info("Get mysqld pids on all instances")
-    for unit in mysql_units:
-        pid = await get_process_pid(ops_test, unit.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME)
-        assert (pid or -1) > 1, "mysql process id is not known/positive"
-
-        unit_mysqld_pids[unit.name] = pid
-
-    for unit in mysql_units:
-        logger.info(f"Stopping mysqld on {unit.name}")
-        await stop_mysqld_service(ops_test, unit.name)
-
-    logger.info("Wait until mysqld stopped on all instances")
-    for attempt in Retrying(stop=stop_after_delay(300), wait=wait_fixed(30)):
-        with attempt:
-            for unit in mysql_units:
-                await ensure_process_not_running(
-                    ops_test, unit.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
-                )
-    for unit in mysql_units:
-        logger.info(f"Starting mysqld on {unit.name}")
-        await start_mysqld_service(ops_test, unit.name)
-
-    async with ops_test.fast_forward("60s"):
-        logger.info("Block until all in maintenance/offline")
-        await ops_test.model.block_until(
-            lambda: all(unit.workload_status == "maintenance" for unit in mysql_units),
-            timeout=TIMEOUT,
-        )
-
-        logger.info("Wait all members to recover")
-        await ops_test.model.wait_for_idle(
-            apps=[mysql_application_name],
-            status="active",
-            raise_on_blocked=False,
-            timeout=TIMEOUT,
-            idle_period=30,
-        )
-
-    for unit in mysql_units:
-        new_pid = await get_process_pid(
-            ops_test, unit.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
-        )
-        assert new_pid > unit_mysqld_pids[unit.name], "The mysqld process did not restart"
-
-    cluster_status = await get_cluster_status(mysql_units[0])
-    for member in cluster_status["defaultreplicaset"]["topology"].values():
-        assert member["status"] == "online"
-
-    logger.info("Ensure all units have incrementing continuous writes")
-    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
-
-
-@pytest.mark.group(6)
-@pytest.mark.abort_on_fail
-async def test_single_unit_pod_delete(
-    ops_test: OpsTest, highly_available_cluster, credentials
-) -> None:
-    """Delete the pod in a single unit deployment and write data to new pod."""
-    mysql_application_name = get_application_name(ops_test, "mysql")
-    assert mysql_application_name, "mysql application name is not set"
-
-    logger.info("Scale mysql application to 1 unit that is active")
-    async with ops_test.fast_forward("60s"):
-        await scale_application(ops_test, mysql_application_name, 1)
-    unit = ops_test.model.applications[mysql_application_name].units[0]
-    assert unit.workload_status == "active"
-
-    logger.info("Delete pod for the the mysql unit")
-    client = lightkube.Client()
-    client.delete(Pod, unit.name.replace("/", "-"), namespace=ops_test.model.info.name)
-
-    logger.info("Wait for a new pod to be created by k8s")
-    async with ops_test.fast_forward("60s"):
-        await ops_test.model.wait_for_idle(
-            apps=[mysql_application_name],
-            status="active",
-            raise_on_blocked=True,
-            timeout=TIMEOUT,
-            idle_period=30,
-        )
-
-    logger.info("Write data to unit and verify that data was written")
-    database_name, table_name = "test-single-pod-delete", "data"
-    await insert_data_into_mysql_and_validate_replication(
-        ops_test,
-        database_name=database_name,
-        table_name=table_name,
-        credentials=credentials,
-        mysql_application_substring="mysql-k8s",
-    )
-    await clean_up_database_and_table(ops_test, database_name, table_name, credentials)
diff --git a/tests/integration/high_availability/test_self_healing_network_cut.py b/tests/integration/high_availability/test_self_healing_network_cut.py
new file mode 100644
index 000000000..ae3f7cc0d
--- /dev/null
+++ b/tests/integration/high_availability/test_self_healing_network_cut.py
@@ -0,0 +1,110 @@
+# Copyright 2022 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+
+from pytest_operator.plugin import OpsTest
+
+from ..helpers import (
+    get_cluster_status,
+    get_primary_unit,
+)
+from .high_availability_helpers import (
+    ensure_all_units_continuous_writes_incrementing,
+    ensure_n_online_mysql_members,
+    get_application_name,
+    isolate_instance_from_cluster,
+    remove_instance_isolation,
+    wait_until_units_in_status,
+)
+
+logger = logging.getLogger(__name__)
+
+MYSQL_CONTAINER_NAME = "mysql"
+MYSQLD_PROCESS_NAME = "mysqld"
+TIMEOUT = 40 * 60
+
+
+async def test_network_cut_affecting_an_instance(
+    ops_test: OpsTest, highly_available_cluster, continuous_writes, chaos_mesh, credentials
+) -> None:
+    """Test for a network cut affecting an instance."""
+    mysql_application_name = get_application_name(ops_test, "mysql")
+    assert mysql_application_name, "mysql application name is not set"
+
+    logger.info("Ensuring that there are 3 online mysql members")
+    assert await ensure_n_online_mysql_members(
+        ops_test, 3
+    ), "The deployed mysql application does not have three online nodes"
+
+    logger.info("Ensuring that all instances have incrementing continuous writes")
+    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
+
+    mysql_units = ops_test.model.applications[mysql_application_name].units
+    primary = await get_primary_unit(ops_test, mysql_units[0], mysql_application_name)
+
+    assert primary is not None, "No primary unit found"
+
+    logger.info(
+        f"Creating networkchaos policy to isolate instance {primary.name} from the cluster"
+    )
+    isolate_instance_from_cluster(ops_test, primary.name)
+
+    remaining_units = [unit for unit in mysql_units if unit.name != primary.name]
+
+    logger.info("Wait until MySQL GR actually detects isolated instance")
+    await wait_until_units_in_status(ops_test, [primary], remaining_units[0], "(missing)")
+    await wait_until_units_in_status(ops_test, remaining_units, remaining_units[0], "online")
+
+    cluster_status = await get_cluster_status(remaining_units[0])
+
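+    # cluster status labels members by pod name, hence the "/" -> "-" translation below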
+    isolated_primary_status, isolated_primary_memberrole = [
+        (member["status"], member["memberrole"])
+        for label, member in cluster_status["defaultreplicaset"]["topology"].items()
+        if label == primary.name.replace("/", "-")
+    ][0]
+    assert isolated_primary_status == "(missing)"
+    assert isolated_primary_memberrole == "secondary"
+
+    new_primary = await get_primary_unit(ops_test, remaining_units[0], mysql_application_name)
+    assert primary.name != new_primary.name
+
+    logger.info("Ensure all units have incrementing continuous writes")
+    await ensure_all_units_continuous_writes_incrementing(
+        ops_test, credentials=credentials, mysql_units=remaining_units
+    )
+
+    logger.info("Remove networkchaos policy isolating instance from cluster")
+    remove_instance_isolation(ops_test)
+
+    async with ops_test.fast_forward():
+        logger.info("Wait until returning instance enters recovery")
+        await ops_test.model.block_until(
+            lambda: primary.workload_status != "active", timeout=TIMEOUT
+        )
+        logger.info("Wait until returning instance become active")
+        await ops_test.model.block_until(
+            lambda: primary.workload_status == "active", timeout=TIMEOUT
+        )
+
+    logger.info("Wait until all units are online")
+    await wait_until_units_in_status(ops_test, mysql_units, mysql_units[0], "online")
+
+    new_cluster_status = await get_cluster_status(mysql_units[0])
+
+    logger.info("Ensure isolated instance is now secondary")
+    isolated_primary_status, isolated_primary_memberrole = [
+        (member["status"], member["memberrole"])
+        for label, member in new_cluster_status["defaultreplicaset"]["topology"].items()
+        if label == primary.name.replace("/", "-")
+    ][0]
+    assert isolated_primary_status == "online"
+    assert isolated_primary_memberrole == "secondary"
+
+    logger.info("Ensure there are 3 online mysql members")
+    assert await ensure_n_online_mysql_members(
+        ops_test, 3
+    ), "The deployed mysql application does not have three online nodes"
+
+    logger.info("Ensure all units have incrementing continuous writes")
+    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
diff --git a/tests/integration/high_availability/test_self_healing_pod.py b/tests/integration/high_availability/test_self_healing_pod.py
new file mode 100644
index 000000000..e08b8dd78
--- /dev/null
+++ b/tests/integration/high_availability/test_self_healing_pod.py
@@ -0,0 +1,62 @@
+# Copyright 2022 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+
+import lightkube
+from lightkube.resources.core_v1 import Pod
+from pytest_operator.plugin import OpsTest
+
+from ..helpers import (
+    scale_application,
+)
+from .high_availability_helpers import (
+    clean_up_database_and_table,
+    get_application_name,
+    insert_data_into_mysql_and_validate_replication,
+)
+
+logger = logging.getLogger(__name__)
+
+MYSQL_CONTAINER_NAME = "mysql"
+MYSQLD_PROCESS_NAME = "mysqld"
+TIMEOUT = 40 * 60
+
+
+async def test_single_unit_pod_delete(
+    ops_test: OpsTest, highly_available_cluster, credentials
+) -> None:
+    """Delete the pod in a single unit deployment and write data to new pod."""
+    mysql_application_name = get_application_name(ops_test, "mysql")
+    assert mysql_application_name, "mysql application name is not set"
+
+    logger.info("Scale mysql application to 1 unit that is active")
+    async with ops_test.fast_forward("60s"):
+        await scale_application(ops_test, mysql_application_name, 1)
+    unit = ops_test.model.applications[mysql_application_name].units[0]
+    assert unit.workload_status == "active"
+
+    logger.info("Delete pod for the the mysql unit")
+    client = lightkube.Client()
+    client.delete(Pod, unit.name.replace("/", "-"), namespace=ops_test.model.info.name)
+
+    logger.info("Wait for a new pod to be created by k8s")
+    async with ops_test.fast_forward("60s"):
+        await ops_test.model.wait_for_idle(
+            apps=[mysql_application_name],
+            status="active",
+            raise_on_blocked=True,
+            timeout=TIMEOUT,
+            idle_period=30,
+        )
+
+    logger.info("Write data to unit and verify that data was written")
+    database_name, table_name = "test-single-pod-delete", "data"
+    await insert_data_into_mysql_and_validate_replication(
+        ops_test,
+        database_name=database_name,
+        table_name=table_name,
+        credentials=credentials,
+        mysql_application_substring="mysql-k8s",
+    )
+    await clean_up_database_and_table(ops_test, database_name, table_name, credentials)
diff --git a/tests/integration/high_availability/test_self_healing_process_frozen.py b/tests/integration/high_availability/test_self_healing_process_frozen.py
new file mode 100644
index 000000000..5e137693f
--- /dev/null
+++ b/tests/integration/high_availability/test_self_healing_process_frozen.py
@@ -0,0 +1,146 @@
+# Copyright 2022 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+
+from pytest_operator.plugin import OpsTest
+from tenacity import Retrying, stop_after_delay, wait_fixed
+
+from ..helpers import (
+    get_primary_unit,
+    get_process_pid,
+)
+from .high_availability_helpers import (
+    ensure_all_units_continuous_writes_incrementing,
+    ensure_n_online_mysql_members,
+    get_application_name,
+    get_process_stat,
+    send_signal_to_pod_container_process,
+)
+
+logger = logging.getLogger(__name__)
+
+MYSQL_CONTAINER_NAME = "mysql"
+MYSQLD_PROCESS_NAME = "mysqld"
+TIMEOUT = 40 * 60
+
+
+async def test_freeze_db_process(
+    ops_test: OpsTest, highly_available_cluster, continuous_writes, credentials
+) -> None:
+    """Test to send a SIGSTOP to the primary db process and ensure that the cluster self heals."""
+    mysql_application_name = get_application_name(ops_test, "mysql")
+    assert mysql_application_name, "mysql application name is not set"
+
+    # ensure all units in the cluster are online
+    assert await ensure_n_online_mysql_members(
+        ops_test, 3
+    ), "The deployed mysql application is not fully online"
+
+    logger.info("Ensuring that all units continuous writes incrementing")
+    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
+
+    mysql_unit = ops_test.model.applications[mysql_application_name].units[0]
+    primary = await get_primary_unit(ops_test, mysql_unit, mysql_application_name)
+
+    mysql_pid = await get_process_pid(
+        ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
+    )
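+    # get_process_pid may return None; fall back to -1 so the assertion fails cleanly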
+    assert (mysql_pid or -1) > 0, "mysql process id is not positive"
+
+    logger.info(f"Sending SIGSTOP to unit {primary.name}")
+    await send_signal_to_pod_container_process(
+        ops_test.model.info.name,
+        primary.name,
+        MYSQL_CONTAINER_NAME,
+        MYSQLD_PROCESS_NAME,
+        "SIGSTOP",
+    )
+
+    # ensure that the mysqld process is stopped after receiving the sigstop
+    # T = stopped by job control signal
+    # (see https://man7.org/linux/man-pages/man1/ps.1.html under PROCESS STATE CODES)
+    mysql_process_stat_after_sigstop = await get_process_stat(
+        ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
+    )
+    assert "T" in mysql_process_stat_after_sigstop, "mysql process is not stopped after sigstop"
+
+    remaining_online_units = [
+        unit
+        for unit in ops_test.model.applications[mysql_application_name].units
+        if unit.name != primary.name
+    ]
+
+    logger.info("Waiting for new primary to be elected")
+
+    # retrying as it may take time for the cluster to recognize that the primary process is stopped
+    for attempt in Retrying(stop=stop_after_delay(15 * 60), wait=wait_fixed(10)):
+        with attempt:
+            assert await ensure_n_online_mysql_members(
+                ops_test, 2, remaining_online_units
+            ), "The deployed mysql application does not have two online nodes"
+
+            new_primary = await get_primary_unit(
+                ops_test, remaining_online_units[0], mysql_application_name
+            )
+            assert primary.name != new_primary.name, "new mysql primary was not elected"
+
+    logger.info("Ensuring all remaining units continuous writes incrementing")
+
+    async with ops_test.fast_forward():
+        for attempt in Retrying(stop=stop_after_delay(15 * 60), wait=wait_fixed(10)):
+            with attempt:
+                await ensure_all_units_continuous_writes_incrementing(
+                    ops_test, credentials=credentials, mysql_units=remaining_online_units
+                )
+
+    logger.info(f"Sending SIGCONT to {primary.name}")
+    await send_signal_to_pod_container_process(
+        ops_test.model.info.name,
+        primary.name,
+        MYSQL_CONTAINER_NAME,
+        MYSQLD_PROCESS_NAME,
+        "SIGCONT",
+    )
+
+    # ensure that the mysqld process has started after receiving the sigcont
+    # T = stopped by job control signal
+    # R = running or runnable
+    # S = interruptible sleep
+    # I = idle kernel thread
+    # (see https://man7.org/linux/man-pages/man1/ps.1.html under PROCESS STATE CODES)
+    mysql_process_stat_after_sigcont = await get_process_stat(
+        ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
+    )
+    assert (
+        "T" not in mysql_process_stat_after_sigcont
+    ), "mysql process is not started after sigcont"
+    assert (
+        "R" in mysql_process_stat_after_sigcont
+        or "S" in mysql_process_stat_after_sigcont
+        or "I" in mysql_process_stat_after_sigcont
+    ), "mysql process not running or sleeping after sigcont"
+
+    new_mysql_pid = await get_process_pid(
+        ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
+    )
+    assert (
+        new_mysql_pid == mysql_pid
+    ), "mysql process id is not the same as it was before process was stopped"
+
+    # wait for possible recovery of the old primary
+    async with ops_test.fast_forward("60s"):
+        await ops_test.model.wait_for_idle(
+            apps=[mysql_application_name],
+            status="active",
+            raise_on_blocked=False,
+            timeout=TIMEOUT,
+        )
+
+    logger.info("Ensuring that there are 3 online mysql members")
+    assert await ensure_n_online_mysql_members(
+        ops_test, 3, remaining_online_units
+    ), "The deployed mysql application does not have three online nodes"
+
+    logger.info("Ensure all units continuous writes incrementing")
+    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
diff --git a/tests/integration/high_availability/test_self_healing_process_killed.py b/tests/integration/high_availability/test_self_healing_process_killed.py
new file mode 100644
index 000000000..8de9720db
--- /dev/null
+++ b/tests/integration/high_availability/test_self_healing_process_killed.py
@@ -0,0 +1,91 @@
+# Copyright 2022 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+import time
+
+from pytest_operator.plugin import OpsTest
+
+from ..helpers import (
+    get_primary_unit,
+    get_process_pid,
+)
+from .high_availability_helpers import (
+    clean_up_database_and_table,
+    ensure_all_units_continuous_writes_incrementing,
+    ensure_n_online_mysql_members,
+    get_application_name,
+    insert_data_into_mysql_and_validate_replication,
+    send_signal_to_pod_container_process,
+)
+
+logger = logging.getLogger(__name__)
+
+MYSQL_CONTAINER_NAME = "mysql"
+MYSQLD_PROCESS_NAME = "mysqld"
+TIMEOUT = 40 * 60
+
+
+async def test_kill_db_process(
+    ops_test: OpsTest, highly_available_cluster, continuous_writes, credentials
+) -> None:
+    """Test to send a SIGKILL to the primary db process and ensure that the cluster self heals."""
+    mysql_application_name = get_application_name(ops_test, "mysql")
+
+    logger.info("Waiting until 3 mysql instances are online")
+    # ensure all units in the cluster are online
+    assert await ensure_n_online_mysql_members(
+        ops_test, 3
+    ), "The deployed mysql application is not fully online"
+
+    logger.info("Ensuring all units have continuous writes incrementing")
+    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
+
+    mysql_unit = ops_test.model.applications[mysql_application_name].units[0]
+    primary = await get_primary_unit(ops_test, mysql_unit, mysql_application_name)
+
+    mysql_pid = await get_process_pid(
+        ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
+    )
+
+    logger.info(f"Sending SIGKILL to unit {primary.name}")
+    await send_signal_to_pod_container_process(
+        ops_test.model.info.name,
+        primary.name,
+        MYSQL_CONTAINER_NAME,
+        MYSQLD_PROCESS_NAME,
+        "SIGKILL",
+    )
+
+    # Wait for the SIGKILL above to take effect before continuing with test checks
+    time.sleep(10)
+
+    logger.info("Waiting until 3 mysql instances are online")
+    assert await ensure_n_online_mysql_members(
+        ops_test, 3
+    ), "The mysql application is not fully online after sending SIGKILL to primary"
+
+    # ensure that the mysqld process got restarted and has a new process id
+    new_mysql_pid = await get_process_pid(
+        ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
+    )
+    assert (
+        mysql_pid != new_mysql_pid
+    ), "The mysql process id is the same after sending it a SIGKILL"
+
+    new_primary = await get_primary_unit(ops_test, mysql_unit, mysql_application_name)
+    assert (
+        primary.name != new_primary.name
+    ), "The mysql primary has not been reelected after sending a SIGKILL"
+
+    logger.info("Ensuring all units have continuous writes incrementing")
+    # ensure continuous writes still incrementing for all units
+    async with ops_test.fast_forward():
+        await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
+
+    # ensure that we are able to insert data into the primary and have it replicated to all units
+    database_name, table_name = "test-kill-db-process", "data"
+    await insert_data_into_mysql_and_validate_replication(
+        ops_test, database_name, table_name, credentials
+    )
+    await clean_up_database_and_table(ops_test, database_name, table_name, credentials)
diff --git a/tests/integration/high_availability/test_self_healing_restart_graceful.py b/tests/integration/high_availability/test_self_healing_restart_graceful.py
new file mode 100644
index 000000000..b61494559
--- /dev/null
+++ b/tests/integration/high_availability/test_self_healing_restart_graceful.py
@@ -0,0 +1,87 @@
+# Copyright 2022 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+
+from pytest_operator.plugin import OpsTest
+
+from ..helpers import (
+    execute_queries_on_unit,
+    get_primary_unit,
+    get_unit_address,
+    start_mysqld_service,
+    stop_mysqld_service,
+)
+from .high_availability_helpers import (
+    ensure_all_units_continuous_writes_incrementing,
+    ensure_process_not_running,
+    get_application_name,
+)
+
+logger = logging.getLogger(__name__)
+
+MYSQL_CONTAINER_NAME = "mysql"
+MYSQLD_PROCESS_NAME = "mysqld"
+TIMEOUT = 40 * 60
+
+
+async def test_cluster_manual_rejoin(
+    ops_test: OpsTest, highly_available_cluster, continuous_writes, credentials
+) -> None:
+    """The cluster manual re-join test.
+
+    A graceful restart is performed in one of the instances (choosing Primary to make it painful).
+    In order to verify that the instance can come back ONLINE, after disabling automatic re-join
+    """
+    # Ensure continuous writes still incrementing for all units
+    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
+
+    mysql_app_name = get_application_name(ops_test, "mysql")
+    mysql_units = ops_test.model.applications[mysql_app_name].units
+
+    primary_unit = await get_primary_unit(ops_test, mysql_units[0], mysql_app_name)
+    primary_unit_ip = await get_unit_address(ops_test, primary_unit.name)
+
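+    # autorejoin_tries=0 stops the member from automatically rejoining the group after the restart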
+    queries = [
+        "SET PERSIST group_replication_autorejoin_tries=0",
+    ]
+
+    # Disable automatic re-join procedure
+    execute_queries_on_unit(
+        unit_address=primary_unit_ip,
+        username=credentials["username"],
+        password=credentials["password"],
+        queries=queries,
+        commit=True,
+    )
+
+    logger.info(f"Stopping mysqld on {primary_unit.name}")
+    await stop_mysqld_service(ops_test, primary_unit.name)
+
+    logger.info(f"Wait until mysqld stopped on {primary_unit.name}")
+    await ensure_process_not_running(
+        ops_test=ops_test,
+        unit_name=primary_unit.name,
+        container_name=MYSQL_CONTAINER_NAME,
+        process=MYSQLD_PROCESS_NAME,
+    )
+
+    logger.info(f"Starting mysqld on {primary_unit.name}")
+    await start_mysqld_service(ops_test, primary_unit.name)
+
+    # Verify unit comes back active
+    async with ops_test.fast_forward():
+        logger.info("Waiting unit to be back online as secondary")
+        await ops_test.model.block_until(
+            lambda: primary_unit.workload_status == "active"
+            and primary_unit.workload_status_message == "",
+            timeout=TIMEOUT,
+        )
+        logger.info("Waiting unit to be back online.")
+        await ops_test.model.block_until(
+            lambda: primary_unit.workload_status == "active",
+            timeout=TIMEOUT,
+        )
+
+    # Ensure continuous writes still incrementing for all units
+    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
diff --git a/tests/integration/high_availability/test_self_healing_stop_all.py b/tests/integration/high_availability/test_self_healing_stop_all.py
new file mode 100644
index 000000000..e87817e3e
--- /dev/null
+++ b/tests/integration/high_availability/test_self_healing_stop_all.py
@@ -0,0 +1,96 @@
+# Copyright 2022 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+
+from pytest_operator.plugin import OpsTest
+from tenacity import Retrying, stop_after_delay, wait_fixed
+
+from ..helpers import (
+    get_cluster_status,
+    get_process_pid,
+    start_mysqld_service,
+    stop_mysqld_service,
+)
+from .high_availability_helpers import (
+    ensure_all_units_continuous_writes_incrementing,
+    ensure_n_online_mysql_members,
+    ensure_process_not_running,
+    get_application_name,
+)
+
+logger = logging.getLogger(__name__)
+
+MYSQL_CONTAINER_NAME = "mysql"
+MYSQLD_PROCESS_NAME = "mysqld"
+TIMEOUT = 40 * 60
+
+
+async def test_graceful_full_cluster_crash_test(
+    ops_test: OpsTest, highly_available_cluster, continuous_writes, credentials
+) -> None:
+    """Test to send SIGTERM to all units and then ensure that the cluster recovers."""
+    mysql_application_name = get_application_name(ops_test, "mysql")
+    assert mysql_application_name, "mysql application name is not set"
+
+    logger.info("Ensure there are 3 online mysql members")
+    assert await ensure_n_online_mysql_members(
+        ops_test, 3
+    ), "The deployed mysql application does not have three online nodes"
+
+    logger.info("Ensure that all units have incrementing continuous writes")
+    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
+
+    mysql_units = ops_test.model.applications[mysql_application_name].units
+
+    unit_mysqld_pids = {}
+    logger.info("Get mysqld pids on all instances")
+    for unit in mysql_units:
+        pid = await get_process_pid(ops_test, unit.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME)
+        assert (pid or -1) > 1, "mysql process id is not known/positive"
+
+        unit_mysqld_pids[unit.name] = pid
+
+    for unit in mysql_units:
+        logger.info(f"Stopping mysqld on {unit.name}")
+        await stop_mysqld_service(ops_test, unit.name)
+
+    logger.info("Wait until mysqld stopped on all instances")
+    for attempt in Retrying(stop=stop_after_delay(300), wait=wait_fixed(30)):
+        with attempt:
+            for unit in mysql_units:
+                await ensure_process_not_running(
+                    ops_test, unit.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
+                )
+    for unit in mysql_units:
+        logger.info(f"Starting mysqld on {unit.name}")
+        await start_mysqld_service(ops_test, unit.name)
+
+    async with ops_test.fast_forward("60s"):
+        logger.info("Block until all in maintenance/offline")
+        await ops_test.model.block_until(
+            lambda: all(unit.workload_status == "maintenance" for unit in mysql_units),
+            timeout=TIMEOUT,
+        )
+
+        logger.info("Wait all members to recover")
+        await ops_test.model.wait_for_idle(
+            apps=[mysql_application_name],
+            status="active",
+            raise_on_blocked=False,
+            timeout=TIMEOUT,
+            idle_period=30,
+        )
+
+    for unit in mysql_units:
+        new_pid = await get_process_pid(
+            ops_test, unit.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
+        )
+        assert new_pid > unit_mysqld_pids[unit.name], "The mysqld process did not restart"
+
+    cluster_status = await get_cluster_status(mysql_units[0])
+    for member in cluster_status["defaultreplicaset"]["topology"].values():
+        assert member["status"] == "online"
+
+    logger.info("Ensure all units have incrementing continuous writes")
+    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
diff --git a/tests/integration/high_availability/test_self_healing_stop_primary.py b/tests/integration/high_availability/test_self_healing_stop_primary.py
new file mode 100644
index 000000000..cc827cb56
--- /dev/null
+++ b/tests/integration/high_availability/test_self_healing_stop_primary.py
@@ -0,0 +1,91 @@
+# Copyright 2022 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+
+from pytest_operator.plugin import OpsTest
+from tenacity import Retrying, stop_after_delay, wait_fixed
+
+from ..helpers import (
+    get_primary_unit,
+    get_process_pid,
+)
+from .high_availability_helpers import (
+    ensure_all_units_continuous_writes_incrementing,
+    ensure_n_online_mysql_members,
+    get_application_name,
+    send_signal_to_pod_container_process,
+)
+
+logger = logging.getLogger(__name__)
+
+MYSQL_CONTAINER_NAME = "mysql"
+MYSQLD_PROCESS_NAME = "mysqld"
+TIMEOUT = 40 * 60
+
+
+async def test_graceful_crash_of_primary(
+    ops_test: OpsTest, highly_available_cluster, continuous_writes, credentials
+) -> None:
+    """Test to send SIGTERM to primary instance and then verify recovery."""
+    mysql_application_name = get_application_name(ops_test, "mysql")
+
+    assert mysql_application_name, "mysql application name is not set"
+
+    logger.info("Ensuring that there are 3 online mysql members")
+    assert await ensure_n_online_mysql_members(
+        ops_test, 3
+    ), "The deployed mysql application does not have three online nodes"
+
+    logger.info("Ensuring that all units have incrementing continuous writes")
+    await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
+
+    mysql_unit = ops_test.model.applications[mysql_application_name].units[0]
+    primary = await get_primary_unit(ops_test, mysql_unit, mysql_application_name)
+
+    mysql_pid = await get_process_pid(
+        ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
+    )
+
+    logger.info(f"Sending SIGTERM to unit {primary.name}")
+    await send_signal_to_pod_container_process(
+        ops_test.model.info.name,
+        primary.name,
+        MYSQL_CONTAINER_NAME,
+        MYSQLD_PROCESS_NAME,
+        "SIGTERM",
+    )
+
+    new_mysql_pid = await get_process_pid(
+        ops_test, primary.name, MYSQL_CONTAINER_NAME, MYSQLD_PROCESS_NAME
+    )
+    assert (
+        new_mysql_pid == mysql_pid
+    ), "mysql process id is not the same as it was before process was stopped"
+
+    remaining_online_units = [
+        unit
+        for unit in ops_test.model.applications[mysql_application_name].units
+        if unit.name != primary.name
+    ]
+
+    logger.info("Waiting until there are 3 online mysql instances again")
+    # retrying as it may take time for the cluster to recognize that the primary process is stopped
+    for attempt in Retrying(stop=stop_after_delay(2 * 60), wait=wait_fixed(10)):
+        with attempt:
+            assert await ensure_n_online_mysql_members(
+                ops_test, 3
+            ), "The deployed mysql application does not have three online nodes"
+
+            new_primary = await get_primary_unit(
+                ops_test, remaining_online_units[0], mysql_application_name
+            )
+            assert primary.name != new_primary.name, "new mysql primary was not elected"
+
+    logger.info("Ensuring all instances have incrementing continuous writes")
+    async with ops_test.fast_forward():
+        for attempt in Retrying(stop=stop_after_delay(60), wait=wait_fixed(10)):
+            with attempt:
+                await ensure_all_units_continuous_writes_incrementing(
+                    ops_test, credentials=credentials
+                )
diff --git a/tests/integration/high_availability/test_upgrade.py b/tests/integration/high_availability/test_upgrade.py
index b71c6ad08..15d6959f8 100644
--- a/tests/integration/high_availability/test_upgrade.py
+++ b/tests/integration/high_availability/test_upgrade.py
@@ -3,6 +3,7 @@
 
 import json
 import logging
+import pathlib
 import shutil
 import zipfile
 from pathlib import Path
@@ -35,7 +36,6 @@
 TEST_APP_NAME = "mysql-test-app"
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_deploy_latest(ops_test: OpsTest) -> None:
     """Simple test to ensure that the mysql and application charms get deployed."""
@@ -67,7 +67,6 @@ async def test_deploy_latest(ops_test: OpsTest) -> None:
     assert len(ops_test.model.applications[MYSQL_APP_NAME].units) == 3
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_pre_upgrade_check(ops_test: OpsTest) -> None:
     """Test that the pre-upgrade-check action runs successfully."""
@@ -94,18 +93,14 @@ async def test_pre_upgrade_check(ops_test: OpsTest) -> None:
     assert get_sts_partition(ops_test, MYSQL_APP_NAME) == 2, "Partition not set to 2"
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
-async def test_upgrade_from_edge(ops_test: OpsTest, continuous_writes, credentials) -> None:
+async def test_upgrade_from_edge(ops_test: OpsTest, charm, continuous_writes, credentials) -> None:
     logger.info("Ensure continuous_writes")
     await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
 
     resources = {"mysql-image": METADATA["resources"]["mysql-image"]["upstream-source"]}
     application = ops_test.model.applications[MYSQL_APP_NAME]
 
-    logger.info("Build charm locally")
-    charm = await ops_test.build_charm(".")
-
     logger.info("Refresh the charm")
     await application.refresh(path=charm, resources=resources)
 
@@ -142,9 +137,8 @@ async def test_upgrade_from_edge(ops_test: OpsTest, continuous_writes, credentia
     await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
-async def test_fail_and_rollback(ops_test, continuous_writes, built_charm, credentials) -> None:
+async def test_fail_and_rollback(ops_test, charm, continuous_writes, credentials) -> None:
     logger.info("Get leader unit")
     leader_unit = await get_leader_unit(ops_test, MYSQL_APP_NAME)
     assert leader_unit is not None, "No leader unit found"
@@ -152,15 +146,8 @@ async def test_fail_and_rollback(ops_test, continuous_writes, built_charm, crede
     logger.info("Run pre-upgrade-check action")
     await juju_.run_action(leader_unit, "pre-upgrade-check")
 
-    if not built_charm:
-        # on CI built charm is cached and returned with build_charm
-        # by the pytest-operator-cache plugin
-        local_charm = await ops_test.build_charm(".")
-    else:
-        # return the built charm from the test
-        local_charm = built_charm
-    fault_charm = Path("/tmp/", local_charm.name)
-    shutil.copy(local_charm, fault_charm)
+    fault_charm = Path("/tmp/", pathlib.Path(charm).name)
+    shutil.copy(charm, fault_charm)
 
     logger.info("Inject dependency fault")
     await inject_dependency_fault(ops_test, MYSQL_APP_NAME, fault_charm)
@@ -190,7 +177,7 @@ async def test_fail_and_rollback(ops_test, continuous_writes, built_charm, crede
     await juju_.run_action(leader_unit, "pre-upgrade-check")
 
     logger.info("Re-refresh the charm")
-    await application.refresh(path=local_charm)
+    await application.refresh(path=charm)
 
     logger.info("Wait for upgrade to complete on first upgrading unit")
     await ops_test.model.block_until(
diff --git a/tests/integration/high_availability/test_upgrade_from_stable.py b/tests/integration/high_availability/test_upgrade_from_stable.py
index 74ef62a6f..d92c40e57 100644
--- a/tests/integration/high_availability/test_upgrade_from_stable.py
+++ b/tests/integration/high_availability/test_upgrade_from_stable.py
@@ -8,7 +8,7 @@
 import pytest
 from pytest_operator.plugin import OpsTest
 
-from .. import juju_, markers
+from .. import juju_
 from ..helpers import (
     get_leader_unit,
     get_primary_unit,
@@ -30,8 +30,6 @@
 TEST_APP_NAME = "test-app"
 
 
-@pytest.mark.group(1)
-@markers.amd64_only  # TODO: remove after arm64 stable release
 @pytest.mark.abort_on_fail
 async def test_deploy_stable(ops_test: OpsTest) -> None:
     """Simple test to ensure that the mysql and application charms get deployed."""
@@ -64,8 +62,6 @@ async def test_deploy_stable(ops_test: OpsTest) -> None:
     assert len(ops_test.model.applications[MYSQL_APP_NAME].units) == 3
 
 
-@pytest.mark.group(1)
-@markers.amd64_only  # TODO: remove after arm64 stable release
 @pytest.mark.abort_on_fail
 async def test_pre_upgrade_check(ops_test: OpsTest) -> None:
     """Test that the pre-upgrade-check action runs successfully."""
@@ -91,21 +87,14 @@ async def test_pre_upgrade_check(ops_test: OpsTest) -> None:
     assert get_sts_partition(ops_test, MYSQL_APP_NAME) == 2, "Partition not set to 2"
 
 
-@pytest.mark.group(1)
-@markers.amd64_only  # TODO: remove after arm64 stable release
 @pytest.mark.abort_on_fail
-async def test_upgrade_from_stable(ops_test: OpsTest, credentials):
+async def test_upgrade_from_stable(ops_test: OpsTest, charm, credentials):
     """Test updating from stable channel."""
     application = ops_test.model.applications[MYSQL_APP_NAME]
-    logger.info("Build charm locally")
-    charm = await ops_test.build_charm(".")
 
     resources = {"mysql-image": METADATA["resources"]["mysql-image"]["upstream-source"]}
     application = ops_test.model.applications[MYSQL_APP_NAME]
 
-    logger.info("Build charm locally")
-    charm = await ops_test.build_charm(".")
-
     logger.info("Refresh the charm")
     await application.refresh(path=charm, resources=resources)
 
diff --git a/tests/integration/high_availability/test_upgrade_rollback_incompat.py b/tests/integration/high_availability/test_upgrade_rollback_incompat.py
index c316d4ba2..f01331ffc 100644
--- a/tests/integration/high_availability/test_upgrade_rollback_incompat.py
+++ b/tests/integration/high_availability/test_upgrade_rollback_incompat.py
@@ -2,10 +2,8 @@
 # See LICENSE file for licensing details.
 
 import logging
-import os
 import pathlib
 import shutil
-import subprocess
 from time import sleep
 from zipfile import ZipFile
 
@@ -14,7 +12,7 @@
 from pytest_operator.plugin import OpsTest
 
 from .. import juju_, markers
-from ..helpers import get_leader_unit, get_unit_by_index
+from ..helpers import get_leader_unit, get_model_logs, get_unit_by_index
 from .high_availability_helpers import get_sts_partition
 
 logger = logging.getLogger(__name__)
@@ -25,31 +23,28 @@
 METADATA = yaml.safe_load(pathlib.Path("./metadata.yaml").read_text())
 
 
-@pytest.mark.group(1)
 # TODO: remove after next incompatible MySQL server version released in our snap
 # (details: https://github.com/canonical/mysql-operator/pull/472#discussion_r1659300069)
 @markers.amd64_only
 @pytest.mark.abort_on_fail
-async def test_build_and_deploy(ops_test: OpsTest) -> None:
+async def test_build_and_deploy(ops_test: OpsTest, charm) -> None:
     """Simple test to ensure that the mysql and application charms get deployed."""
-    charm = await charm_local_build(ops_test)
-
     config = {"profile": "testing", "plugin-audit-enabled": "false"}
     # MySQL 8.0.34 image, last known minor version incompatible
     resources = {
         "mysql-image": "ghcr.io/canonical/charmed-mysql@sha256:0f5fe7d7679b1881afde24ecfb9d14a9daade790ec787087aa5d8de1d7b00b21"
     }
-    async with ops_test.fast_forward("10s"):
-        await ops_test.model.deploy(
-            charm,
-            application_name=MYSQL_APP_NAME,
-            config=config,
-            num_units=3,
-            resources=resources,
-            trust=True,
-            base="ubuntu@22.04",
-        )
+    await ops_test.model.deploy(
+        charm,
+        application_name=MYSQL_APP_NAME,
+        config=config,
+        num_units=3,
+        resources=resources,
+        trust=True,
+        base="ubuntu@22.04",
+    )
 
+    async with ops_test.fast_forward("30s"):
         await ops_test.model.wait_for_idle(
             apps=[MYSQL_APP_NAME],
             status="active",
@@ -58,7 +53,6 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None:
         )
 
 
-@pytest.mark.group(1)
 # TODO: remove after next incompatible MySQL server version released in our snap
 # (details: https://github.com/canonical/mysql-operator/pull/472#discussion_r1659300069)
 @markers.amd64_only
@@ -73,22 +67,21 @@ async def test_pre_upgrade_check(ops_test: OpsTest) -> None:
     await juju_.run_action(leader_unit, "pre-upgrade-check")
 
 
-@pytest.mark.group(1)
 # TODO: remove after next incompatible MySQL server version released in our snap
 # (details: https://github.com/canonical/mysql-operator/pull/472#discussion_r1659300069)
 @markers.amd64_only
 @pytest.mark.abort_on_fail
-async def test_upgrade_to_failling(ops_test: OpsTest) -> None:
+async def test_upgrade_to_failling(ops_test: OpsTest, charm) -> None:
+    assert ops_test.model
     application = ops_test.model.applications[MYSQL_APP_NAME]
-    logger.info("Build charm locally")
 
-    sub_regex_failing_rejoin = (
-        's/logger.debug("Recovering unit")'
-        '/self.charm._mysql.set_instance_offline_mode(True); raise RetryError("dummy")/'
-    )
-    src_patch(sub_regex=sub_regex_failing_rejoin, file_name="src/upgrade.py")
-    new_charm = await charm_local_build(ops_test, refresh=True)
-    src_patch(revert=True)
+    with InjectFailure(
+        path="src/upgrade.py",
+        original_str="self.charm.recover_unit_after_restart()",
+        replace_str="raise MySQLServiceNotRunningError",
+    ):
+        logger.info("Build charm with failure injected")
+        new_charm = await charm_local_build(ops_test, charm, refresh=True)
 
     logger.info("Refresh the charm")
     # Current MySQL Image > 8.0.34
@@ -113,16 +106,13 @@ async def test_upgrade_to_failling(ops_test: OpsTest) -> None:
     )
 
 
-@pytest.mark.group(1)
 # TODO: remove after next incompatible MySQL server version released in our rock
 # (details: https://github.com/canonical/mysql-operator/pull/472#discussion_r1659300069)
 @markers.amd64_only
 @pytest.mark.abort_on_fail
-async def test_rollback(ops_test) -> None:
+async def test_rollback(ops_test, charm) -> None:
     application = ops_test.model.applications[MYSQL_APP_NAME]
 
-    charm = await charm_local_build(ops_test, refresh=True)
-
     logger.info("Get leader unit")
     leader_unit = await get_leader_unit(ops_test, MYSQL_APP_NAME)
 
@@ -152,6 +142,11 @@ async def test_rollback(ops_test) -> None:
         wait_period=5,
     )
 
+    logger.info("Ensure rollback has taken place")
+    message = "Downgrade is incompatible. Resetting workload"
+    warnings = await get_model_logs(ops_test, log_level="WARNING")
+    assert message in warnings
+
     logger.info("Resume upgrade")
     while get_sts_partition(ops_test, MYSQL_APP_NAME) == 2:
         # resume action sometime fails in CI, no clear reason
@@ -172,18 +167,29 @@ async def test_rollback(ops_test) -> None:
     )
 
 
-def src_patch(sub_regex: str = "", file_name: str = "", revert: bool = False) -> None:
-    """Apply a patch to the source code."""
-    if revert:
-        cmd = "git checkout src/"  # revert changes on src/ dir
-        logger.info("Reverting patch on source")
-    else:
-        cmd = f"sed -i -e '{sub_regex}' {file_name}"
-        logger.info("Applying patch to source")
-    subprocess.run([cmd], shell=True, check=True)
+class InjectFailure:
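+    """Context manager that temporarily patches a source file to inject a failure.
+
+    On enter, original_str is replaced with replace_str in the file at path; on exit,
+    the original file content is restored.
+    """
+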
+    def __init__(self, path: str, original_str: str, replace_str: str):
+        self.path = path
+        self.original_str = original_str
+        self.replace_str = replace_str
+        with open(path, "r") as file:
+            self.original_content = file.read()
+
+    def __enter__(self):
+        logger.info("Injecting failure")
+        assert self.original_str in self.original_content, "original string not found in file"
+        new_content = self.original_content.replace(self.original_str, self.replace_str)
+        assert self.original_str not in new_content, "original string not replaced"
+        with open(self.path, "w") as file:
+            file.write(new_content)
 
+    def __exit__(self, exc_type, exc_value, traceback):
+        logger.info("Reverting failure")
+        with open(self.path, "w") as file:
+            file.write(self.original_content)
 
-async def charm_local_build(ops_test: OpsTest, refresh: bool = False):
+
+async def charm_local_build(ops_test: OpsTest, charm, refresh: bool = False):
     """Wrapper for a local charm build zip file updating."""
     local_charms = pathlib.Path().glob("local-*.charm")
     for lc in local_charms:
@@ -191,22 +197,18 @@ async def charm_local_build(ops_test: OpsTest, refresh: bool = False):
         # pytest_operator_cache globbing them
         lc.unlink()
 
-    charm = await ops_test.build_charm(".")
-
-    if os.environ.get("CI") == "true":
-        # CI will get charm from common cache
-        # make local copy and update charm zip
+    # update charm zip
 
-        update_files = ["src/constants.py", "src/upgrade.py"]
+    update_files = ["src/constants.py", "src/upgrade.py"]
 
-        charm = pathlib.Path(shutil.copy(charm, f"local-{charm.stem}.charm"))
+    charm = pathlib.Path(shutil.copy(charm, f"local-{pathlib.Path(charm).stem}.charm"))
 
-        for path in update_files:
-            with open(path, "r") as f:
-                content = f.read()
+    for path in update_files:
+        with open(path, "r") as f:
+            content = f.read()
 
-            with ZipFile(charm, mode="a") as charm_zip:
-                charm_zip.writestr(path, content)
+        with ZipFile(charm, mode="a") as charm_zip:
+            charm_zip.writestr(path, content)
 
     if refresh:
         # when refreshing, return posix path
diff --git a/tests/integration/relations/test_database.py b/tests/integration/relations/test_database.py
index 64e228e22..52e36fe1f 100644
--- a/tests/integration/relations/test_database.py
+++ b/tests/integration/relations/test_database.py
@@ -26,20 +26,16 @@
 ENDPOINT = "database"
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 @pytest.mark.skip_if_deployed
-async def test_build_and_deploy(ops_test: OpsTest):
+async def test_build_and_deploy(ops_test: OpsTest, charm):
     """Build the charm and deploy 3 units to ensure a cluster is formed."""
-    # Build and deploy charm from local source folder
-    db_charm = await ops_test.build_charm(".")
-
     config = {"cluster-name": CLUSTER_NAME, "profile": "testing"}
     resources = {"mysql-image": DB_METADATA["resources"]["mysql-image"]["upstream-source"]}
 
     await asyncio.gather(
         ops_test.model.deploy(
-            db_charm,
+            charm,
             application_name=DATABASE_APP_NAME,
             config=config,
             num_units=3,
@@ -90,7 +86,6 @@ async def test_build_and_deploy(ops_test: OpsTest):
     assert len(ops_test.model.applications[APPLICATION_APP_NAME].units) == 2
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 @markers.only_without_juju_secrets
 async def test_relation_creation_databag(ops_test: OpsTest):
@@ -107,7 +102,6 @@ async def test_relation_creation_databag(ops_test: OpsTest):
     assert {"password", "username"} <= set(relation_data[0]["application-data"])
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 @markers.only_with_juju_secrets
 async def test_relation_creation(ops_test: OpsTest):
@@ -125,7 +119,6 @@ async def test_relation_creation(ops_test: OpsTest):
     assert "secret-user" in relation_data[0]["application-data"]
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_relation_broken(ops_test: OpsTest):
     """Remove relation and wait for the expected changes in status."""
diff --git a/tests/integration/relations/test_mysql_root.py b/tests/integration/relations/test_mysql_root.py
index 486bbff5d..ec1ab23ed 100644
--- a/tests/integration/relations/test_mysql_root.py
+++ b/tests/integration/relations/test_mysql_root.py
@@ -28,12 +28,10 @@
 
 
 # TODO: deploy and relate osm-grafana once it can be use with MySQL Group Replication
-@pytest.mark.group(1)
 @markers.amd64_only  # kafka-k8s charm not available for arm64
-async def test_deploy_and_relate_osm_bundle(ops_test: OpsTest) -> None:
+async def test_deploy_and_relate_osm_bundle(ops_test: OpsTest, charm) -> None:
     """Test the deployment and relation with osm bundle with mysql replacing mariadb."""
     async with ops_test.fast_forward("60s"):
-        charm = await ops_test.build_charm(".")
         resources = {"mysql-image": METADATA["resources"]["mysql-image"]["upstream-source"]}
         config = {
             "mysql-root-interface-user": "keystone",
@@ -173,7 +171,6 @@ async def test_deploy_and_relate_osm_bundle(ops_test: OpsTest) -> None:
 
 
 @pytest.mark.abort_on_fail
-@pytest.mark.group(1)
 @markers.amd64_only  # kafka-k8s charm not available for arm64
 async def test_osm_pol_operations(ops_test: OpsTest) -> None:
     """Test the existence of databases and tables created by osm-pol's migrations."""
diff --git a/tests/integration/test_architecture.py b/tests/integration/test_architecture.py
new file mode 100644
index 000000000..9b16e01ef
--- /dev/null
+++ b/tests/integration/test_architecture.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+from pathlib import Path
+
+import yaml
+from pytest_operator.plugin import OpsTest
+
+from . import markers
+
+METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
+APP_NAME = METADATA["name"]
+
+
+@markers.amd64_only
+async def test_arm_charm_on_amd_host(ops_test: OpsTest) -> None:
+    """Tries deploying an arm64 charm on amd64 host."""
+    charm = "./mysql-k8s_ubuntu@22.04-arm64.charm"
+
+    await ops_test.model.deploy(
+        charm,
+        application_name=APP_NAME,
+        num_units=1,
+        config={"profile": "testing"},
+        resources={"mysql-image": METADATA["resources"]["mysql-image"]["upstream-source"]},
+        base="ubuntu@22.04",
+    )
+
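+    # Deploying a charm built for the wrong architecture is expected to put the unit
+    # into an error state, so wait for "error" instead of raising on it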
+    await ops_test.model.wait_for_idle(
+        apps=[APP_NAME],
+        status="error",
+        raise_on_error=False,
+    )
+
+
+@markers.arm64_only
+async def test_amd_charm_on_arm_host(ops_test: OpsTest) -> None:
+    """Tries deploying an amd64 charm on arm64 host."""
+    charm = "./mysql-k8s_ubuntu@22.04-amd64.charm"
+
+    await ops_test.model.deploy(
+        charm,
+        application_name=APP_NAME,
+        num_units=1,
+        config={"profile": "testing"},
+        resources={"mysql-image": METADATA["resources"]["mysql-image"]["upstream-source"]},
+        base="ubuntu@22.04",
+    )
+
+    await ops_test.model.wait_for_idle(
+        apps=[APP_NAME],
+        status="error",
+        raise_on_error=False,
+    )
diff --git a/tests/integration/test_backup_aws.py b/tests/integration/test_backup_aws.py
index fc63b1eca..d70437609 100644
--- a/tests/integration/test_backup_aws.py
+++ b/tests/integration/test_backup_aws.py
@@ -3,6 +3,7 @@
 # See LICENSE file for licensing details.
 
 import logging
+import os
 import socket
 import uuid
 from pathlib import Path
@@ -40,11 +41,11 @@
 
 
 @pytest.fixture(scope="session")
-def cloud_credentials(github_secrets) -> dict[str, str]:
+def cloud_credentials() -> dict[str, str]:
     """Read cloud credentials."""
     return {
-        "access-key": github_secrets["AWS_ACCESS_KEY"],
-        "secret-key": github_secrets["AWS_SECRET_KEY"],
+        "access-key": os.environ["AWS_ACCESS_KEY"],
+        "secret-key": os.environ["AWS_SECRET_KEY"],
     }
 
 
@@ -82,11 +83,10 @@ def clean_backups_from_buckets(cloud_credentials, cloud_configs):
         bucket_object.delete()
 
 
-@pytest.mark.group(1)
-async def test_build_and_deploy(ops_test: OpsTest) -> None:
+async def test_build_and_deploy(ops_test: OpsTest, charm) -> None:
     """Simple test to ensure that the mysql charm gets deployed."""
     # TODO: deploy 3 units when bug https://bugs.launchpad.net/juju/+bug/1995466 is resolved
-    mysql_application_name = await deploy_and_scale_mysql(ops_test, num_units=1)
+    mysql_application_name = await deploy_and_scale_mysql(ops_test, charm, num_units=1)
 
     mysql_unit = ops_test.model.units[f"{mysql_application_name}/0"]
     assert mysql_unit
@@ -109,12 +109,13 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None:
     )
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
-async def test_backup(ops_test: OpsTest, cloud_credentials, cloud_configs, credentials) -> None:
+async def test_backup(
+    ops_test: OpsTest, charm, cloud_credentials, cloud_configs, credentials
+) -> None:
     """Test to create a backup and list backups."""
     # TODO: deploy 3 units when bug https://bugs.launchpad.net/juju/+bug/1995466 is resolved
-    mysql_application_name = await deploy_and_scale_mysql(ops_test, num_units=1)
+    mysql_application_name = await deploy_and_scale_mysql(ops_test, charm, num_units=1)
 
     global backup_id, backups_by_cloud, value_before_backup, value_after_backup
 
@@ -177,14 +178,13 @@ async def test_backup(ops_test: OpsTest, cloud_credentials, cloud_configs, crede
     )
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_restore_on_same_cluster(
-    ops_test: OpsTest, cloud_credentials, cloud_configs, credentials
+    ops_test: OpsTest, charm, cloud_credentials, cloud_configs, credentials
 ) -> None:
     """Test to restore a backup to the same mysql cluster."""
     # TODO: deploy 3 units when bug https://bugs.launchpad.net/juju/+bug/1995466 is resolved
-    mysql_application_name = await deploy_and_scale_mysql(ops_test, num_units=1)
+    mysql_application_name = await deploy_and_scale_mysql(ops_test, charm, num_units=1)
 
     mysql_unit = ops_test.model.units[f"{mysql_application_name}/0"]
     assert mysql_unit
@@ -262,14 +262,16 @@ async def test_restore_on_same_cluster(
         assert sorted(values) == sorted([value_before_backup, value_after_restore])
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
-async def test_restore_on_new_cluster(ops_test: OpsTest, cloud_credentials, cloud_configs) -> None:
+async def test_restore_on_new_cluster(
+    ops_test: OpsTest, charm, cloud_credentials, cloud_configs
+) -> None:
     """Test to restore a backup on a new mysql cluster."""
     logger.info("Deploying a new mysql cluster")
 
     new_mysql_application_name = await deploy_and_scale_mysql(
         ops_test,
+        charm,
         check_for_existing_application=False,
         mysql_application_name="another-mysql-k8s",
         num_units=1,
diff --git a/tests/integration/test_backup_ceph.py b/tests/integration/test_backup_ceph.py
index 161e2500f..a9d920837 100644
--- a/tests/integration/test_backup_ceph.py
+++ b/tests/integration/test_backup_ceph.py
@@ -2,13 +2,18 @@
 # Copyright 2022 Canonical Ltd.
 # See LICENSE file for licensing details.
 
+import dataclasses
+import json
 import logging
+import os
 import socket
+import subprocess
+import time
 from pathlib import Path
 
 import boto3
+import botocore.exceptions
 import pytest
-import pytest_microceph
 from pytest_operator.plugin import OpsTest
 
 from . import juju_
@@ -37,10 +42,66 @@
 TABLE_NAME = "backup-table"
 CLOUD = "ceph"
 value_before_backup, value_after_backup = None, None
+MICROCEPH_BUCKET = "testbucket"
+
+
+@dataclasses.dataclass(frozen=True)
+class MicrocephConnectionInformation:
+    access_key_id: str
+    secret_access_key: str
+    bucket: str
 
 
 @pytest.fixture(scope="session")
-def cloud_credentials(microceph: pytest_microceph.ConnectionInformation) -> dict[str, str]:
+def microceph():
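+    """Install and bootstrap microceph on the host, returning S3 (RGW) connection info."""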
+    if os.environ.get("CI") != "true":
+        raise Exception("Not running in CI. Refusing to install microceph on this host")
+    logger.info("Setting up microceph")
+    subprocess.run(["sudo", "snap", "install", "microceph"], check=True)
+    subprocess.run(["sudo", "microceph", "cluster", "bootstrap"], check=True)
+    subprocess.run(["sudo", "microceph", "disk", "add", "loop,4G,3"], check=True)
+    subprocess.run(["sudo", "microceph", "enable", "rgw"], check=True)
+    output = subprocess.run(
+        [
+            "sudo",
+            "microceph.radosgw-admin",
+            "user",
+            "create",
+            "--uid",
+            "test",
+            "--display-name",
+            "test",
+        ],
+        capture_output=True,
+        check=True,
+        encoding="utf-8",
+    ).stdout
+    key = json.loads(output)["keys"][0]
+    key_id = key["access_key"]
+    secret_key = key["secret_key"]
+    logger.info("Creating microceph bucket")
+    for attempt in range(3):
+        try:
+            boto3.client(
+                "s3",
+                endpoint_url="http://localhost",
+                aws_access_key_id=key_id,
+                aws_secret_access_key=secret_key,
+            ).create_bucket(Bucket=MICROCEPH_BUCKET)
+        except botocore.exceptions.EndpointConnectionError:
+            if attempt == 2:
+                raise
+            # microceph is not ready yet
+            logger.info("Unable to connect to microceph via S3. Retrying")
+            time.sleep(1)
+        else:
+            break
+    logger.info("Set up microceph")
+    return MicrocephConnectionInformation(key_id, secret_key, MICROCEPH_BUCKET)
+
+
+@pytest.fixture(scope="session")
+def cloud_credentials(microceph) -> dict[str, str]:
     """Read cloud credentials."""
     return {
         "access-key": microceph.access_key_id,
@@ -49,7 +110,7 @@ def cloud_credentials(microceph: pytest_microceph.ConnectionInformation) -> dict
 
 
 @pytest.fixture(scope="session")
-def cloud_configs(microceph: pytest_microceph.ConnectionInformation) -> dict[str, str]:
+def cloud_configs(microceph) -> dict[str, str]:
     return {
         "endpoint": f"http://{host_ip}",
         "bucket": microceph.bucket,
@@ -78,11 +139,10 @@ def clean_backups_from_buckets(cloud_credentials, cloud_configs):
         bucket_object.delete()
 
 
-@pytest.mark.group(1)
-async def test_build_and_deploy(ops_test: OpsTest) -> None:
+async def test_build_and_deploy(ops_test: OpsTest, charm) -> None:
     """Simple test to ensure that the mysql charm gets deployed."""
     # TODO: deploy 3 units when bug https://bugs.launchpad.net/juju/+bug/1995466 is resolved
-    mysql_application_name = await deploy_and_scale_mysql(ops_test, num_units=1)
+    mysql_application_name = await deploy_and_scale_mysql(ops_test, charm, num_units=1)
 
     mysql_unit = ops_test.model.units[f"{mysql_application_name}/0"]
     assert mysql_unit
@@ -105,12 +165,13 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None:
     )
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
-async def test_backup(ops_test: OpsTest, cloud_credentials, cloud_configs, credentials) -> None:
+async def test_backup(
+    ops_test: OpsTest, charm, cloud_credentials, cloud_configs, credentials
+) -> None:
     """Test to create a backup and list backups."""
     # TODO: deploy 3 units when bug https://bugs.launchpad.net/juju/+bug/1995466 is resolved
-    mysql_application_name = await deploy_and_scale_mysql(ops_test, num_units=1)
+    mysql_application_name = await deploy_and_scale_mysql(ops_test, charm, num_units=1)
 
     global backup_id, backups_by_cloud, value_before_backup, value_after_backup
 
@@ -173,14 +234,13 @@ async def test_backup(ops_test: OpsTest, cloud_credentials, cloud_configs, crede
     )
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_restore_on_same_cluster(
-    ops_test: OpsTest, cloud_credentials, cloud_configs, credentials
+    ops_test: OpsTest, charm, cloud_credentials, cloud_configs, credentials
 ) -> None:
     """Test to restore a backup to the same mysql cluster."""
     # TODO: deploy 3 units when bug https://bugs.launchpad.net/juju/+bug/1995466 is resolved
-    mysql_application_name = await deploy_and_scale_mysql(ops_test, num_units=1)
+    mysql_application_name = await deploy_and_scale_mysql(ops_test, charm, num_units=1)
 
     mysql_unit = ops_test.model.units[f"{mysql_application_name}/0"]
     assert mysql_unit
@@ -258,14 +318,16 @@ async def test_restore_on_same_cluster(
         assert sorted(values) == sorted([value_before_backup, value_after_restore])
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
-async def test_restore_on_new_cluster(ops_test: OpsTest, cloud_credentials, cloud_configs) -> None:
+async def test_restore_on_new_cluster(
+    ops_test: OpsTest, charm, cloud_credentials, cloud_configs
+) -> None:
     """Test to restore a backup on a new mysql cluster."""
     logger.info("Deploying a new mysql cluster")
 
     new_mysql_application_name = await deploy_and_scale_mysql(
         ops_test,
+        charm,
         check_for_existing_application=False,
         mysql_application_name="another-mysql-k8s",
         num_units=1,
diff --git a/tests/integration/test_backup_gcp.py b/tests/integration/test_backup_gcp.py
index bb927914b..0d2205f41 100644
--- a/tests/integration/test_backup_gcp.py
+++ b/tests/integration/test_backup_gcp.py
@@ -3,6 +3,7 @@
 # See LICENSE file for licensing details.
 
 import logging
+import os
 import socket
 import uuid
 from pathlib import Path
@@ -40,11 +41,11 @@
 
 
 @pytest.fixture(scope="session")
-def cloud_credentials(github_secrets) -> dict[str, str]:
+def cloud_credentials() -> dict[str, str]:
     """Read cloud credentials."""
     return {
-        "access-key": github_secrets["GCP_ACCESS_KEY"],
-        "secret-key": github_secrets["GCP_SECRET_KEY"],
+        "access-key": os.environ["GCP_ACCESS_KEY"],
+        "secret-key": os.environ["GCP_SECRET_KEY"],
     }
 
 
@@ -82,11 +83,10 @@ def clean_backups_from_buckets(cloud_credentials, cloud_configs):
         bucket_object.delete()
 
 
-@pytest.mark.group(1)
-async def test_build_and_deploy(ops_test: OpsTest) -> None:
+async def test_build_and_deploy(ops_test: OpsTest, charm) -> None:
     """Simple test to ensure that the mysql charm gets deployed."""
     # TODO: deploy 3 units when bug https://bugs.launchpad.net/juju/+bug/1995466 is resolved
-    mysql_application_name = await deploy_and_scale_mysql(ops_test, num_units=1)
+    mysql_application_name = await deploy_and_scale_mysql(ops_test, charm, num_units=1)
 
     mysql_unit = ops_test.model.units[f"{mysql_application_name}/0"]
     assert mysql_unit
@@ -109,12 +109,13 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None:
     )
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
-async def test_backup(ops_test: OpsTest, cloud_credentials, cloud_configs, credentials) -> None:
+async def test_backup(
+    ops_test: OpsTest, charm, cloud_credentials, cloud_configs, credentials
+) -> None:
     """Test to create a backup and list backups."""
     # TODO: deploy 3 units when bug https://bugs.launchpad.net/juju/+bug/1995466 is resolved
-    mysql_application_name = await deploy_and_scale_mysql(ops_test, num_units=1)
+    mysql_application_name = await deploy_and_scale_mysql(ops_test, charm, num_units=1)
 
     global backup_id, backups_by_cloud, value_before_backup, value_after_backup
 
@@ -177,14 +178,13 @@ async def test_backup(ops_test: OpsTest, cloud_credentials, cloud_configs, crede
     )
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_restore_on_same_cluster(
-    ops_test: OpsTest, cloud_credentials, cloud_configs, credentials
+    ops_test: OpsTest, charm, cloud_credentials, cloud_configs, credentials
 ) -> None:
     """Test to restore a backup to the same mysql cluster."""
     # TODO: deploy 3 units when bug https://bugs.launchpad.net/juju/+bug/1995466 is resolved
-    mysql_application_name = await deploy_and_scale_mysql(ops_test, num_units=1)
+    mysql_application_name = await deploy_and_scale_mysql(ops_test, charm, num_units=1)
 
     mysql_unit = ops_test.model.units[f"{mysql_application_name}/0"]
     assert mysql_unit
@@ -262,14 +262,16 @@ async def test_restore_on_same_cluster(
         assert sorted(values) == sorted([value_before_backup, value_after_restore])
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
-async def test_restore_on_new_cluster(ops_test: OpsTest, cloud_credentials, cloud_configs) -> None:
+async def test_restore_on_new_cluster(
+    ops_test: OpsTest, charm, cloud_credentials, cloud_configs
+) -> None:
     """Test to restore a backup on a new mysql cluster."""
     logger.info("Deploying a new mysql cluster")
 
     new_mysql_application_name = await deploy_and_scale_mysql(
         ops_test,
+        charm,
         check_for_existing_application=False,
         mysql_application_name="another-mysql-k8s",
         num_units=1,
diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py
index fc44a0e4e..b9b1e3563 100644
--- a/tests/integration/test_charm.py
+++ b/tests/integration/test_charm.py
@@ -36,13 +36,11 @@
 TIMEOUT = 15 * 60
 
 
-@pytest.mark.group(1)
 @pytest.mark.skip_if_deployed
 @pytest.mark.abort_on_fail
-async def test_build_and_deploy(ops_test: OpsTest) -> None:
+async def test_build_and_deploy(ops_test: OpsTest, charm) -> None:
     """Build the mysql charm and deploy it."""
     async with ops_test.fast_forward("60s"):
-        charm = await ops_test.build_charm(".")
         resources = {"mysql-image": METADATA["resources"]["mysql-image"]["upstream-source"]}
         config = {"cluster-name": CLUSTER_NAME, "profile": "testing"}
         await ops_test.model.deploy(
@@ -85,7 +83,6 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None:
             assert output[0] == 3
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_consistent_data_replication_across_cluster(ops_test: OpsTest) -> None:
     """Confirm that data is replicated from the primary node to all the replicas."""
@@ -138,7 +135,6 @@ async def test_consistent_data_replication_across_cluster(ops_test: OpsTest) ->
         assert False
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_scale_up_and_down(ops_test: OpsTest) -> None:
     """Confirm that a new primary is elected when the current primary is torn down."""
@@ -183,7 +179,6 @@ async def test_scale_up_and_down(ops_test: OpsTest) -> None:
         assert len(not_online_member_addresses) == 0
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_scale_up_after_scale_down(ops_test: OpsTest) -> None:
     """Confirm storage reuse works."""
@@ -201,7 +196,28 @@ async def test_scale_up_after_scale_down(ops_test: OpsTest) -> None:
         assert len(online_member_addresses) == 3
 
 
-@pytest.mark.group(1)
+@pytest.mark.abort_on_fail
+async def test_scale_up_from_zero(ops_test: OpsTest) -> None:
+    """Ensure scaling down to zero and back up works."""
+    await scale_application(ops_test, APP_NAME, 0)
+
+    await ops_test.model.block_until(
+        lambda: len(ops_test.model.applications[APP_NAME].units) == 0,
+        timeout=TIMEOUT,
+    )
+
+    await scale_application(ops_test, APP_NAME, 3)
+
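+    # after scaling back up, all three members are expected to rejoin and report online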
+    random_unit = ops_test.model.applications[APP_NAME].units[0]
+    cluster_status = await get_cluster_status(random_unit)
+    online_member_addresses = [
+        member["address"]
+        for member in cluster_status["defaultreplicaset"]["topology"].values()
+        if member["status"] == "online"
+    ]
+    assert len(online_member_addresses) == 3
+
+
 @pytest.mark.abort_on_fail
 async def test_password_rotation(ops_test: OpsTest):
     """Rotate password and confirm changes."""
@@ -235,7 +251,6 @@ async def test_password_rotation(ops_test: OpsTest):
     )
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_password_rotation_silent(ops_test: OpsTest):
     """Rotate password and confirm changes."""
@@ -264,7 +279,6 @@ async def test_password_rotation_silent(ops_test: OpsTest):
     )
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_password_rotation_root_user_implicit(ops_test: OpsTest):
     """Rotate password and confirm changes."""
@@ -291,7 +305,6 @@ async def test_password_rotation_root_user_implicit(ops_test: OpsTest):
     assert updated_credentials["password"] == updated_root_credentials["password"]
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_exporter_endpoints(ops_test: OpsTest) -> None:
     """Test that endpoints are running."""
@@ -312,7 +325,6 @@ async def test_exporter_endpoints(ops_test: OpsTest) -> None:
         ), "Scrape error in mysql_exporter"
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_custom_variables(ops_test: OpsTest) -> None:
     """Query database for custom variables."""
diff --git a/tests/integration/test_cos_integration_bundle.py b/tests/integration/test_cos_integration_bundle.py
new file mode 100644
index 000000000..b21f89aad
--- /dev/null
+++ b/tests/integration/test_cos_integration_bundle.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+import pathlib
+import tempfile
+
+import jinja2
+import pytest
+import yaml
+from pytest_operator.plugin import OpsTest
+
+logger = logging.getLogger(__name__)
+
+METADATA = yaml.safe_load(pathlib.Path("./metadata.yaml").read_text())
+IMAGE_SOURCE = METADATA["resources"]["mysql-image"]["upstream-source"]
+TIMEOUT = 10 * 60
+
+
+@pytest.mark.abort_on_fail
+async def test_deploy_bundle_with_cos_integrations(ops_test: OpsTest, charm) -> None:
+    """Test COS integrations formed before mysql is allocated and deployed."""
+    bundle_template = jinja2.Template(
+        pathlib.Path(
+            "./tests/integration/bundle_templates/grafana_agent_integration.j2"
+        ).read_text()
+    )
+    rendered_bundle = bundle_template.render(
+        mysql_charm_path=str(pathlib.Path(charm).absolute()), mysql_image_source=IMAGE_SOURCE
+    )
+
+    with tempfile.NamedTemporaryFile(mode="w+", suffix=".yaml") as rendered_bundle_file:
+        rendered_bundle_file.write(rendered_bundle)
+        rendered_bundle_file.flush()
+
+        logger.info("Deploying grafana_agent_integration bundle")
+        await ops_test.model.deploy(f"local:{rendered_bundle_file.name}", trust=True)
+
+    logger.info("Waiting until mysql-k8s becomes active")
+    await ops_test.model.wait_for_idle(
+        apps=["mysql-k8s"],
+        status="active",
+        raise_on_blocked=True,
+        timeout=TIMEOUT,
+    )
diff --git a/tests/integration/test_multi_relations.py b/tests/integration/test_multi_relations.py
index 271e96605..c0495283b 100644
--- a/tests/integration/test_multi_relations.py
+++ b/tests/integration/test_multi_relations.py
@@ -14,18 +14,14 @@
 SCALE_UNITS = 3
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
-async def test_build_and_deploy(ops_test: OpsTest):
+async def test_build_and_deploy(ops_test: OpsTest, charm):
     """Build the charm and deploy 1 units to ensure a cluster is formed."""
-    # Build and deploy charm from local source folder
-    db_charm = await ops_test.build_charm(".")
-
     config = {"profile": "testing"}
     resources = {"mysql-image": DB_METADATA["resources"]["mysql-image"]["upstream-source"]}
 
     await ops_test.model.deploy(
-        db_charm,
+        charm,
         application_name="mysql",
         config=config,
         num_units=1,
@@ -42,6 +38,7 @@ async def test_build_and_deploy(ops_test: OpsTest):
             num_units=1,
             channel="latest/edge",
             config=config,
+            base="ubuntu@22.04",
         )
         await ops_test.model.deploy(
             "mysql-router-k8s",
@@ -49,10 +46,10 @@ async def test_build_and_deploy(ops_test: OpsTest):
             num_units=1,
             channel="8.0/edge",
             trust=True,
+            base="ubuntu@22.04",
         )
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_relate_all(ops_test: OpsTest):
     """Relate all the applications to the database."""
@@ -67,7 +64,6 @@ async def test_relate_all(ops_test: OpsTest):
     )
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_scale_out(ops_test: OpsTest):
     """Scale database and routers."""
@@ -83,7 +79,6 @@ async def test_scale_out(ops_test: OpsTest):
     )
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_scale_in(ops_test: OpsTest):
     """Scale database and routers."""
diff --git a/tests/integration/test_saturate_max_connections.py b/tests/integration/test_saturate_max_connections.py
new file mode 100644
index 000000000..ca1c2aca6
--- /dev/null
+++ b/tests/integration/test_saturate_max_connections.py
@@ -0,0 +1,97 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+from pathlib import Path
+
+import pytest
+import yaml
+from mysql.connector.errors import OperationalError
+from pytest_operator.plugin import OpsTest
+
+from .connector import create_db_connections
+from .helpers import get_unit_address
+from .juju_ import run_action
+
+logger = logging.getLogger(__name__)
+
+MYSQL_APP_NAME = "mysql"
+TEST_APP_NAME = "app"
+CONNECTIONS = 10
+METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
+
+
+@pytest.mark.abort_on_fail
+async def test_build_and_deploy(ops_test: OpsTest, charm) -> None:
+    """Build the charm and deploy 1 units to ensure a cluster is formed."""
+    config = {"profile-limit-memory": "2000", "experimental-max-connections": CONNECTIONS}
+    resources = {"mysql-image": METADATA["resources"]["mysql-image"]["upstream-source"]}
+
+    await ops_test.model.deploy(
+        charm,
+        application_name=MYSQL_APP_NAME,
+        config=config,
+        num_units=1,
+        base="ubuntu@22.04",
+        resources=resources,
+        trust=True,
+    )
+
+
+@pytest.mark.abort_on_fail
+async def test_deploy_and_relate_test_app(ops_test: OpsTest) -> None:
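+    """Deploy the test application and relate it to the mysql cluster."""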
+    config = {"auto_start_writes": False, "sleep_interval": "500"}
+    logger.info("Deploying test app")
+    await ops_test.model.deploy(
+        "mysql-test-app",
+        application_name=TEST_APP_NAME,
+        num_units=1,
+        base="ubuntu@22.04",
+        config=config,
+        channel="latest/edge",
+    )
+
+    logger.info("Relating test app to mysql")
+    await ops_test.model.relate(MYSQL_APP_NAME, f"{TEST_APP_NAME}:database")
+
+    logger.info("Waiting all to be active")
+    await ops_test.model.block_until(
+        lambda: all(unit.workload_status == "active" for unit in ops_test.model.units.values()),
+        timeout=60 * 10,
+        wait_period=5,
+    )
+
+
+@pytest.mark.abort_on_fail
+async def test_saturate_max_connections(ops_test: OpsTest) -> None:
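+    """Saturate the connection limit and verify the charm can still query cluster status."""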
+    app_unit = ops_test.model.applications[TEST_APP_NAME].units[0]
+    mysql_unit = ops_test.model.applications[MYSQL_APP_NAME].units[0]
+
+    host_ip = await get_unit_address(ops_test, mysql_unit.name)
+    logger.info("Running action to get app connection data")
+    credentials = await run_action(app_unit, "get-client-connection-data")
+    if "return-code" in credentials:
+        # juju 2.9 doesn't have the return-code key
+        del credentials["return-code"]
+    if "Code" in credentials:
+        del credentials["Code"]
+    credentials["host"] = host_ip
+
+    logger.info(f"Creating {CONNECTIONS} connections")
+    connections = create_db_connections(CONNECTIONS, **credentials)
+    assert isinstance(connections, list), "Connections not created"
+
+    logger.info("Ensure all connections are established")
+    for conn in connections:
+        assert conn.is_connected(), "Connection failed to establish"
+
+    assert len(connections) == CONNECTIONS, "Not all connections were established"
+
+    logger.info("Ensure no more client connections are possible")
+
+    with pytest.raises(OperationalError):
+        # exception raised when too many connections are attempted
+        create_db_connections(1, **credentials)
+
+    logger.info("Get cluster status while connections are saturated")
+    _ = await run_action(mysql_unit, "get-cluster-status")
diff --git a/tests/integration/test_tls.py b/tests/integration/test_tls.py
index f445fb98a..2ea2a951b 100644
--- a/tests/integration/test_tls.py
+++ b/tests/integration/test_tls.py
@@ -46,9 +46,8 @@
     tls_config = {"generate-self-signed-certificates": "true", "ca-common-name": "Test CA"}
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
-async def test_build_and_deploy(ops_test: OpsTest) -> None:
+async def test_build_and_deploy(ops_test: OpsTest, charm) -> None:
     """Build the charm and deploy 3 units to ensure a cluster is formed."""
     # Set model configuration
     await ops_test.model.set_config(MODEL_CONFIG)
@@ -60,8 +59,6 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None:
                 await scale_application(ops_test, app, 3)
             return
 
-    # Build and deploy charm from local source folder
-    charm = await ops_test.build_charm(".")
     resources = {"mysql-image": METADATA["resources"]["mysql-image"]["upstream-source"]}
     config = {"profile": "testing"}
     await ops_test.model.deploy(
@@ -88,7 +85,6 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None:
         )
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_connection_before_tls(ops_test: OpsTest) -> None:
     """Ensure connections (with and without ssl) are possible before relating with TLS operator."""
@@ -118,7 +114,6 @@ async def test_connection_before_tls(ops_test: OpsTest) -> None:
         ), f"❌ Unencrypted connection not possible to unit {unit.name} with disabled TLS"
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_enable_tls(ops_test: OpsTest) -> None:
     """Test for encryption enablement when relation to TLS charm."""
@@ -158,7 +153,6 @@ async def test_enable_tls(ops_test: OpsTest) -> None:
     assert await get_tls_ca(ops_test, all_units[0].name), "❌ No CA found after TLS relation"
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_rotate_tls_key(ops_test: OpsTest) -> None:
     """Verify rotating tls private keys restarts cluster with new certificates.
@@ -208,7 +202,6 @@ async def test_rotate_tls_key(ops_test: OpsTest) -> None:
         ), f"❌ Unencrypted connection possible to unit {unit.name} with enabled TLS"
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
 async def test_disable_tls(ops_test: OpsTest) -> None:
     # Remove the relation
diff --git a/tests/spread/test_architecture.py/task.yaml b/tests/spread/test_architecture.py/task.yaml
new file mode 100644
index 000000000..cad863364
--- /dev/null
+++ b/tests/spread/test_architecture.py/task.yaml
@@ -0,0 +1,9 @@
+summary: test_architecture.py
+environment:
+  TEST_MODULE: test_architecture.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
+backends:
+  - -lxd-vm  # This task requires a charm built on a different architecture from the host
diff --git a/tests/spread/test_async_replication.py/task.yaml b/tests/spread/test_async_replication.py/task.yaml
new file mode 100644
index 000000000..d1116ce09
--- /dev/null
+++ b/tests/spread/test_async_replication.py/task.yaml
@@ -0,0 +1,9 @@
+summary: test_async_replication.py
+environment:
+  TEST_MODULE: high_availability/test_async_replication.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
+variants:
+  - -juju29
diff --git a/tests/spread/test_backup_aws.py/task.yaml b/tests/spread/test_backup_aws.py/task.yaml
new file mode 100644
index 000000000..b15ee3043
--- /dev/null
+++ b/tests/spread/test_backup_aws.py/task.yaml
@@ -0,0 +1,9 @@
+summary: test_backup_aws.py
+environment:
+  TEST_MODULE: test_backup_aws.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
+backends:
+  - -lxd-vm  # Requires CI secrets
diff --git a/tests/spread/test_backup_ceph.py/task.yaml b/tests/spread/test_backup_ceph.py/task.yaml
new file mode 100644
index 000000000..536927e44
--- /dev/null
+++ b/tests/spread/test_backup_ceph.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_backup_ceph.py
+environment:
+  TEST_MODULE: test_backup_ceph.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_backup_gcp.py/task.yaml b/tests/spread/test_backup_gcp.py/task.yaml
new file mode 100644
index 000000000..627a03960
--- /dev/null
+++ b/tests/spread/test_backup_gcp.py/task.yaml
@@ -0,0 +1,9 @@
+summary: test_backup_gcp.py
+environment:
+  TEST_MODULE: test_backup_gcp.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
+backends:
+  - -lxd-vm  # Requires CI secrets
diff --git a/tests/spread/test_charm.py/task.yaml b/tests/spread/test_charm.py/task.yaml
new file mode 100644
index 000000000..96450bdc3
--- /dev/null
+++ b/tests/spread/test_charm.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_charm.py
+environment:
+  TEST_MODULE: test_charm.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_cos_integration_bundle.py/task.yaml b/tests/spread/test_cos_integration_bundle.py/task.yaml
new file mode 100644
index 000000000..4285b5553
--- /dev/null
+++ b/tests/spread/test_cos_integration_bundle.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_cos_integration_bundle.py
+environment:
+  TEST_MODULE: test_cos_integration_bundle.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_crash_during_setup.py/task.yaml b/tests/spread/test_crash_during_setup.py/task.yaml
new file mode 100644
index 000000000..47fb82d81
--- /dev/null
+++ b/tests/spread/test_crash_during_setup.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_crash_during_setup.py
+environment:
+  TEST_MODULE: high_availability/test_crash_during_setup.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_database.py/task.yaml b/tests/spread/test_database.py/task.yaml
new file mode 100644
index 000000000..c18f605c2
--- /dev/null
+++ b/tests/spread/test_database.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_database.py
+environment:
+  TEST_MODULE: relations/test_database.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_k8s_endpoints.py/task.yaml b/tests/spread/test_k8s_endpoints.py/task.yaml
new file mode 100644
index 000000000..341fec072
--- /dev/null
+++ b/tests/spread/test_k8s_endpoints.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_k8s_endpoints.py
+environment:
+  TEST_MODULE: high_availability/test_k8s_endpoints.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_log_rotation.py/task.yaml b/tests/spread/test_log_rotation.py/task.yaml
new file mode 100644
index 000000000..9da1430a2
--- /dev/null
+++ b/tests/spread/test_log_rotation.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_log_rotation.py
+environment:
+  TEST_MODULE: high_availability/test_log_rotation.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_multi_relations.py/task.yaml b/tests/spread/test_multi_relations.py/task.yaml
new file mode 100644
index 000000000..73d60de2d
--- /dev/null
+++ b/tests/spread/test_multi_relations.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_multi_relations.py
+environment:
+  TEST_MODULE: test_multi_relations.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_mysql_root.py/task.yaml b/tests/spread/test_mysql_root.py/task.yaml
new file mode 100644
index 000000000..0b219c423
--- /dev/null
+++ b/tests/spread/test_mysql_root.py/task.yaml
@@ -0,0 +1,9 @@
+summary: test_mysql_root.py
+environment:
+  TEST_MODULE: relations/test_mysql_root.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
+systems:
+  - -ubuntu-24.04-arm
diff --git a/tests/spread/test_node_drain.py/task.yaml b/tests/spread/test_node_drain.py/task.yaml
new file mode 100644
index 000000000..860f13c9e
--- /dev/null
+++ b/tests/spread/test_node_drain.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_node_drain.py
+environment:
+  TEST_MODULE: high_availability/test_node_drain.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_replication_data_consistency.py/task.yaml b/tests/spread/test_replication_data_consistency.py/task.yaml
new file mode 100644
index 000000000..22b4c29ea
--- /dev/null
+++ b/tests/spread/test_replication_data_consistency.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_replication_data_consistency.py
+environment:
+  TEST_MODULE: high_availability/test_replication_data_consistency.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_replication_data_isolation.py/task.yaml b/tests/spread/test_replication_data_isolation.py/task.yaml
new file mode 100644
index 000000000..6980b5ebc
--- /dev/null
+++ b/tests/spread/test_replication_data_isolation.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_replication_data_isolation.py
+environment:
+  TEST_MODULE: high_availability/test_replication_data_isolation.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_replication_reelection.py/task.yaml b/tests/spread/test_replication_reelection.py/task.yaml
new file mode 100644
index 000000000..ec378bdb5
--- /dev/null
+++ b/tests/spread/test_replication_reelection.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_replication_reelection.py
+environment:
+  TEST_MODULE: high_availability/test_replication_reelection.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_replication_scaling.py/task.yaml b/tests/spread/test_replication_scaling.py/task.yaml
new file mode 100644
index 000000000..07ab6765f
--- /dev/null
+++ b/tests/spread/test_replication_scaling.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_replication_scaling.py
+environment:
+  TEST_MODULE: high_availability/test_replication_scaling.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_saturate_max_connections.py/task.yaml b/tests/spread/test_saturate_max_connections.py/task.yaml
new file mode 100644
index 000000000..febd627c8
--- /dev/null
+++ b/tests/spread/test_saturate_max_connections.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_saturate_max_connections.py
+environment:
+  TEST_MODULE: test_saturate_max_connections.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_self_healing_network_cut.py/task.yaml b/tests/spread/test_self_healing_network_cut.py/task.yaml
new file mode 100644
index 000000000..dc94e595f
--- /dev/null
+++ b/tests/spread/test_self_healing_network_cut.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_self_healing_network_cut.py
+environment:
+  TEST_MODULE: high_availability/test_self_healing_network_cut.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_self_healing_pod.py/task.yaml b/tests/spread/test_self_healing_pod.py/task.yaml
new file mode 100644
index 000000000..1aa4fbd8e
--- /dev/null
+++ b/tests/spread/test_self_healing_pod.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_self_healing_pod.py
+environment:
+  TEST_MODULE: high_availability/test_self_healing_pod.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_self_healing_process_frozen.py/task.yaml b/tests/spread/test_self_healing_process_frozen.py/task.yaml
new file mode 100644
index 000000000..131abb77a
--- /dev/null
+++ b/tests/spread/test_self_healing_process_frozen.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_self_healing_process_frozen.py
+environment:
+  TEST_MODULE: high_availability/test_self_healing_process_frozen.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_self_healing_process_killed.py/task.yaml b/tests/spread/test_self_healing_process_killed.py/task.yaml
new file mode 100644
index 000000000..1909addc9
--- /dev/null
+++ b/tests/spread/test_self_healing_process_killed.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_self_healing_process_killed.py
+environment:
+  TEST_MODULE: high_availability/test_self_healing_process_killed.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_self_healing_restart_graceful.py/task.yaml b/tests/spread/test_self_healing_restart_graceful.py/task.yaml
new file mode 100644
index 000000000..38543523c
--- /dev/null
+++ b/tests/spread/test_self_healing_restart_graceful.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_self_healing_restart_graceful.py
+environment:
+  TEST_MODULE: high_availability/test_self_healing_restart_graceful.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_self_healing_stop_all.py/task.yaml b/tests/spread/test_self_healing_stop_all.py/task.yaml
new file mode 100644
index 000000000..5e19cd4a0
--- /dev/null
+++ b/tests/spread/test_self_healing_stop_all.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_self_healing_stop_all.py
+environment:
+  TEST_MODULE: high_availability/test_self_healing_stop_all.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_self_healing_stop_primary.py/task.yaml b/tests/spread/test_self_healing_stop_primary.py/task.yaml
new file mode 100644
index 000000000..d438a4762
--- /dev/null
+++ b/tests/spread/test_self_healing_stop_primary.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_self_healing_stop_primary.py
+environment:
+  TEST_MODULE: high_availability/test_self_healing_stop_primary.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_tls.py/task.yaml b/tests/spread/test_tls.py/task.yaml
new file mode 100644
index 000000000..a60574491
--- /dev/null
+++ b/tests/spread/test_tls.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_tls.py
+environment:
+  TEST_MODULE: test_tls.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_upgrade.py/task.yaml b/tests/spread/test_upgrade.py/task.yaml
new file mode 100644
index 000000000..f99ac6938
--- /dev/null
+++ b/tests/spread/test_upgrade.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_upgrade.py
+environment:
+  TEST_MODULE: high_availability/test_upgrade.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_upgrade_from_stable.py/task.yaml b/tests/spread/test_upgrade_from_stable.py/task.yaml
new file mode 100644
index 000000000..ffdb002d2
--- /dev/null
+++ b/tests/spread/test_upgrade_from_stable.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_upgrade_from_stable.py
+environment:
+  TEST_MODULE: high_availability/test_upgrade_from_stable.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
diff --git a/tests/spread/test_upgrade_rollback_incompat.py/task.yaml b/tests/spread/test_upgrade_rollback_incompat.py/task.yaml
new file mode 100644
index 000000000..590646655
--- /dev/null
+++ b/tests/spread/test_upgrade_rollback_incompat.py/task.yaml
@@ -0,0 +1,9 @@
+summary: test_upgrade_rollback_incompat.py
+environment:
+  TEST_MODULE: high_availability/test_upgrade_rollback_incompat.py
+execute: |
+  tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+  - allure-results
+systems:
+  - -ubuntu-24.04-arm
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index 15fe483c1..ea09858ff 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -2,7 +2,7 @@
 # See LICENSE file for licensing details.
 
 import pytest
-from charms.tempo_k8s.v1.charm_tracing import charm_tracing_disabled
+from charms.tempo_coordinator_k8s.v0.charm_tracing import charm_tracing_disabled
 
 
 @pytest.fixture(autouse=True)
diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py
index 1e09e0464..1949ec8ac 100644
--- a/tests/unit/test_charm.py
+++ b/tests/unit/test_charm.py
@@ -15,6 +15,7 @@
     BACKUPS_PASSWORD_KEY,
     CLUSTER_ADMIN_PASSWORD_KEY,
     MONITORING_PASSWORD_KEY,
+    MYSQLD_LOCATION,
     PASSWORD_LENGTH,
     ROOT_PASSWORD_KEY,
     SERVER_CONFIG_PASSWORD_KEY,
@@ -54,18 +55,27 @@ def use_caplog(self, caplog):
         self._caplog = caplog
 
     def layer_dict(self, with_mysqld_exporter: bool = False):
+        mysqld_cmd = [
+            MYSQLD_LOCATION,
+            "--basedir=/usr",
+            "--datadir=/var/lib/mysql",
+            "--plugin-dir=/usr/lib/mysql/plugin",
+            "--log-error=/var/log/mysql/error.log",
+            f"--pid-file={self.charm.unit_label}.pid",
+        ]
         return {
             "summary": "mysqld services layer",
             "description": "pebble config layer for mysqld safe and exporter",
             "services": {
-                "mysqld_safe": {
+                "mysqld": {
                     "override": "replace",
                     "summary": "mysqld safe",
-                    "command": "mysqld_safe",
+                    "command": " ".join(mysqld_cmd),
                     "startup": "enabled",
                     "user": "mysql",
                     "group": "mysql",
                     "kill-delay": "24h",
+                    "environment": {"MYSQLD_PARENT_PID": 1},
                 },
                 "mysqld_exporter": {
                     "override": "replace",
@@ -141,9 +151,11 @@ def test_on_leader_elected_secrets(self):
     )
     @patch("mysql_k8s_helpers.MySQL.get_max_connections", return_value=120)
     @patch("mysql_k8s_helpers.MySQL.setup_logrotate_config")
+    @patch("mysql_k8s_helpers.MySQL.reset_root_password_and_start_mysqld")
     def test_mysql_pebble_ready(
         self,
         _,
+        __,
         _get_max_connections,
         _get_innodb_buffer_pool_parameters,
         _get_member_state,
diff --git a/tests/unit/test_mysql_k8s_helpers.py b/tests/unit/test_mysql_k8s_helpers.py
index 5fb0a5c35..caea155fa 100644
--- a/tests/unit/test_mysql_k8s_helpers.py
+++ b/tests/unit/test_mysql_k8s_helpers.py
@@ -7,17 +7,17 @@
 
 import tenacity
 from charms.mysql.v0.mysql import MySQLClientError
-from ops.pebble import ExecError
+from ops.pebble import ExecError, PathError
 
 from mysql_k8s_helpers import (
     MYSQLD_SOCK_FILE,
-    MYSQLSH_SCRIPT_FILE,
     MySQL,
     MySQLCreateDatabaseError,
     MySQLCreateUserError,
     MySQLDeleteUsersWithLabelError,
     MySQLEscalateUserPrivilegesError,
     MySQLInitialiseMySQLDError,
+    MySQLServiceNotRunningError,
     MySQLWaitUntilUnitRemovedFromClusterError,
 )
 
@@ -72,7 +72,7 @@ def test_initialise_mysqld(self, _container, _process):
         self.mysql.initialise_mysqld()
 
         _container.exec.assert_called_once_with(
-            command=["mysqld", "--initialize-insecure", "-u", "mysql"],
+            command=["/usr/sbin/mysqld", "--initialize", "-u", "mysql"],
             user="mysql",
             group="mysql",
         )
@@ -106,13 +106,18 @@ def test_wait_until_mysql_connection(self, _container):
     def test_create_database(self, _run_mysqlsh_script):
         """Test successful execution of create_database."""
         _expected_create_database_commands = (
-            "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1')",
+            "shell.connect_to_primary()",
             'session.run_sql("CREATE DATABASE IF NOT EXISTS `test_database`;")',
         )
 
         self.mysql.create_database("test_database")
 
-        _run_mysqlsh_script.assert_called_once_with("\n".join(_expected_create_database_commands))
+        _run_mysqlsh_script.assert_called_once_with(
+            "\n".join(_expected_create_database_commands),
+            user="serverconfig",
+            host="127.0.0.1",
+            password="serverconfigpassword",
+        )
 
     @patch("mysql_k8s_helpers.MySQL._run_mysqlsh_script")
     def test_create_database_exception(self, _run_mysqlsh_script):
@@ -127,13 +132,18 @@ def test_create_user(self, _run_mysqlsh_script):
         """Test successful execution of create_user."""
         _escaped_attributes = json.dumps({"label": "test_label"}).replace('"', r"\"")
         _expected_create_user_commands = (
-            "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1')",
+            "shell.connect_to_primary()",
             f"session.run_sql(\"CREATE USER `test_user`@`%` IDENTIFIED BY 'test_password' ATTRIBUTE '{_escaped_attributes}';\")",
         )
 
         self.mysql.create_user("test_user", "test_password", "test_label")
 
-        _run_mysqlsh_script.assert_called_once_with("\n".join(_expected_create_user_commands))
+        _run_mysqlsh_script.assert_called_once_with(
+            "\n".join(_expected_create_user_commands),
+            user="serverconfig",
+            host="127.0.0.1",
+            password="serverconfigpassword",
+        )
 
     @patch("mysql_k8s_helpers.MySQL._run_mysqlsh_script")
     def test_create_user_exception(self, _run_mysqlsh_script):
@@ -160,16 +170,19 @@ def test_escalate_user_privileges(self, _run_mysqlsh_script):
         )
 
         _expected_escalate_user_privileges_commands = (
-            "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1')",
+            "shell.connect_to_primary()",
             'session.run_sql("GRANT ALL ON *.* TO `test_user`@`%` WITH GRANT OPTION;")',
-            f"session.run_sql(\"REVOKE {', '.join(super_privileges_to_revoke)} ON *.* FROM `test_user`@`%`;\")",
+            f'session.run_sql("REVOKE {", ".join(super_privileges_to_revoke)} ON *.* FROM `test_user`@`%`;")',
             'session.run_sql("FLUSH PRIVILEGES;")',
         )
 
         self.mysql.escalate_user_privileges("test_user")
 
         _run_mysqlsh_script.assert_called_once_with(
-            "\n".join(_expected_escalate_user_privileges_commands)
+            "\n".join(_expected_escalate_user_privileges_commands),
+            user="serverconfig",
+            host="127.0.0.1",
+            password="serverconfigpassword",
         )
 
     @patch("mysql_k8s_helpers.MySQL._run_mysqlsh_script")
@@ -191,21 +204,33 @@ def test_delete_users_with_label(self, _run_mysqlsh_script, _run_mysqlcli_script
             'WHERE attributes.attribute LIKE \'%"test_label_name": "test_label_value"%\'',
         )
 
-        _run_mysqlcli_script.return_value = "users\ntest_user@%\ntest_user_2@localhost"
+        _run_mysqlcli_script.return_value = [
+            [
+                "test_user@%",
+            ],
+            [
+                "test_user_2@localhost",
+            ],
+        ]
 
         _expected_drop_users_commands = (
-            "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1')",
+            "shell.connect_to_primary()",
             "session.run_sql(\"DROP USER IF EXISTS 'test_user'@'%', 'test_user_2'@'localhost';\")",
         )
 
         self.mysql.delete_users_with_label("test_label_name", "test_label_value")
 
         _run_mysqlcli_script.assert_called_once_with(
-            "; ".join(_expected_get_label_users_commands),
+            _expected_get_label_users_commands,
+            user="serverconfig",
+            password="serverconfigpassword",
+        )
+        _run_mysqlsh_script.assert_called_once_with(
+            "\n".join(_expected_drop_users_commands),
             user="serverconfig",
+            host="127.0.0.1",
             password="serverconfigpassword",
         )
-        _run_mysqlsh_script.assert_called_once_with("\n".join(_expected_drop_users_commands))
 
     @patch("mysql_k8s_helpers.MySQL._run_mysqlcli_script")
     @patch("mysql_k8s_helpers.MySQL._run_mysqlsh_script")
@@ -227,43 +252,52 @@ def test_run_mysqlsh_script(self, _container):
         """Test a successful execution of run_mysqlsh_script."""
         _container.exec.return_value = MagicMock()
         _container.exec.return_value.wait_output.return_value = (
-            b"stdout",
-            b"stderr",
+            "garbage_that_mysqlsh_output###stdout",
+            "stderr",
         )
         self.mysql.container = _container
 
-        self.mysql._run_mysqlsh_script("script")
+        self.mysql._run_mysqlsh_script(
+            "script", user="serverconfig", password="serverconfigpassword", host="127.0.0.1:3306"
+        )
 
+        call_script = "shell.options.set('useWizards', False)\nprint('###')\nscript"
         _container.exec.assert_called_once_with(
             [
                 "/usr/bin/mysqlsh",
-                "--no-wizard",
+                "--passwords-from-stdin",
+                "--uri=serverconfig@127.0.0.1:3306",
                 "--python",
                 "--verbose=0",
-                "-f",
-                MYSQLSH_SCRIPT_FILE,
-                ";",
-                "rm",
-                MYSQLSH_SCRIPT_FILE,
+                "-c",
+                call_script,
             ],
+            stdin="serverconfigpassword",
         )
 
         _container.reset_mock()
-        self.mysql._run_mysqlsh_script("script", timeout=10)
+        output = self.mysql._run_mysqlsh_script(
+            "script",
+            user="serverconfig",
+            password="serverconfigpassword",
+            host="127.0.0.1:3306",
+            timeout=10,
+        )
+        self.assertEqual(output, "stdout")
+
         _container.exec.assert_called_once_with(
             [
                 "timeout",
                 "10",
                 "/usr/bin/mysqlsh",
-                "--no-wizard",
+                "--passwords-from-stdin",
+                "--uri=serverconfig@127.0.0.1:3306",
                 "--python",
                 "--verbose=0",
-                "-f",
-                MYSQLSH_SCRIPT_FILE,
-                ";",
-                "rm",
-                MYSQLSH_SCRIPT_FILE,
+                "-c",
+                call_script,
             ],
+            stdin="serverconfigpassword",
         )
 
     @patch("ops.model.Container")
@@ -271,19 +305,19 @@ def test_run_mysqlcli_script(self, _container):
         """Test a execution of run_mysqlcli_script."""
         _container.exec.return_value = MagicMock()
         _container.exec.return_value.wait_output.return_value = (
-            b"stdout",
-            b"stderr",
+            "",
+            None,
         )
         self.mysql.container = _container
 
-        self.mysql._run_mysqlcli_script("script")
+        self.mysql._run_mysqlcli_script(("script",))
 
         _container.exec.assert_called_once_with(
             [
                 "/usr/bin/mysql",
                 "-u",
                 "root",
-                "--protocol=SOCKET",
+                "-N",
                 f"--socket={MYSQLD_SOCK_FILE}",
                 "-e",
                 "script",
@@ -291,6 +325,23 @@ def test_run_mysqlcli_script(self, _container):
             timeout=None,
         )
 
+        _container.reset_mock()
+        self.mysql._run_mysqlcli_script(("script",), password="rootpassword")
+        _container.exec.assert_called_once_with(
+            [
+                "/usr/bin/mysql",
+                "-u",
+                "root",
+                "-p",
+                "-N",
+                f"--socket={MYSQLD_SOCK_FILE}",
+                "-e",
+                "script",
+            ],
+            timeout=None,
+            stdin="rootpassword",
+        )
+
     @patch("mysql_k8s_helpers.MySQL.get_cluster_status", return_value=GET_CLUSTER_STATUS_RETURN)
     def test_wait_until_unit_removed_from_cluster(self, _get_cluster_status):
         """Test the successful execution of _wait_until_unit_removed_from_cluster."""
@@ -321,19 +372,19 @@ def test_wait_until_unit_removed_from_cluster_exception(self, _get_cluster_statu
     def test_log_rotate_config(self, _container):
         """Test log_rotate_config."""
         rendered_logrotate_config = (
-            "# Use system user\nsu mysql mysql\n\n# Create dedicated "
-            "subdirectory for rotated files\ncreateolddir 770 mysql mysql\n\n# Frequency of logs"
-            " rotation\nhourly\nmaxage 7\nrotate 10800\n\n# Naming of rotated files should be in"
-            " the format:\ndateext\ndateformat -%Y%m%d_%H%M\n\n# Settings to prevent"
-            " misconfigurations and unwanted behaviours\nifempty\nmissingok\nnocompress\nnomail\n"
-            "nosharedscripts\nnocopytruncate\n\n/var/log/mysql/error.log {\n    olddir"
-            " archive_error\n}\n\n/var/log/mysql/general.log {\n    olddir archive_general\n}\n\n"
-            "/var/log/mysql/slowquery.log {\n    olddir archive_slowquery\n}\n\n"
-            "/var/log/mysql/audit.log {\n    olddir archive_audit\n}"
+            "# Use system user\nsu mysql mysql\n\n# Create dedicated subdirectory for rotated "
+            "files\ncreateolddir 770 mysql mysql\n\n# Frequency of logs rotation\nhourly\nmaxa"
+            "ge 1\nrotate 1440\n\n# Compression settings\n\nnocompress\n\n\n# Naming of rotate"
+            "d files should be in the format:\ndateext\ndateformat -%Y%m%d_%H%M\n\n# Settings "
+            "to prevent misconfigurations and unwanted behaviours\nifempty\nmissingok\nnomail\n"
+            "nosharedscripts\nnocopytruncate\n\n\n/var/log/mysql/error.log {\n    olddir archi"
+            "ve_error\n}\n\n/var/log/mysql/general.log {\n    olddir archive_general\n}\n\n/va"
+            "r/log/mysql/slowquery.log {\n    olddir archive_slowquery\n}\n\n/var/log/mysql/au"
+            "dit.log {\n    olddir archive_audit\n}\n\n"
         )
 
         self.mysql.container = _container
-        self.mysql.setup_logrotate_config()
+        self.mysql.setup_logrotate_config(1, ["error", "general", "slowquery", "audit"], False)
 
         self.mysql.container.push.assert_called_once_with(
             "/etc/logrotate.d/flush_mysql_logs",
@@ -370,3 +421,98 @@ def test_update_endpoints(self, _get_cluster_endpoints):
         _get_cluster_endpoints.assert_called_once()
 
         _label_pod.assert_has_calls(calls)
+
+    @patch("ops.model.Container")
+    @patch("mysql_k8s_helpers.MySQL.wait_until_mysql_connection")
+    def test_reset_root_password_and_start_mysqld(self, _wait_until_mysql_connection, _container):
+        """Test for reset_root_password_and_start_mysqld()."""
+        self.mysql.container = _container
+        self.mysql.reset_root_password_and_start_mysqld()
+
+        self.mysql.container.push.assert_has_calls([
+            call(
+                "/alter-root-user.sql",
+                "ALTER USER 'root'@'localhost' IDENTIFIED BY 'password';\nFLUSH PRIVILEGES;",
+                encoding="utf-8",
+                permissions=384,
+                user="mysql",
+                group="mysql",
+            ),
+            call(
+                "/etc/mysql/mysql.conf.d/z-custom-init-file.cnf",
+                "[mysqld]\ninit_file = /alter-root-user.sql",
+                encoding="utf-8",
+                permissions=384,
+                user="mysql",
+                group="mysql",
+            ),
+        ])
+        self.mysql.container.restart.assert_called_once_with("mysqld")
+        _wait_until_mysql_connection.assert_called_once_with(check_port=False)
+        self.mysql.container.remove_path.assert_has_calls([
+            call("/alter-root-user.sql"),
+            call("/etc/mysql/mysql.conf.d/z-custom-init-file.cnf"),
+        ])
+
+    @patch("ops.model.Container")
+    @patch("mysql_k8s_helpers.MySQL.wait_until_mysql_connection")
+    def test_reset_root_password_and_start_mysqld_error(
+        self, _wait_until_mysql_connection, _container
+    ):
+        """Test exceptions in reset_root_password_and_start_mysqld()."""
+        self.mysql.container = _container
+        _container.push.side_effect = [
+            None,
+            PathError("not-found", "Should be a pebble exception"),
+        ]
+
+        with self.assertRaises(PathError):
+            self.mysql.reset_root_password_and_start_mysqld()
+
+        self.mysql.container.push.assert_has_calls([
+            call(
+                "/alter-root-user.sql",
+                "ALTER USER 'root'@'localhost' IDENTIFIED BY 'password';\nFLUSH PRIVILEGES;",
+                encoding="utf-8",
+                permissions=384,
+                user="mysql",
+                group="mysql",
+            ),
+        ])
+        self.mysql.container.remove_path.assert_called_once_with("/alter-root-user.sql")
+        _wait_until_mysql_connection.assert_not_called()
+
+        _container.push.side_effect = [None, None]
+        _container.push.reset_mock()
+        _container.remove_path.reset_mock()
+
+        _wait_until_mysql_connection.side_effect = [
+            MySQLServiceNotRunningError("mysqld not running")
+        ]
+
+        with self.assertRaises(MySQLServiceNotRunningError):
+            self.mysql.reset_root_password_and_start_mysqld()
+
+        self.mysql.container.push.assert_has_calls([
+            call(
+                "/alter-root-user.sql",
+                "ALTER USER 'root'@'localhost' IDENTIFIED BY 'password';\nFLUSH PRIVILEGES;",
+                encoding="utf-8",
+                permissions=384,
+                user="mysql",
+                group="mysql",
+            ),
+            call(
+                "/etc/mysql/mysql.conf.d/z-custom-init-file.cnf",
+                "[mysqld]\ninit_file = /alter-root-user.sql",
+                encoding="utf-8",
+                permissions=384,
+                user="mysql",
+                group="mysql",
+            ),
+        ])
+        self.mysql.container.restart.assert_called_once_with("mysqld")
+        self.mysql.container.remove_path.assert_has_calls([
+            call("/alter-root-user.sql"),
+            call("/etc/mysql/mysql.conf.d/z-custom-init-file.cnf"),
+        ])
diff --git a/tests/unit/test_upgrade.py b/tests/unit/test_upgrade.py
index 1213bfa78..54598cb88 100644
--- a/tests/unit/test_upgrade.py
+++ b/tests/unit/test_upgrade.py
@@ -9,6 +9,7 @@
 from charms.mysql.v0.mysql import MySQLSetClusterPrimaryError, MySQLSetVariableError
 from ops.model import BlockedStatus
 from ops.testing import Harness
+from tenacity import RetryError
 
 import k8s_helpers
 from charm import MySQLOperatorCharm
@@ -150,19 +151,17 @@ def test_pre_upgrade_prepare(
         mock_set_rolling_update_partition.assert_called_once()
         assert mock_set_dynamic_variable.call_count == 2
 
+    @patch("charm.MySQLOperatorCharm.recover_unit_after_restart")
     @patch("mysql_k8s_helpers.MySQL.install_plugins")
     @patch("mysql_k8s_helpers.MySQL.cluster_metadata_exists", return_value=True)
     @patch("mysql_k8s_helpers.MySQL.setup_logrotate_config")
     @patch("charm.MySQLOperatorCharm._reconcile_pebble_layer")
     @patch("charm.MySQLOperatorCharm._write_mysqld_configuration")
-    @patch("upgrade.RECOVER_ATTEMPTS", 1)
     @patch("mysql_k8s_helpers.MySQL.hold_if_recovering")
     @patch("mysql_k8s_helpers.MySQL.get_mysql_version", return_value="8.0.33")
     @patch("mysql_k8s_helpers.MySQL.verify_server_upgradable")
-    @patch("mysql_k8s_helpers.MySQL.is_instance_in_cluster", return_value=True)
     def test_pebble_ready(
         self,
-        mock_is_instance_in_cluster,
         mock_is_server_upgradable,
         mock_get_mysql_version,
         mock_hold_if_recovering,
@@ -171,6 +170,7 @@ def test_pebble_ready(
         mock_setup_logrotate_config,
         mock_cluster_metadata_exists,
         mock_install_plugins,
+        mock_recover_unit_after_restart,
     ):
         """Test the pebble ready."""
         self.charm.on.config_changed.emit()
@@ -191,13 +191,12 @@ def test_pebble_ready(
             self.harness.get_relation_data(self.upgrade_relation_id, "mysql-k8s/1")["state"],
             "idle",  # change to `completed` - behavior not yet set in the lib
         )
-        mock_is_instance_in_cluster.assert_called_once()
 
         self.harness.update_relation_data(
             self.upgrade_relation_id, "mysql-k8s/0", {"state": "upgrading"}
         )
         # setup for exception
-        mock_is_instance_in_cluster.return_value = False
+        mock_recover_unit_after_restart.side_effect = RetryError("dummy")
 
         with patch(
             "charm.MySQLOperatorCharm.unit_initialized",
diff --git a/tox.ini b/tox.ini
index bfc07aa72..c722e0a51 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,42 +6,24 @@ no_package = True
 env_list = lint, unit
 
 [vars]
-src_path = {tox_root}/src
-tests_path = {tox_root}/tests
-scripts_path = {tox_root}/scripts
+src_path = "{tox_root}/src"
+tests_path = "{tox_root}/tests"
+scripts_path = "{tox_root}/scripts"
 all_path = {[vars]src_path} {[vars]tests_path} {[vars]scripts_path}
 
 [testenv]
 set_env =
-    PYTHONPATH = {[vars]src_path}:{tox_root}/lib
+    PYTHONPATH = {tox_root}/src:{tox_root}/lib
     PY_COLORS = 1
 allowlist_externals =
     poetry
 
-[testenv:build-{production,dev,wrapper}]
-# Wrap `charmcraft pack`
-pass_env =
-    CI
-    GH_TOKEN
-allowlist_externals =
-    {[testenv]allowlist_externals}
-    charmcraft
-    charmcraftcache
-    mv
-commands_pre =
-    poetry export --only main,charm-libs --output requirements.txt
-commands =
-    build-production: charmcraft pack {posargs}
-    build-dev: charmcraftcache pack {posargs}
-commands_post =
-    mv requirements.txt requirements-last-build.txt
-
 [testenv:format]
 description = Apply coding style standards to code
 commands_pre =
     poetry install --only format
 commands =
-    poetry lock --no-update
+    poetry lock
     poetry run ruff check --fix {[vars]all_path}
     poetry run ruff format {[vars]all_path}
 
@@ -70,20 +52,13 @@ commands =
 
 [testenv:integration]
 description = Run integration tests
-set_env =
-    {[testenv]set_env}
-    # Workaround for https://github.com/python-poetry/poetry/issues/6958
-    POETRY_INSTALLER_PARALLEL = false
 pass_env =
     CI
-    GITHUB_OUTPUT
-    SECRETS_FROM_GITHUB
-allowlist_externals =
-    {[testenv:build-wrapper]allowlist_externals}
+    AWS_ACCESS_KEY
+    AWS_SECRET_KEY
+    GCP_ACCESS_KEY
+    GCP_SECRET_KEY
 commands_pre =
     poetry install --only integration
-    {[testenv:build-wrapper]commands_pre}
 commands =
     poetry run pytest -v --tb native --log-cli-level=INFO -s --ignore={[vars]tests_path}/unit/ {posargs}
-commands_post =
-    {[testenv:build-wrapper]commands_post}