diff --git a/.ci/pipeline.yaml b/.ci/pipeline.yaml index 44ac3e52e0..ae3bbedf93 100644 --- a/.ci/pipeline.yaml +++ b/.ci/pipeline.yaml @@ -34,7 +34,7 @@ spec: - name: CONTEXT value: ./tools - name: IMAGE - value: registry.khuedoan.com/tools:latest + value: registry.jupiter.mein.nl/tools:latest - name: EXTRA_ARGS value: - --cache=true @@ -49,7 +49,7 @@ spec: workspaces: - name: source stepTemplate: - image: registry.khuedoan.com/tools:latest + image: registry.jupiter.mein.nl/tools:latest workingDir: /workspace/source steps: - name: run @@ -74,7 +74,7 @@ spec: secret: secretName: terraform-secrets stepTemplate: - image: registry.khuedoan.com/tools:latest + image: registry.jupiter.mein.nl/tools:latest workingDir: /workspace/source/external volumeMounts: - name: terraform-secrets diff --git a/.ci/tasks/external.yaml b/.ci/tasks/external.yaml new file mode 100644 index 0000000000..20a702973e --- /dev/null +++ b/.ci/tasks/external.yaml @@ -0,0 +1,36 @@ +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: terraform-external +spec: + workspaces: + - name: source + stepTemplate: + image: hashicorp/terraform:1.3.3 + workingDir: /workspace/source/external + volumeMounts: + - name: terraform-secrets + mountPath: /root/.terraform.d/credentials.tfrc.json + subPath: credentials.tfrc.json + - name: terraform-secrets + mountPath: /workspace/source/external/terraform.tfvars + subPath: terraform.tfvars + command: + - terraform + volumes: + - name: terraform-secrets + secret: + secretName: terraform-secrets + steps: + - name: init + args: + - init + - name: plan + args: + - plan + - -out=tfplan + - name: apply + args: + - apply + - -auto-approve + - tfplan diff --git a/.ci/tasks/lint.yaml b/.ci/tasks/lint.yaml new file mode 100644 index 0000000000..0c863ce91d --- /dev/null +++ b/.ci/tasks/lint.yaml @@ -0,0 +1,30 @@ +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: lint +spec: + workspaces: + - name: source + steps: + - name: ansible + image: cytopia/ansible-lint 
+ workingDir: /workspace/source/metal + command: + - ansible-lint + args: + - -v + - name: yaml + image: cytopia/yamllint + command: + - yamllint + args: + - . + - name: terraform + image: hashicorp/terraform:1.3.3 + command: + - terraform + args: + - fmt + - -recursive + - -check + - -diff diff --git a/.gitignore b/.gitignore index 636d35689d..3f94ce3f3a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,7 @@ .venv/ +.ssh/ book/ +.idea *.iso *.log @@ -7,3 +9,7 @@ book/ *.tgz *kubeconfig.yaml Chart.lock +.DS_Store +go.sum + +config.js diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000000..21cfc46d92 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,22 @@ +{ + "workbench.colorCustomizations": { + "activityBar.activeBackground": "#65c89b", + "activityBar.activeBorder": "#945bc4", + "activityBar.background": "#65c89b", + "activityBar.foreground": "#15202b", + "activityBar.inactiveForeground": "#15202b99", + "activityBarBadge.background": "#945bc4", + "activityBarBadge.foreground": "#e7e7e7", + "sash.hoverBorder": "#65c89b", + "statusBar.background": "#42b883", + "statusBar.foreground": "#15202b", + "statusBarItem.hoverBackground": "#359268", + "statusBarItem.remoteBackground": "#42b883", + "statusBarItem.remoteForeground": "#15202b", + "titleBar.activeBackground": "#42b883", + "titleBar.activeForeground": "#15202b", + "titleBar.inactiveBackground": "#42b88399", + "titleBar.inactiveForeground": "#15202b99" + }, + "peacock.color": "#42b883" +} \ No newline at end of file diff --git a/README.md b/README.md index 56feb41e54..142561d6f5 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,9 @@ -# Khue's Homelab +# Jupiter Mein Homelab - Fork from Khuedoan -**[Features](#features) • [Get Started](#get-started) • [Documentation](https://homelab.khuedoan.com)** +**[Features](#features) • [Get Started](#get-started) • [Documentation](https://homelab.jupiter.mein.nl)** 
[![tag](https://img.shields.io/github/v/tag/khuedoan/homelab?style=flat-square&logo=semver&logoColor=white)](https://github.com/khuedoan/homelab/tags) -[![document](https://img.shields.io/website?label=document&logo=gitbook&logoColor=white&style=flat-square&url=https%3A%2F%2Fhomelab.khuedoan.com)](https://homelab.khuedoan.com) +[![document](https://img.shields.io/website?label=document&logo=gitbook&logoColor=white&style=flat-square&url=https%3A%2F%2Fhomelab.jupiter.mein.nl)](https://homelab.jupiter.mein.nl) [![license](https://img.shields.io/github/license/khuedoan/homelab?style=flat-square&logo=gnu&logoColor=white)](https://www.gnu.org/licenses/gpl-3.0.html) [![stars](https://img.shields.io/github/stars/khuedoan/homelab?logo=github&logoColor=white&color=gold&style=flat-square)](https://github.com/khuedoan/homelab) @@ -217,25 +217,25 @@ They can't capture all the project's features, but they are sufficient to get a ## Get Started -- [Try it out locally](https://homelab.khuedoan.com/installation/sandbox) without any hardware (just 4 commands!) -- [Deploy on real hardware](https://homelab.khuedoan.com/installation/production/prerequisites) for production workload +- [Try it out locally](https://homelab.jupiter.mein.nl/installation/sandbox) without any hardware (just 4 commands!) +- [Deploy on real hardware](https://homelab.jupiter.mein.nl/installation/production/prerequisites) for production workload ## Roadmap -See [roadmap](https://homelab.khuedoan.com/reference/roadmap) and [open issues](https://github.com/khuedoan/homelab/issues) for a list of proposed features and known issues. +See [roadmap](https://homelab.jupiter.mein.nl/reference/roadmap) and [open issues](https://github.com/khuedoan/homelab/issues) for a list of proposed features and known issues. ## Contributing Any contributions you make are greatly appreciated. -Please see [contributing guide](https://homelab.khuedoan.com/reference/contributing) for more information. 
+Please see [contributing guide](https://homelab.jupiter.mein.nl/reference/contributing) for more information. ## License Copyright © 2020 - 2022 Khue Doan Distributed under the GPLv3 License. -See [license page](https://homelab.khuedoan.com/reference/license) or `LICENSE.md` file for more information. +See [license page](https://homelab.jupiter.mein.nl/reference/license) or `LICENSE.md` file for more information. ## Acknowledgements @@ -274,3 +274,4 @@ If you feel you're missing from this list, feel free to add yourself in a PR. ## Stargazers over time [![Stargazers over time](https://starchart.cc/khuedoan/homelab.svg)](https://starchart.cc/khuedoan/homelab) +>>>>>>> 686fea57645ab65df8b71c54814dbc01827d48a6 diff --git a/apps/hajimari/Chart.yaml b/apps/hajimari/Chart.yaml index be25f924f7..dfae8c78b2 100644 --- a/apps/hajimari/Chart.yaml +++ b/apps/hajimari/Chart.yaml @@ -3,5 +3,5 @@ name: hajimari version: 0.0.0 dependencies: - name: hajimari - version: 1.2.0 + version: 2.0.2 repository: https://hajimari.io diff --git a/apps/hajimari/values.yaml b/apps/hajimari/values.yaml index 1814eb1536..000f32b201 100644 --- a/apps/hajimari/values.yaml +++ b/apps/hajimari/values.yaml @@ -2,7 +2,7 @@ hajimari: image: # TODO remove this https://github.com/toboshii/hajimari/blob/main/charts/hajimari/values.yaml tag: v0.2.0 env: - TZ: Asia/Ho_Chi_Minh + TZ: Europe/Amsterdam ingress: main: enabled: true @@ -11,7 +11,7 @@ hajimari: cert-manager.io/cluster-issuer: letsencrypt-prod hajimari.io/enable: 'false' hosts: - - host: &host home.khuedoan.com + - host: &host home.jupiter.mein.nl paths: - path: / pathType: Prefix @@ -27,9 +27,9 @@ hajimari: - name: Homelab links: - name: Documentation - url: https://homelab.khuedoan.com + url: https://homelab.jupiter.mein.nl - name: Public GitHub repository - url: https://github.com/khuedoan/homelab + url: https://github.com/infratron/homelab - name: Managed services links: - name: Terraform Cloud diff --git a/apps/jellyfin/Chart.yaml 
b/apps/jellyfin/Chart.yaml index 1969e4e212..43431db22a 100644 --- a/apps/jellyfin/Chart.yaml +++ b/apps/jellyfin/Chart.yaml @@ -3,5 +3,5 @@ name: jellyfin version: 0.0.0 dependencies: - name: jellyfin - version: 9.3.0 + version: 9.5.3 repository: https://k8s-at-home.com/charts/ diff --git a/apps/jellyfin/values.yaml b/apps/jellyfin/values.yaml index 8ff8cb23a5..538ecb0591 100644 --- a/apps/jellyfin/values.yaml +++ b/apps/jellyfin/values.yaml @@ -8,7 +8,7 @@ jellyfin: hajimari.io/appName: Jellyfin hajimari.io/icon: filmstrip hosts: - - host: &host jellyfin.khuedoan.com + - host: &host jellyfin.jupiter.mein.nl paths: - path: / pathType: Prefix diff --git a/apps/matrix/values.yaml b/apps/matrix/values.yaml index d996c7cad4..253100b95d 100644 --- a/apps/matrix/values.yaml +++ b/apps/matrix/values.yaml @@ -4,12 +4,12 @@ elementweb: className: nginx annotations: cert-manager.io/cluster-issuer: letsencrypt-prod - external-dns.alpha.kubernetes.io/target: "homelab-tunnel.khuedoan.com" + external-dns.alpha.kubernetes.io/target: "jupiter-tunnel.mein.nl" external-dns.alpha.kubernetes.io/cloudflare-proxied: "true" hajimari.io/appName: Chat hajimari.io/icon: chat hosts: - - host: &frontend_host chat.khuedoan.com + - host: &frontend_host chat.mein.nl paths: - path: / pathType: Prefix @@ -19,8 +19,8 @@ elementweb: - *frontend_host config: default: - base_url: https://matrix.khuedoan.com - server_name: khuedoan.com + base_url: https://matrix.mein.nl + server_name: mein.nl dendrite: polylith: false @@ -29,7 +29,7 @@ dendrite: pullPolicy: IfNotPresent configuration: version: 2 - servername: &backend_host matrix.khuedoan.com + servername: &backend_host matrix.mein.nl database: host: matrix-postgresql:5432 user: dendrite @@ -72,7 +72,7 @@ dendrite: className: nginx annotations: cert-manager.io/cluster-issuer: letsencrypt-prod - external-dns.alpha.kubernetes.io/target: "homelab-tunnel.khuedoan.com" + external-dns.alpha.kubernetes.io/target: "jupiter-tunnel.mein.nl" 
external-dns.alpha.kubernetes.io/cloudflare-proxied: "true" nginx.ingress.kubernetes.io/proxy-body-size: 32m hajimari.io/enable: 'false' diff --git a/apps/paperless/Chart.yaml b/apps/paperless/Chart.yaml index 01b500dfc9..8a95ce326a 100644 --- a/apps/paperless/Chart.yaml +++ b/apps/paperless/Chart.yaml @@ -3,5 +3,8 @@ name: paperless version: 0.0.0 dependencies: - name: paperless - version: 8.5.0 + version: 8.8.3 repository: https://k8s-at-home.com/charts/ + - name: crushftp + version: 1.0.3 + repository: https://greggbjensen.github.io/helm-crushftp diff --git a/apps/paperless/values.yaml b/apps/paperless/values.yaml index dff533b1d8..bee0d4282a 100644 --- a/apps/paperless/values.yaml +++ b/apps/paperless/values.yaml @@ -1,4 +1,63 @@ paperless: + + env: + # -- Project name + COMPOSE_PROJECT_NAME: paperless + # -- Redis to use + PAPERLESS_REDIS: redis://redis-master.redis:6379 + # -- OCR languages to install + PAPERLESS_OCR_LANGUAGE: eng + PAPERLESS_TIME_ZONE: "Europe/Amsterdam" + # USERMAP_UID: 1000 + # USERMAP_GID: 1000 + # PAPERLESS_TIME_ZONE: Europe/London + # -- Database host to use + PAPERLESS_DBHOST: postgresql.postgresql + # -- Port to use + PAPERLESS_PORT: 8000 + + envFrom: + - secretRef: + name: paperless-secret + + persistence: + # -- Configure persistence for data. + # @default -- See values.yaml + data: + enabled: true + mountPath: /usr/src/paperless/data + accessMode: ReadWriteOnce + storageClass: "longhorn" + size: 1Gi + + # -- Configure persistence for media. + # @default -- See values.yaml + media: + enabled: true + mountPath: /usr/src/paperless/media + accessMode: ReadWriteOnce + storageClass: "longhorn" + size: 1Gi + + # -- Configure volume to monitor for new documents. + # @default -- See values.yaml + consume: + enabled: true + mountPath: /usr/src/paperless/consume + accessMode: ReadWriteMany + storageClass: "longhorn" + size: 1Gi + + # -- Configure export volume. 
+ # @default -- See values.yaml + + export: + enabled: true + mountPath: /usr/src/paperless/export + accessMode: ReadWriteOnce + storageClass: "longhorn" + size: 1Gi + ingress: main: enabled: true @@ -8,7 +67,7 @@ paperless: hajimari.io/appName: Paperless hajimari.io/icon: file-document hosts: - - host: &host paperless.khuedoan.com + - host: &host paperless.jupiter.mein.nl paths: - path: / pathType: Prefix @@ -17,4 +76,118 @@ paperless: hosts: - *host postgresql: - enabled: true + enabled: false + +crushftp: + + # Default values for crushftp. + + # Project specific + admin: + username: crushadmin + password: 'password' + protocol: http + port: 8080 + + tls: + secretName: crushftp-tls + volumes: + - name: paperless-consume + claimName: paperless-consume + mountPath: /mnt/FTP/Shared/scan + + configVolume: + size: 1Gi + loadBalancerIp: 127.0.0.1 + + # Shared + shared: + hosts: + crushFtp: + root: scan.ftp.jupiter.mein.nl + prefix: ftp + ingress: + clusterIssuer: 'letsencrypt-prod' + storageClassName: default + + # This is a YAML-formatted file. + # Declare variables to be passed into your templates. + + replicaCount: 1 + + image: + repository: greggbjensen/crushftp + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + + imagePullSecrets: [] + nameOverride: "" + fullnameOverride: "" + + serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + + podAnnotations: {} + + podSecurityContext: {} + # fsGroup: 2000 + + securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + service: + type: ClusterIP + port: 80 + + ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: ftp.jupiter.mein.nl + paths: + - path: / + pathType: ImplementationSpecific + tls: + - secretName: crushftp-tls + hosts: + - ftp.jupiter.mein.nl + + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 1 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + + nodeSelector: {} + + tolerations: [] + + affinity: {} diff --git a/apps/privatebin/Chart.yaml b/apps/privatebin/Chart.yaml new file mode 100644 index 0000000000..be2e2818e7 --- /dev/null +++ b/apps/privatebin/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: privatebin +version: 0.0.0 +dependencies: + - name: privatebin + version: 2.1.2 + repository: https://k8s-at-home.com/charts/ diff --git a/apps/privatebin/values.yaml b/apps/privatebin/values.yaml new file mode 100644 index 0000000000..5d0db0446c --- /dev/null +++ b/apps/privatebin/values.yaml @@ -0,0 +1,115 @@ +privatebin: + # IMPORTANT NOTE + # + # This chart inherits from our common library chart. 
You can check the default values/options here: + # https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml + # + + image: + # -- image repository + repository: privatebin/pdo + # -- image tag + tag: 1.4.0 + # -- image pull policy + pullPolicy: IfNotPresent + + # -- environment variables. See [image docs](https://docs.kanboard.org/en/latest/admin_guide/docker.html#environment-variables) + # and [application docs](# https://docs.kanboard.org/en/latest/admin_guide/config_file.html) for more details. + # @default -- See below (only deviations from the default settings are specified) + env: + # -- container timezone + TZ: Europe/Amsterdam + # -- PHP timezone (usually should match the containers TZ) + PHP_TZ: Europe/Amsterdam + + # -- Configures service settings for the chart. + # @default -- See values.yaml + service: + main: + ports: + http: + port: 8080 + + ingress: + # -- Enable and configure ingress settings for the chart under this key. + # @default -- See values.yaml + ingress: + main: + enabled: true + ingressClassName: nginx + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + hajimari.io/appName: Privatebin + hajimari.io/icon: bin + hosts: + - host: &host privatebin.jupiter.mein.nl + paths: + - path: / + pathType: Prefix + tls: + - secretName: privatebin-tls-certificate + hosts: + - *host + + # -- Configure persistence settings for the chart under this key. + # @default -- See values.yaml + persistence: + data: + enabled: false + mountPath: /srv/data + + # -- Enable and configure postgresql database subchart under this key. + # For more options see [postgresql chart documentation](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) + # @default -- See values.yaml + postgresql: + enabled: false + postgresqlUsername: privatebin + postgresqlPassword: privatebin + postgresqlDatabase: privatebin + persistence: + enabled: false + # storageClass: "" + + # -- Application Settings. 
See https://github.com/PrivateBin/PrivateBin/blob/master/cfg/conf.sample.php + # for a description of every setting + # @default -- See values.yaml + config: + main: + name: "PrivateBin" + # discussion: true + # opendiscussion: false + # password: true + # fileupload: false + # burnafterreadingselected: false + # defaultformatter: "plaintext" + # syntaxhighlightingtheme: "sons-of-obsidian" + # sizelimit: 10485760 + # template: "bootstrap" + # info: "More information on the project page." + # notice: "Note: This is a test service: Data may be deleted anytime. Kittens will die if you abuse this service." + # languageselection: false + # languagedefault: "en" + # urlshortener: "https://shortener.example.com/api?link=" + # qrcode: false + # icon: "none" + # zerobincompatibility: false + # httpwarning: true + # compression: "zlib" + expire: + default: "1week" + expire_options: + 5min: 300 + 10min: 600 + 1hour: 3600 + 1day: 86400 + 1week: 604800 + 1month: 2592000 + 1year: 31536000 + never: 0 + formatter_options: + plaintext: "Plain Text" + syntaxhighlighting: "Source Code" + markdown: "Markdown" + traffic: + limit: 10 + # exemptedIp: "1.2.3.4,10.10.10/24" diff --git a/apps/seafile/Chart.yaml b/apps/seafile/Chart.yaml index 91e0881d64..5b0c569e4e 100644 --- a/apps/seafile/Chart.yaml +++ b/apps/seafile/Chart.yaml @@ -3,5 +3,5 @@ name: seafile version: 0.0.0 dependencies: - name: seafile - version: 2.0.1 + version: 2.2.2 repository: https://k8s-at-home.com/charts diff --git a/apps/seafile/values.yaml b/apps/seafile/values.yaml index a3a7727ce9..41b3da15a5 100644 --- a/apps/seafile/values.yaml +++ b/apps/seafile/values.yaml @@ -1,20 +1,75 @@ seafile: - ingress: + + image: + # -- image repository + repository: seafileltd/seafile-mc + # -- image tag + tag: 9.0.9 + # -- image pull policy + pullPolicy: IfNotPresent + + # -- environment variables. See more environment variables in the [seafile documentation](https://manual.seafile.com/). 
+ # @default -- See below + env: + # -- Set the container timezone + TIME_ZONE: Europe/Amsterdam + # -- The hostname of your database + DB_HOST: mariadb-galera.mariadb-galera.svc.cluster.local + # -- The root password for mysql (used for initial setup) + DB_ROOT_PASSWD: XL3ePsvRu2% + # -- The initial admin user's email + SEAFILE_ADMIN_EMAIL: peter@mein.nl + # -- The initial admin user's password + SEAFILE_ADMIN_PASSWORD: seafileadminpass + # -- The hostname for the server (set to your ingress hostname) + SEAFILE_SERVER_HOSTNAME: seafile.jupiter.mein.nl + SEAFILE_SERVER_LETSENCRYPT: true + + # -- Configures service settings for the chart. + # @default -- See values.yaml + service: main: - enabled: true - ingressClassName: nginx - annotations: - cert-manager.io/cluster-issuer: letsencrypt-prod - hajimari.io/appName: Seafile - hajimari.io/icon: google-drive - hosts: - - host: &host seafile.khuedoan.com - paths: - - path: / - pathType: Prefix - tls: - - secretName: seafile-tls-certificate - hosts: - - *host + ports: + http: + port: 80 + + ingress: + # -- Enable and configure ingress settings for the chart under this key. + # @default -- See values.yaml + ingress: + main: + enabled: true + ingressClassName: nginx + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + hosts: + - host: &host seafile.jupiter.mein.nl + paths: + - path: / + pathType: Prefix + tls: + - secretName: seafile-tls-certificate + hosts: + - *host + + # -- Enable and configure mariadb database subchart under this key. + # For more options see [mariadb chart documentation](https://github.com/bitnami/charts/tree/master/bitnami/mariadb) + # @default -- See values.yaml mariadb: - enabled: true + enabled: false + architecture: standalone + auth: + database: seafile + username: seafile + password: seafilepass + rootPassword: seafilerootpass + primary: + persistence: + enabled: false + # storageClass: "" + + # -- Configure persistence settings for the chart under this key. 
+ # @default -- See values.yaml + persistence: {} + # shared: + # enabled: false diff --git a/bin/cloudflared/Chart.yaml b/bin/cloudflared/Chart.yaml new file mode 100644 index 0000000000..f6bae20473 --- /dev/null +++ b/bin/cloudflared/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: cloudflared +version: 0.0.0 +dependencies: + - name: cloudflared + version: 0.4.3 + repository: https://khuedoan.github.io/charts diff --git a/bin/cloudflared/values.yaml b/bin/cloudflared/values.yaml new file mode 100644 index 0000000000..f5f469968d --- /dev/null +++ b/bin/cloudflared/values.yaml @@ -0,0 +1,18 @@ +cloudflared: + credentials: + existingSecret: cloudflared-credentials + config: + tunnel: homelab + ingress: + # It is safe to put a wildcard here + # Please see https://homelab.jupiter.mein.nl/reference/faq.html#is-it-safe-to-use-wildcard-in-cloudflare-tunnel-ingress-config + - hostname: '*.jupiter.mein.nl' + service: https://ingress-nginx-controller.ingress-nginx + originRequest: + noTLSVerify: true + - service: http_status:404 + + podMonitor: + enabled: true + metricsEndpoints: + - port: http diff --git a/bootstrap/argocd/Chart.yaml b/bootstrap/argocd/Chart.yaml index 29979af054..b05eeb3914 100644 --- a/bootstrap/argocd/Chart.yaml +++ b/bootstrap/argocd/Chart.yaml @@ -3,5 +3,5 @@ name: argocd version: 0.0.0 dependencies: - name: argo-cd - version: 4.5.8 + version: 5.8.5 repository: https://argoproj.github.io/argo-helm diff --git a/bootstrap/argocd/values.yaml b/bootstrap/argocd/values.yaml index 632c13a8e6..c1086810f5 100644 --- a/bootstrap/argocd/values.yaml +++ b/bootstrap/argocd/values.yaml @@ -2,8 +2,11 @@ argo-cd: server: extraArgs: - --insecure + - --repo-server-timeout-seconds + - "500" config: - statusbadge.enabled: 'true' + statusbadge.enabled: "true" + resource.customizations.health.argoproj.io_Application: | hs = {} hs.status = "Progressing" @@ -25,7 +28,7 @@ argo-cd: hajimari.io/appName: ArgoCD hajimari.io/icon: robot hosts: - - &host argocd.khuedoan.com + - 
&host argocd.jupiter.mein.nl tls: - secretName: argocd-tls-certificate hosts: @@ -37,8 +40,13 @@ argo-cd: dex: enabled: false controller: + args: + repoServerTimeoutSeconds: "180" metrics: *metrics repoServer: + env: + - name: "ARGOCD_EXEC_TIMEOUT" + value: "5m" metrics: *metrics redis: metrics: *metrics diff --git a/bootstrap/root/values-seed.yaml b/bootstrap/root/values-seed.yaml index ae729830c4..60766a7846 100644 --- a/bootstrap/root/values-seed.yaml +++ b/bootstrap/root/values-seed.yaml @@ -1,2 +1,2 @@ gitops: - repo: https://github.com/khuedoan/homelab + repo: https://github.com/petermein/homelab diff --git a/bootstrap/root/values.yaml b/bootstrap/root/values.yaml index 37e96eec1e..a6b5b7cf0f 100644 --- a/bootstrap/root/values.yaml +++ b/bootstrap/root/values.yaml @@ -7,3 +7,4 @@ stacks: - name: system - name: platform - name: apps + - name: home \ No newline at end of file diff --git a/disable/external-dns/Chart.yaml b/disable/external-dns/Chart.yaml new file mode 100644 index 0000000000..af442b2831 --- /dev/null +++ b/disable/external-dns/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: external-dns +version: 0.0.0 +dependencies: + - name: external-dns + version: 1.7.1 + repository: https://kubernetes-sigs.github.io/external-dns/ diff --git a/disable/external-dns/values.yaml b/disable/external-dns/values.yaml new file mode 100644 index 0000000000..21b1ccf773 --- /dev/null +++ b/disable/external-dns/values.yaml @@ -0,0 +1,15 @@ +external-dns: + provider: cloudflare + env: + - name: CF_API_TOKEN + valueFrom: + secretKeyRef: + name: cloudflare-api-token + key: value + extraArgs: + - --annotation-filter=external-dns.alpha.kubernetes.io/exclude notin (true) + + metrics: + enabled: true + serviceMonitor: + enabled: true diff --git a/disable/kured/Chart.yaml b/disable/kured/Chart.yaml new file mode 100644 index 0000000000..400314fcf1 --- /dev/null +++ b/disable/kured/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: kured +version: 0.0.0 +dependencies: +- name: 
kured + version: 4.0.3 + repository: https://kubereboot.github.io/charts diff --git a/system/kured/values.yaml b/disable/kured/values.yaml similarity index 78% rename from system/kured/values.yaml rename to disable/kured/values.yaml index 27797e2e06..7f6d51f644 100644 --- a/system/kured/values.yaml +++ b/disable/kured/values.yaml @@ -2,4 +2,4 @@ kured: configuration: annotateNodes: true rebootSentinelCommand: sh -c "! needs-restarting --reboothint" - timeZone: Asia/Ho_Chi_Minh + timeZone: Europe/Amsterdam diff --git a/docs/diagrams/provisioning_flow.jpg b/docs/diagrams/provisioning_flow.jpg new file mode 100644 index 0000000000..40fc4c858e Binary files /dev/null and b/docs/diagrams/provisioning_flow.jpg differ diff --git a/docs/how-to-guides/updating-documentation.md b/docs/how-to-guides/updating-documentation.md index e214567511..adaafdf7be 100644 --- a/docs/how-to-guides/updating-documentation.md +++ b/docs/how-to-guides/updating-documentation.md @@ -1,7 +1,7 @@ # Updating documentation (this website) This project uses the [Diátaxis](https://diataxis.fr) technical documentation framework. -The website is generated using [Material for MkDocs](https://squidfunk.github.io/mkdocs-material) and can be viewed at [homelab.khuedoan.com](https://homelab.khuedoan.com). +The website is generated using [Material for MkDocs](https://squidfunk.github.io/mkdocs-material) and can be viewed at [homelab.jupiter.mein.nl](https://homelab.jupiter.mein.nl). 
There are 4 main parts: @@ -34,7 +34,7 @@ It's running on my other cluster in the [khuedoan/horus](https://github.com/khue - + diff --git a/docs/how-to-guides/use-both-github-and-gitea.md b/docs/how-to-guides/use-both-github-and-gitea.md index d4f6577267..b173039b08 100644 --- a/docs/how-to-guides/use-both-github-and-gitea.md +++ b/docs/how-to-guides/use-both-github-and-gitea.md @@ -5,7 +5,7 @@ Even though we self-host Gitea, you may still want to use GitHub as a backup and Add both push URLs (replace my repositories with yours): ```sh -git remote set-url --add --push origin git@git.khuedoan.com:ops/homelab +git remote set-url --add --push origin git@git.jupiter.mein.nl:ops/homelab git remote set-url --add --push origin git@github.com:khuedoan/homelab ``` diff --git a/docs/installation/production/configuration.md b/docs/installation/production/configuration.md index 5df750f335..cb405fea0e 100644 --- a/docs/installation/production/configuration.md +++ b/docs/installation/production/configuration.md @@ -31,7 +31,7 @@ make configure ``` Text editor (nvim): Enter seed repo (github.com/khuedoan/homelab): github.com/example/homelab - Enter your domain (khuedoan.com): example.com + Enter your domain (jupiter.mein.nl): example.com ``` It will prompt you to edit the inventory: diff --git a/docs/installation/production/external-resources.md b/docs/installation/production/external-resources.md index 6704e82aa6..0534ea93f1 100644 --- a/docs/installation/production/external-resources.md +++ b/docs/installation/production/external-resources.md @@ -43,7 +43,7 @@ If you decide to use a [different Terraform backend](https://www.terraform.io/la - + diff --git a/docs/src/images/provisioning_flow.jpg b/docs/src/images/provisioning_flow.jpg new file mode 100644 index 0000000000..40fc4c858e Binary files /dev/null and b/docs/src/images/provisioning_flow.jpg differ diff --git a/external/modules/cloudflare/main.tf b/external/modules/cloudflare/main.tf index dcdb0bda7e..156eb24ea2 100644 
--- a/external/modules/cloudflare/main.tf +++ b/external/modules/cloudflare/main.tf @@ -1,5 +1,5 @@ data "cloudflare_zone" "zone" { - name = "khuedoan.com" + name = "mein.nl" } data "cloudflare_api_token_permission_groups" "all" {} @@ -24,9 +24,9 @@ resource "random_password" "tunnel_secret" { special = false } -resource "cloudflare_argo_tunnel" "homelab" { +resource "cloudflare_argo_tunnel" "jupiter" { account_id = var.cloudflare_account_id - name = "homelab" + name = "jupiter" secret = base64encode(random_password.tunnel_secret.result) } @@ -34,8 +34,8 @@ resource "cloudflare_argo_tunnel" "homelab" { resource "cloudflare_record" "tunnel" { zone_id = data.cloudflare_zone.zone.id type = "CNAME" - name = "homelab-tunnel" - value = "${cloudflare_argo_tunnel.homelab.id}.cfargotunnel.com" + name = "jupiter-tunnel" + value = "${cloudflare_argo_tunnel.jupiter.id}.cfargotunnel.com" proxied = false ttl = 1 # Auto } @@ -49,15 +49,15 @@ resource "kubernetes_secret" "cloudflared_credentials" { data = { "credentials.json" = jsonencode({ AccountTag = var.cloudflare_account_id - TunnelName = cloudflare_argo_tunnel.homelab.name - TunnelID = cloudflare_argo_tunnel.homelab.id + TunnelName = cloudflare_argo_tunnel.jupiter.name + TunnelID = cloudflare_argo_tunnel.jupiter.id TunnelSecret = base64encode(random_password.tunnel_secret.result) }) } } resource "cloudflare_api_token" "external_dns" { - name = "homelab_external_dns" + name = "jupiter_external_dns" policy { permission_groups = [ @@ -88,7 +88,7 @@ resource "kubernetes_secret" "external_dns_token" { } resource "cloudflare_api_token" "cert_manager" { - name = "homelab_cert_manager" + name = "jupiter_cert_manager" policy { permission_groups = [ diff --git a/external/modules/cloudflare/versions.tf b/external/modules/cloudflare/versions.tf index b91692591e..847c09b009 100644 --- a/external/modules/cloudflare/versions.tf +++ b/external/modules/cloudflare/versions.tf @@ -12,7 +12,7 @@ terraform { http = { source = "hashicorp/http" - 
version = "~> 2.1.0" + version = "~> 3.4.0" } } } diff --git a/external/terraform.tfstate.backup b/external/terraform.tfstate.backup new file mode 100644 index 0000000000..fbe4ce3313 --- /dev/null +++ b/external/terraform.tfstate.backup @@ -0,0 +1,722 @@ +{ + "version": 4, + "terraform_version": "1.3.1", + "serial": 63, + "lineage": "021adf4e-968c-6935-be21-746921f43f25", + "outputs": {}, + "resources": [ + { + "module": "module.cloudflare", + "mode": "data", + "type": "cloudflare_api_token_permission_groups", + "name": "all", + "provider": "module.cloudflare.provider[\"registry.terraform.io/cloudflare/cloudflare\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "37f58c2e50041f9eed30c0006b111b44", + "permissions": { + "API Gateway Read": "6ced5d0d69b1422396909a62c38ab41b", + "API Gateway Write": "f0235726de25444a84f704b7c93afadf", + "API Tokens Read": "0cc3a61731504c89b99ec1be78b77aa0", + "API Tokens Write": "686d18d5ac6c441c867cbf6771e58a0a", + "Access: Apps and Policies Read": "eb258a38ea634c86a0c89da6b27cb6b6", + "Access: Apps and Policies Revoke": "6c9d1cfcfc6840a987d1b5bfb880a841", + "Access: Apps and Policies Write": "959972745952452f8be2452be8cbb9f2", + "Access: Audit Logs Read": "b05b28e839c54467a7d6cba5d3abb5a3", + "Access: Certificates Read": "4f3196a5c95747b6ad82e34e1d0a694f", + "Access: Certificates Write": "29d3afbfd4054af9accdd1118815ed05", + "Access: Device Posture Read": "0f4841f80adb4bada5a09493300e7f8d", + "Access: Device Posture Write": "2fc1072ee6b743828db668fcb3f9dee7", + "Access: Organizations, Identity Providers, and Groups Read": "26bc23f853634eb4bff59983b9064fde", + "Access: Organizations, Identity Providers, and Groups Revoke": "7121a0c7e9ed46e3829f9cca2bb572aa", + "Access: Organizations, Identity Providers, and Groups Write": "bfe0d8686a584fa680f4c53b5eb0de6d", + "Access: Service Tokens Read": "91f7ce32fa614d73b7e1fc8f0e78582b", + "Access: Service Tokens Write": "a1c0fec57cf94af79479a6d827fa518c", + "Account 
Analytics Read": "b89a480218d04ceb98b4fe57ca29dc1f", + "Account Custom Pages Read": "c57ea647ef654b47bc8944fa739b570d", + "Account Custom Pages Write": "8a9d35a7c8504208ad5c3e8d58e6162d", + "Account Firewall Access Rules Read": "de7a688cc47d43bd9ea700b467a09c96", + "Account Firewall Access Rules Write": "a416acf9ef5a4af19fb11ed3b96b1fe6", + "Account Rule Lists Read": "4f1071168de8466e9808de86febfc516", + "Account Rule Lists Write": "2edbf20661fd4661b0fe10e9e12f485c", + "Account Rulesets Read": "fb39996ee9044d2a8725921e02744b39", + "Account Rulesets Write": "56907406c3d548ed902070ec4df0e328", + "Account Settings Read": "c1fde68c7bcc44588cbb6ddbc16d6480", + "Account Settings Write": "1af1fa2adc104452b74a9a3364202f20", + "Account WAF Read": "56b2af4817c84ad99187911dc3986c23", + "Account WAF Write": "cde8c82463b6414ca06e46b9633f52a6", + "Analytics Read": "9c88f9c5bce24ce7af9a958ba9c504db", + "Apps Write": "094547ab6e77498c8c4dfa87fadd5c51", + "Argo Tunnel Read": "efea2ab8357b47888938f101ae5e053f", + "Argo Tunnel Write": "c07321b023e944ff818fec44d8203567", + "Billing Read": "7cf72faf220841aabcfdfab81c43c4f6", + "Billing Write": "6c80e02421494afc9ae14414ed442632", + "Bot Management Read": "07bea2220b2343fa9fae15656c0d8e88", + "Bot Management Write": "3b94c49258ec4573b06d51d99b6416c0", + "Cache Purge": "e17beae8b8cb423a99b1730f21238bed", + "Cache Settings Read": "3245da1cf36c45c3847bb9b483c62f97", + "Cache Settings Write": "9ff81cbbe65c400b97d92c3c1033cab6", + "China Network Steering Read": "9ade9cfc8f8949bcb2371be2f0ec8db1", + "China Network Steering Write": "c6f6338ceae545d0b90daaa1fed855e6", + "Config Settings Read": "20e5ea084b2f491c86b8d8d90abff905", + "Config Settings Write": "06f0526e6e464647bd61b63c54935235", + "Custom Errors Read": "a2b55cd504d44ef18b7ba6a7f2b8fbb1", + "Custom Errors Write": "a9dba34cf5814d4ab2007b4ada0045bd", + "Custom Pages Read": "a2431ca73b7d41f99c53303027392586", + "Custom Pages Write": "c244ec076974430a88bda1cdd992d0d9", + "D1 Read": 
"192192df92ee43ac90f2aeeffce67e35", + "D1 Write": "09b2857d1c31407795e75e3fed8617a1", + "DDoS Protection Read": "af1c363c35ba45b9a8c682ae50eb3f99", + "DDoS Protection Write": "d44ed14bcc4340b194d3824d60edad3f", + "DNS Firewall Read": "5f48a472240a4b489a21d43bd19a06e1", + "DNS Firewall Write": "da6d2d6f2ec8442eaadda60d13f42bca", + "DNS Read": "82e64a83756745bbbb1c9c2701bf816b", + "DNS Write": "4755a26eedb94da69e1066d98aa820be", + "Disable ESC Read": "e199d584e69344eba202452019deafe3", + "Disable ESC Write": "18555e39c5ba40d284dde87eda845a90", + "Dynamic URL Redirects Read": "d8e12db741544d1586ec1d6f5d3c7786", + "Dynamic URL Redirects Write": "74e1036f577a48528b78d2413b40538d", + "Email Routing Addresses Read": "5272e56105d04b5897466995b9bd4643", + "Email Routing Addresses Write": "e4589eb09e63436686cd64252a3aebeb", + "Email Routing Rules Read": "1b600d9d8062443e986a973f097e728a", + "Email Routing Rules Write": "79b3ec0d10ce4148a8f8bdc0cc5f97f2", + "Firewall Services Read": "4ec32dfcb35641c5bb32d5ef1ab963b4", + "Firewall Services Write": "43137f8d07884d3198dc0ee77ca6e79b", + "HTTP Applications Read": "6b60a5a87cae475da7e76e77e4209dd5", + "HTTP Applications Write": "4736c02a9f224c8196ae5b127beae78c", + "HTTP DDoS Managed Ruleset Read": "c49f8d15f9f44885a544d945ef5aa6ae", + "HTTP DDoS Managed Ruleset Write": "b88a3aa889474524bccea5cf18f122bf", + "Health Checks Read": "fac65912d42144aa86b7dd33281bf79e", + "Health Checks Write": "e0dc25a0fbdf4286b1ea100e3256b0e3", + "IP Prefixes: BGP On Demand Read": "e763fae6ee95443b8f56f19213c5f2a5", + "IP Prefixes: BGP On Demand Write": "2ae23e4939d54074b7d252d27ce75a77", + "IP Prefixes: Read": "27beb7f8333b41e2b946f0e23cd8091e", + "IP Prefixes: Write": "92b8234e99f64e05bbbc59e1dc0f76b6", + "Images Read": "0cf6473ad41449e7b7b743d14fc20c60", + "Images Write": "618ec6c64a3a42f8b08bdcb147ded4e4", + "Intel Read": "df1577df30ee46268f9470952d7b0cdf", + "Intel Write": "92209474242d459690e2cdb1985eaa6c", + "L4 DDoS Managed Ruleset Read": 
"4657621393f94f83b8ef94adba382e48", + "L4 DDoS Managed Ruleset Write": "7a4c3574054a4d0ba7c692893ba8bdd4", + "Load Balancers Read": "e9a975f628014f1d85b723993116f7d5", + "Load Balancers Write": "6d7f2f5f5b1d4a0e9081fdc98d432fd1", + "Load Balancing: Monitors and Pools Read": "9d24387c6e8544e2bc4024a03991339f", + "Load Balancing: Monitors and Pools Write": "d2a1802cc9a34e30852f8b33869b2f3c", + "Logs Read": "6a315a56f18441e59ed03352369ae956", + "Logs Write": "3e0b5820118e47f3922f7c989e673882", + "Magic Firewall Packet Captures - Read PCAPs API": "3a46c728a0a040d5a65cd8e2f3bc6935", + "Magic Firewall Packet Captures - Write PCAPs API": "4ea7d6421801452dbf07cef853a5ef39", + "Magic Firewall Read": "02b71f12bb0748e9af8126494e181342", + "Magic Firewall Write": "8bd1dac84d3d43e7bfb43145f010a15c", + "Magic Network Monitoring Admin": "8e6ed1ef6e864ad0ae477ceffa5aa5eb", + "Magic Network Monitoring Config Read": "3d85e9514f944bb4912c5871d92e5af5", + "Magic Network Monitoring Config Write": "09c77baecb6341a2b1ca2c62b658d290", + "Magic Transit Prefix Read": "967ecf860a244dd1911a0331a0af582a", + "Magic Transit Prefix Write": "0bc09a3cd4b54605990df4e307f138e1", + "Managed headers Read": "319f5059d33a410da0fac4d35a716157", + "Managed headers Write": "0fd9d56bc2da43ad8ea22d610dd8cab1", + "Mass URL Redirects Read": "429a068902904c5a9ed9fc267c67da9a", + "Mass URL Redirects Write": "abe78e2276664f4db588c1f675a77486", + "Memberships Read": "3518d0f75557482e952c6762d3e64903", + "Memberships Write": "9201bc6f42d440968aaab0c6f17ebb1d", + "Origin Read": "7b32a91ece3140d4b3c2c56f23fc8e35", + "Origin Write": "a4308c6855c84eb2873e01b6cc85cbb3", + "Page Rules Read": "b415b70a4fd1412886f164451f20405c", + "Page Rules Write": "ed07f6c337da4195b4e72a1fb2c6bcae", + "Pages Read": "e247aedd66bd41cc9193af0213416666", + "Pages Write": "8d28297797f24fb8a0c332fe0866ec89", + "Pubsub Configuration Read": "fd7f886c75a244389e892c4c3c068292", + "Pubsub Configuration Write": "910b6ecca1c5411bb894e787362d1312", + 
"Rule Policies Read": "58abbad6d2ce40abb2594fbe932a2e0e", + "Rule Policies Write": "61ddc58f1da14f95b33b41213360cbeb", + "SSL and Certificates Read": "7b7216b327b04b8fbc8f524e1f9b7531", + "SSL and Certificates Write": "c03055bc037c4ea9afb9a9f104b7b721", + "Sanitize Read": "853643ed57244ed1a05a7c024af9ab5a", + "Sanitize Write": "89bb8c37d46042e98b84560eaaa6379f", + "Select Configuration Read": "595409c54a24444b80a495620b2d614c", + "Select Configuration Write": "235eac9bb64942b49cb805cc851cb000", + "Stream Read": "de21485a24744b76a004aa153898f7fe", + "Stream Write": "714f9c13a5684c2885a793f5edb36f59", + "Teams Read": "3f376c8e6f764a938b848bd01c8995c4", + "Teams Report": "efb81b5cd37d49f3be1da9363a6d7a19", + "Teams Write": "b33f02c6f7284e05a6f20741c0bb0567", + "Transform Rules Read": "a9a99455bf3245f6a5a244f909d74830", + "Transform Rules Write": "ae16e88bc7814753a1894c7ce187ab72", + "Turnstile Sites Read": "5d78fd7895974fd0bdbbbb079482721b", + "Turnstile Sites Write": "755c05aa014b4f9ab263aa80b8167bd8", + "User Details Read": "8acbe5bb0d54464ab867149d7f7cf8ac", + "User Details Write": "55a5e17cc99e4a3fa1f3432d262f2e55", + "Waiting Rooms Read": "cab5202d07ef47beae788e6bc95cb6fe", + "Waiting Rooms Write": "24fc124dc8254e0db468e60bf410c800", + "Web3 Hostnames Read": "8e31f574901c42e8ad89140b28d42112", + "Web3 Hostnames Write": "5ea6da42edb34811a78d1b007557c0ca", + "Workers KV Storage Read": "8b47d2786a534c08a1f94ee8f9f599ef", + "Workers KV Storage Write": "f7f0eda5697f475c90846e879bab8666", + "Workers R2 Storage Read": "b4992e1108244f5d8bfbd5744320c2e1", + "Workers R2 Storage Write": "bf7481a1826f439697cb59a20b22293e", + "Workers Routes Read": "2072033d694d415a936eaeb94e6405b8", + "Workers Routes Write": "28f4b596e7d643029c524985477ae49a", + "Workers Scripts Read": "1a71c399035b4950a1bd1466bbe4f420", + "Workers Scripts Write": "e086da7e2179491d91ee5f35b3ca210a", + "Workers Tail Read": "05880cd1bdc24d8bae0be2136972816b", + "Zaraz Admin": 
"cdeb15b336e640a2965df8c65052f1e0", + "Zaraz Read": "5bdbde7e76144204a244274eac3eb0eb", + "Zone Read": "c8fed203ed3043cba015a93ad1616f1f", + "Zone Settings Read": "517b21aee92c4d89936c976ba6e4be55", + "Zone Settings Write": "3030687196b94b638145a3953da2b699", + "Zone Transform Rules Read": "211a4c0feb3e43b3a2d41f1443a433e7", + "Zone Transform Rules Write": "0ac90a90249747bca6b047d97f0803e9", + "Zone WAF Read": "dbc512b354774852af2b5a5f4ba3d470", + "Zone WAF Write": "fb6778dc191143babbfaa57993f1d275", + "Zone Write": "e6d2666161e84845a636613608cee8d5" + } + }, + "sensitive_attributes": [] + } + ] + }, + { + "module": "module.cloudflare", + "mode": "data", + "type": "cloudflare_zone", + "name": "zone", + "provider": "module.cloudflare.provider[\"registry.terraform.io/cloudflare/cloudflare\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "account_id": "bade361765d704ba91e29296a025e4e3", + "id": "f2dbeb2dca3e055df71df866b8188ce7", + "name": "mein.nl", + "name_servers": [ + "jacqueline.ns.cloudflare.com", + "keanu.ns.cloudflare.com" + ], + "paused": false, + "plan": "Free Website", + "status": "active", + "vanity_name_servers": [], + "zone_id": "f2dbeb2dca3e055df71df866b8188ce7" + }, + "sensitive_attributes": [] + } + ] + }, + { + "module": "module.cloudflare", + "mode": "data", + "type": "http", + "name": "public_ipv4", + "provider": "provider[\"registry.terraform.io/hashicorp/http\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "body": "213.34.1.38\n", + "id": "https://ipv4.icanhazip.com", + "request_headers": null, + "response_headers": { + "Access-Control-Allow-Methods": "GET", + "Access-Control-Allow-Origin": "*", + "Alt-Svc": "h3=\":443\"; ma=86400, h3-29=\":443\"; ma=86400", + "Cf-Ray": "765e0ab37b250e3b-AMS", + "Content-Length": "12", + "Content-Type": "text/plain", + "Date": "Sun, 06 Nov 2022 13:05:37 GMT", + "Server": "cloudflare", + "Set-Cookie": 
"__cf_bm=taclcWAQIB54BH1DHZr6one2Tyy0ahfBWfeRHiQPo9M-1667739937-0-AZ/mqMWjkq3yWY/aZxjqb84LIhJw9el9kC0uA2i3FJrFYmFqs03KrtotC3XRp80M0LV0QcY0V7l5q3Vr1ybbHP0=; path=/; expires=Sun, 06-Nov-22 13:35:37 GMT; domain=.icanhazip.com; HttpOnly; Secure; SameSite=None", + "Vary": "Accept-Encoding" + }, + "url": "https://ipv4.icanhazip.com" + }, + "sensitive_attributes": [] + } + ] + }, + { + "module": "module.cloudflare", + "mode": "managed", + "type": "cloudflare_api_token", + "name": "cert_manager", + "provider": "module.cloudflare.provider[\"registry.terraform.io/cloudflare/cloudflare\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "condition": [ + { + "request_ip": [ + { + "in": [ + "213.34.1.38/32" + ], + "not_in": [] + } + ] + } + ], + "id": "d99082d470ef8ae84df4628aa2fd8660", + "issued_on": "2022-11-06T13:00:43Z", + "modified_on": "2022-11-06T13:03:54Z", + "name": "jupiter_cert_manager", + "policy": [ + { + "effect": "allow", + "permission_groups": [ + "c8fed203ed3043cba015a93ad1616f1f", + "4755a26eedb94da69e1066d98aa820be" + ], + "resources": { + "com.cloudflare.api.account.zone.*": "*" + } + } + ], + "status": "active", + "value": "-BaeJeyJmaBJo2UXQfgrQSiKCNajRm755QaCAy1m" + }, + "sensitive_attributes": [], + "private": "bnVsbA==", + "dependencies": [ + "module.cloudflare.data.cloudflare_api_token_permission_groups.all", + "module.cloudflare.data.http.public_ipv4" + ] + } + ] + }, + { + "module": "module.cloudflare", + "mode": "managed", + "type": "cloudflare_api_token", + "name": "external_dns", + "provider": "module.cloudflare.provider[\"registry.terraform.io/cloudflare/cloudflare\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "condition": [ + { + "request_ip": [ + { + "in": [ + "213.34.1.38/32" + ], + "not_in": [] + } + ] + } + ], + "id": "72a70be82b532edf6f20426c0d3cbc2f", + "issued_on": "2022-11-06T13:00:43Z", + "modified_on": "2022-11-06T13:03:53Z", + "name": "jupiter_external_dns", + "policy": [ + { + "effect": "allow", 
+ "permission_groups": [ + "c8fed203ed3043cba015a93ad1616f1f", + "4755a26eedb94da69e1066d98aa820be" + ], + "resources": { + "com.cloudflare.api.account.zone.*": "*" + } + } + ], + "status": "active", + "value": "1RckTDLQNCZnfLEYvlRwy792gMs-e9LYrMlOQglz" + }, + "sensitive_attributes": [], + "private": "bnVsbA==", + "dependencies": [ + "module.cloudflare.data.cloudflare_api_token_permission_groups.all", + "module.cloudflare.data.http.public_ipv4" + ] + } + ] + }, + { + "module": "module.cloudflare", + "mode": "managed", + "type": "cloudflare_argo_tunnel", + "name": "jupiter", + "provider": "module.cloudflare.provider[\"registry.terraform.io/cloudflare/cloudflare\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "account_id": "bade361765d704ba91e29296a025e4e3", + "cname": "6cc0d529-f729-45c1-891f-281544f35b50.cfargotunnel.com", + "id": "6cc0d529-f729-45c1-891f-281544f35b50", + "name": "jupiter", + "secret": "eHhSc29BVUUzblh0UnV5UDVZdzdCUmJUMEJBWHVzNlE3YTk4VFAwVjZuaURXUjRST3ROMk56dXZnNVk5c21COQ==" + }, + "sensitive_attributes": [ + [ + { + "type": "get_attr", + "value": "secret" + } + ] + ], + "private": "bnVsbA==", + "dependencies": [ + "module.cloudflare.random_password.tunnel_secret" + ] + } + ] + }, + { + "module": "module.cloudflare", + "mode": "managed", + "type": "kubernetes_secret", + "name": "cert_manager_token", + "provider": "provider[\"registry.terraform.io/hashicorp/kubernetes\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "binary_data": null, + "data": { + "api-token": "-BaeJeyJmaBJo2UXQfgrQSiKCNajRm755QaCAy1m" + }, + "id": "cert-manager/cloudflare-api-token", + "immutable": false, + "metadata": [ + { + "annotations": {}, + "generate_name": "", + "generation": 0, + "labels": {}, + "name": "cloudflare-api-token", + "namespace": "cert-manager", + "resource_version": "2053006", + "uid": "3b9abcc6-36a7-45e9-894f-95c5817d3ce1" + } + ], + "type": "Opaque" + }, + "sensitive_attributes": [ + [ + { + "type": "get_attr", + 
"value": "data" + }, + { + "type": "index", + "value": { + "value": "api-token", + "type": "string" + } + } + ] + ], + "private": "bnVsbA==", + "dependencies": [ + "module.cloudflare.cloudflare_api_token.cert_manager", + "module.cloudflare.data.cloudflare_api_token_permission_groups.all", + "module.cloudflare.data.http.public_ipv4" + ] + } + ] + }, + { + "module": "module.cloudflare", + "mode": "managed", + "type": "kubernetes_secret", + "name": "cloudflared_credentials", + "provider": "provider[\"registry.terraform.io/hashicorp/kubernetes\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "binary_data": null, + "data": { + "credentials.json": "{\"AccountTag\":\"bade361765d704ba91e29296a025e4e3\",\"TunnelID\":\"6cc0d529-f729-45c1-891f-281544f35b50\",\"TunnelName\":\"jupiter\",\"TunnelSecret\":\"eHhSc29BVUUzblh0UnV5UDVZdzdCUmJUMEJBWHVzNlE3YTk4VFAwVjZuaURXUjRST3ROMk56dXZnNVk5c21COQ==\"}" + }, + "id": "cloudflared/cloudflared-credentials", + "immutable": false, + "metadata": [ + { + "annotations": null, + "generate_name": "", + "generation": 0, + "labels": null, + "name": "cloudflared-credentials", + "namespace": "cloudflared", + "resource_version": "2053733", + "uid": "e1aca647-6ad2-48d1-81e9-6556f7c09e3f" + } + ], + "type": "Opaque" + }, + "sensitive_attributes": [ + [ + { + "type": "get_attr", + "value": "data" + }, + { + "type": "index", + "value": { + "value": "credentials.json", + "type": "string" + } + } + ] + ], + "private": "bnVsbA==", + "dependencies": [ + "module.cloudflare.cloudflare_argo_tunnel.jupiter", + "module.cloudflare.random_password.tunnel_secret" + ] + } + ] + }, + { + "module": "module.cloudflare", + "mode": "managed", + "type": "kubernetes_secret", + "name": "external_dns_token", + "provider": "provider[\"registry.terraform.io/hashicorp/kubernetes\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "binary_data": null, + "data": { + "value": "1RckTDLQNCZnfLEYvlRwy792gMs-e9LYrMlOQglz" + }, + "id": 
"external-dns/cloudflare-api-token", + "immutable": false, + "metadata": [ + { + "annotations": {}, + "generate_name": "", + "generation": 0, + "labels": {}, + "name": "cloudflare-api-token", + "namespace": "external-dns", + "resource_version": "2053008", + "uid": "595051ae-11c9-4c1b-b885-8be600455aac" + } + ], + "type": "Opaque" + }, + "sensitive_attributes": [ + [ + { + "type": "get_attr", + "value": "data" + }, + { + "type": "index", + "value": { + "value": "value", + "type": "string" + } + } + ] + ], + "private": "bnVsbA==", + "dependencies": [ + "module.cloudflare.cloudflare_api_token.external_dns", + "module.cloudflare.data.cloudflare_api_token_permission_groups.all", + "module.cloudflare.data.http.public_ipv4" + ] + } + ] + }, + { + "module": "module.cloudflare", + "mode": "managed", + "type": "random_password", + "name": "tunnel_secret", + "provider": "provider[\"registry.terraform.io/hashicorp/random\"]", + "instances": [ + { + "schema_version": 3, + "attributes": { + "bcrypt_hash": "$2a$10$ojTM2sFC2xzrvppFIXivpOcpNIvHDsetsJqIWDS5/5GCq3GyBf/Fu", + "id": "none", + "keepers": null, + "length": 64, + "lower": true, + "min_lower": 0, + "min_numeric": 0, + "min_special": 0, + "min_upper": 0, + "number": true, + "numeric": true, + "override_special": null, + "result": "xxRsoAUE3nXtRuyP5Yw7BRbT0BAXus6Q7a98TP0V6niDWR4ROtN2Nzuvg5Y9smB9", + "special": false, + "upper": true + }, + "sensitive_attributes": [] + } + ] + }, + { + "module": "module.zerotier", + "mode": "managed", + "type": "kubernetes_secret", + "name": "router", + "provider": "provider[\"registry.terraform.io/hashicorp/kubernetes\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "binary_data": null, + "data": { + "ZEROTIER_IDENTITY_PUBLIC": "db0463b05f:0:e87be2495502a43210b29d7add5aa5671207f6ef63b8754db732d9c0784d6c24bccf2a8bf585b17b6f8ba93dd642aee5d8d08aa76b2acccda3cd3a742a50bd85", + "ZEROTIER_IDENTITY_SECRET": 
"db0463b05f:0:e87be2495502a43210b29d7add5aa5671207f6ef63b8754db732d9c0784d6c24bccf2a8bf585b17b6f8ba93dd642aee5d8d08aa76b2acccda3cd3a742a50bd85:3c9412a5cc67b12585338b9cb06ad204508421e9d04dcbd89d5aa36c41cffe725c90a88b9fc0836d108c5c4308169d067d6d2a65a5ff0eb0a398838cf4dc8f49", + "ZEROTIER_NETWORK_ID": "52b337794fb44bbd" + }, + "id": "zerotier/zerotier-router", + "immutable": false, + "metadata": [ + { + "annotations": {}, + "generate_name": "", + "generation": 0, + "labels": {}, + "name": "zerotier-router", + "namespace": "zerotier", + "resource_version": "2053007", + "uid": "76c24793-371b-4382-89f2-a153ee8e8ef2" + } + ], + "type": "Opaque" + }, + "sensitive_attributes": [ + [ + { + "type": "get_attr", + "value": "data" + }, + { + "type": "index", + "value": { + "value": "ZEROTIER_IDENTITY_SECRET", + "type": "string" + } + } + ] + ], + "private": "bnVsbA==", + "dependencies": [ + "module.zerotier.zerotier_identity.router", + "module.zerotier.zerotier_network.network" + ] + } + ] + }, + { + "module": "module.zerotier", + "mode": "managed", + "type": "zerotier_identity", + "name": "router", + "provider": "module.zerotier.provider[\"registry.terraform.io/zerotier/zerotier\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "db0463b05f", + "private_key": "db0463b05f:0:e87be2495502a43210b29d7add5aa5671207f6ef63b8754db732d9c0784d6c24bccf2a8bf585b17b6f8ba93dd642aee5d8d08aa76b2acccda3cd3a742a50bd85:3c9412a5cc67b12585338b9cb06ad204508421e9d04dcbd89d5aa36c41cffe725c90a88b9fc0836d108c5c4308169d067d6d2a65a5ff0eb0a398838cf4dc8f49", + "public_key": "db0463b05f:0:e87be2495502a43210b29d7add5aa5671207f6ef63b8754db732d9c0784d6c24bccf2a8bf585b17b6f8ba93dd642aee5d8d08aa76b2acccda3cd3a742a50bd85" + }, + "sensitive_attributes": [], + "private": "bnVsbA==" + } + ] + }, + { + "module": "module.zerotier", + "mode": "managed", + "type": "zerotier_member", + "name": "router", + "provider": "module.zerotier.provider[\"registry.terraform.io/zerotier/zerotier\"]", + 
"instances": [ + { + "schema_version": 0, + "attributes": { + "allow_ethernet_bridging": true, + "authorized": true, + "capabilities": [], + "description": "Managed by Terraform", + "hidden": false, + "id": "52b337794fb44bbd/db0463b05f", + "ip_assignments": [ + "10.147.17.1" + ], + "member_id": "db0463b05f", + "name": "router", + "network_id": "52b337794fb44bbd", + "no_auto_assign_ips": true, + "tags": [] + }, + "sensitive_attributes": [], + "private": "bnVsbA==", + "dependencies": [ + "module.zerotier.zerotier_identity.router", + "module.zerotier.zerotier_network.network" + ] + } + ] + }, + { + "module": "module.zerotier", + "mode": "managed", + "type": "zerotier_network", + "name": "network", + "provider": "module.zerotier.provider[\"registry.terraform.io/zerotier/zerotier\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "assign_ipv4": [ + { + "zerotier": true + } + ], + "assign_ipv6": [ + { + "rfc4193": false, + "sixplane": false, + "zerotier": false + } + ], + "assignment_pool": [ + { + "end": "10.147.17.255", + "start": "10.147.17.0" + } + ], + "creation_time": 1667739642875, + "description": "Homelab network", + "enable_broadcast": true, + "flow_rules": "accept;", + "id": "52b337794fb44bbd", + "multicast_limit": 32, + "name": "homelab", + "private": true, + "route": [ + { + "target": "10.147.17.0/24", + "via": "" + }, + { + "target": "192.168.1.0/24", + "via": "10.147.17.1" + } + ] + }, + "sensitive_attributes": [], + "private": "bnVsbA==" + } + ] + } + ], + "check_results": [] +} diff --git a/external/versions.tf b/external/versions.tf index 18787e7e87..021ad50ac5 100644 --- a/external/versions.tf +++ b/external/versions.tf @@ -1,15 +1,11 @@ terraform { required_version = "~> 1.3.0" - backend "remote" { - hostname = "app.terraform.io" - organization = "khuedoan" + backend "local" { - workspaces { - name = "homelab-external" - } } + required_providers { cloudflare = { source = "cloudflare/cloudflare" @@ -23,7 +19,7 @@ terraform { http = { 
source = "hashicorp/http" - version = "~> 2.1.0" + version = "~> 3.4.0" } } } diff --git a/home/adminer/Chart.yaml b/home/adminer/Chart.yaml new file mode 100644 index 0000000000..08eac5d74e --- /dev/null +++ b/home/adminer/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v2 +name: adminer +version: 0.0.0 +dependencies: + - name: adminer + version: 0.2.0 + - name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 2.x +engine: gotpl \ No newline at end of file diff --git a/home/adminer/charts/adminer/.circleci/config.yml b/home/adminer/charts/adminer/.circleci/config.yml new file mode 100644 index 0000000000..5309c82587 --- /dev/null +++ b/home/adminer/charts/adminer/.circleci/config.yml @@ -0,0 +1,14 @@ +version: 2 +jobs: + build: + docker: + - image: alpine + steps: + - checkout + - run: + name: helm-github-pages + environment: + - GITHUB_PAGES_REPO: cetic/helm-charts + - HELM_CHART: adminer + - HELM_VERSION: 3.2.4 + command: wget -O - https://raw.githubusercontent.com/cetic/helm-chart-publisher/master/publish.sh | sh \ No newline at end of file diff --git a/home/adminer/charts/adminer/.github/ISSUE_TEMPLATE/bug_report.md b/home/adminer/charts/adminer/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000..9af2bccefc --- /dev/null +++ b/home/adminer/charts/adminer/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,34 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '[cetic/adminer] issue title' +labels: '' +assignees: '' + +--- + + + +**Describe the bug** +A clear and concise description of what the bug is. 
+ +**Version of Helm and Kubernetes**: + + +**What happened**: + + +**What you expected to happen**: + + +**How to reproduce it** (as minimally and precisely as possible): + + +**Anything else we need to know**: + + diff --git a/home/adminer/charts/adminer/.github/ISSUE_TEMPLATE/feature_request.md b/home/adminer/charts/adminer/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000000..84c4aa6ff0 --- /dev/null +++ b/home/adminer/charts/adminer/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,28 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '[cetic/adminer] issue title' +labels: '' +assignees: '' + +--- + + + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. + diff --git a/home/adminer/charts/adminer/.github/PULL_REQUEST_TEMPLATE.md b/home/adminer/charts/adminer/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000..b2e13b69d2 --- /dev/null +++ b/home/adminer/charts/adminer/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,40 @@ + + +#### What this PR does / why we need it: + +#### Which issue this PR fixes +*(optional, in `fixes #(, fixes #, ...)` format, will close that issue when PR gets merged)* + - fixes # + +#### Special notes for your reviewer: + +#### Checklist +[Place an '[x]' (no spaces) in all applicable fields. Please remove unrelated fields.] 
+- [ ] [DCO](https://github.com/helm/charts/blob/master/CONTRIBUTING.md#sign-your-work) signed +- [ ] Chart Version bumped +- [ ] Variables are documented in the README.md diff --git a/home/adminer/charts/adminer/.gitignore b/home/adminer/charts/adminer/.gitignore new file mode 100644 index 0000000000..3f18c1655b --- /dev/null +++ b/home/adminer/charts/adminer/.gitignore @@ -0,0 +1,7 @@ +*.tgz + +# Helm Charts dependencies +#/charts +#*.lock + +.idea diff --git a/home/adminer/charts/adminer/.helmignore b/home/adminer/charts/adminer/.helmignore new file mode 100644 index 0000000000..fbdd321f57 --- /dev/null +++ b/home/adminer/charts/adminer/.helmignore @@ -0,0 +1,26 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store + +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ + +# Common backup files +*.swp +*.bak +*.tmp +*~ + +# Various IDEs +.project +.idea/ +*.tmproj + +.circleci/ diff --git a/home/adminer/charts/adminer/Chart.yaml b/home/adminer/charts/adminer/Chart.yaml new file mode 100644 index 0000000000..adee9e761e --- /dev/null +++ b/home/adminer/charts/adminer/Chart.yaml @@ -0,0 +1,31 @@ +--- +apiVersion: v2 +name: adminer +appVersion: 4.8.1 +version: 0.2.0 +description: Adminer is a full-featured database management tool written in PHP. Conversely to phpMyAdmin, it consist of a single file ready to deploy to the target server. 
Adminer is available for MySQL, MariaDB, PostgreSQL, SQLite, MS SQL, Oracle, Firebird, SimpleDB, Elasticsearch and MongoDB +home: https://www.adminer.org +icon: https://raw.githubusercontent.com/Kong/docker-official-docs/master/adminer/logo.png +dependencies: + - name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 2.x +engine: gotpl +keywords: + - adminer + - postgres + - mysql + - sqlite + - elasticsearch + - mongodb + - database + - sql +maintainers: + - name: fzalila + email: faiez.zalila@cetic.be + url: https://github.com/fzalila + - name: alexnuttinck + email: alexandre.nuttinck@cetic.be + url: https://github.com/alexnuttinck diff --git a/home/adminer/charts/adminer/LICENSE b/home/adminer/charts/adminer/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/home/adminer/charts/adminer/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/home/adminer/charts/adminer/README.md b/home/adminer/charts/adminer/README.md new file mode 100644 index 0000000000..25eef8257a --- /dev/null +++ b/home/adminer/charts/adminer/README.md @@ -0,0 +1,107 @@ +# Helm Chart for Adminer + +[![CircleCI](https://circleci.com/gh/cetic/helm-adminer.svg?style=svg)](https://circleci.com/gh/cetic/helm-adminer/tree/master) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) ![version](https://img.shields.io/github/tag/cetic/helm-adminer.svg?label=release) + +## Introduction + +This [Helm](https://github.com/kubernetes/helm) chart installs [Adminer](https://www.adminer.org) in a Kubernetes cluster. + +## Prerequisites + +- Kubernetes cluster 1.10+ +- Helm 3.0.0+ +- PV provisioner support in the underlying infrastructure. + +## Installation + +### Add Helm repository + +```bash +helm repo add cetic https://cetic.github.io/helm-charts +helm repo update +``` + +### Configure the chart + +The following items can be set via `--set` flag during installation or configured by editing the `values.yaml` directly (need to download the chart first). + +#### Configure the way how to expose Adminer service: + +- **Ingress**: The ingress controller must be installed in the Kubernetes cluster. +- **ClusterIP**: Exposes the service on a cluster-internal IP. 
Choosing this value makes the service only reachable from within the cluster. +- **NodePort**: Exposes the service on each Node’s IP at a static port (the NodePort). You’ll be able to contact the NodePort service, from outside the cluster, by requesting `NodeIP:NodePort`. +- **LoadBalancer**: Exposes the service externally using a cloud provider’s load balancer. + +### Install the chart + +Install the Adminer helm chart with a release name `my-release`: + +```bash +helm install my-release cetic/adminer +``` + +## Uninstallation + +To uninstall/delete the `my-release` deployment: + +```bash +helm uninstall my-release +``` + +## Configuration + +The following table lists the configurable parameters of the Adminer chart and the default values. + +| Parameter | Description | Default | +| --------------------------------- | ----------------------------------------------------------------------- | --------------------------- | +| **Image** | +| `image.repository` | Image | `adminer` | +| `image.tag` | Image tag | `4.7.7-standalone` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Image pull secrets for private registry | `[]` | +| **Config** | +| `config.plugins` | List of plugins to install. You can find the list of plugins on [GitHub](https://github.com/vrana/adminer/tree/master/plugins)| ``| +| `config.design` | A bundled design to use.
You can find the list of designs on [GitHub](https://github.com/vrana/adminer/tree/master/designs)| ``| +| `config.externalserver` | The default host | `` | +| **Service** | +| `service.type` | Service type | `NodePort` | +| `service.port` | The service port | `80` | +| `service.annotations` | Custom annotations for service | `{}` | +| `service.labels` | Additional custom labels for the service | `{}` | +| `service.loadBalancerIP` | LoadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges`| Addresses that are allowed when svc is `LoadBalancer` | `[]` | +| **Ingress** | +| `ingress.enabled` | Enables Ingress | `false` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.labels` | Custom labels | `{}` | +| `ingress.hosts` | Ingress accepted hostnames | `[]` | +| `ingress.tls` | Ingress TLS configuration | `[]` | +| **Resources** | +| `resources` | CPU/Memory resource requests/limits | `{}` | +| **Tolerations** | +| `tolerations` | Add tolerations | `[]` | +| **NodeSelector** | +| `nodeSelector` | node labels for pod assignment | `{}` | +| **Affinity** | +| `affinity` | node/pod affinities | `{}` | +| **LivenessProbe** | +| `livenessProbe` | Liveness probe settings | `nil` | +| **ReadinessProbe** | +| `readinessProbe` | Readiness probe settings | `nil` | +| `volumeMounts` | Add volumeMount to the adminer container | `nil` | +| `volumes` | Add volumes to the pod | `nil` | + +## Credits + +Initially inspired from https://github.com/mogaal/helm-charts/tree/master/adminer. + +## Contributing + +Feel free to contribute by making a [pull request](https://github.com/cetic/helm-adminer/pull/new/master). + +Please read the official [Contribution Guide](https://github.com/helm/charts/blob/master/CONTRIBUTING.md) from Helm for more information on how you can contribute to this Chart.
+ +## License + +[Apache License 2.0](/LICENSE.md) + diff --git a/home/adminer/charts/adminer/templates/NOTES.txt b/home/adminer/charts/adminer/templates/NOTES.txt new file mode 100644 index 0000000000..7d187a5890 --- /dev/null +++ b/home/adminer/charts/adminer/templates/NOTES.txt @@ -0,0 +1,19 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ if $.Values.ingress.host }}{{.Values.ingress.host}}{{else}}your-cluster-ip{{end}}{{ $.Values.ingress.path }} + {{if not $.Values.ingress.host}} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "adminer.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "adminer.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "adminer.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "adminer.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/home/adminer/charts/adminer/templates/_helpers.tpl b/home/adminer/charts/adminer/templates/_helpers.tpl new file mode 100644 index 0000000000..d223408a51 --- /dev/null +++ b/home/adminer/charts/adminer/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "adminer.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "adminer.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "adminer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/home/adminer/charts/adminer/templates/deployment.yaml b/home/adminer/charts/adminer/templates/deployment.yaml new file mode 100644 index 0000000000..7b1db5fbd0 --- /dev/null +++ b/home/adminer/charts/adminer/templates/deployment.yaml @@ -0,0 +1,74 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "adminer.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "adminer.name" . 
}} + helm.sh/chart: {{ include "adminer.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "adminer.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "adminer.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 8080 + protocol: TCP + env: + - name: ADMINER_PLUGINS + value: {{ .Values.config.plugins }} + - name: ADMINER_DESIGN + value: {{ .Values.config.design }} + - name: ADMINER_DEFAULT_SERVER + value: {{ .Values.config.externalserver }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.volumeMounts }} + volumeMounts: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.livenessProbe }} + livenessProbe: + {{ toYaml . | indent 12 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{ toYaml . | indent 12 }} + {{- end }} + {{- with .Values.volumes }} + volumes: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/home/adminer/charts/adminer/templates/ingress.yaml b/home/adminer/charts/adminer/templates/ingress.yaml new file mode 100644 index 0000000000..56cb3432a8 --- /dev/null +++ b/home/adminer/charts/adminer/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ template "common.names.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.ingress.annotations .Values.commonAnnotations .Values.ingress.certManager }} + annotations: + {{- if .Values.ingress.certManager }} + kubernetes.io/tls-acme: "true" + {{- end }} + {{- if .Values.ingress.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingress.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - host: {{ .Values.ingress.hostname }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) 
"servicePort" "http" "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ .name | quote }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http" "context" $) | nindent 14 }} + {{- end }} + {{- if or .Values.ingress.tls .Values.ingress.extraTls }} + tls: + {{- if .Values.ingress.tls }} + - hosts: + - {{ .Values.ingress.hostname }} + secretName: {{ printf "%s-tls" .Values.ingress.hostname }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingress.extraTls "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/home/adminer/charts/adminer/templates/service.yaml b/home/adminer/charts/adminer/templates/service.yaml new file mode 100644 index 0000000000..1ab66d386f --- /dev/null +++ b/home/adminer/charts/adminer/templates/service.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "adminer.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "adminer.name" . }} + helm.sh/chart: {{ include "adminer.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ with .Values.service.loadBalancerSourceRanges }} +{{ toYaml . 
| indent 4 }} +{{- end }} + {{- end }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: {{ include "adminer.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/home/adminer/charts/adminer/values.yaml b/home/adminer/charts/adminer/values.yaml new file mode 100644 index 0000000000..f557909cbc --- /dev/null +++ b/home/adminer/charts/adminer/values.yaml @@ -0,0 +1,194 @@ +# Default values for adminer. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: adminer + tag: 4.8.1-standalone + pullPolicy: IfNotPresent + pullSecrets: [] + +# See envs from https://hub.docker.com/_/adminer/ +config: + plugins: "" + design: "pepa-linha" + externalserver: "" + +## @section Common parameters + +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" +## @param nameOverride String to partially override influxdb.fullname template with a string (will prepend the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override influxdb.fullname template with a string +## +fullnameOverride: "" +## @param clusterDomain Default Kubernetes cluster domain +## +clusterDomain: cluster.local +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override 
all containers in the deployment + ## + args: + - infinity + +service: + type: NodePort + port: 80 + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +ingress: + ## @param ingress.enabled Enable ingress controller resource + ## + enabled: false + ## @param ingress.tls Create TLS Secret + ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }} + ## You can use the ingress.secrets parameter to create this TLS secret or relay on cert-manager to create it + ## + tls: false + ## DEPRECATED: Use ingress.annotations instead of ingress.certManager + ## certManager: false + ## + + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set) + ## + apiVersion: "" + ## @param ingress.hostname Default host for the ingress resource + ## + hostname: adminer.local + ## @param ingress.path Ingress path*' in order to use this + ## with ALB ingress controllers. + ## + path: / + ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
+ ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record. + ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array + ## extraHosts: + ## - name: influxdb.local + ## path: / + ## + extraHosts: [] + ## @param ingress.extraPaths Additional arbitrary path/backend objects + ## For example: The ALB ingress controller requires a special rule for handling SSL redirection. + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record. 
+ ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## extraTls: + ## - hosts: + ## - influxdb.local + ## secretName: influxdb.local-tls + ## + extraTls: [] + ## @param ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## - name: influxdb.local-tls + ## key: + ## certificate: + ## + secrets: [] + ## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster . + ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + +# kubectl create secret generic ca --from-file=ca.pem +# cat > login-ssl.php << EOF +# "/secrets/ca.pem")); +# EOF +# kubectl create configmap plugins-enabled --from-file=login-ssl.php +# volumeMounts: +# - name: ca +# mountPath: "/secrets/" +# readOnly: true +# - name: plugins-enabled +# mountPath: "/var/www/html/plugins-enabled/" +# readOnly: true + +# volumes: +# - name: ca +# secret: +# secretName: ca +# - name: plugins-enabled +# configMap: +# name: plugins-enabled + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/home/adminer/values.yaml b/home/adminer/values.yaml new file mode 100644 index 0000000000..770b685207 --- /dev/null +++ b/home/adminer/values.yaml @@ -0,0 +1,97 @@ +adminer: + # Default values for adminer. + # This is a YAML-formatted file. + # Declare variables to be passed into your templates. + + replicaCount: 1 + + image: + repository: adminer + tag: 4.8.1-standalone + pullPolicy: IfNotPresent + pullSecrets: [] + + # See envs from https://hub.docker.com/_/adminer/ + config: + plugins: "" + design: "pepa-linha" + externalserver: "" + + nameOverride: "" + fullnameOverride: "" + + service: + type: NodePort + port: 80 + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + + ## Configure Ingress based on the documentation here: https://kubernetes.io/docs/concepts/services-networking/ingress/ + ## +## Configure the ingress resource that allows you to access the +## Adminer installation.
Set up the URL +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## + ingress: + enabled: true + path: / + pathType: ImplementationSpecific + hostname: adminer.jupiter.mein.nl + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + tls: true + selfSigned: false + extraHosts: [] + extraTls: [] + secrets: [] + ingressClassName: "nginx" + + # kubectl create secret generic ca --from-file=ca.pem + # cat > login-ssl.php << EOF + # "/secrets/ca.pem")); + # EOF + # kubectl create configmap plugins-enabled --from-file=login-ssl.php + # volumeMounts: + # - name: ca + # mountPath: "/secrets/" + # readOnly: true + # - name: plugins-enabled + # mountPath: "/var/www/html/plugins-enabled/" + # readOnly: true + + # volumes: + # - name: ca + # secret: + # secretName: ca + # - name: plugins-enabled + # configMap: + # name: plugins-enabled + + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + nodeSelector: {} + + tolerations: [] + + affinity: {} \ No newline at end of file diff --git a/home/firefly/Chart.yaml.disabled b/home/firefly/Chart.yaml.disabled new file mode 100644 index 0000000000..86c6cc089a --- /dev/null +++ b/home/firefly/Chart.yaml.disabled @@ -0,0 +1,10 @@ +apiVersion: v2 +name: firefly-iii +version: 0.0.0 +dependencies: + - name: firefly-iii + version: 1.2.1 + repository: https://firefly-iii.github.io/kubernetes + - name: importer + version: 1.1.3 + repository: https://firefly-iii.github.io/kubernetes \ No newline at end of file diff --git a/home/firefly/values.yaml b/home/firefly/values.yaml new file mode 100644 index 0000000000..9dbab04abd --- /dev/null +++ b/home/firefly/values.yaml @@ -0,0 +1,214 @@ +firefly-iii: + replicaCount: 1 + + image: + repository: "fireflyiii/core" + pullPolicy: IfNotPresent + tag: version-5.6.14 + + imagePullSecrets: [] + nameOverride: "" + fullnameOverride: "" + + persistence: + # -- If you set this to false, uploaded attachments are not stored persistently and will be lost with every restart of the pod + enabled: true + class: "longhorn" + accessModes: ReadWriteOnce + storage: 1Gi + # -- If you want to use an existing claim, set it here + existingClaim: "" + + # -- Environment variables for Firefly III. See docs at: https://github.com/firefly-iii/firefly-iii/blob/main/.env.example + config: + # -- Set this to the name of a secret to load environment variables from. If defined, values in the secret will override values in config.env + existingSecret: "firefly-iii-secret" + + # -- Directly defined environment variables. Use this for non-secret configuration values. + env: + DEFAULT_LANGUAGE: "nl_NL" + DEFAULT_LOCALE: "equal" + TZ: "Europe/Amsterdam" + TRUSTED_PROXIES: "**" + + # -- A cronjob for [recurring Firefly III tasks](https://docs.firefly-iii.org/firefly-iii/advanced-installation/cron/). 
+ cronjob: + # -- Set to true to enable the CronJob. Note that you need to specify either cronjob.auth.existingSecret or cronjob.auth.token for it to actually be deployed. + enabled: true + + # -- Authorization for the CronJob. See https://docs.firefly-iii.org/firefly-iii/advanced-installation/cron/#request-a-page-over-the-web + auth: + # -- The name of a secret containing a data.token field with the cronjob token + existingSecret: "firefly-iii-cron-secret" + + # -- The token in plain text + token: "" + + # -- Annotations for the CronJob + annotations: {} + + # -- When to run the CronJob. Defaults to 03:00 as this is when Firefly III executes regular tasks. + schedule: "0 3 * * *" + + # -- How many pods to keep around for successful jobs + successfulJobsHistoryLimit: 3 + + # -- How many pods to keep around for failed jobs + failedJobsHistoryLimit: 1 + + # -- How to treat failed jobs + restartPolicy: OnFailure + + image: + repository: curlimages/curl + pullPolicy: IfNotPresent + tag: 7.86.0 + + imagePullSecrets: [] + + podAnnotations: {} + + securityContext: {} + + podSecurityContext: {} + + resources: {} + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + podAnnotations: {} + + podSecurityContext: {} + # fsGroup: 2000 + + securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + service: + type: ClusterIP + port: 80 + + ingress: + enabled: true + className: "nginx" + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + + hosts: + - firefly.jupiter.mein.nl + tls: + - secretName: firefly-tls-certificate + hosts: + - firefly.jupiter.mein.nl + + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + + nodeSelector: {} + + tolerations: [] + + affinity: {} + +importer: + + replicaCount: 1 + + # -- The proxies that are trusted by the importer + trustedProxies: "**" + + fireflyiii: + # -- The URL at which Firefly III is available. If you change this value, click the "Reauthenticate" button on the importer after opening it! + url: "http://firefly-firefly-iii:80" + + # -- The URL at which you access Firefly III. Check https://docs.firefly-iii.org/data-importer/install/configure/#configure-fidi to find out if you should set this. + vanityUrl: "" + + auth: + # -- If you specify an existingSecret, it has to have the accessToken in a .spec.data.accessToken + existingSecret: "firefly-iii-importer-secret" + + # -- The access token in plain text + accessToken: "" + + image: + repository: fireflyiii/data-importer + pullPolicy: IfNotPresent + tag: "version-0.8.0" + + imagePullSecrets: [] + nameOverride: "" + fullnameOverride: "" + + podAnnotations: {} + + podSecurityContext: {} + # fsGroup: 2000 + + securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + service: + type: ClusterIP + port: 80 + + ingress: + enabled: false + className: "nginx" + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + + hosts: + - importer.firefly.jupiter.mein.nl + tls: + - secretName: firefly-importer-tls-certificate + hosts: + - importer.firefly.jupiter.mein.nl + + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user.
This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + nodeSelector: {} + + tolerations: [] + + affinity: {} \ No newline at end of file diff --git a/home/homeassistant/Chart.yaml b/home/homeassistant/Chart.yaml new file mode 100644 index 0000000000..72b3762705 --- /dev/null +++ b/home/homeassistant/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: homeassistant +version: 0.0.0 +dependencies: + - name: home-assistant + version: 13.4.2 + repository: https://k8s-at-home.com/charts/ diff --git a/home/homeassistant/values.yaml b/home/homeassistant/values.yaml new file mode 100644 index 0000000000..e942dde122 --- /dev/null +++ b/home/homeassistant/values.yaml @@ -0,0 +1,177 @@ +home-assistant: + image: + # -- image repository + repository: homeassistant/home-assistant + # -- image tag + tag: 2022.10.5 + # -- image pull policy + pullPolicy: IfNotPresent + + # -- environment variables. + # @default -- See below + env: + # -- Set the container timezone + TZ: Europe/Amsterdam + + # -- Configures service settings for the chart. Normally this does not need to be modified. + # @default -- See values.yaml + service: + main: + ports: + http: + port: 8123 + + ingress: + # -- Enable and configure ingress settings for the chart under this key. 
+ # @default -- See values.yaml + main: + enabled: true + ingressClassName: nginx + annotations: + nginx.org/websocket-services: home-assistant + cert-manager.io/cluster-issuer: letsencrypt-prod + hosts: + - host: &host homeassistant.europa.jupiter.mein.nl + paths: + - path: / + pathType: Prefix + tls: + - secretName: homeassistant-tls-certificate + hosts: + - *host + + # -- Enable devices to be discoverable + hostNetwork: true + + # -- When hostNetwork is true set dnsPolicy to ClusterFirstWithHostNet + # dnsPolicy: ClusterFirstWithHostNet + + securityContext: + # -- (bool) Privileged securityContext may be required if USB devices are accessed directly through the host machine + privileged: # true + + # -- Configure persistence settings for the chart under this key. + # @default -- See values.yaml + persistence: + config: + enabled: true + type: pvc + accessMode: ReadWriteOnce + size: 5Gi + storageClass: longhorn + + # -- Configure a hostPathMount to mount a USB device in the container. + # @default -- See values.yaml + usb: + enabled: false + type: hostPath + hostPath: /path/to/device + + # -- Enable and configure mariadb database subchart under this key. + # For more options see [mariadb chart documentation](https://github.com/bitnami/charts/tree/master/bitnami/mariadb) + # @default -- See values.yaml + mariadb: + enabled: false + architecture: standalone + auth: + database: home-assistant + username: home-assistant + password: home-assistant-pass + rootPassword: home-assistantrootpass + primary: + persistence: + enabled: false + storageClass: "" + + # -- Enable and configure postgresql database subchart under this key. 
+ # For more options see [postgresql chart documentation](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) + # @default -- See values.yaml + postgresql: + enabled: false + image: + repository: bitnami/postgresql + tag: 14.5.0 + postgresqlUsername: home-assistant + postgresqlPassword: home-assistant-pass + postgresqlDatabase: home-assistant + persistence: + enabled: true + storageClass: longhorn + size: 4Gi + + metrics: &metrics + # -- Enable and configure a Prometheus serviceMonitor for the chart under this key. + # @default -- See values.yaml + enabled: true + serviceMonitor: + enabled: true + interval: 1m + scrapeTimeout: 30s + labels: {} + ## See https://www.home-assistant.io/docs/authentication/ for where to find + ## long lived access token creation under your account profile, which is + ## needed to monitor Home Assistant + # bearerTokenSecret: + # name: "" + # key: "" + + # -- Enable and configure Prometheus Rules for the chart under this key. + # @default -- See values.yaml + prometheusRule: + enabled: false + labels: {} + # -- Configure additionial rules for the chart under this key. + # @default -- See prometheusrules.yaml + rules: [] + # - alert: HomeAssistantAbsent + # annotations: + # description: Home Assistant has disappeared from Prometheus service discovery. + # summary: Home Assistant is down. 
+ # expr: | + # absent(up{job=~".*home-assistant.*"} == 1) + # for: 5m + # labels: + # severity: critical + + addons: + codeserver: + enabled: true + image: + repository: codercom/code-server + tag: 4.8.1 + workingDir: "/config" + args: + - --auth + - "none" + - --user-data-dir + - "/config/.vscode" + - --extensions-dir + - "/config/.vscode" + ingress: + enabled: true + ingressClassName: nginx + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + nginx.org/websocket-services: home-assistant + hosts: + - host: &config homeassistant-config.europa.jupiter.mein.nl + paths: + - path: / + pathType: Prefix + tls: + - secretName: home-assistant-config-tls-certificate + hosts: + - *config + volumeMounts: + - name: config + mountPath: /config + + # git: + # enabled: true + # user: + # name: homeassistant-bot + # email: homeassitant@europa.jupiter.mein.nl + + + + \ No newline at end of file diff --git a/home/mariadb/Chart.yaml b/home/mariadb/Chart.yaml new file mode 100644 index 0000000000..a920b6af7a --- /dev/null +++ b/home/mariadb/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: mariadb +version: 0.0.0 +dependencies: + - name: mariadb + version: 11.3.3 + repository: https://charts.bitnami.com/bitnami \ No newline at end of file diff --git a/home/mariadb/values.yaml b/home/mariadb/values.yaml new file mode 100644 index 0000000000..b5144cd8bd --- /dev/null +++ b/home/mariadb/values.yaml @@ -0,0 +1,1249 @@ +mariadb: + ## @section Global parameters + ## Global Docker image parameters + ## Please, note that this will override the image parameters, including dependencies, configured to use the global value + ## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + + ## @param global.imageRegistry Global Docker Image registry + ## @param global.imagePullSecrets Global Docker registry secret names as an array + ## @param global.storageClass Global storage class for dynamic provisioning + ## + global: + imageRegistry: "" 
+ ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + + ## @section Common parameters + + ## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) + ## + kubeVersion: "" + ## @param nameOverride String to partially override mariadb.fullname + ## + nameOverride: "" + ## @param fullnameOverride String to fully override mariadb.fullname + ## + fullnameOverride: "" + ## @param clusterDomain Default Kubernetes cluster domain + ## + clusterDomain: cluster.local + ## @param commonAnnotations Common annotations to add to all MariaDB resources (sub-charts are not considered) + ## + commonAnnotations: {} + ## @param commonLabels Common labels to add to all MariaDB resources (sub-charts are not considered) + ## + commonLabels: {} + ## @param schedulerName Name of the scheduler (other than default) to dispatch pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template) + ## + extraDeploy: [] + + ## Enable diagnostic mode in the deployment + ## + diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + + ## @section MariaDB common parameters + + ## Bitnami MariaDB image + ## ref: https://hub.docker.com/r/bitnami/mariadb/tags/ + ## @param image.registry MariaDB image registry + ## @param image.repository MariaDB image repository + ## @param image.tag MariaDB image tag (immutable tags are recommended) + ## @param image.pullPolicy MariaDB image pull policy + ## @param image.pullSecrets Specify 
docker-registry secret names as an array + ## @param image.debug Specify if debug logs should be enabled + ## + image: + registry: docker.io + repository: bitnami/mariadb + tag: 10.7.4-debian-10-r11 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## It turns BASH and/or NAMI debugging in the image + ## + debug: false + ## @param architecture MariaDB architecture (`standalone` or `replication`) + ## + architecture: standalone + ## MariaDB Authentication parameters + ## + auth: + ## @param auth.rootPassword Password for the `root` user. Ignored if existing secret is provided. + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-the-root-password-on-first-run + ## + rootPassword: "" + ## @param auth.database Name for a custom database to create + ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-on-first-run + ## + database: my_database + ## @param auth.username Name for a custom user to create + ## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run + ## + username: "" + ## @param auth.password Password for the new user. 
Ignored if existing secret is provided + ## + password: "" + ## @param auth.replicationUser MariaDB replication user + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-up-a-replication-cluster + ## + replicationUser: replicator + ## @param auth.replicationPassword MariaDB replication user password. Ignored if existing secret is provided + ## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-up-a-replication-cluster + ## + replicationPassword: "" + ## @param auth.existingSecret Use existing secret for password details (`auth.rootPassword`, `auth.password`, `auth.replicationPassword` will be ignored and picked up from this secret). The secret has to contain the keys `mariadb-root-password`, `mariadb-replication-password` and `mariadb-password` + ## + existingSecret: "mariadb-secret" + ## @param auth.forcePassword Force users to specify required passwords + ## + forcePassword: false + ## @param auth.usePasswordFiles Mount credentials as a files instead of using an environment variable + ## + usePasswordFiles: false + ## @param auth.customPasswordFiles Use custom password files when `auth.usePasswordFiles` is set to `true`. Define path for keys `root` and `user`, also define `replicator` if `architecture` is set to `replication` + ## Example: + ## customPasswordFiles: + ## root: /vault/secrets/mariadb-root + ## user: /vault/secrets/mariadb-user + ## replicator: /vault/secrets/mariadb-replicator + ## + customPasswordFiles: {} + ## @param initdbScripts Dictionary of initdb scripts + ## Specify dictionary of scripts to be run at first boot + ## Example: + ## initdbScripts: + ## my_init_script.sh: | + ## #!/bin/bash + ## echo "Do something." 
+ ## + initdbScripts: {} + ## @param initdbScriptsConfigMap ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) + ## + initdbScriptsConfigMap: "" + + ## @section MariaDB Primary parameters + + ## Mariadb Primary parameters + ## + primary: + ## @param primary.command Override default container command on MariaDB Primary container(s) (useful when using custom images) + ## + command: [] + ## @param primary.args Override default container args on MariaDB Primary container(s) (useful when using custom images) + ## + args: [] + ## @param primary.lifecycleHooks for the MariaDB Primary container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param primary.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param primary.configuration [string] MariaDB Primary configuration to be injected as ConfigMap + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + configuration: |- + [mysqld] + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mariadb + plugin_dir=/opt/bitnami/mariadb/plugin + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + tmpdir=/opt/bitnami/mariadb/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + log-error=/opt/bitnami/mariadb/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mariadb/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + ## @param primary.existingConfigmap Name of existing ConfigMap with MariaDB Primary configuration. 
+ ## NOTE: When it's set the 'configuration' parameter is ignored + ## + existingConfigmap: "" + ## @param primary.updateStrategy.type MariaDB primary statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + ## @param primary.rollingUpdatePartition Partition update strategy for Mariadb Primary statefulset + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + ## + rollingUpdatePartition: "" + ## @param primary.podAnnotations Additional pod annotations for MariaDB primary pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param primary.podLabels Extra labels for MariaDB primary pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param primary.podAffinityPreset MariaDB primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param primary.podAntiAffinityPreset MariaDB primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Mariadb Primary node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param primary.nodeAffinityPreset.type MariaDB primary node affinity preset type. Ignored if `primary.affinity` is set. 
Allowed values: `soft` or `hard` + ## + type: "" + ## @param primary.nodeAffinityPreset.key MariaDB primary node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param primary.nodeAffinityPreset.values MariaDB primary node label values to match. Ignored if `primary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param primary.affinity Affinity for MariaDB primary pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param primary.nodeSelector Node labels for MariaDB primary pods assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param primary.tolerations Tolerations for MariaDB primary pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param primary.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param primary.podManagementPolicy podManagementPolicy to manage scaling operation of MariaDB primary pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "" + ## @param primary.topologySpreadConstraints Topology Spread Constraints for MariaDB primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. 
+ ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: topology.kubernetes.io/zone + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: {} + ## @param primary.priorityClassName Priority class for MariaDB primary pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + priorityClassName: "" + ## MariaDB primary Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param primary.podSecurityContext.enabled Enable security context for MariaDB primary pods + ## @param primary.podSecurityContext.fsGroup Group ID for the mounted volumes' filesystem + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## MariaDB primary container security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param primary.containerSecurityContext.enabled MariaDB primary container securityContext + ## @param primary.containerSecurityContext.runAsUser User ID for the MariaDB primary container + ## @param primary.containerSecurityContext.runAsNonRoot Set Controller container's Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## MariaDB primary container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param primary.resources.limits The resources limits for MariaDB primary containers + ## @param primary.resources.requests The requested resources for MariaDB primary containers + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 256Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 256Mi + requests: {} + ## Configure extra options for MariaDB primary containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## @param primary.startupProbe.enabled Enable startupProbe + ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param primary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 120 + periodSeconds: 15 + timeoutSeconds: 5 + failureThreshold: 10 + successThreshold: 1 + ## Configure extra options for liveness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param primary.livenessProbe.enabled Enable livenessProbe + ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 
+ successThreshold: 1 + ## @param primary.readinessProbe.enabled Enable readinessProbe + ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## @param primary.customStartupProbe Override default startup probe for MariaDB primary containers + ## + customStartupProbe: {} + ## @param primary.customLivenessProbe Override default liveness probe for MariaDB primary containers + ## + customLivenessProbe: {} + ## @param primary.customReadinessProbe Override default readiness probe for MariaDB primary containers + ## + customReadinessProbe: {} + ## @param primary.startupWaitOptions Override default builtin startup wait check options for MariaDB primary containers + ## `bitnami/mariadb` Docker image has built-in startup check mechanism, + ## which periodically checks if MariaDB service has started up and stops it + ## if all checks have failed after X tries. Use these to control these checks. + ## ref: https://github.com/bitnami/bitnami-docker-mariadb/pull/240 + ## Example (with default options): + ## startupWaitOptions: + ## retries: 300 + ## waitTime: 2 + ## + startupWaitOptions: {} + ## @param primary.extraFlags MariaDB primary additional command line flags + ## Can be used to specify command line flags, for example: + ## E.g. + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + ## + extraFlags: "" + ## @param primary.extraEnvVars Extra environment variables to be set on MariaDB primary containers + ## E.g. 
+ ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: [] + ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for MariaDB primary containers + ## + extraEnvVarsCM: "" + ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for MariaDB primary containers + ## + extraEnvVarsSecret: "" + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param primary.persistence.enabled Enable persistence on MariaDB primary replicas using a `PersistentVolumeClaim`. If false, use emptyDir + ## + enabled: true + ## @param primary.persistence.existingClaim Name of an existing `PersistentVolumeClaim` for MariaDB primary replicas + ## NOTE: When it's set the rest of persistence parameters are ignored + ## + existingClaim: "" + ## @param primary.persistence.subPath Subdirectory of the volume to mount at + ## + subPath: "" + ## @param primary.persistence.storageClass MariaDB primary persistent volume storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "nfs" + ## @param primary.persistence.annotations MariaDB primary persistent volume claim annotations + ## + annotations: {} + ## @param primary.persistence.accessModes MariaDB primary persistent volume access Modes + ## + accessModes: + - ReadWriteOnce + ## @param primary.persistence.size MariaDB primary persistent volume size + ## + size: 8Gi + ## @param primary.persistence.selector Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param primary.extraVolumes Optionally specify extra list of additional volumes to the MariaDB Primary pod(s) + ## + extraVolumes: [] + ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the MariaDB Primary container(s) + ## + extraVolumeMounts: [] + ## @param primary.initContainers Add additional init containers for the MariaDB Primary pod(s) + ## + initContainers: [] + ## @param primary.sidecars Add additional sidecar containers for the MariaDB Primary pod(s) + ## + sidecars: [] + ## MariaDB Primary Service parameters + ## + service: + ## @param primary.service.type MariaDB Primary Kubernetes service type + ## + type: ClusterIP + ## @param primary.service.ports.mysql MariaDB Primary Kubernetes service port + ## + ports: + mysql: 3306 + ## @param primary.service.nodePorts.mysql MariaDB Primary Kubernetes service node port + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + mysql: "" + ## @param primary.service.clusterIP MariaDB Primary Kubernetes service clusterIP IP + ## + clusterIP: "" + ## @param primary.service.loadBalancerIP MariaDB Primary loadBalancerIP if service type is `LoadBalancer` + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param primary.service.externalTrafficPolicy Enable client source IP 
preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param primary.service.loadBalancerSourceRanges Address that are allowed when MariaDB Primary service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## E.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param primary.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param primary.service.annotations Provide any additional annotations which may be required + ## + annotations: {} + ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + sessionAffinityConfig: {} + ## MariaDB primary Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + ## @param primary.pdb.create Enable/disable a Pod Disruption Budget creation for MariaDB primary pods + ## + create: false + ## @param primary.pdb.minAvailable Minimum number/percentage of MariaDB primary pods that must still be available after the eviction + ## + minAvailable: 1 + ## @param primary.pdb.maxUnavailable Maximum number/percentage of MariaDB primary pods that can be unavailable after the eviction + ## + maxUnavailable: "" + ## @param primary.revisionHistoryLimit Maximum number of revisions that will be maintained in the 
StatefulSet + ## + revisionHistoryLimit: 10 + + ## @section MariaDB Secondary parameters + + ## Mariadb Secondary parameters + ## + secondary: + ## @param secondary.replicaCount Number of MariaDB secondary replicas + ## + replicaCount: 1 + ## @param secondary.command Override default container command on MariaDB Secondary container(s) (useful when using custom images) + ## + command: [] + ## @param secondary.args Override default container args on MariaDB Secondary container(s) (useful when using custom images) + ## + args: [] + ## @param secondary.lifecycleHooks for the MariaDB Secondary container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param secondary.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param secondary.configuration [string] MariaDB Secondary configuration to be injected as ConfigMap + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + configuration: |- + [mysqld] + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mariadb + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + tmpdir=/opt/bitnami/mariadb/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + log-error=/opt/bitnami/mariadb/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + default-character-set=UTF8 + + [manager] + port=3306 + socket=/opt/bitnami/mariadb/tmp/mysql.sock + pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid + ## @param secondary.existingConfigmap Name of existing ConfigMap with MariaDB Secondary configuration. 
+ ## NOTE: When it's set the 'configuration' parameter is ignored + ## + existingConfigmap: "" + ## @param secondary.updateStrategy.type MariaDB secondary statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + ## @param secondary.rollingUpdatePartition Partition update strategy for Mariadb Secondary statefulset + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + ## + rollingUpdatePartition: "" + ## @param secondary.podAnnotations Additional pod annotations for MariaDB secondary pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param secondary.podLabels Extra labels for MariaDB secondary pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param secondary.podAffinityPreset MariaDB secondary pod affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param secondary.podAntiAffinityPreset MariaDB secondary pod anti-affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Mariadb Secondary node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param secondary.nodeAffinityPreset.type MariaDB secondary node affinity preset type. Ignored if `secondary.affinity` is set. 
Allowed values: `soft` or `hard` + ## + type: "" + ## @param secondary.nodeAffinityPreset.key MariaDB secondary node label key to match Ignored if `secondary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param secondary.nodeAffinityPreset.values MariaDB secondary node label values to match. Ignored if `secondary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param secondary.affinity Affinity for MariaDB secondary pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param secondary.nodeSelector Node labels for MariaDB secondary pods assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param secondary.tolerations Tolerations for MariaDB secondary pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param secondary.topologySpreadConstraints Topology Spread Constraints for MariaDB secondary pods assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. 
+ ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: topology.kubernetes.io/zone + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: {} + ## @param secondary.priorityClassName Priority class for MariaDB secondary pods assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + priorityClassName: "" + ## @param secondary.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param secondary.podManagementPolicy podManagementPolicy to manage scaling operation of MariaDB secondary pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "" + ## MariaDB secondary Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param secondary.podSecurityContext.enabled Enable security context for MariaDB secondary pods + ## @param secondary.podSecurityContext.fsGroup Group ID for the mounted volumes' filesystem + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## MariaDB secondary container security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param secondary.containerSecurityContext.enabled MariaDB secondary container securityContext + ## @param secondary.containerSecurityContext.runAsUser User ID for the MariaDB secondary container + ## @param secondary.containerSecurityContext.runAsNonRoot Set Controller container's Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## MariaDB secondary container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to 
specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param secondary.resources.limits The resources limits for MariaDB secondary containers + ## @param secondary.resources.requests The requested resources for MariaDB secondary containers + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 256Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 256Mi + requests: {} + ## Configure extra options for MariaDB Secondary containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## @param secondary.startupProbe.enabled Enable startupProbe + ## @param secondary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param secondary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param secondary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param secondary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param secondary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 120 + periodSeconds: 15 + timeoutSeconds: 5 + failureThreshold: 10 + successThreshold: 1 + ## Configure extra options for liveness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param secondary.livenessProbe.enabled Enable livenessProbe + ## @param secondary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param secondary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param 
secondary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param secondary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param secondary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## @param secondary.readinessProbe.enabled Enable readinessProbe + ## @param secondary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param secondary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param secondary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param secondary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param secondary.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## @param secondary.customStartupProbe Override default startup probe for MariaDB secondary containers + ## + customStartupProbe: {} + ## @param secondary.customLivenessProbe Override default liveness probe for MariaDB secondary containers + ## + customLivenessProbe: {} + ## @param secondary.customReadinessProbe Override default readiness probe for MariaDB secondary containers + ## + customReadinessProbe: {} + ## @param secondary.startupWaitOptions Override default builtin startup wait check options for MariaDB secondary containers + ## `bitnami/mariadb` Docker image has built-in startup check mechanism, + ## which periodically checks if MariaDB service has started up and stops it + ## if all checks have failed after X tries. Use these to control these checks. 
+ ## ref: https://github.com/bitnami/bitnami-docker-mariadb/pull/240 + ## Example (with default options): + ## startupWaitOptions: + ## retries: 300 + ## waitTime: 2 + ## + startupWaitOptions: {} + ## @param secondary.extraFlags MariaDB secondary additional command line flags + ## Can be used to specify command line flags, for example: + ## E.g. + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + ## + extraFlags: "" + ## @param secondary.extraEnvVars Extra environment variables to be set on MariaDB secondary containers + ## E.g. + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: [] + ## @param secondary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for MariaDB secondary containers + ## + extraEnvVarsCM: "" + ## @param secondary.extraEnvVarsSecret Name of existing Secret containing extra env vars for MariaDB secondary containers + ## + extraEnvVarsSecret: "" + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param secondary.persistence.enabled Enable persistence on MariaDB secondary replicas using a `PersistentVolumeClaim` + ## + enabled: true + ## @param secondary.persistence.subPath Subdirectory of the volume to mount at + ## + subPath: "" + ## @param secondary.persistence.storageClass MariaDB secondary persistent volume storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param secondary.persistence.annotations MariaDB secondary persistent volume claim annotations + ## + annotations: {} + ## @param secondary.persistence.accessModes MariaDB secondary persistent volume access Modes + ## + accessModes: + - ReadWriteOnce + ## @param secondary.persistence.size MariaDB secondary persistent volume size + ## + size: 8Gi + ## @param secondary.persistence.selector Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param secondary.extraVolumes Optionally specify extra list of additional volumes to the MariaDB secondary pod(s) + ## + extraVolumes: [] + ## @param secondary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the MariaDB secondary container(s) + ## + extraVolumeMounts: [] + ## @param secondary.initContainers Add additional init containers for the MariaDB secondary pod(s) + ## + initContainers: [] + ## @param secondary.sidecars Add additional sidecar containers for the MariaDB secondary pod(s) + ## + sidecars: [] + ## MariaDB Secondary Service parameters + ## + service: + ## @param secondary.service.type MariaDB secondary Kubernetes service type + ## + type: ClusterIP + ## @param secondary.service.ports.mysql MariaDB secondary Kubernetes service port + ## + ports: + mysql: 3306 + ## @param secondary.service.nodePorts.mysql MariaDB secondary Kubernetes service node port + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + mysql: "" + ## @param secondary.service.clusterIP MariaDB secondary Kubernetes service clusterIP IP + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param secondary.service.loadBalancerIP MariaDB secondary loadBalancerIP if service type is `LoadBalancer` + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: 
"" + ## @param secondary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param secondary.service.loadBalancerSourceRanges Address that are allowed when MariaDB secondary service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## E.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param secondary.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param secondary.service.annotations Provide any additional annotations which may be required + ## + annotations: {} + ## @param secondary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param secondary.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + sessionAffinityConfig: {} + ## MariaDB secondary Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + ## @param secondary.pdb.create Enable/disable a Pod Disruption Budget creation for MariaDB secondary pods + ## + create: false + ## @param secondary.pdb.minAvailable Minimum number/percentage of MariaDB secondary pods that should remain scheduled + ## + minAvailable: 1 + ## @param secondary.pdb.maxUnavailable Maximum number/percentage of MariaDB secondary pods that may be made unavailable + ## + maxUnavailable: "" + ## @param 
secondary.revisionHistoryLimit Maximum number of revisions that will be maintained in the StatefulSet + ## + revisionHistoryLimit: 10 + + ## @section RBAC parameters + + ## MariaDB pods ServiceAccount + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param serviceAccount.create Enable the creation of a ServiceAccount for MariaDB pods + ## + create: true + ## @param serviceAccount.name Name of the created ServiceAccount + ## If not set and create is true, a name is generated using the mariadb.fullname template + ## + name: "" + ## @param serviceAccount.annotations Annotations for MariaDB Service Account + ## + annotations: {} + ## @param serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## + automountServiceAccountToken: false + ## Role Based Access + ## ref: https://kubernetes.io/docs/admin/authorization/rbac/ + ## + rbac: + ## @param rbac.create Whether to create and use RBAC resources or not + ## + create: false + + ## @section Volume Permissions parameters + + ## Init containers parameters: + ## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. 
+ ## + volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 10-debian-10-r349 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + limits: {} + requests: {} + + ## @section Metrics parameters + + ## Mysqld Prometheus exporter parameters + ## + metrics: + ## @param metrics.enabled Start a side-car prometheus exporter + ## + enabled: false + ## @param metrics.image.registry Exporter image registry + ## @param metrics.image.repository Exporter image repository + ## @param metrics.image.tag Exporter image tag (immutable tags are recommended) + ## @param metrics.image.pullPolicy Exporter image pull policy + ## @param metrics.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/mysqld-exporter + tag: 
0.13.0-debian-10-r256 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param metrics.annotations [object] Annotations for the Exporter pod + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9104" + ## @param metrics.extraArgs [object] Extra args to be passed to mysqld_exporter + ## ref: https://github.com/prometheus/mysqld_exporter/ + ## E.g. + ## - --collect.auto_increment.columns + ## - --collect.binlog_size + ## - --collect.engine_innodb_status + ## - --collect.engine_tokudb_status + ## - --collect.global_status + ## - --collect.global_variables + ## - --collect.info_schema.clientstats + ## - --collect.info_schema.innodb_metrics + ## - --collect.info_schema.innodb_tablespaces + ## - --collect.info_schema.innodb_cmp + ## - --collect.info_schema.innodb_cmpmem + ## - --collect.info_schema.processlist + ## - --collect.info_schema.processlist.min_time + ## - --collect.info_schema.query_response_time + ## - --collect.info_schema.tables + ## - --collect.info_schema.tables.databases + ## - --collect.info_schema.tablestats + ## - --collect.info_schema.userstats + ## - --collect.perf_schema.eventsstatements + ## - --collect.perf_schema.eventsstatements.digest_text_limit + ## - --collect.perf_schema.eventsstatements.limit + ## - --collect.perf_schema.eventsstatements.timelimit + ## - --collect.perf_schema.eventswaits + ## - --collect.perf_schema.file_events + ## - --collect.perf_schema.file_instances + ## - --collect.perf_schema.indexiowaits + ## - --collect.perf_schema.tableiowaits + ## - --collect.perf_schema.tablelocks + ## - --collect.perf_schema.replication_group_member_stats + ## - --collect.slave_status + ## - --collect.slave_hosts + ## - --collect.heartbeat + ## - 
--collect.heartbeat.database + ## - --collect.heartbeat.table + ## + extraArgs: + primary: [] + secondary: [] + ## MariaDB metrics container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.containerSecurityContext.enabled Enable security context for MariaDB metrics container + ## Example: + ## containerSecurityContext: + ## enabled: true + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: false + ## Mysqld Prometheus exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param metrics.resources.limits The resources limits for MariaDB prometheus exporter containers + ## @param metrics.resources.requests The requested resources for MariaDB prometheus exporter containers + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 256Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 256Mi + requests: {} + ## Configure extra options for liveness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param metrics.livenessProbe.enabled Enable livenessProbe + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + ## Configure extra options for readiness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param metrics.readinessProbe.enabled Enable readinessProbe + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + 
failureThreshold: 3 + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace which Prometheus is running in + ## + namespace: "" + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. + ## + jobLabel: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param metrics.serviceMonitor.selector ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## + labels: {} + ## Prometheus Operator PrometheusRule configuration + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled if `true`, creates a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and 
`metrics.prometheusRule.rules`) + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules Prometheus Rule definitions + ## - alert: MariaDB-Down + ## expr: absent(up{job="mariadb"} == 1) + ## for: 5m + ## labels: + ## severity: warning + ## service: mariadb + ## annotations: + ## message: 'MariaDB instance {{ $labels.instance }} is down' + ## summary: MariaDB instance is down + ## + rules: [] + + ## @section NetworkPolicy parameters + + ## Add networkpolicies + ## + networkPolicy: + ## @param networkPolicy.enabled Enable network policies + ## + enabled: false + ## @param networkPolicy.metrics.enabled Enable network policy for metrics (prometheus) + ## @param networkPolicy.metrics.namespaceSelector [object] Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace. + ## @param networkPolicy.metrics.podSelector [object] Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. + ## + metrics: + enabled: false + ## e.g: + ## podSelector: + ## label: monitoring + ## + podSelector: {} + ## e.g: + ## namespaceSelector: + ## label: monitoring + ## + namespaceSelector: {} + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled Enable ingress rule that makes primary mariadb nodes only accessible from a particular origin. + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the primary node. This label will be used to identified the allowed namespace(s). 
+ ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the primary node. This label will be used to identify the allowed pod(s). + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules [object] Custom network policy for the primary node. + ## @param networkPolicy.ingressRules.secondaryAccessOnlyFrom.enabled Enable ingress rule that makes secondary mariadb nodes only accessible from a particular origin. + ## @param networkPolicy.ingressRules.secondaryAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the secondary nodes. This label will be used to identify the allowed namespace(s). + ## @param networkPolicy.ingressRules.secondaryAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the secondary nodes. This label will be used to identify the allowed pod(s). + ## @param networkPolicy.ingressRules.secondaryAccessOnlyFrom.customRules [object] Custom network policy for the secondary nodes. 
+ ## + ingressRules: + ## Allow access to the primary node only from the indicated: + primaryAccessOnlyFrom: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: ingress + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: access + ## + podSelector: {} + ## custom ingress rules + ## e.g: + ## customRules: + ## - from: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + + ## Allow access to the secondary node only from the indicated: + secondaryAccessOnlyFrom: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: ingress + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: access + ## + podSelector: {} + ## custom ingress rules + ## e.g: + ## CustomRules: + ## - from: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + + ## @param networkPolicy.egressRules.denyConnectionsToExternal Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). + ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule + ## + egressRules: + # Deny connections to external. This is not compatible with an external database. 
+ denyConnectionsToExternal: false + ## Additional custom egress rules + ## e.g: + ## customRules: + ## - to: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} diff --git a/home/nextcloud/Chart.yaml b/home/nextcloud/Chart.yaml new file mode 100644 index 0000000000..ae1cfee164 --- /dev/null +++ b/home/nextcloud/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: nextcloud +version: 0.0.0 +dependencies: + - name: nextcloud + version: 3.2.0 + repository: https://nextcloud.github.io/helm/ diff --git a/home/nextcloud/values.yaml b/home/nextcloud/values.yaml new file mode 100644 index 0000000000..4c53aaf9be --- /dev/null +++ b/home/nextcloud/values.yaml @@ -0,0 +1,473 @@ +nextcloud: + + ## Official nextcloud image version + ## ref: https://hub.docker.com/r/library/nextcloud/tags/ + ## + image: + repository: nextcloud + tag: 25.0.0-fpm-alpine + pullPolicy: IfNotPresent + # pullSecrets: + # - myRegistrKeySecretName + + nameOverride: "" + fullnameOverride: "" + podAnnotations: {} + deploymentAnnotations: {} + + # Number of replicas to be deployed + replicaCount: 1 + + ## Allowing use of ingress controllers + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ + ## + ingress: + enabled: true + className: nginx + annotations: + external-dns.alpha.kubernetes.io/exclude: 'true' + nginx.ingress.kubernetes.io/proxy-body-size: 4G + cert-manager.io/cluster-issuer: letsencrypt-prod + nginx.ingress.kubernetes.io/server-snippet: |- + server_tokens off; + proxy_hide_header X-Powered-By; + + rewrite ^/.well-known/webfinger /public.php?service=webfinger last; + rewrite ^/.well-known/host-meta /public.php?service=host-meta last; + rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json; + location = /.well-known/carddav { + return 301 $scheme://$host/remote.php/dav; + } + location = /.well-known/caldav { + return 301 $scheme://$host/remote.php/dav; + } + location = /robots.txt { + allow all; + log_not_found off; + 
access_log off; + } + location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)/ { + deny all; + } + location ~ ^/(?:autotest|occ|issue|indie|db_|console) { + deny all; + } + tls: + - secretName: nextcloud-tls + hosts: + - nextcloud.jupiter.mein.nl + labels: {} + path: / + pathType: Prefix + + + # Allow configuration of lifecycle hooks + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/ + lifecycle: {} + # postStartCommand: [] + # preStopCommand: [] + + nextcloud: + host: nextcloud.jupiter.mein.nl + + ## Use an existing secret + existingSecret: + enabled: true + secretName: nextcloud-secret + # usernameKey: username + # passwordKey: password + # tokenKey: serverinfo_token + # smtpUsernameKey: smtp_username + # smtpPasswordKey: smtp_password + update: 0 + # If web server is not binding default port, you can define it + # containerPort: 8080 + datadir: /var/www/html/data + persistence: + subPath: + mail: + enabled: true + fromAddress: nextcloud + domain: jupiter.mein.nl + smtp: + host: smtp.sendgrid.net + secure: tls + port: 587 + authtype: LOGIN + # PHP Configuration files + # Will be injected in /usr/local/etc/php/conf.d for apache image and in /usr/local/etc/php-fpm.d when nginx.enabled: true + phpConfigs: {} + # Default config files + # IMPORTANT: Will be used only if you put extra configs, otherwise default will come from nextcloud itself + # Default confgurations can be found here: https://github.com/nextcloud/docker/tree/master/16.0/apache/config + defaultConfigs: + # To protect /var/www/html/config + .htaccess: true + # Redis default configuration + redis.config.php: true + # Apache configuration for rewrite urls + apache-pretty-urls.config.php: true + # Define APCu as local cache + apcu.config.php: true + # Apps directory configs + apps.config.php: true + # Used for auto configure database + autoconfig.php: true + # SMTP default configuration + smtp.config.php: true + # Extra config files created in 
/var/www/html/config/ + # ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file + configs: + custom.config.php: |- + 'https', + 'overwrite.cli.url' => '__baseUrl__', + 'filelocking.enabled' => 'true', + 'loglevel' => '2', + 'enable_previews' => true, + 'trusted_domains' => array( + 0 => 'localhost', + 1 => '__baseUrl__' + ) + ); + + # redis.config.php: |- + # '\OC\Memcache\Redis', + # 'memcache.locking' => '\OC\Memcache\Redis', + # 'redis' => array( + # 'host' => getenv('REDIS_HOST'), + # 'port' => getenv('REDIS_HOST_PORT') ?: 6379, + # 'password' => getenv('REDIS_HOST_PASSWORD') + # ) + # ); + + # For example, to use S3 as primary storage + # ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3 + # + # configs: + # s3.config.php: |- + # array( + # 'class' => '\\OC\\Files\\ObjectStore\\S3', + # 'arguments' => array( + # 'bucket' => 'my-bucket', + # 'autocreate' => true, + # 'key' => 'xxx', + # 'secret' => 'xxx', + # 'region' => 'us-east-1', + # 'use_ssl' => true + # ) + # ) + # ); + + ## Strategy used to replace old pods + ## IMPORTANT: use with care, it is suggested to leave as that for upgrade purposes + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + strategy: + type: Recreate + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 1 + # maxUnavailable: 0 + + ## + ## Extra environment variables + extraEnv: + - name: REDIS_HOST + value: redis-master.redis.svc.cluster.local + + # Extra mounts for the pods. Example shown is for connecting a legacy NFS volume + # to NextCloud pods in Kubernetes. This can then be configured in External Storage + extraVolumes: + # - name: nfs + # nfs: + # server: "10.0.0.1" + # path: "/nextcloud_data" + # readOnly: false + extraVolumeMounts: + # - name: nfs + # mountPath: "/legacy_data" + + # Extra secuurityContext parameters. 
For example you may need to define runAsNonRoot directive
+ extraSecurityContext:
+ runAsUser: 82
+ runAsGroup: 82
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+
+ nginx:
+ ## You need to set an fpm version of the image for nextcloud if you want to use nginx!
+ enabled: true
+ image:
+ repository: nginx
+ tag: alpine
+ pullPolicy: IfNotPresent
+
+ config:
+ # This generates the default nginx config as per the nextcloud documentation
+ default: true
+ # custom: |-
+ # worker_processes 1;..
+
+ resources: {}
+
+ internalDatabase:
+ enabled: false
+ name: nextcloud
+
+ ##
+ ## External database configuration
+ ##
+ externalDatabase:
+ enabled: true
+
+ ## Supported database engines: mysql or postgresql
+ type: mysql
+
+ ## Database host
+ host: mariadb.mariadb.svc.cluster.local
+
+ ## Database name
+ database: nextcloud
+
+ ## Use an existing secret
+ existingSecret:
+ enabled: true
+ secretName: nextcloud-db
+ # usernameKey: username
+ # passwordKey: password
+
+ ##
+ ## MariaDB chart configuration
+ ##
+ mariadb:
+ ## Whether to deploy a mariadb server to satisfy the applications database requirements.
To use an external database set this to false and configure the externalDatabase parameters + enabled: false + + auth: + database: nextcloud + username: nextcloud + password: changeme + + architecture: standalone + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + primary: + persistence: + enabled: false + # storageClass: "" + accessMode: ReadWriteOnce + size: 8Gi + + ## + ## PostgreSQL chart configuration + ## for more options see https://github.com/bitnami/charts/tree/master/bitnami/postgresql + ## + postgresql: + enabled: false + postgresqlUsername: nextcloud + postgresqlPassword: changeme + postgresqlDatabase: nextcloud + persistence: + enabled: false + # storageClass: "" + + ## + ## Redis chart configuration + ## for more options see https://github.com/bitnami/charts/tree/master/bitnami/redis + ## + + redis: + enabled: false + auth: + enabled: true + password: 'changeme' + + ## Cronjob to execute Nextcloud background tasks + ## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#webcron + ## + cronjob: + enabled: true + # Nexcloud image is used as default but only curl is needed + image: {} + # repository: nextcloud + # tag: 16.0.3-apache + # pullPolicy: IfNotPresent + # pullSecrets: + # - myRegistrKeySecretName + # Every 5 minutes + # Note: Setting this to any any other value than 5 minutes might + # cause issues with how nextcloud background jobs are executed + schedule: "*/5 * * * *" + annotations: {} + # Set curl's insecure option if you use e.g. self-signed certificates + curlInsecure: true + failedJobsHistoryLimit: 5 + successfulJobsHistoryLimit: 2 + # If not set, nextcloud deployment one will be set + # resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. 
This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + # If not set, nextcloud deployment one will be set + # nodeSelector: {} + + # If not set, nextcloud deployment one will be set + # tolerations: [] + + # If not set, nextcloud deployment one will be set + # affinity: {} + + service: + type: ClusterIP + port: 8080 + loadBalancerIP: nil + nodePort: nil + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + # Nextcloud Data (/var/www/html) + enabled: true + annotations: {} + ## nextcloud data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "longhorn" + + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + accessMode: ReadWriteOnce + size: 8Gi + + ## Use an additional pvc for the data directory rather than a subpath of the default PVC + ## Useful to store data on a different storageClass (e.g. on slower disks) + nextcloudData: + enabled: true + subPath: + annotations: {} + storageClass: "longhorn" + # existingClaim: + accessMode: ReadWriteOnce + size: 100Gi + + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. 
This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 30 + successThreshold: 1 + + + ## Enable pod autoscaling using HorizontalPodAutoscaler + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## + hpa: + enabled: false + cputhreshold: 60 + minPods: 1 + maxPods: 10 + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + + ## Prometheus Exporter / Metrics + ## + metrics: + enabled: false + + replicaCount: 1 + # The metrics exporter needs to know how you serve Nextcloud either http or https + https: false + # Use API token if set, otherwise fall back to password authentication + # https://github.com/xperimental/nextcloud-exporter#token-authentication + # Currently you still need to set the token manually in your nextcloud install + token: "" + timeout: 5s + + image: + repository: xperimental/nextcloud-exporter + tag: 0.6.0 + pullPolicy: IfNotPresent + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Metrics exporter pod Annotation and Labels + # podAnnotations: {} + + # podLabels: {} + + service: + type: ClusterIP + 
## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9205" + labels: {} + + rbac: + enabled: false + serviceaccount: + create: true + name: nextcloud-serviceaccount diff --git a/home/pihole/Chart.yaml b/home/pihole/Chart.yaml new file mode 100644 index 0000000000..f4b346fe6b --- /dev/null +++ b/home/pihole/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: pihole +version: 0.0.0 +dependencies: + - name: pihole + version: 2.9.3 + repository: https://mojo2600.github.io/pihole-kubernetes/ \ No newline at end of file diff --git a/home/pihole/templates/pihole-password.yaml b/home/pihole/templates/pihole-password.yaml new file mode 100644 index 0000000000..f99aec52c9 --- /dev/null +++ b/home/pihole/templates/pihole-password.yaml @@ -0,0 +1,19 @@ +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: {{ .Values.pihole.admin.existingSecret }} + namespace: {{ .Release.Namespace }} +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault + target: + template: + engineVersion: v2 + data: + password: {{` "{{ .password }}" `}} + data: + - secretKey: password + remoteRef: + key: /pihole/admin + property: password diff --git a/home/pihole/values.yaml b/home/pihole/values.yaml new file mode 100644 index 0000000000..23eec81ba0 --- /dev/null +++ b/home/pihole/values.yaml @@ -0,0 +1,401 @@ +pihole: + # Default values for pihole. + # This is a YAML-formatted file. + # Declare variables to be passed into your templates. + + # -- The number of replicas + replicaCount: 1 + + # -- The `spec.strategyTpye` for updates + strategyType: RollingUpdate + + # -- The maximum number of Pods that can be created over the desired number of `ReplicaSet` during updating. 
+ maxSurge: 1 + + # -- The maximum number of Pods that can be unavailable during updating + maxUnavailable: 1 + + image: + # -- the repostory to pull the image from + repository: "pihole/pihole" + # -- the docker tag, if left empty it will get it from the chart's appVersion + tag: "2022.10" + # -- the pull policy + pullPolicy: IfNotPresent + + dualStack: + # -- set this to true to enable creation of DualStack services or creation of separate IPv6 services if `serviceDns.type` is set to `"LoadBalancer"` + enabled: false + + dnsHostPort: + # -- set this to true to enable dnsHostPort + enabled: false + # -- default port for this pod + port: 53 + + # -- Configuration for the DNS service on port 53 + serviceDns: + + # -- deploys a mixed (TCP + UDP) Service instead of separate ones + mixedService: false + + # -- `spec.type` for the DNS Service + type: LoadBalancer + + # -- The port of the DNS service + port: 53 + + # -- Optional node port for the DNS service + nodePort: "" + + # -- `spec.externalTrafficPolicy` for the DHCP Service + externalTrafficPolicy: Local + + # -- A fixed `spec.loadBalancerIP` for the DNS Service + loadBalancerIP: "192.168.1.225" + # -- A fixed `spec.loadBalancerIP` for the IPv6 DNS Service + loadBalancerIPv6: "" + + # -- Annotations for the DNS service + annotations: + metallb.universe.tf/address-pool: network-services + metallb.universe.tf/allow-shared-ip: pihole-svc + + # -- Configuration for the DHCP service on port 67 + serviceDhcp: + + # -- Generate a Service resource for DHCP traffic + enabled: true + + # -- `spec.type` for the DHCP Service + type: NodePort + + # -- `spec.externalTrafficPolicy` for the DHCP Service + externalTrafficPolicy: Local + + # -- A fixed `spec.loadBalancerIP` for the DHCP Service + loadBalancerIP: "" + # -- A fixed `spec.loadBalancerIP` for the IPv6 DHCP Service + loadBalancerIPv6: "" + + # -- Annotations for the DHCP service + annotations: {} + # metallb.universe.tf/address-pool: network-services + # 
metallb.universe.tf/allow-shared-ip: pihole-svc + + # -- Configuration for the web interface service + serviceWeb: + # -- Configuration for the HTTP web interface listener + http: + + # -- Generate a service for HTTP traffic + enabled: true + + # -- The port of the web HTTP service + port: 80 + + # -- Configuration for the HTTPS web interface listener + https: + # -- Generate a service for HTTPS traffic + enabled: true + + # -- The port of the web HTTPS service + port: 443 + + # -- `spec.type` for the web interface Service + type: ClusterIP + + # -- `spec.externalTrafficPolicy` for the web interface Service + externalTrafficPolicy: Local + + # -- A fixed `spec.loadBalancerIP` for the web interface Service + loadBalancerIP: "" + # -- A fixed `spec.loadBalancerIP` for the IPv6 web interface Service + loadBalancerIPv6: "" + + # -- Annotations for the DHCP service + annotations: {} + # metallb.universe.tf/address-pool: network-services + # metallb.universe.tf/allow-shared-ip: pihole-svc + + virtualHost: pi.hole + + # -- Configuration for the Ingress + ingress: + enabled: true + ingressClassName: nginx + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + hosts: + - &host pihole.jupiter.mein.nl + path: / + tls: + - secretName: pihole-tls-certificate + hosts: + - *host + + # -- Probes configuration + probes: + # -- probes.liveness -- Configure the healthcheck for the ingress controller + liveness: + # -- Generate a liveness probe + enabled: true + initialDelaySeconds: 60 + failureThreshold: 10 + timeoutSeconds: 5 + readiness: + # -- Generate a readiness probe + enabled: true + initialDelaySeconds: 60 + failureThreshold: 3 + timeoutSeconds: 5 + + # -- We usually recommend not to specify default resources and to leave this as a conscious + # -- choice for the user. This also increases chances charts run on environments with little + # -- resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # -- lines, adjust them as necessary, and remove the curly braces after 'resources:'. + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + # -- `spec.PersitentVolumeClaim` configuration + persistentVolumeClaim: + # -- set to true to use pvc + enabled: false + + # -- specify an existing `PersistentVolumeClaim` to use + # existingClaim: "" + + # -- Annotations for the `PersitentVolumeClaim` + annotations: {} + + accessModes: + - ReadWriteOnce + + size: "500Mi" + + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## If subPath is set mount a sub folder of a volume instead of the root of the volume. + ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs). + + ## subPath: "pihole" + + nodeSelector: {} + + tolerations: [] + + # -- Specify a priorityClassName + # priorityClassName: "" + + # Reference: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + topologySpreadConstraints: [] + # - maxSkew: + # topologyKey: + # whenUnsatisfiable: + # labelSelector: + + affinity: {} + + # -- Administrator password when not using an existing secret (see below) + # adminPassword: "admin" + + # -- Use an existing secret for the admin password. 
+ admin: + # -- Specify an existing secret to use as admin password + existingSecret: "pihole-password" + # -- Specify the key inside the secret to use + passwordKey: "password" + + # -- extraEnvironmentVars is a list of extra enviroment variables to set for pihole to use + extraEnvVars: + TZ: Europe/Amsterdam + + # -- extraEnvVarsSecret is a list of secrets to load in as environment variables. + extraEnvVarsSecret: {} + # env_var: + # name: secret-name + # key: secret-key + + # -- default upstream DNS 1 server to use + DNS1: "1.1.1.1" + # -- default upstream DNS 2 server to use + DNS2: "8.8.8.8" + + antiaff: + # -- set to true to enable antiaffinity (example: 2 pihole DNS in the same cluster) + enabled: false + # -- Here you can set the pihole release (you set in `helm install ...`) + # you want to avoid + avoidRelease: pihole1 + # -- Here you can choose between preferred or required + strict: true + + doh: + # -- set to true to enabled DNS over HTTPs via cloudflared + enabled: false + name: "cloudflared" + repository: "crazymax/cloudflared" + tag: latest + pullPolicy: IfNotPresent + # -- Here you can pass environment variables to the DoH container, for example: + envVars: {} + # TUNNEL_DNS_UPSTREAM: "https://1.1.1.2/dns-query,https://1.0.0.2/dns-query" + + # -- Probes configuration + probes: + # -- Configure the healthcheck for the doh container + liveness: + # -- set to true to enable liveness probe + enabled: true + # -- defines the initial delay for the liveness probe + initialDelaySeconds: 60 + # -- defines the failure threshold for the liveness probe + failureThreshold: 10 + # -- defines the timeout in secondes for the liveness probe + timeoutSeconds: 5 + + dnsmasq: + # -- Add upstream dns servers. All lines will be added to the pihole dnsmasq configuration + upstreamServers: [] + # - server=/foo.bar/192.168.178.10 + # - server=/bar.foo/192.168.178.11 + + # -- Add custom dns entries to override the dns resolution. 
All lines will be added to the pihole dnsmasq configuration. + customDnsEntries: [] + # - address=/foo.bar/192.168.178.10 + # - address=/bar.foo/192.168.178.11 + + # -- Dnsmasq reads the /etc/hosts file to resolve ips. You can add additional entries if you like + additionalHostsEntries: [] + # - 192.168.0.3 host4 + # - 192.168.0.4 host5 + + # -- Static DHCP config + staticDhcpEntries: [] + # staticDhcpEntries: + # - dhcp-host=MAC_ADDRESS,IP_ADDRESS,HOSTNAME + + # -- Other options + customSettings: + # otherSettings: + # - rebind-domain-ok=/plex.direct/ + + # -- Here we specify custom cname entries that should point to `A` records or + # elements in customDnsEntries array. + # The format should be: + # - cname=cname.foo.bar,foo.bar + # - cname=cname.bar.foo,bar.foo + # - cname=cname record,dns record + customCnameEntries: [] + # Here we specify custom cname entries that should point to `A` records or + # elements in customDnsEntries array. + # The format should be: + # - cname=cname.foo.bar,foo.bar + # - cname=cname.bar.foo,bar.foo + # - cname=cname record,dns record + + # -- list of adlists to import during initial start of the container + adlists: {} + # If you want to provide blocklists, add them here. + # - https://hosts-file.net/grm.txt + # - https://reddestdream.github.io/Projects/MinimalHosts/etc/MinimalHostsBlocker/minimalhosts + + # -- list of whitelisted domains to import during initial start of the container + whitelist: {} + # If you want to provide whitelisted domains, add them here. 
+ # - clients4.google.com + + # -- list of blacklisted domains to import during initial start of the container + blacklist: {} + # If you want to have special domains blacklisted, add them here + # - *.blackist.com + + # -- list of blacklisted regex expressions to import during initial start of the container + regex: {} + # Add regular expression blacklist items + # - (^|\.)facebook\.com$ + + # -- values that should be added to pihole-FTL.conf + ftl: {} + # Add values for pihole-FTL.conf + # MAXDBDAYS: 14 + + # -- port the container should use to expose HTTP traffic + webHttp: "80" + + # -- port the container should use to expose HTTPS traffic + webHttps: "443" + + # -- hostname of pod + hostname: "" + + # -- should the container use host network + hostNetwork: "false" + + # -- should container run in privileged mode + privileged: "false" + + customVolumes: + # -- set this to true to enable custom volumes + enabled: false + # -- any volume type can be used here + config: {} + # hostPath: + # path: "/mnt/data" + + # -- any extra volumes you might want + extraVolumes: {} + # external-conf: + # configMap: + # name: pi-hole-lighttpd-external-conf + + # -- any extra volume mounts you might want + extraVolumeMounts: {} + # external-conf: + # mountPath: /etc/lighttpd/external.conf + # subPath: external.conf + + # -- Additional annotations for pods + podAnnotations: {} + # Example below allows Prometheus to scape on metric port (requires pihole-exporter sidecar enabled) + # prometheus.io/port: '9617' + # prometheus.io/scrape: 'true' + + monitoring: + # -- Preferably adding prometheus scrape annotations rather than enabling podMonitor. 
+ podMonitor: + # -- set this to true to enable podMonitor + enabled: false + # -- Sidecar configuration + sidecar: + # -- set this to true to enable podMonitor as sidecar + enabled: false + port: 9617 + image: + repository: ekofr/pihole-exporter + tag: v0.3.0 + pullPolicy: IfNotPresent + resources: + limits: + memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + podDnsConfig: + enabled: true + policy: "None" + nameservers: + - 127.0.0.1 + - 8.8.8.8 diff --git a/home/plex/Chart.yaml b/home/plex/Chart.yaml new file mode 100644 index 0000000000..18b9c3552c --- /dev/null +++ b/home/plex/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: plex +version: 0.0.0 +dependencies: + - name: plex + version: 6.4.3 + repository: https://k8s-at-home.com/charts/ diff --git a/home/plex/values.yaml b/home/plex/values.yaml new file mode 100644 index 0000000000..23549d352d --- /dev/null +++ b/home/plex/values.yaml @@ -0,0 +1,124 @@ +plex: + # + # IMPORTANT NOTE + # + # This chart inherits from our common library chart. You can check the default values/options here: + # https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml + # + + image: + # -- image repository + repository: ghcr.io/k8s-at-home/plex + # @default -- chart.appVersion + tag: + # -- image pull policy + pullPolicy: IfNotPresent + + # -- environment variables. See [plex docs](https://support.plex.tv/articles/201105343-advanced-hidden-server-settings/) for more details. + # **NOTE:** Plex preference options are camelCase and CASE SENSITIVE! 
+ # You can do horrible things to your Plex configuration if you are not careful
+ # @default -- See below
+ env:
+ # -- Set the container timezone
+ TZ: Europe/Amsterdam
+ # ADVERTISE_IP:
+ # ALLOWED_NETWORKS:
+ # PLEX_CLAIM:
+ # PLEX_PREFERENCE_1: "FriendlyName=plex-kubernetes"
+ # PLEX_PREFERENCE_2: "EnableIPv6=0"
+ # PLEX_PREFERENCE_3: "logDebug=0"
+ # PLEX_PREFERENCE_4: "DisableTLSv1_0=1"
+ # PLEX_PREFERENCE_5: "LanNetworksBandwidth=xxx.xxx.xxx.0/18\,xxx.xxx.xxx.0/24\,xxx.xxx.xxx.0/24"
+ # PLEX_PREFERENCE_6: "TranscoderQuality=2"
+ # PLEX_PREFERENCE_7: "TreatWanIpAsLocal=0"
+ # PLEX_PREFERENCE_8: "TranscoderH264BackgroundPreset=fast"
+
+ # -- Configures service settings for the chart.
+ # @default -- See values.yaml
+ service:
+ main:
+ primary: true
+ ports:
+ http:
+ port: 32400
+ dnla-tcp:
+ enabled: true
+ type: LoadBalancer
+ ports:
+ dnla-tcp:
+ enabled: true
+ port: 32469
+ protocol: TCP
+ targetPort: 32469
+ externalTrafficPolicy: Local
+ dnla-udp:
+ enabled: true
+ type: LoadBalancer
+ ports:
+ dnla-udp:
+ enabled: true
+ port: 1900
+ protocol: UDP
+ targetPort: 1900
+ externalTrafficPolicy: Local
+
+ ingress:
+ # -- Enable and configure ingress settings for the chart under this key.
+ # @default -- See values.yaml
+ main:
+ enabled: true
+ ingressClassName: nginx
+ annotations:
+ cert-manager.io/cluster-issuer: letsencrypt-prod
+ hosts:
+ - host: &host plex.jupiter.mein.nl
+ paths:
+ - path: /
+ pathType: Prefix
+ tls:
+ - secretName: plex-tls-certificate
+ hosts:
+ - *host
+
+ # -- Enable devices to be discoverable
+ hostNetwork: false
+
+ # -- Configure persistence settings for the chart under this key.
+ # @default -- See values.yaml + persistence: + config: + enabled: false + mountPath: /config + + transcode: + enabled: false + mountPath: /transcode + + # -- Configure the Security Context for the Pod + podSecurityContext: {} + # runAsUser: 568 + # runAsGroup: 568 + # fsGroup: 568 + # # Hardware acceleration using an Intel iGPU w/ QuickSync + # # These IDs below should be matched to your `video` and `render` group on the host + # # To obtain those IDs run the following grep statement on the host: + # # $ cat /etc/group | grep "video\|render" + # # video:x:44: + # # render:x:109: + # supplementalGroups: + # - 44 + # - 109 + + # -- Configure the resource requests and/or limits for the Pod + resources: {} + # requests: + # # Hardware acceleration using an Intel iGPU w/ QuickSync and + # # using intel-gpu-plugin (https://github.com/intel/intel-device-plugins-for-kubernetes) + # gpu.intel.com/i915: 1 + # cpu: 200m + # memory: 256Mi + # limits: + # # Hardware acceleration using an Intel iGPU w/ QuickSync and + # # using intel-gpu-plugin (https://github.com/intel/intel-device-plugins-for-kubernetes) + # gpu.intel.com/i915: 1 + # memory: 4096Mi \ No newline at end of file diff --git a/home/postgresql/Chart.yaml b/home/postgresql/Chart.yaml new file mode 100644 index 0000000000..be96dec01e --- /dev/null +++ b/home/postgresql/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: postgresql +version: 0.0.0 +dependencies: + - name: postgresql + version: 11.9.13 + repository: https://charts.bitnami.com/bitnami \ No newline at end of file diff --git a/home/postgresql/values.yaml b/home/postgresql/values.yaml new file mode 100644 index 0000000000..a56a99419b --- /dev/null +++ b/home/postgresql/values.yaml @@ -0,0 +1,1331 @@ +postgresql: + ## @section Global parameters + ## Please, note that this will override the parameters, including dependencies, configured to use the global value + ## + global: + ## @param global.imageRegistry Global Docker image registry + ## + imageRegistry: 
"" + ## @param global.imagePullSecrets Global Docker registry secret names as an array + ## e.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + ## @param global.storageClass Global StorageClass for Persistent Volume(s) + ## + storageClass: "" + postgresql: + ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`) + ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`) + ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`) + ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`) + ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`) + ## + auth: + postgresPassword: "" + username: "" + password: "" + database: "" + existingSecret: "" + ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`) + ## + service: + ports: + postgresql: "" + + ## @section Common parameters + ## + + ## @param kubeVersion Override Kubernetes version + ## + kubeVersion: "" + ## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) + ## + nameOverride: "" + ## @param fullnameOverride String to fully override common.names.fullname template + ## + fullnameOverride: "" + ## @param clusterDomain Kubernetes Cluster Domain + ## + clusterDomain: cluster.local + ## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template) + ## + extraDeploy: [] + ## @param commonLabels Add labels to all the deployed resources + ## + commonLabels: {} + ## @param commonAnnotations Add annotations to all the deployed resources + ## + commonAnnotations: {} + ## Enable diagnostic mode in the statefulset + ## + diagnosticMode: + 
## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity + + ## @section PostgreSQL common parameters + ## + + ## Bitnami PostgreSQL image version + ## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ + ## @param image.registry PostgreSQL image registry + ## @param image.repository PostgreSQL image repository + ## @param image.tag PostgreSQL image tag (immutable tags are recommended) + ## @param image.pullPolicy PostgreSQL image pull policy + ## @param image.pullSecrets Specify image pull secrets + ## @param image.debug Specify if debug values should be set + ## + image: + registry: docker.io + repository: bitnami/postgresql + tag: 14.2.0-debian-10-r80 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false + ## Authentication parameters + ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run + ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run + ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run + ## + auth: + ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user + ## + enablePostgresUser: true + ## @param auth.postgresPassword Password for the "postgres" admin user + ## + postgresPassword: "" + ## @param auth.username Name for a custom user to create + ## + username: "" + ## @param auth.password Password for the custom user to create + ## + password: "" + ## @param auth.database Name for a custom database to create + ## + database: "" + ## @param auth.replicationUsername Name of the replication user + ## + replicationUsername: repl_user + ## @param auth.replicationPassword Password for the replication user + ## + replicationPassword: "" + ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials + ## `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret + ## The secret must contain the keys `postgres-password` (which is the password for "postgres" admin user), + ## `password` (which is the password for the custom user to create when `auth.username` is set), + ## and `replication-password` (which is the password for replication user). + ## The secret might also contains the key `ldap-password` if LDAP is enabled. 
`ldap.bind_password` will be ignored and + ## picked from this secret in this case. + ## The value is evaluated as a template. + ## + existingSecret: "postgres-secret" + ## @param auth.usePasswordFiles Mount credentials as a files instead of using an environment variable + ## + usePasswordFiles: false + ## @param architecture PostgreSQL architecture (`standalone` or `replication`) + ## + architecture: standalone + ## Replication configuration + ## Ignored if `architecture` is `standalone` + ## + replication: + ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` + ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. + ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT + ## + synchronousCommit: "off" + numSynchronousReplicas: 0 + ## @param replication.applicationName Cluster application name. 
Useful for advanced replication settings + ## + applicationName: my_application + ## @param containerPorts.postgresql PostgreSQL container port + ## + containerPorts: + postgresql: 5432 + ## Audit settings + ## https://github.com/bitnami/bitnami-docker-postgresql#auditing + ## @param audit.logHostname Log client hostnames + ## @param audit.logConnections Add client log-in operations to the log file + ## @param audit.logDisconnections Add client log-outs operations to the log file + ## @param audit.pgAuditLog Add operations to log using the pgAudit extension + ## @param audit.pgAuditLogCatalog Log catalog using pgAudit + ## @param audit.clientMinMessages Message log level to share with the user + ## @param audit.logLinePrefix Template for log line prefix (default if not set) + ## @param audit.logTimezone Timezone for the log timestamps + ## + audit: + logHostname: false + logConnections: false + logDisconnections: false + pgAuditLog: "" + pgAuditLogCatalog: "off" + clientMinMessages: error + logLinePrefix: "" + logTimezone: "" + ## LDAP configuration + ## @param ldap.enabled Enable LDAP support + ## @param ldap.url LDAP URL beginning in the form `ldap[s]://host[:port]/basedn` + ## @param ldap.server IP address or name of the LDAP server. 
+ ## @param ldap.port Port number on the LDAP server to connect to + ## @param ldap.prefix String to prepend to the user name when forming the DN to bind + ## @param ldap.suffix String to append to the user name when forming the DN to bind + ## @param ldap.baseDN Root DN to begin the search for the user in + ## @param ldap.bindDN DN of user to bind to LDAP + ## @param ldap.bind_password Password for the user to bind to LDAP + ## @param ldap.search_attr Attribute to match against the user name in the search + ## @param ldap.search_filter The search filter to use when doing search+bind authentication + ## @param ldap.scheme Set to `ldaps` to use LDAPS + ## @param ldap.tls Set to `1` to use TLS encryption + ## + ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: "" + search_attr: "" + search_filter: "" + scheme: "" + tls: "" + ## @param postgresqlDataDir PostgreSQL data dir folder + ## + postgresqlDataDir: /bitnami/postgresql/data + ## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list) + ## + postgresqlSharedPreloadLibraries: "pgaudit" + ## Start PostgreSQL pod(s) without limitations on shm memory. 
+ ## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M` + ## ref: https://github.com/docker-library/postgres/issues/416 + ## ref: https://github.com/containerd/containerd/issues/3654 + ## + shmVolume: + ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) + ## + enabled: true + ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs + ## Note: the size of the tmpfs counts against container's memory limit + ## e.g: + ## sizeLimit: 1Gi + ## + sizeLimit: "" + ## TLS configuration + ## + tls: + ## @param tls.enabled Enable TLS traffic support + ## + enabled: false + ## @param tls.autoGenerated Generate automatically self-signed TLS certificates + ## + autoGenerated: false + ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's + ## + preferServerCiphers: true + ## @param tls.certificatesSecret Name of an existing secret that contains the certificates + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html + ## + certCAFilename: "" + ## @param tls.crlFilename File containing a Certificate Revocation List + ## + crlFilename: "" + + ## @section PostgreSQL Primary parameters + ## + primary: + ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap + ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html + ## + configuration: "" + ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration + ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html + ## e.g:# + ## 
pgHbaConfiguration: |- + ## local all all trust + ## host all all localhost trust + ## host mydatabase mysuser 192.168.0.0/24 md5 + ## + pgHbaConfiguration: "" + ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration + ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored + ## + existingConfigmap: "" + ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration) + ## ref: https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf + ## + extendedConfiguration: "" + ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration + ## NOTE: `primary.extendedConfiguration` will be ignored + ## + existingExtendedConfigmap: "" + ## Initdb configuration + ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#specifying-initdb-arguments + ## + initdb: + ## @param primary.initdb.args PostgreSQL initdb extra arguments + ## + args: "" + ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log + ## + postgresqlWalDir: "" + ## @param primary.initdb.scripts Dictionary of initdb scripts + ## Specify dictionary of scripts to be run at first boot + ## e.g: + ## scripts: + ## my_init_script.sh: | + ## #!/bin/sh + ## echo "Do something." 
+ ## + scripts: {} + ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot + ## NOTE: This will override `primary.initdb.scripts` + ## + scriptsConfigMap: "" + ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information) + ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap` + ## + scriptsSecret: "" + ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts + ## + user: "" + ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts + ## + password: "" + ## Configure current cluster's primary server to be the standby server in other cluster. + ## This will allow cross cluster replication and provide cross cluster high availability. + ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. + ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not + ## @param primary.standby.primaryHost The Host of replication primary in the other cluster + ## @param primary.standby.primaryPort The Port of replication primary in the other cluster + ## + standby: + enabled: false + primaryHost: "" + primaryPort: "" + ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsCM: "" + ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsSecret: "" + ## @param primary.command Override default container command (useful when using custom images) + ## + command: [] + ## @param primary.args Override default 
container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers + ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers + ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers + ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param primary.startupProbe.timeoutSeconds Timeout seconds for 
startupProbe + ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param primary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param primary.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL Primary resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param primary.resources.limits The resources limits for the PostgreSQL Primary containers + ## @param primary.resources.requests.memory The requested memory for the PostgreSQL Primary containers + ## @param primary.resources.requests.cpu The requested cpu for the PostgreSQL Primary containers + ## + resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.podSecurityContext.enabled Enable security context + ## @param primary.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.containerSecurityContext.enabled Enable container security context + ## @param primary.containerSecurityContext.runAsUser User ID for the container + ## + containerSecurityContext: + enabled: true + runAsUser: 
1001 + ## @param primary.hostAliases PostgreSQL primary pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param primary.labels Map of labels to add to the statefulset (postgresql primary) + ## + labels: {} + ## @param primary.annotations Annotations for PostgreSQL primary pods + ## + annotations: {} + ## @param primary.podLabels Map of labels to add to the pods (postgresql primary) + ## + podLabels: {} + ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary) + ## + podAnnotations: {} + ## @param primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param primary.affinity Affinity for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: {} + ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary) + ## + priorityClassName: "" + ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type + ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) + ## + extraVolumeMounts: [] + ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) + ## + extraVolumes: [] + ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) + ## + extraPodSpec: {} + ## PostgreSQL Primary service configuration + ## + service: + ## @param primary.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param primary.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between 
<30000-32767> + ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param primary.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param primary.service.annotations Annotations for PostgreSQL primary service + ## + annotations: {} + ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param primary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service + ## + extraPorts: [] + ## PostgreSQL Primary persistence configuration + ## + persistence: + ## @param primary.persistence.enabled Enable PostgreSQL Primary data persistence using PVC + ## + enabled: true + ## @param primary.persistence.existingClaim Name of an existing PVC to use + ## + existingClaim: "" + ## @param primary.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param primary.persistence.subPath The subdirectory of the volume to mount to + ## Useful 
in dev environments and one PV for multiple services + ## + subPath: "" + ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "longhorn" + ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 8Gi + ## @param primary.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param primary.persistence.dataSource Custom PVC data source + ## + dataSource: {} + + ## @section PostgreSQL read only replica parameters + ## + readReplicas: + ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas + ## + replicaCount: 1 + ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsCM: "" + ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsSecret: "" + ## @param readReplicas.command Override default container command (useful when using custom images) + ## + command: [] + ## @param readReplicas.args Override default container args (useful when using custom images) + ## + args: [] 
+ ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers + ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers + ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers + ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe + ## @param readReplicas.startupProbe.timeoutSeconds 
Timeout seconds for startupProbe + ## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param readReplicas.lifecycleHooks for the PostgreSQL read only container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL read only resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param readReplicas.resources.limits The resources limits for the PostgreSQL read only containers + ## @param readReplicas.resources.requests.memory The requested memory for the PostgreSQL read only containers + ## @param readReplicas.resources.requests.cpu The requested cpu for the PostgreSQL read only containers + ## + resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.podSecurityContext.enabled Enable security context + ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.containerSecurityContext.enabled Enable container security context + ## @param 
readReplicas.containerSecurityContext.runAsUser User ID for the container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only) + ## + labels: {} + ## @param readReplicas.annotations Annotations for PostgreSQL read only pods + ## + annotations: {} + ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only) + ## + podLabels: {} + ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only) + ## + podAnnotations: {} + ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL read only node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match Ignored if `primary.affinity` is set. + ## E.g. 
+ ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: {} + ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only) + ## + priorityClassName: "" + ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type + ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) + ## + extraVolumeMounts: [] + ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) + ## + extraVolumes: [] + ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s) + ## + extraPodSpec: {} + ## PostgreSQL read only service configuration + ## + service: + ## @param readReplicas.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param readReplicas.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 
5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service + ## + annotations: {} + ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service + ## + extraPorts: [] + ## PostgreSQL read only persistence configuration + ## + persistence: + ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC + ## + enabled: true + ## @param readReplicas.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param readReplicas.persistence.subPath The subdirectory of the volume to 
mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 8Gi + ## @param readReplicas.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param readReplicas.persistence.dataSource Custom PVC data source + ## + dataSource: {} + + ## @section NetworkPolicy parameters + + ## Add networkpolicies + ## + networkPolicy: + ## @param networkPolicy.enabled Enable network policies + ## + enabled: false + ## @param networkPolicy.metrics.enabled Enable network policies for metrics (prometheus) + ## @param networkPolicy.metrics.namespaceSelector [object] Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace. + ## @param networkPolicy.metrics.podSelector [object] Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. 
+ ## + metrics: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: monitoring + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: monitoring + ## + podSelector: {} + ## Ingress Rules + ## + ingressRules: + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin. + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed namespace(s). + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed pod(s). + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL primary node. + ## + primaryAccessOnlyFrom: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: ingress + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: access + ## + podSelector: {} + ## custom ingress rules + ## e.g: + ## customRules: + ## - from: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin. + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed namespace(s). + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed pod(s). 
+ ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL read-only nodes. + ## + readReplicasAccessOnlyFrom: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: ingress + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: access + ## + podSelector: {} + ## custom ingress rules + ## e.g: + ## CustomRules: + ## - from: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + ## @param networkPolicy.egressRules.denyConnectionsToExternal Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). + ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule + ## + egressRules: + # Deny connections to external. This is not compatible with an external database. + denyConnectionsToExternal: false + ## Additional custom egress rules + ## e.g: + ## customRules: + ## - to: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + + ## @section Volume Permissions parameters + + ## Init containers parameters: + ## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node + ## + volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + 
tag: 10-debian-10-r327 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + limits: {} + requests: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + runAsUser: 0 + + ## @section Other Parameters + + ## Service account for PostgreSQL to use. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + ## Creates role for ServiceAccount + ## @param rbac.create Create Role and RoleBinding (required for PSP to work) + ## + rbac: + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] + ## Pod Security Policy + ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later + ## + psp: + create: false + + ## @section Metrics Parameters + + metrics: + ## @param metrics.enabled Start a prometheus exporter + ## + enabled: false + ## @param metrics.image.registry PostgreSQL Prometheus Exporter image registry + ## @param metrics.image.repository PostgreSQL Prometheus Exporter image repository + ## @param metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) + ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy + ## @param metrics.image.pullSecrets Specify image pull secrets + ## + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.10.1-debian-10-r14 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param metrics.customMetrics Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + ## customMetrics: + ## pg_database: + ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + ## metrics: + ## - name: + ## usage: "LABEL" + ## description: "Name of the database" + ## - size_bytes: + ## usage: "GAUGE" + ## description: "Size of the database in bytes" + ## + customMetrics: {} + ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + ## extraEnvVars: + ## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + ## value: "true" + ## + extraEnvVars: [] + ## PostgreSQL Prometheus exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.containerSecurityContext.enabled Enable PostgreSQL Prometheus exporter containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set PostgreSQL Prometheus exporter containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsNonRoot Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe + ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param metrics.startupProbe.successThreshold 
Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port + ## + containerPorts: + metrics: 9187 + ## PostgreSQL Prometheus exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.resources.limits The resources limits for the PostgreSQL Prometheus exporter container + ## @param metrics.resources.requests The requested resources for the PostgreSQL Prometheus exporter container + ## + resources: + limits: {} + requests: {} + ## Service configuration + ## + service: + ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port + ## + ports: + metrics: 9187 + ## @param metrics.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## 
@param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + labels: {} + ## @param metrics.prometheusRule.rules PrometheusRule definitions + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "common.names.fullname" .) }}"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ include "common.names.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). 
+ ## + rules: [] + + postgresql: + image: + repository: bitnami/postgresql + tag: 14.5.0 + auth: + existingSecret: \ No newline at end of file diff --git a/home/redis/Chart.yaml b/home/redis/Chart.yaml new file mode 100644 index 0000000000..dd184e42af --- /dev/null +++ b/home/redis/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: redis +version: 0.0.0 +dependencies: + - name: redis + version: 17.3.7 + repository: https://charts.bitnami.com/bitnami \ No newline at end of file diff --git a/home/redis/values.yaml b/home/redis/values.yaml new file mode 100644 index 0000000000..35f998cd3e --- /dev/null +++ b/home/redis/values.yaml @@ -0,0 +1,1486 @@ +redis: + ## @section Global parameters + ## Global Docker image parameters + ## Please, note that this will override the image parameters, including dependencies, configured to use the global value + ## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + ## + + ## @param global.imageRegistry Global Docker image registry + ## @param global.imagePullSecrets Global Docker registry secret names as an array + ## @param global.storageClass Global StorageClass for Persistent Volume(s) + ## @param global.redis.password Global Redis™ password (overrides `auth.password`) + ## + global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + redis: + password: "" + + ## @section Common parameters + ## + + ## @param kubeVersion Override Kubernetes version + ## + kubeVersion: "" + ## @param nameOverride String to partially override common.names.fullname + ## + nameOverride: "" + ## @param fullnameOverride String to fully override common.names.fullname + ## + fullnameOverride: "" + ## @param commonLabels Labels to add to all deployed objects + ## + commonLabels: {} + ## @param commonAnnotations Annotations to add to all deployed objects + ## + commonAnnotations: {} + ## @param clusterDomain Kubernetes cluster domain name + ## + clusterDomain: cluster.local + ## @param extraDeploy Array of extra objects to deploy with the release + ## + extraDeploy: [] + + ## Enable diagnostic mode in the deployment + ## + diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + + ## @section Redis™ Image parameters + ## + + ## Bitnami Redis™ image + ## ref: https://hub.docker.com/r/bitnami/redis/tags/ + ## @param image.registry Redis™ image registry + ## @param image.repository Redis™ image repository + ## @param image.tag Redis™ image tag (immutable tags are recommended) + ## @param image.pullPolicy Redis™ image pull policy + ## @param image.pullSecrets Redis™ image pull secrets + ## @param image.debug Enable image debug mode + ## + image: + registry: docker.io + repository: bitnami/redis + tag: 6.2.6-debian-10-r120 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images 
+ ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + + ## @section Redis™ common configuration parameters + ## https://github.com/bitnami/bitnami-docker-redis#configuration + ## + + ## @param architecture Redis™ architecture. Allowed values: `standalone` or `replication` + ## + architecture: replication + ## Redis™ Authentication parameters + ## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run + ## + auth: + ## @param auth.enabled Enable password authentication + ## + enabled: false + ## @param auth.sentinel Enable password authentication on sentinels too + ## + sentinel: true + ## @param auth.password Redis™ password + ## Defaults to a random 10-character alphanumeric string if not set + ## + password: "" + ## @param auth.existingSecret The name of an existing secret with Redis™ credentials + ## NOTE: When it's set, the previous `auth.password` parameter is ignored + ## + existingSecret: "" + ## @param auth.existingSecretPasswordKey Password key to be retrieved from existing secret + ## NOTE: ignored unless `auth.existingSecret` parameter is set + ## + existingSecretPasswordKey: "" + ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable + ## + usePasswordFiles: false + ## @param commonConfiguration [string] Common configuration to be added into the ConfigMap + ## ref: https://redis.io/topics/config + ## + commonConfiguration: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. 
+ save "" + ## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for Redis™ nodes + ## + existingConfigmap: "" + + ## @section Redis™ master configuration parameters + ## + + master: + ## @param master.configuration Configuration for Redis™ master nodes + ## ref: https://redis.io/topics/config + ## + configuration: "" + ## @param master.disableCommands Array with Redis™ commands to disable on master nodes + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + ## @param master.command Override default container command (useful when using custom images) + ## + command: [] + ## @param master.args Override default container args (useful when using custom images) + ## + args: [] + ## @param master.preExecCmds Additional commands to run prior to starting Redis™ master + ## + preExecCmds: [] + ## @param master.extraFlags Array with additional command line flags for Redis™ master + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param master.extraEnvVars Array with extra environment variables to add to Redis™ master nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis™ master nodes + ## + extraEnvVarsCM: "" + ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis™ master nodes + ## + extraEnvVarsSecret: "" + ## @param master.containerPorts.redis Container port to open on Redis™ master nodes + ## + containerPorts: + redis: 6379 + ## Configure extra options for Redis™ containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## 
@param master.startupProbe.enabled Enable startupProbe on Redis™ master nodes + ## @param master.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param master.startupProbe.periodSeconds Period seconds for startupProbe + ## @param master.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param master.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param master.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.livenessProbe.enabled Enable livenessProbe on Redis™ master nodes + ## @param master.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param master.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param master.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param master.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param master.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.readinessProbe.enabled Enable readinessProbe on Redis™ master nodes + ## @param master.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param master.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param master.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param master.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param master.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param 
master.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param master.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param master.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis™ master resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param master.resources.limits The resources limits for the Redis™ master containers + ## @param master.resources.requests The requested resources for the Redis™ master containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.podSecurityContext.enabled Enabled Redis™ master pods' Security Context + ## @param master.podSecurityContext.fsGroup Set Redis™ master pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.containerSecurityContext.enabled Enabled Redis™ master containers' Security Context + ## @param master.containerSecurityContext.runAsUser Set Redis™ master containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param master.kind Use either Deployment or StatefulSet (default) + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ + ## + kind: StatefulSet + ## @param master.schedulerName Alternate scheduler for Redis™ master pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param master.updateStrategy.type Redis™ master statefulset strategy type + ## 
@skip master.updateStrategy.rollingUpdate + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + rollingUpdate: {} + ## @param master.priorityClassName Redis™ master pods' priorityClassName + ## + priorityClassName: "" + ## @param master.hostAliases Redis™ master pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param master.podLabels Extra labels for Redis™ master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param master.podAnnotations Annotations for Redis™ master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param master.shareProcessNamespace Share a single process namespace between all of the containers in Redis™ master pods + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param master.podAffinityPreset Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param master.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node master.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param master.nodeAffinityPreset.type Node affinity preset type. 
Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param master.nodeAffinityPreset.key Node label key to match. Ignored if `master.affinity` is set + ## + key: "" + ## @param master.nodeAffinityPreset.values Node label values to match. Ignored if `master.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param master.affinity Affinity for Redis™ master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `master.podAffinityPreset`, `master.podAntiAffinityPreset`, and `master.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param master.nodeSelector Node labels for Redis™ master pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param master.tolerations Tolerations for Redis™ master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param master.topologySpreadConstraints Spread Constraints for Redis™ master pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. 
+ ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: {} + ## @param master.lifecycleHooks for the Redis™ master container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param master.extraVolumes Optionally specify extra list of additional volumes for the Redis™ master pod(s) + ## + extraVolumes: [] + ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis™ master container(s) + ## + extraVolumeMounts: [] + ## @param master.sidecars Add additional sidecar containers to the Redis™ master pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param master.initContainers Add additional init containers to the Redis™ master pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence on Redis™ master nodes using Persistent Volume Claims + ## + enabled: true + ## @param master.persistence.medium Provide a medium for `emptyDir` volumes. 
+ ## + medium: "" + ## @param master.persistence.path The path the volume will be mounted at on Redis™ master containers + ## NOTE: Useful when using different Redis™ images + ## + path: /data + ## @param master.persistence.subPath The subdirectory of the volume to mount on Redis™ master containers + ## NOTE: Useful in dev environments + ## + subPath: "" + ## @param master.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "longhorn" + ## @param master.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume size + ## + size: 8Gi + ## @param master.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param master.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param master.persistence.dataSource Custom PVC data source + dataSource: {} + ## @param master.persistence.existingClaim Use an existing PVC which must be created manually before it is bound + ## NOTE: requires master.persistence.enabled: true + ## + existingClaim: "" + ## Redis™ master service parameters + ## + service: + ## @param master.service.type Redis™ master service type + ## + type: ClusterIP + ## @param master.service.ports.redis Redis™ master service port + ## + ports: + redis: 6379 + ## @param master.service.nodePorts.redis Node port for Redis™ master + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + redis: "" + ## @param master.service.externalTrafficPolicy Redis™ master service external traffic policy + ## ref: 
https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param master.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param master.service.clusterIP Redis™ master service Cluster IP + ## + clusterIP: "" + ## @param master.service.loadBalancerIP Redis™ master service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param master.service.loadBalancerSourceRanges Redis™ master service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param master.service.annotations Additional custom annotations for Redis™ master service + ## + annotations: {} + ## @param master.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-master pods + ## + terminationGracePeriodSeconds: 30 + + ## @section Redis™ replicas configuration parameters + ## + + replica: + ## @param replica.replicaCount Number of Redis™ replicas to deploy + ## + replicaCount: 3 + ## @param replica.configuration Configuration for Redis™ replicas nodes + ## ref: https://redis.io/topics/config + ## + configuration: "" + ## @param replica.disableCommands Array with Redis™ commands to disable on replicas nodes + ## Commands will be completely disabled by renaming each to an empty string. 
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + ## @param replica.command Override default container command (useful when using custom images) + ## + command: [] + ## @param replica.args Override default container args (useful when using custom images) + ## + args: [] + ## @param replica.preExecCmds Additional commands to run prior to starting Redis™ replicas + ## + preExecCmds: [] + ## @param replica.extraFlags Array with additional command line flags for Redis™ replicas + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param replica.extraEnvVars Array with extra environment variables to add to Redis™ replicas nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param replica.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis™ replicas nodes + ## + extraEnvVarsCM: "" + ## @param replica.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis™ replicas nodes + ## + extraEnvVarsSecret: "" + ## @param replica.externalMaster.enabled Use external master for bootstrapping + ## @param replica.externalMaster.host External master host to bootstrap from + ## @param replica.externalMaster.port Port for Redis service external master host + ## + externalMaster: + enabled: false + host: "" + port: 6379 + ## @param replica.containerPorts.redis Container port to open on Redis™ replicas nodes + ## + containerPorts: + redis: 6379 + ## Configure extra options for Redis™ containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param replica.startupProbe.enabled Enable startupProbe on Redis™ replicas nodes + ## @param replica.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param 
replica.startupProbe.periodSeconds Period seconds for startupProbe + ## @param replica.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param replica.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param replica.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.livenessProbe.enabled Enable livenessProbe on Redis™ replicas nodes + ## @param replica.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param replica.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param replica.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param replica.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param replica.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.readinessProbe.enabled Enable readinessProbe on Redis™ replicas nodes + ## @param replica.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param replica.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param replica.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param replica.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param replica.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param replica.customLivenessProbe Custom livenessProbe that 
overrides the default one + ## + customLivenessProbe: {} + ## @param replica.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis™ replicas resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param replica.resources.limits The resources limits for the Redis™ replicas containers + ## @param replica.resources.requests The requested resources for the Redis™ replicas containers + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 250m + # memory: 256Mi + requests: {} + # cpu: 250m + # memory: 256Mi + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param replica.podSecurityContext.enabled Enabled Redis™ replicas pods' Security Context + ## @param replica.podSecurityContext.fsGroup Set Redis™ replicas pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param replica.containerSecurityContext.enabled Enabled Redis™ replicas containers' Security Context + ## @param replica.containerSecurityContext.runAsUser Set Redis™ replicas containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param replica.schedulerName Alternate scheduler for Redis™ replicas pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + 
schedulerName: "" + ## @param replica.updateStrategy.type Redis™ replicas statefulset strategy type + ## @skip replica.updateStrategy.rollingUpdate + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + rollingUpdate: {} + ## @param replica.priorityClassName Redis™ replicas pods' priorityClassName + ## + priorityClassName: "" + ## @param replica.podManagementPolicy podManagementPolicy to manage scaling operation of Redis™ replicas pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "" + ## @param replica.hostAliases Redis™ replicas pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param replica.podLabels Extra labels for Redis™ replicas pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param replica.podAnnotations Annotations for Redis™ replicas pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param replica.shareProcessNamespace Share a single process namespace between all of the containers in Redis™ replicas pods + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param replica.podAffinityPreset Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param replica.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `replica.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param replica.nodeAffinityPreset.type Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param replica.nodeAffinityPreset.key Node label key to match. Ignored if `replica.affinity` is set + ## + key: "" + ## @param replica.nodeAffinityPreset.values Node label values to match. Ignored if `replica.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param replica.affinity Affinity for Redis™ replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `replica.podAffinityPreset`, `replica.podAntiAffinityPreset`, and `replica.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param replica.nodeSelector Node labels for Redis™ replicas pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param replica.tolerations Tolerations for Redis™ replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param replica.topologySpreadConstraints Spread Constraints for Redis™ replicas pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. 
+ ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: {} + ## @param replica.lifecycleHooks for the Redis™ replica container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param replica.extraVolumes Optionally specify extra list of additional volumes for the Redis™ replicas pod(s) + ## + extraVolumes: [] + ## @param replica.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis™ replicas container(s) + ## + extraVolumeMounts: [] + ## @param replica.sidecars Add additional sidecar containers to the Redis™ replicas pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param replica.initContainers Add additional init containers to the Redis™ replicas pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Persistence Parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param replica.persistence.enabled Enable persistence on Redis™ replicas nodes using Persistent Volume Claims + ## + enabled: true + ## @param replica.persistence.medium Provide a medium for `emptyDir` volumes. 
+ ## + medium: "" + ## @param replica.persistence.path The path the volume will be mounted at on Redis™ replicas containers + ## NOTE: Useful when using different Redis™ images + ## + path: /data + ## @param replica.persistence.subPath The subdirectory of the volume to mount on Redis™ replicas containers + ## NOTE: Useful in dev environments + ## + subPath: "" + ## @param replica.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param replica.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param replica.persistence.size Persistent Volume size + ## + size: 8Gi + ## @param replica.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param replica.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param replica.persistence.dataSource Custom PVC data source + dataSource: {} + ## Redis™ replicas service parameters + ## + service: + ## @param replica.service.type Redis™ replicas service type + ## + type: ClusterIP + ## @param replica.service.ports.redis Redis™ replicas service port + ## + ports: + redis: 6379 + ## @param replica.service.nodePorts.redis Node port for Redis™ replicas + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + redis: "" + ## @param replica.service.externalTrafficPolicy Redis™ replicas service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param 
replica.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param replica.service.clusterIP Redis™ replicas service Cluster IP + ## + clusterIP: "" + ## @param replica.service.loadBalancerIP Redis™ replicas service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param replica.service.loadBalancerSourceRanges Redis™ replicas service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param replica.service.annotations Additional custom annotations for Redis™ replicas service + ## + annotations: {} + ## @param replica.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-replicas pods + ## + terminationGracePeriodSeconds: 30 + ## Autoscaling configuration + ## + autoscaling: + ## @param replica.autoscaling.enabled Enable replica autoscaling settings + ## + enabled: false + ## @param replica.autoscaling.minReplicas Minimum replicas for the pod autoscaling + ## + minReplicas: 1 + ## @param replica.autoscaling.maxReplicas Maximum replicas for the pod autoscaling + ## + maxReplicas: 11 + ## @param replica.autoscaling.targetCPU Percentage of CPU to consider when autoscaling + ## + targetCPU: "" + ## @param replica.autoscaling.targetMemory Percentage of Memory to consider when autoscaling + ## + targetMemory: "" + + ## @section Redis™ Sentinel configuration parameters + ## + + sentinel: + ## @param sentinel.enabled Use Redis™ Sentinel on Redis™ pods. 
+ ## IMPORTANT: this will disable the master and replicas services and + ## create a single Redis™ service exposing both the Redis and Sentinel ports + ## + enabled: false + ## Bitnami Redis™ Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## @param sentinel.image.registry Redis™ Sentinel image registry + ## @param sentinel.image.repository Redis™ Sentinel image repository + ## @param sentinel.image.tag Redis™ Sentinel image tag (immutable tags are recommended) + ## @param sentinel.image.pullPolicy Redis™ Sentinel image pull policy + ## @param sentinel.image.pullSecrets Redis™ Sentinel image pull secrets + ## @param sentinel.image.debug Enable image debug mode + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + tag: 6.2.6-debian-10-r118 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + ## @param sentinel.masterSet Master set name + ## + masterSet: mymaster + ## @param sentinel.quorum Sentinel Quorum + ## + quorum: 2 + ## @param sentinel.automateClusterRecovery Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it. + ## This also prevents any new replica from starting until the last remaining replica is elected as master to guarantee that it is the one to be elected by Sentinel, and not a newly started replica with no data. + ## NOTE: This feature requires a "downAfterMilliseconds" value less or equal to 2000. 
+ ## + automateClusterRecovery: false + ## Sentinel timing restrictions + ## @param sentinel.downAfterMilliseconds Timeout for detecting a Redis™ node is down + ## @param sentinel.failoverTimeout Timeout for performing a election failover + ## + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + ## @param sentinel.parallelSyncs Number of replicas that can be reconfigured in parallel to use the new master after a failover + ## + parallelSyncs: 1 + ## @param sentinel.configuration Configuration for Redis™ Sentinel nodes + ## ref: https://redis.io/topics/sentinel + ## + configuration: "" + ## @param sentinel.command Override default container command (useful when using custom images) + ## + command: [] + ## @param sentinel.args Override default container args (useful when using custom images) + ## + args: [] + ## @param sentinel.preExecCmds Additional commands to run prior to starting Redis™ Sentinel + ## + preExecCmds: [] + ## @param sentinel.extraEnvVars Array with extra environment variables to add to Redis™ Sentinel nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param sentinel.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis™ Sentinel nodes + ## + extraEnvVarsCM: "" + ## @param sentinel.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis™ Sentinel nodes + ## + extraEnvVarsSecret: "" + ## @param sentinel.externalMaster.enabled Use external master for bootstrapping + ## @param sentinel.externalMaster.host External master host to bootstrap from + ## @param sentinel.externalMaster.port Port for Redis service external master host + externalMaster: + enabled: false + host: "" + port: 6379 + ## @param sentinel.containerPorts.sentinel Container port to open on Redis™ Sentinel nodes + ## + containerPorts: + sentinel: 26379 + ## Configure extra options for Redis™ containers' liveness and readiness probes + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param sentinel.startupProbe.enabled Enable startupProbe on Redis™ Sentinel nodes + ## @param sentinel.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param sentinel.startupProbe.periodSeconds Period seconds for startupProbe + ## @param sentinel.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param sentinel.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param sentinel.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param sentinel.livenessProbe.enabled Enable livenessProbe on Redis™ Sentinel nodes + ## @param sentinel.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param sentinel.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param sentinel.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param sentinel.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param sentinel.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param sentinel.readinessProbe.enabled Enable readinessProbe on Redis™ Sentinel nodes + ## @param sentinel.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param sentinel.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param sentinel.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param sentinel.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param sentinel.readinessProbe.successThreshold Success threshold for readinessProbe + ## + 
readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param sentinel.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param sentinel.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param sentinel.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis™ Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sentinel.resources.limits The resources limits for the Redis™ Sentinel containers + ## @param sentinel.resources.requests The requested resources for the Redis™ Sentinel containers + ## + resources: + limits: {} + requests: {} + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param sentinel.containerSecurityContext.enabled Enabled Redis™ Sentinel containers' Security Context + ## @param sentinel.containerSecurityContext.runAsUser Set Redis™ Sentinel containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param sentinel.lifecycleHooks for the Redis™ sentinel container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param sentinel.extraVolumes Optionally specify extra list of additional volumes for the Redis™ Sentinel + ## + extraVolumes: [] + ## @param sentinel.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis™ Sentinel container(s) + ## + extraVolumeMounts: [] + ## Redis™ Sentinel service parameters + ## + service: + ## @param sentinel.service.type Redis™ Sentinel service type + ## + type: ClusterIP + ## @param sentinel.service.ports.redis Redis™ service port for Redis™ + ## @param 
sentinel.service.ports.sentinel Redis™ service port for Redis™ Sentinel + ports: + redis: 6379 + sentinel: 26379 + ## @param sentinel.service.nodePorts.redis Node port for Redis™ + ## @param sentinel.service.nodePorts.sentinel Node port for Sentinel + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## NOTE: By leaving these values blank, they will be generated by ports-configmap + ## If setting manually, please leave at least replica.replicaCount + 1 in between sentinel.service.nodePorts.redis and sentinel.service.nodePorts.sentinel to take into account the ports that will be created while incrementing that base port + ## + nodePorts: + redis: "" + sentinel: "" + ## @param sentinel.service.externalTrafficPolicy Redis™ Sentinel service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param sentinel.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param sentinel.service.clusterIP Redis™ Sentinel service Cluster IP + ## + clusterIP: "" + ## @param sentinel.service.loadBalancerIP Redis™ Sentinel service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param sentinel.service.loadBalancerSourceRanges Redis™ Sentinel service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param sentinel.service.annotations Additional custom annotations for Redis™ Sentinel service + ## + annotations: {} + ## @param sentinel.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-node pods + ## + terminationGracePeriodSeconds: 30 + + ## @section Other Parameters + ## + + ## Network Policy configuration + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the ports + ## Redis™ is listening on. When true, Redis™ will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.extraIngress Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param networkPolicy.extraEgress Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param networkPolicy.ingressNSMatchLabels Labels to match to allow traffic from other namespaces + ## @param networkPolicy.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + ## PodSecurityPolicy configuration + 
## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + ## @param podSecurityPolicy.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later + ## + create: false + ## @param podSecurityPolicy.enabled Enable PodSecurityPolicy's RBAC rules + ## + enabled: false + ## RBAC configuration + ## + rbac: + ## @param rbac.create Specifies whether RBAC resources should be created + ## + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] + ## ServiceAccount configuration + ## + serviceAccount: + ## @param serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + ## Redis™ Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + ## @param pdb.create Specifies whether a PodDisruptionBudget should be created + ## + create: false + ## @param pdb.minAvailable Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Max number of pods that can be unavailable after the eviction + ## + maxUnavailable: "" + ## TLS configuration + ## + tls: + ## @param tls.enabled 
Enable TLS traffic + ## + enabled: false + ## @param tls.authClients Require clients to authenticate + ## + authClients: true + ## @param tls.autoGenerated Enable autogenerated certificates + ## + autoGenerated: false + ## @param tls.existingSecret The name of the existing secret that contains the TLS certificates + ## + existingSecret: "" + ## @param tls.certificatesSecret DEPRECATED. Use existingSecret instead. + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate Key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## + certCAFilename: "" + ## @param tls.dhParamsFilename File containing DH params (in order to support DH based ciphers) + ## + dhParamsFilename: "" + + ## @section Metrics Parameters + ## + + metrics: + ## @param metrics.enabled Start a sidecar prometheus exporter to expose Redis™ metrics + ## + enabled: false + ## Bitnami Redis™ Exporter image + ## ref: https://hub.docker.com/r/bitnami/redis-exporter/tags/ + ## @param metrics.image.registry Redis™ Exporter image registry + ## @param metrics.image.repository Redis™ Exporter image repository + ## @param metrics.image.tag Redis™ Redis™ Exporter image tag (immutable tags are recommended) + ## @param metrics.image.pullPolicy Redis™ Exporter image pull policy + ## @param metrics.image.pullSecrets Redis™ Exporter image pull secrets + ## + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.39.0-debian-10-r2 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param metrics.command Override default metrics container init command (useful when using custom images) + ## + command: [] + ## @param metrics.redisTargetHost A way to specify an alternative Redis™ hostname + ## Useful for certificate CN/SAN matching + ## + redisTargetHost: "localhost" + ## @param metrics.extraArgs Extra arguments for Redis™ exporter, for example: + ## e.g.: + ## extraArgs: + ## check-keys: myKey,myOtherKey + ## + extraArgs: {} + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param metrics.containerSecurityContext.enabled Enabled Redis™ exporter containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set Redis™ exporter containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param metrics.extraVolumes Optionally specify extra list of additional volumes for the Redis™ metrics sidecar + ## + extraVolumes: [] + ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis™ metrics sidecar + ## + extraVolumeMounts: [] + ## Redis™ exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.resources.limits The resources limits for the Redis™ exporter container + ## @param metrics.resources.requests The requested resources for the Redis™ exporter container + ## + resources: + limits: {} + requests: {} + ## @param metrics.podLabels Extra labels for Redis™ exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param metrics.podAnnotations [object] Annotations for Redis™ exporter pods + ## ref: 
https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + ## Redis™ exporter service parameters + ## + service: + ## @param metrics.service.type Redis™ exporter service type + ## + type: ClusterIP + ## @param metrics.service.port Redis™ exporter service port + ## + port: 9121 + ## @param metrics.service.externalTrafficPolicy Redis™ exporter service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param metrics.service.loadBalancerIP Redis™ exporter service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param metrics.service.loadBalancerSourceRanges Redis™ exporter service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param metrics.service.annotations Additional custom annotations for Redis™ exporter service + ## + annotations: {} + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. + ## + relabellings: [] + ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. 
+ ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus + ## + additionalLabels: {} + ## Custom PrometheusRule to be defined + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.prometheusRule.namespace The namespace in which the prometheusRule will be created + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels for the prometheusRule + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules Custom Prometheus rules + ## e.g: + ## rules: + ## - alert: RedisDown + ## expr: redis_up{service="{{ template "common.names.fullname" . }}-metrics"} == 0 + ## for: 2m + ## labels: + ## severity: error + ## annotations: + ## summary: Redis™ instance {{ "{{ $labels.instance }}" }} down + ## description: Redis™ instance {{ "{{ $labels.instance }}" }} is down + ## - alert: RedisMemoryHigh + ## expr: > + ## redis_memory_used_bytes{service="{{ template "common.names.fullname" . }}-metrics"} * 100 + ## / + ## redis_memory_max_bytes{service="{{ template "common.names.fullname" . }}-metrics"} + ## > 90 + ## for: 2m + ## labels: + ## severity: error + ## annotations: + ## summary: Redis™ instance {{ "{{ $labels.instance }}" }} is using too much memory + ## description: | + ## Redis™ instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + ## - alert: RedisKeyEviction + ## expr: | + ## increase(redis_evicted_keys_total{service="{{ template "common.names.fullname" . 
}}-metrics"}[5m]) > 0 + ## for: 1s + ## labels: + ## severity: error + ## annotations: + ## summary: Redis™ instance {{ "{{ $labels.instance }}" }} has evicted keys + ## description: | + ## Redis™ instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + ## + rules: [] + + ## @section Init Container Parameters + ## + + ## 'volumePermissions' init container parameters + ## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values + ## based on the *podSecurityContext/*containerSecurityContext parameters + ## + volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` + ## + enabled: false + ## Bitnami Shell image + ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/ + ## @param volumePermissions.image.registry Bitnami Shell image registry + ## @param volumePermissions.image.repository Bitnami Shell image repository + ## @param volumePermissions.image.tag Bitnami Shell image tag (immutable tags are recommended) + ## @param volumePermissions.image.pullPolicy Bitnami Shell image pull policy + ## @param volumePermissions.image.pullSecrets Bitnami Shell image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 10-debian-10-r329 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits The resources limits for the init container + ## @param volumePermissions.resources.requests The requested resources for the init container + ## + resources: + limits: {} + requests: {} + ## Init container Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser + ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the + ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed) + ## + containerSecurityContext: + runAsUser: 0 + + ## init-sysctl container parameters + ## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) + ## + sysctl: + ## @param sysctl.enabled Enable init container to modify Kernel settings + ## + enabled: false + ## Bitnami Shell image + ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/ + ## @param sysctl.image.registry Bitnami Shell image registry + ## @param sysctl.image.repository Bitnami Shell image repository + ## @param sysctl.image.tag Bitnami Shell image tag (immutable tags are recommended) + ## @param sysctl.image.pullPolicy Bitnami Shell image pull policy + ## @param sysctl.image.pullSecrets Bitnami Shell image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 10-debian-10-r329 + pullPolicy: IfNotPresent + ## Optionally 
specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param sysctl.command Override default init-sysctl container command (useful when using custom images) + ## + command: [] + ## @param sysctl.mountHostSys Mount the host `/sys` folder to `/host-sys` + ## + mountHostSys: false + ## Init container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sysctl.resources.limits The resources limits for the init container + ## @param sysctl.resources.requests The requested resources for the init container + ## + resources: + limits: {} + requests: {} + + ## @section useExternalDNS Parameters + ## + ## @param useExternalDNS.enabled Enable various syntax that would enable external-dns to work. Note this requires a working installation of `external-dns` to be usable. + ## @param useExternalDNS.additionalAnnotations Extra annotations to be utilized when `external-dns` is enabled. + ## @param useExternalDNS.annotationKey The annotation key utilized when `external-dns` is enabled. + ## @param useExternalDNS.suffix The DNS suffix utilized when `external-dns` is enabled. Note that we prepend the suffix with the full name of the release. 
+ ## + useExternalDNS: + enabled: false + suffix: "" + annotationKey: external-dns.alpha.kubernetes.io/ + additionalAnnotations: {} \ No newline at end of file diff --git a/metal/Makefile b/metal/Makefile index 947aba7ed9..d5aef7d086 100644 --- a/metal/Makefile +++ b/metal/Makefile @@ -2,15 +2,19 @@ env ?= "prod" -default: boot cluster +default: boot os cluster -~/.ssh/id_ed25519: - ssh-keygen -t ed25519 -P '' -f "$@" +# ~/.ssh/id_ed25519: +# ssh-keygen -t ed25519 -f "$@" -boot: ~/.ssh/id_ed25519 - ansible-playbook \ - --inventory inventories/${env}.yml \ - boot.yml +# boot: ~/.ssh/id_ed25519 +# ansible-playbook \ +# --inventory inventories/${env}.yml \ +# boot.yml + +# picm: ansible-playbook \ +# --inventory inventories/${env}.yml \ +# cluster.yml cluster: ansible-playbook \ diff --git a/metal/TODO.md b/metal/TODO.md new file mode 100644 index 0000000000..6b5b7b7bf3 --- /dev/null +++ b/metal/TODO.md @@ -0,0 +1,16 @@ +TODO Metal + +Europa +[ ] Auto install ubuntu +[ ] Disable GUI +[ ] Make ssh root possible +[ ] Install basic package +[ ] Disable GUI +[ ] Mount script for fstab +[ ] Zpool init for host +[ ] Set hostname +[ ] Install iscsi + +PI's +[x] Set hostname +[ ] Log2ram diff --git a/metal/ansible.cfg b/metal/ansible.cfg index 080a58c076..1695510ead 100644 --- a/metal/ansible.cfg +++ b/metal/ansible.cfg @@ -3,3 +3,10 @@ host_key_checking=false stdout_callback=debug stderr_callback=debug force_color=true + +[persistent_connection] +command_timeout=600 + +[paramiko_connection] +host_key_auto_add = True +look_for_keys = False diff --git a/metal/cluster.yml b/metal/cluster.yml index ef7e3f6bfe..1aaacbe242 100644 --- a/metal/cluster.yml +++ b/metal/cluster.yml @@ -2,7 +2,6 @@ hosts: metal roles: - k3s - - automatic_upgrade - name: Create some basic config hosts: localhost diff --git a/metal/group_vars/all.yml b/metal/group_vars/all.yml index 39fb773776..8ba4fb4ab5 100644 --- a/metal/group_vars/all.yml +++ b/metal/group_vars/all.yml @@ -2,3 +2,4 @@ ansible_user: 
root ansible_ssh_private_key_file: ~/.ssh/id_ed25519 ssh_public_key: "{{ lookup('file', '~/.ssh/id_ed25519.pub') }}" dns_server: "8.8.8.8" +ansible_python_interpreter: "/usr/bin/python3" diff --git a/metal/inventories/prod.yml b/metal/inventories/prod.yml index 7f6474ab77..f4e5d6ba4c 100644 --- a/metal/inventories/prod.yml +++ b/metal/inventories/prod.yml @@ -2,9 +2,30 @@ metal: children: masters: hosts: - metal0: {ansible_host: 192.168.1.110, mac: '00:23:24:d1:f3:f0', disk: sda, network_interface: eno1} - metal1: {ansible_host: 192.168.1.111, mac: '00:23:24:d1:f4:d6', disk: sda, network_interface: eno1} - metal2: {ansible_host: 192.168.1.112, mac: '00:23:24:e7:04:60', disk: sda, network_interface: eno1} + europa: + { + ansible_host: 192.168.1.8, + mac: "1c:69:7a:ac:21:f2", + disk: nvme0n1, + network_interface: enp89s0, + } + io-1: { ansible_host: 192.168.1.198, mac: "52:54:00:76:48:d4" } + io-2: { ansible_host: 192.168.1.199, mac: "52:54:00:19:70:a2" } workers: hosts: - metal3: {ansible_host: 192.168.1.113, mac: '00:23:24:d1:f5:69', disk: sda, network_interface: eno1} + leda-1: { ansible_host: 192.168.1.191, mac: "b8:27:eb:f0:aa:b3" } + ersa-2: { ansible_host: 192.168.1.192, mac: "b8:27:eb:f7:3c:52" } + himalia-3: { ansible_host: 192.168.1.193, mac: "b8:27:eb:d9:0e:4c" } + pandi-4: { ansible_host: 192.168.1.194, mac: "b8:27:eb:de:b3:0d" } + lysithe-5: { ansible_host: 192.168.1.195, mac: "b8:27:eb:60:39:5c" } + elara-6: { ansible_host: 192.168.1.196, mac: "b8:27:eb:94:af:0a" } + dia-7: { ansible_host: 192.168.1.197, mac: "b8:27:eb:3d:8d:16" } + picm3: + hosts: + leda-1: { ansible_host: 192.168.1.191, mac: "b8:27:eb:f0:aa:b3" } + ersa-2: { ansible_host: 192.168.1.192, mac: "b8:27:eb:f7:3c:52" } + himalia-3: { ansible_host: 192.168.1.193, mac: "b8:27:eb:d9:0e:4c" } + pandi-4: { ansible_host: 192.168.1.194, mac: "b8:27:eb:de:b3:0d" } + lysithe-5: { ansible_host: 192.168.1.195, mac: "b8:27:eb:60:39:5c" } + elara-6: { ansible_host: 192.168.1.196, mac: 
"b8:27:eb:94:af:0a" } + dia-7: { ansible_host: 192.168.1.197, mac: "b8:27:eb:3d:8d:16" } diff --git a/metal/k3d-dev.yaml b/metal/k3d-dev.yaml index e1c00a5720..e4aa36ba3d 100644 --- a/metal/k3d-dev.yaml +++ b/metal/k3d-dev.yaml @@ -2,9 +2,9 @@ apiVersion: k3d.io/v1alpha4 kind: Simple metadata: name: homelab-dev -image: docker.io/rancher/k3s:v1.23.4-k3s1 -servers: 1 -agents: 0 +image: docker.io/rancher/k3s:v1.23.6-k3s1 +servers: 3 +agents: 7 options: k3s: extraArgs: diff --git a/metal/picm3.yml b/metal/picm3.yml new file mode 100644 index 0000000000..612ed63d7a --- /dev/null +++ b/metal/picm3.yml @@ -0,0 +1,102 @@ +# - name: Stage one +# hosts: +# - picm3 +# become: yes +# gather_facts: no + +# vars: +# ansible_user: pi +# ansible_ssh_pass: raspberry +# timezone: Europe/Amsterdam + +# vars_prompt: +# - name: "password" +# prompt: "Enter password for root user" +# private: yes + +# tasks: +# - name: Ping all hosts +# ping: + +# - name: Print +# debug: +# msg: "All hosts are up" + +# - name: Set hostname +# hostname: +# name: "{{ inventory_hostname }}" + +# - name: Set the timezone +# file: +# src: /usr/share/zoneinfo/{{ timezone }} +# dest: /etc/localtime +# state: link + +# - name: Set authorized_key for root user +# authorized_key: +# user: root +# state: present +# key: "{{ lookup('file', '~/.ssh/id_ed25519.pub') }}" + +# - name: Set the password for root +# ansible.builtin.user: +# name: root +# state: present +# password: "{{ password | password_hash('sha512') }}" + +# - name: Enable root login +# lineinfile: +# path: /etc/ssh/sshd_config +# regexp: "^PermitRootLogin" +# line: "PermitRootLogin yes" +# state: present + +# - name: Restart ssh daemon +# service: +# name: sshd +# state: restarted + +# - name: Disable the default user 'pi' +# user: +# name: pi +# state: present +# password_lock: true +# shell: "/sbin/nologin" + +# - name: Reboot host and wait for it to restart +# reboot: +# msg: "Reboot initiated by Ansible" +# connect_timeout: 5 +# reboot_timeout: 
600 +# pre_reboot_delay: 0 +# post_reboot_delay: 30 +# test_command: whoami + +- name: Stage two + hosts: + - picm3 + become: yes + gather_facts: no + + vars: + timezone: Europe/Amsterdam + + tasks: + - name: Install open-iscsi for longhorn + ansible.builtin.apt: + name: "open-iscsi" + state: latest + update_cache: true + + - name: Enable container features + replace: + path: /boot/cmdline.txt + regexp: '^([\w](?!.*\b{{ item }}\b).*)$' + replace: '\1 {{ item }}' + with_items: + - "cgroup_enable=cpuset" + - "cgroup_memory=1" + - "cgroup_enable=memory" + + - name: Reboot + ansible.builtin.reboot: diff --git a/metal/roles/k3s/defaults/main.yml b/metal/roles/k3s/defaults/main.yml index c408f06d37..07eb1a1a37 100644 --- a/metal/roles/k3s/defaults/main.yml +++ b/metal/roles/k3s/defaults/main.yml @@ -9,3 +9,10 @@ k3s_server_config: - traefik disable-cloud-controller: true secrets-encryption: true + node-label: + - "storage=longhorn" + +k3s_pi_config: + node-label: + - "machine=pi" + - "pi-class=cm3" diff --git a/metal/roles/k3s/tasks/main.yml b/metal/roles/k3s/tasks/main.yml index 5e53b708f3..02fd5948a5 100644 --- a/metal/roles/k3s/tasks/main.yml +++ b/metal/roles/k3s/tasks/main.yml @@ -1,14 +1,31 @@ -- name: Download k3s binary +- name: Get DEB architecture + shell: dpkg --print-architecture + register: deb_architecture + +- name: Download k3s binary (amd64) + when: deb_architecture.stdout == 'amd64' ansible.builtin.get_url: url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s - checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt + checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-{{ deb_architecture.stdout }}.txt dest: "{{ role_path }}/files/bin/k3s" mode: 0755 delegate_to: localhost run_once: true register: k3s_binary -- name: Copy k3s binary to nodes +- name: Download k3s binary (other) + when: deb_architecture.stdout != 'amd64' + 
ansible.builtin.get_url: + url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-{{ deb_architecture.stdout }} + checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-{{ deb_architecture.stdout }}.txt + dest: "{{ role_path }}/files/bin/k3s-{{ deb_architecture.stdout }}" + mode: 0755 + delegate_to: localhost + run_once: false + register: k3s_binary2 + +- name: Copy k3s binary to nodes (amd64) + when: deb_architecture.stdout == 'amd64' ansible.builtin.copy: src: bin/k3s dest: /usr/local/bin/k3s @@ -16,6 +33,15 @@ group: root mode: 0755 +- name: Copy k3s binary to nodes (other) + when: deb_architecture.stdout != 'amd64' + ansible.builtin.copy: + src: bin/k3s-{{ deb_architecture.stdout }} + dest: /usr/local/bin/k3s + owner: root + group: root + mode: 0755 + - name: Ensure config directories exist ansible.builtin.file: path: "{{ item }}" diff --git a/metal/roles/metallb_config/ippool.yaml b/metal/roles/metallb_config/ippool.yaml new file mode 100644 index 0000000000..aed992b89a --- /dev/null +++ b/metal/roles/metallb_config/ippool.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: traefik + namespace: metallb-system +spec: + addresses: + - 192.168.1.208/28 + autoAssign: false + avoidBuggyIPs: false +--- +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: default + namespace: metallb-system +spec: + addresses: + - 192.168.1.224/28 + autoAssign: true + avoidBuggyIPs: false +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: metallb-advertisement + namespace: metallb-system +spec: + ipAddressPools: + - default + - traefik diff --git a/metal/roles/pxe_server/defaults/main.yml b/metal/roles/pxe_server/defaults/main.yml index 465e497698..b9025eda5f 100644 --- a/metal/roles/pxe_server/defaults/main.yml +++ b/metal/roles/pxe_server/defaults/main.yml @@ -1,4 +1,4 @@ iso_url: 
"https://download.fedoraproject.org/pub/fedora/linux/releases/36/Server/x86_64/iso/Fedora-Server-dvd-x86_64-36-1.5.iso" iso_checksum: "sha256:5edaf708a52687b09f9810c2b6d2a3432edac1b18f4d8c908c0da6bde0379148" -timezone: Asia/Ho_Chi_Minh +timezone: Europe/Amsterdam dhcp_proxy: true diff --git a/metal/roles/pxe_server/files/data/tftboot/.gitignore b/metal/roles/pxe_server/files/data/tftboot/.gitignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/metal/roles/pxe_server/files/data/tftboot/grub.cfg b/metal/roles/pxe_server/files/data/tftboot/grub.cfg new file mode 100644 index 0000000000..e1c3943ab9 --- /dev/null +++ b/metal/roles/pxe_server/files/data/tftboot/grub.cfg @@ -0,0 +1,16 @@ +default=autoinstall +timeout=30 +timeout_style=menu +menuentry "Focal Live Installer - automated" --id=autoinstall { + echo "Loading Kernel..." + # make sure to escape the ';' or surround argument in quotes + linux /vmlinuz ip=dhcp url=http://192.168.1.105/iso/ubuntu-20.04-live-server-amd64.iso autoinstall ds="nocloud-net;s=http://192.168.1.105/;" root=/dev/ram0 cloud-config-url=/dev/null + echo "Loading Ram Disk..." + initrd /initrd +} +menuentry "Focal Live Installer" --id=install { + echo "Loading Kernel..." + linux /vmlinuz ip=dhcp url=http://192.168.1.105/iso/ubuntu-20.04-live-server-amd64.iso root=/dev/ram0 cloud-config-url=/dev/null + echo "Loading Ram Disk..." 
+ initrd /initrd +} diff --git a/metal/roles/pxe_server/files/data/tftboot/grubx64.efi b/metal/roles/pxe_server/files/data/tftboot/grubx64.efi new file mode 100755 index 0000000000..e69de29bb2 diff --git a/metal/roles/pxe_server/files/data/tftboot/meta-data b/metal/roles/pxe_server/files/data/tftboot/meta-data new file mode 100644 index 0000000000..49d3dd1020 --- /dev/null +++ b/metal/roles/pxe_server/files/data/tftboot/meta-data @@ -0,0 +1 @@ +instance-id: focal-autoinstall \ No newline at end of file diff --git a/metal/roles/pxe_server/files/data/tftboot/user-data b/metal/roles/pxe_server/files/data/tftboot/user-data new file mode 100644 index 0000000000..36d511dd0d --- /dev/null +++ b/metal/roles/pxe_server/files/data/tftboot/user-data @@ -0,0 +1,89 @@ +#cloud-config +autoinstall: + version: 1 + package_upgrade: true + packages: + - zsh + - tmux + - nmap + - curl + - wget + - git + - htop + - iperf + - fail2ban + - vim + - net-tools + apt: + geoip: true + preserve_sources_list: false + apt: + preserve_sources_list: false + primary: + - arches: [amd64, i386] + uri: http://us.archive.ubuntu.com/ubuntu + - arches: [default] + uri: http://ports.ubuntu.com/ubuntu-ports + # r00tme + identity: {hostname: { hostvars[item]['inventory_hostname'] }}, password: $6$.c38i4RIqZeF4RtR$hRu2RFep/.6DziHLnRqGOEImb15JT2i.K/F9ojBkK/79zqY30Ll2/xx6QClQfdelLe.ZjpeVYfE8xBBcyLspa/, + username: jupiter} + keyboard: {layout: us, variant: ''} + locale: en_US.UTF-8 + # interface name will probably be different + network: + network: + version: 2 + ethernets: + enp89s0: + critical: true + dhcp-identifier: mac + dhcp4: true + ssh: + allow-pw: false + authorized-keys: [ + "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILL07EL+5oPvSofRBADx1Of3q0Wj5vSa9KXtnSJzxDyn peter@mein.nl" + ] + install-server: true + # this creates an efi partition, /boot partition, and root(/) lvm volume + storage: + grub: + reorder_uefi: False + swap: + size: 0 + config: + - {ptable: gpt, path: /dev/nvme0n1, preserve: false, name: 
'', grub_device: false, + type: disk, id: disk-nvme0n1} + - {device: disk-nvme0n1, size: 536870912, wipe: superblock, flag: boot, number: 1, + preserve: false, grub_device: true, type: partition, id: partition-nvme0n11} + - {fstype: fat32, volume: partition-nvme0n11, preserve: false, type: format, id: format-2} + - {device: disk-nvme0n1, size: 1073741824, wipe: superblock, flag: linux, number: 2, + preserve: false, grub_device: false, type: partition, id: partition-nvme0n12} + - {fstype: ext4, volume: partition-nvme0n12, preserve: false, type: format, id: format-0} + - {device: disk-nvme0n1, size: -1, flag: linux, number: 3, preserve: false, + grub_device: false, type: partition, id: partition-nvme0n13} + - name: vg-0 + devices: [partition-nvme0n13] + preserve: false + type: lvm_volgroup + id: lvm-volgroup-vg-0 + - {name: lv-root, volgroup: lvm-volgroup-vg-0, size: 100%, preserve: false, + type: lvm_partition, id: lvm-partition-lv-root} + - {fstype: ext4, volume: lvm-partition-lv-root, preserve: false, type: format, + id: format-1} + - {device: format-1, path: /, type: mount, id: mount-2} + - {device: format-0, path: /boot, type: mount, id: mount-1} + - {device: format-2, path: /boot/efi, type: mount, id: mount-3} +write_files: + # override the kernel package + - path: /run/kernel-meta-package + content: | + linux-virtual + owner: root:root + permissions: "0644" + # attempt to also use an answers file by providing a file at the default path. 
It did not seem to have any effect + #- path: /subiquity_config/answers.yaml + # content: | + # InstallProgress: + # reboot: no + # owner: root:root + # permissions: "0644" \ No newline at end of file diff --git a/metal/roles/pxe_server/files/dhcp/Dockerfile b/metal/roles/pxe_server/files/dhcp/Dockerfile new file mode 100644 index 0000000000..187a9b23a7 --- /dev/null +++ b/metal/roles/pxe_server/files/dhcp/Dockerfile @@ -0,0 +1,7 @@ +FROM alpine:20220715 + +RUN apk add dhcp + +RUN touch /var/lib/dhcp/dhcpd.leases + +CMD [ "dhcpd", "-d", "-f", "-cf", "/etc/dhcp/dhcpd.conf" ] diff --git a/metal/roles/pxe_server/files/dnsmasq/Dockerfile b/metal/roles/pxe_server/files/dnsmasq/Dockerfile index 21ad9b62a2..9196141879 100644 --- a/metal/roles/pxe_server/files/dnsmasq/Dockerfile +++ b/metal/roles/pxe_server/files/dnsmasq/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.11 +FROM alpine:3.16 RUN apk --no-cache add dnsmasq diff --git a/metal/roles/pxe_server/files/http/Dockerfile b/metal/roles/pxe_server/files/http/Dockerfile index b0a1e81789..26643d7e68 100644 --- a/metal/roles/pxe_server/files/http/Dockerfile +++ b/metal/roles/pxe_server/files/http/Dockerfile @@ -1 +1 @@ -FROM nginx:1.21-alpine +FROM nginx:1.23-alpine diff --git a/metal/roles/pxe_server/files/tftp/Dockerfile b/metal/roles/pxe_server/files/tftp/Dockerfile new file mode 100644 index 0000000000..a2358bf438 --- /dev/null +++ b/metal/roles/pxe_server/files/tftp/Dockerfile @@ -0,0 +1,7 @@ +FROM alpine:20220715 + +RUN apk add busybox tftp-hpa + +ENTRYPOINT [ "/bin/sh", "-c" ] + +CMD [ "busybox syslogd -n -O /dev/stdout & in.tftpd -vvv --foreground --secure /var/lib/tftpboot" ] diff --git a/metal/roles/pxe_server/templates/kickstart.ks.j2.bak b/metal/roles/pxe_server/templates/kickstart.ks.j2.bak new file mode 100644 index 0000000000..4bf19305b1 --- /dev/null +++ b/metal/roles/pxe_server/templates/kickstart.ks.j2.bak @@ -0,0 +1,51 @@ +#version=RHEL8 + +# Do not use graphical install +text + +# Keyboard layouts +keyboard 
--xlayouts='us' +# System language +lang en_US.UTF-8 + +# Partition clearing information +clearpart --all --drives={{ hostvars[item]['disk'] }} +# Partitioning +ignoredisk --only-use={{ hostvars[item]['disk'] }} +autopart + +# Network information +network --bootproto=static --device={{ hostvars[item]['network_interface'] }} --ip={{ hostvars[item]['ansible_host'] }} --gateway={{ ansible_default_ipv4.gateway }} --nameserver={{ dns_server }} --netmask={{ ansible_default_ipv4.netmask }} --ipv6=auto --hostname={{ hostvars[item]['inventory_hostname'] }} --activate + +# Use network installation +repo --name="Minimal" --baseurl=http://{{ ansible_default_ipv4.address }}/os/Minimal +url --url="http://{{ ansible_default_ipv4.address }}/os" +# Disable Setup Agent on first boot +firstboot --disable +# Do not configure the X Window System +skipx +# Enable NTP +services --enabled="chronyd" +# System timezone +timezone {{ timezone }} --isUtc + +# Create user (locked by default) +user --groups=wheel --name=admin +# Add SSH key +sshkey --username=root "{{ ssh_public_key }}" + +# Disable SELinux +selinux --disabled + +# Disable firewall +firewall --disabled + +%packages +@^minimal-environment +iscsi-initiator-utils +%end + +# Enable iSCSI for Kubernetes storage +services --enable=iscsid + +reboot diff --git a/metal/roles/pxe_server/templates/meta-data.j2 b/metal/roles/pxe_server/templates/meta-data.j2 new file mode 100644 index 0000000000..49d3dd1020 --- /dev/null +++ b/metal/roles/pxe_server/templates/meta-data.j2 @@ -0,0 +1 @@ +instance-id: focal-autoinstall \ No newline at end of file diff --git a/metal/roles/pxe_server/templates/user-data.j2 b/metal/roles/pxe_server/templates/user-data.j2 new file mode 100644 index 0000000000..88a8f83b50 --- /dev/null +++ b/metal/roles/pxe_server/templates/user-data.j2 @@ -0,0 +1,89 @@ +#cloud-config +autoinstall: + version: 1 + package_upgrade: true + packages: + - zsh + - tmux + - nmap + - curl + - wget + - git + - htop + - iperf + - fail2ban + 
- vim + - net-tools + apt: + geoip: true + preserve_sources_list: false + apt: + preserve_sources_list: false + primary: + - arches: [amd64, i386] + uri: http://us.archive.ubuntu.com/ubuntu + - arches: [default] + uri: http://ports.ubuntu.com/ubuntu-ports + # r00tme + identity: {hostname: {{ hostvars[item]['inventory_hostname'] }}, password: $6$.c38i4RIqZeF4RtR$hRu2RFep/.6DziHLnRqGOEImb15JT2i.K/F9ojBkK/79zqY30Ll2/xx6QClQfdelLe.ZjpeVYfE8xBBcyLspa/, + username: jupiter} + keyboard: {layout: us, variant: ''} + locale: en_US.UTF-8 + # interface name will probably be different + network: + network: + version: 2 + ethernets: + {{ hostvars[item]['network_interface'] }}: + critical: true + dhcp-identifier: mac + dhcp4: true + ssh: + allow-pw: false + authorized-keys: [ + "{{ ssh_public_key }}" + ] + install-server: true + # this creates an efi partition, /boot partition, and root(/) lvm volume + storage: + grub: + reorder_uefi: False + swap: + size: 0 + config: + - {ptable: gpt, path: /dev/{{ hostvars[item]['disk'] }}, preserve: false, name: '', grub_device: false, + type: disk, id: disk-{{ hostvars[item]['disk'] }}} + - {device: disk-{{ hostvars[item]['disk'] }}, size: 536870912, wipe: superblock, flag: boot, number: 1, + preserve: false, grub_device: true, type: partition, id: partition-{{ hostvars[item]['disk'] }}1} + - {fstype: fat32, volume: partition-{{ hostvars[item]['disk'] }}1, preserve: false, type: format, id: format-2} + - {device: disk-{{ hostvars[item]['disk'] }}, size: 1073741824, wipe: superblock, flag: linux, number: 2, + preserve: false, grub_device: false, type: partition, id: partition-{{ hostvars[item]['disk'] }}2} + - {fstype: ext4, volume: partition-{{ hostvars[item]['disk'] }}2, preserve: false, type: format, id: format-0} + - {device: disk-{{ hostvars[item]['disk'] }}, size: -1, flag: linux, number: 3, preserve: false, + grub_device: false, type: partition, id: partition-{{ hostvars[item]['disk'] }}3} + - name: vg-0 + devices: [partition-{{ 
hostvars[item]['disk'] }}3] + preserve: false + type: lvm_volgroup + id: lvm-volgroup-vg-0 + - {name: lv-root, volgroup: lvm-volgroup-vg-0, size: 100%, preserve: false, + type: lvm_partition, id: lvm-partition-lv-root} + - {fstype: ext4, volume: lvm-partition-lv-root, preserve: false, type: format, + id: format-1} + - {device: format-1, path: /, type: mount, id: mount-2} + - {device: format-0, path: /boot, type: mount, id: mount-1} + - {device: format-2, path: /boot/efi, type: mount, id: mount-3} +write_files: + # override the kernel package + - path: /run/kernel-meta-package + content: | + linux-virtual + owner: root:root + permissions: "0644" + # attempt to also use an answers file by providing a file at the default path. It did not seem to have any effect + #- path: /subiquity_config/answers.yaml + # content: | + # InstallProgress: + # reboot: no + # owner: root:root + # permissions: "0644" \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 2fc550c902..d30f659674 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,9 +1,8 @@ # yaml-language-server: $schema=https://squidfunk.github.io/mkdocs-material/schema.json -site_name: Khue's Homelab -copyright: Copyright © 2020 - 2022 Khue Doan +site_name: Meins's Homelab -repo_url: https://github.com/khuedoan/homelab +repo_url: https://github.com/petermein/homelab theme: name: material diff --git a/platform/dex/.gitignore b/platform/dex/.gitignore new file mode 100644 index 0000000000..a539470003 --- /dev/null +++ b/platform/dex/.gitignore @@ -0,0 +1 @@ +config.yaml \ No newline at end of file diff --git a/platform/dex/Chart.yaml b/platform/dex/Chart.yaml index c30c4b35c2..23913af1ee 100644 --- a/platform/dex/Chart.yaml +++ b/platform/dex/Chart.yaml @@ -3,5 +3,5 @@ name: dex version: 0.0.0 dependencies: - name: dex - version: 0.8.2 + version: 0.12.1 repository: https://charts.dexidp.io diff --git a/platform/dex/values.yaml b/platform/dex/values.yaml index 45783c9ba1..159b099baf 100644 --- 
a/platform/dex/values.yaml +++ b/platform/dex/values.yaml @@ -1,6 +1,6 @@ dex: config: - issuer: https://dex.khuedoan.com + issuer: https://dex.jupiter.mein.nl storage: type: kubernetes config: @@ -14,13 +14,13 @@ dex: config: clientID: $GITEA_CLIENT_ID clientSecret: $GITEA_CLIENT_SECRET - redirectURI: https://dex.khuedoan.com/callback - baseURL: https://git.khuedoan.com + redirectURI: https://dex.jupiter.mein.nl/callback + baseURL: https://git.jupiter.mein.nl staticClients: - id: grafana-sso name: Grafana redirectURIs: - - 'https://grafana.khuedoan.com/login/generic_oauth' + - 'https://grafana.jupiter.mein.nl/login/generic_oauth' secretEnv: GRAFANA_SSO_CLIENT_SECRET envFrom: - secretRef: @@ -32,7 +32,7 @@ dex: cert-manager.io/cluster-issuer: letsencrypt-prod hajimari.io/enable: 'false' hosts: - - host: &host dex.khuedoan.com + - host: &host dex.jupiter.mein.nl paths: - path: / pathType: ImplementationSpecific diff --git a/platform/external-secrets/Chart.yaml b/platform/external-secrets/Chart.yaml index a88e328e8f..bd6fb32ed6 100644 --- a/platform/external-secrets/Chart.yaml +++ b/platform/external-secrets/Chart.yaml @@ -3,5 +3,5 @@ name: external-secrets version: 0.0.0 dependencies: - name: external-secrets - version: 0.5.6 + version: 0.6.1 repository: https://charts.external-secrets.io diff --git a/platform/gitea/files/config/config.yaml b/platform/gitea/files/config/config.yaml index b093dbff7d..f3940c7467 100644 --- a/platform/gitea/files/config/config.yaml +++ b/platform/gitea/files/config/config.yaml @@ -16,18 +16,5 @@ repositories: owner: ops private: false migrate: - source: https://github.com/khuedoan/homelab + source: https://github.com/petermein/homelab mirror: false - # TODO create webhook (use a global one?) 
- # webhooks: - # - http://gitea-webhook.tekton-pipelines:3000 - - name: blog - owner: khuedoan - migrate: - source: https://github.com/khuedoan/blog - mirror: true - - name: backstage - owner: khuedoan - migrate: - source: https://github.com/khuedoan/backstage - mirror: true diff --git a/platform/gitea/files/config/go.mod b/platform/gitea/files/config/go.mod index 4e91d99a6e..de975dfb85 100644 --- a/platform/gitea/files/config/go.mod +++ b/platform/gitea/files/config/go.mod @@ -1,6 +1,6 @@ -module git.khuedoan.com/khuedoan/homelab/gitea/config +module git.jupiter.mein.nl/khuedoan/homelab/gitea/config -go 1.18 +go 1.19 require ( code.gitea.io/sdk/gitea v0.15.1 // indirect diff --git a/platform/gitea/files/init_gitops_repo.py b/platform/gitea/files/init_gitops_repo.py new file mode 100755 index 0000000000..a5a66265a2 --- /dev/null +++ b/platform/gitea/files/init_gitops_repo.py @@ -0,0 +1,64 @@ +#!/usr/bin/python + +import json +import os +import subprocess +import sys + +subprocess.check_call([sys.executable, "-m", "pip", "install", "requests"]) + +import requests + +gitea_host = os.getenv('GITEA_HOST', "gitea-http:3000") +gitea_user = os.environ['GITEA_USER'] +gitea_pass = os.environ['GITEA_PASSWORD'] +seed_repo = "https://github.com/petermein/homelab" +org = "ops" +repo = "homelab" +gitea_url = f"http://{gitea_user}:{gitea_pass}@{gitea_host}" + +headers = { + 'Content-Type': 'application/json' +} + +data_org = json.dumps({ + 'username': org +}) + +data_repo = json.dumps({ + 'clone_addr': seed_repo, + 'uid': 1, + 'repo_owner': org, + 'repo_name': repo, + 'mirror': True +}) + +resp = requests.post( + url=f"{gitea_url}/api/v1/admin/users/{gitea_user}/orgs", + headers=headers, + data=data_org +) + +if resp.status_code == 201: + print(f"Created organization {org}") +elif resp.status_code == 422: + print(f"Organization already exists") +else: + print(f"Error creating organization {org} ({resp.status_code})") + print(resp.content) + sys.exit(1) + +resp = requests.post( + 
url=f"{gitea_url}/api/v1/repos/migrate", + headers=headers, + data=data_repo +) + +if resp.status_code == 201: + print(f"Created repository {json.loads(str(resp.content, 'utf8'))['html_url']}") +elif resp.status_code == 409: + print(f"Repository already exists") +else: + print(f"Error creating git repository ({resp.status_code})") + print(resp.content) + sys.exit(1) diff --git a/platform/gitea/values.yaml b/platform/gitea/values.yaml index 613a04a517..131012a9a5 100644 --- a/platform/gitea/values.yaml +++ b/platform/gitea/values.yaml @@ -3,11 +3,12 @@ gitea: enabled: true className: nginx annotations: + external-dns.alpha.kubernetes.io/exclude: 'true' cert-manager.io/cluster-issuer: letsencrypt-prod hajimari.io/appName: Gitea hajimari.io/icon: git hosts: - - host: &host git.khuedoan.com + - host: &host git.jupiter.mein.nl paths: - path: / pathType: Prefix @@ -21,7 +22,7 @@ gitea: config: server: LANDING_PAGE: explore - ROOT_URL: https://git.khuedoan.com + ROOT_URL: https://git.jupiter.mein.nl persistence: storageClass: longhorn postgresql: diff --git a/platform/renovate/Chart.yaml b/platform/renovate/Chart.yaml.disabled similarity index 86% rename from platform/renovate/Chart.yaml rename to platform/renovate/Chart.yaml.disabled index fe110d10d6..852d104dc1 100644 --- a/platform/renovate/Chart.yaml +++ b/platform/renovate/Chart.yaml.disabled @@ -3,5 +3,5 @@ name: renovate version: 0.0.0 dependencies: - name: renovate - version: 31.97.3 + version: 32.0.1 repository: https://docs.renovatebot.com/helm-charts diff --git a/platform/renovate/values.yaml b/platform/renovate/values.yaml index 577f76fac2..085df72807 100644 --- a/platform/renovate/values.yaml +++ b/platform/renovate/values.yaml @@ -1,11 +1,16 @@ renovate: cronjob: - schedule: '0 9 * * *' # Everyday at 09:00 + schedule: '0 9 * * *' # Everyday at 09:00 suspend: false + concurrencyPolicy: 'Forbid' + failedJobsHistoryLimit: 5 + successfulJobsHistoryLimit: 5 + jobRestartPolicy: Never + jobBackoffLimit: 5 renovate: 
config: | { "platform": "gitea", - "endpoint": "https://git.khuedoan.com/api/v1", + "endpoint": "https://git.jupiter.mein.nl/api/v1", "gitAuthor": "Renovate Bot ", "autodiscover": true } diff --git a/platform/tekton-pipelines/ingress.yaml b/platform/tekton-pipelines/ingress.yaml index c8761ea475..9aa7da9af9 100644 --- a/platform/tekton-pipelines/ingress.yaml +++ b/platform/tekton-pipelines/ingress.yaml @@ -10,7 +10,7 @@ metadata: spec: ingressClassName: nginx rules: - - host: &host tekton.khuedoan.com + - host: &host tekton.jupiter.mein.nl http: paths: - pathType: ImplementationSpecific diff --git a/platform/tekton-pipelines/kaniko.yaml b/platform/tekton-pipelines/kaniko.yaml new file mode 100644 index 0000000000..896394ce75 --- /dev/null +++ b/platform/tekton-pipelines/kaniko.yaml @@ -0,0 +1,62 @@ +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: kaniko + labels: + app.kubernetes.io/version: "0.5" + annotations: + tekton.dev/pipelines.minVersion: "0.17.0" + tekton.dev/categories: Image Build + tekton.dev/tags: image-build + tekton.dev/displayName: "Build and upload container image using Kaniko" + tekton.dev/platforms: "linux/amd64" +spec: + description: >- + This Task builds source into a container image using Google's kaniko tool. + + Kaniko doesn't depend on a Docker daemon and executes each + command within a Dockerfile completely in userspace. This enables + building container images in environments that can't easily or + securely run a Docker daemon, such as a standard Kubernetes cluster. + + params: + - name: IMAGE + description: Name (reference) of the image to build. + - name: DOCKERFILE + description: Path to the Dockerfile to build. + default: ./Dockerfile + - name: CONTEXT + description: The build context used by Kaniko. 
+ default: ./ + - name: EXTRA_ARGS + type: array + default: [] + - name: BUILDER_IMAGE + description: The image on which builds will run (default is v1.5.1) + default: gcr.io/kaniko-project/executor:v1.5.1@sha256:c6166717f7fe0b7da44908c986137ecfeab21f31ec3992f6e128fff8a94be8a5 + workspaces: + - name: source + description: Holds the context and docker file + - name: dockerconfig + description: Includes a docker `config.json` + optional: true + mountPath: /kaniko/.docker + results: + - name: IMAGE-DIGEST + description: Digest of the image just built. + + steps: + - name: build-and-push + workingDir: $(workspaces.source.path) + image: $(params.BUILDER_IMAGE) + args: + - $(params.EXTRA_ARGS[*]) + - --dockerfile=$(params.DOCKERFILE) + - --context=$(workspaces.source.path)/$(params.CONTEXT) # The user does not need to care the workspace and the source. + - --destination=$(params.IMAGE) + - --digest-file=/tekton/results/IMAGE-DIGEST + # kaniko assumes it is running as root, which means this example fails on platforms + # that default to run containers as random uid (like OpenShift). Adding this securityContext + # makes it explicit that it needs to run as root. 
+ securityContext: + runAsUser: 0 diff --git a/platform/trow/templates/ingress.yaml b/platform/trow/templates/ingress.yaml index 43eba949c2..01ac6eca5c 100644 --- a/platform/trow/templates/ingress.yaml +++ b/platform/trow/templates/ingress.yaml @@ -9,7 +9,7 @@ metadata: spec: ingressClassName: nginx rules: - - host: &host registry.khuedoan.com + - host: &host registry.jupiter.mein.nl http: paths: - pathType: ImplementationSpecific diff --git a/platform/trow/values.yaml b/platform/trow/values.yaml index 30a2c2c9ea..becb1efbf2 100644 --- a/platform/trow/values.yaml +++ b/platform/trow/values.yaml @@ -1,6 +1,6 @@ trow: trow: - domain: registry.khuedoan.com + domain: registry.jupiter.mein.nl # user: user # password: password validation: @@ -13,6 +13,7 @@ trow: disallowLocalImages: [] proxyDockerHub: false + ingress: enabled: false # TODO https://github.com/ContainerSolutions/trow/issues/282 annotations: @@ -20,7 +21,7 @@ trow: nginx.ingress.kubernetes.io/proxy-body-size: '0' hajimari.io/enable: 'false' hosts: - - host: &host registry.khuedoan.com + - host: &host registry.jupiter.mein.nl paths: - / tls: @@ -29,3 +30,6 @@ trow: - *host volumeClaim: storageClassName: longhorn + + service: + port: 8085 diff --git a/platform/vault/Chart.yaml b/platform/vault/Chart.yaml index f33c82f68c..7ecada2f42 100644 --- a/platform/vault/Chart.yaml +++ b/platform/vault/Chart.yaml @@ -3,5 +3,5 @@ name: vault version: 0.0.0 dependencies: - name: vault-operator - version: 1.15.6 + version: 1.16.1 repository: https://kubernetes-charts.banzaicloud.com diff --git a/platform/vault/files/generate-secrets/config.yaml b/platform/vault/files/generate-secrets/config.yaml index 4b9b29d714..21202fad8d 100644 --- a/platform/vault/files/generate-secrets/config.yaml +++ b/platform/vault/files/generate-secrets/config.yaml @@ -18,3 +18,21 @@ - key: password length: 32 special: true + +- path: pihole/admin + data: + - key: password + length: 32 + special: true + +- path: mariadb/admin + data: + - key: 
password + length: 32 + special: true + +- path: postgres/admin + data: + - key: password + length: 32 + special: true diff --git a/platform/vault/files/generate-secrets/go.mod b/platform/vault/files/generate-secrets/go.mod index b68821ce1b..7927c4f722 100644 --- a/platform/vault/files/generate-secrets/go.mod +++ b/platform/vault/files/generate-secrets/go.mod @@ -1,9 +1,9 @@ -module git.khuedoan.com/khuedoan/homelab/vault/init +module git.jupiter.mein.nl/khuedoan/homelab/vault/init -go 1.17 +go 1.19 require ( - github.com/hashicorp/vault/api v1.4.1 + github.com/hashicorp/vault/api v1.8.1 github.com/sethvargo/go-password v0.2.0 gopkg.in/yaml.v2 v2.4.0 ) @@ -24,21 +24,21 @@ require ( github.com/hashicorp/go-retryablehttp v0.6.6 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 // indirect - github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 // indirect - github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/hashicorp/go-version v1.2.0 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hashicorp/vault/sdk v0.4.1 // indirect + github.com/hashicorp/vault/sdk v0.6.0 // indirect github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect github.com/mattn/go-colorable v0.1.6 // indirect github.com/mattn/go-isatty v0.0.12 // indirect github.com/mitchellh/copystructure v1.0.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.0.0 // indirect - github.com/mitchellh/mapstructure v1.4.2 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.0 // indirect 
github.com/oklog/run v1.0.0 // indirect github.com/pierrec/lz4 v2.5.2+incompatible // indirect diff --git a/platform/vault/files/generate-secrets/go.sum b/platform/vault/files/generate-secrets/go.sum index ba166312fe..ad7acdee87 100644 --- a/platform/vault/files/generate-secrets/go.sum +++ b/platform/vault/files/generate-secrets/go.sum @@ -110,9 +110,13 @@ github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5r github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 h1:78ki3QBevHwYrVxnyVeaEz+7WtifHhauYF23es/0KlI= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 h1:nd0HIW15E6FG1MsnArYaHfuw9C2zgzM8LxkG5Ty/788= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= @@ -128,8 +132,20 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= 
github.com/hashicorp/vault/api v1.4.1 h1:mWLfPT0RhxBitjKr6swieCEP2v5pp/M//t70S3kMLRo= github.com/hashicorp/vault/api v1.4.1/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM= +github.com/hashicorp/vault/api v1.5.0 h1:Bp6yc2bn7CWkOrVIzFT/Qurzx528bdavF3nz590eu28= +github.com/hashicorp/vault/api v1.5.0/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM= +github.com/hashicorp/vault/api v1.7.2 h1:kawHE7s/4xwrdKbkmwQi0wYaIeUhk5ueek7ljuezCVQ= +github.com/hashicorp/vault/api v1.7.2/go.mod h1:xbfA+1AvxFseDzxxdWaL0uO99n1+tndus4GCrtouy0M= +github.com/hashicorp/vault/api v1.8.0 h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= +github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= +github.com/hashicorp/vault/api v1.8.1 h1:bMieWIe6dAlqAAPReZO/8zYtXaWUg/21umwqGZpEjCI= +github.com/hashicorp/vault/api v1.8.1/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= github.com/hashicorp/vault/sdk v0.4.1 h1:3SaHOJY687jY1fnB61PtL0cOkKItphrbLmux7T92HBo= github.com/hashicorp/vault/sdk v0.4.1/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0= +github.com/hashicorp/vault/sdk v0.5.1 h1:zly/TmNgOXCGgWIRA8GojyXzG817POtVh3uzIwzZx+8= +github.com/hashicorp/vault/sdk v0.5.1/go.mod h1:DoGraE9kKGNcVgPmTuX357Fm6WAx1Okvde8Vp3dPDoU= +github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= +github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -167,6 +183,8 @@ github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUb github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.2 
h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/platform/vault/templates/cr.yaml b/platform/vault/templates/cr.yaml index a490f334fb..0ba2cbe2a6 100644 --- a/platform/vault/templates/cr.yaml +++ b/platform/vault/templates/cr.yaml @@ -40,8 +40,8 @@ spec: # Support for pod nodeSelector rules to control which nodes can be chosen to run # the given pods - # nodeSelector: - # "node-role.kubernetes.io/your_role": "true" + nodeSelector: + "kubernetes.io/hostname": "europa" # Support for node tolerations that work together with node taints to control # the pods that can like on a node @@ -74,7 +74,7 @@ spec: spec: ingressClassName: nginx rules: - - host: &host vault.khuedoan.com + - host: &host vault.jupiter.mein.nl http: paths: - backend: @@ -260,7 +260,7 @@ kind: PersistentVolumeClaim metadata: name: vault-file spec: - storageClassName: longhorn + storageClassName: nfs accessModes: - ReadWriteOnce resources: diff --git a/platform/vault/values.yaml b/platform/vault/values.yaml index e69de29bb2..963c608618 100644 --- a/platform/vault/values.yaml +++ b/platform/vault/values.yaml @@ -0,0 +1,99 @@ +vault-operator: + # Default values for vault-operator. + # This is a YAML-formatted file. + # Declare variables to be passed into your templates. 
+ + replicaCount: 1 + + image: + bankVaultsRepository: ghcr.io/banzaicloud/bank-vaults + repository: ghcr.io/banzaicloud/vault-operator + # tag: "" + pullPolicy: IfNotPresent + imagePullSecrets: [] # global.imagePullSecrets is also supported + + service: + name: "" + type: ClusterIP + externalPort: 80 + internalPort: 8080 + annotations: {} + + nameOverride: "" + fullnameOverride: "" + + crdAnnotations: {} + + # The namespace where the operator watches for vault CRD objects, if not defined + # all namespaces are watched + watchNamespace: "" + syncPeriod: "1m" + + labels: {} + # team: banzai + + podLabels: {} + # team: banzai + + podAnnotations: {} + # team: banzai + + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 128Mi + + affinity: {} + + # # if tolerations are used inside cluster, define tolerations as well + tolerations: [] + # - effect: NoSchedule + # key: node_role + # operator: Equal + # value: custom_worker + + # # If needed, define nodeSelector for vault operator + nodeSelector: + "kubernetes.io/hostname": "europa" + + podSecurityContext: {} + + securityContext: {} + + ## Assign a PriorityClassName to pods if set + priorityClassName: "" + + terminationGracePeriodSeconds: 10 + + livenessProbe: + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + + psp: + enabled: false + vaultSA: "vault" + + serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + monitoring: + # Create a Vault Operator ServiceMonitor object + serviceMonitor: + enabled: false + additionalLabels: {} + metricRelabelings: [] + relabelings: [] diff --git a/renovate.json5 b/renovate.json similarity index 64% rename from renovate.json5 rename to renovate.json index 845bcb14f6..3ab06d97fa 100644 --- a/renovate.json5 +++ b/renovate.json @@ -1,9 +1,11 @@ -// TODO switch to YAML https://github.com/renovatebot/renovate/issues/7031 { "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ "config:base" ], + "dependencyDashboard": true, + "ignorePaths": ["disable/**", "system/datadog/**"], + "includeForks": true, "packageRules": [ { "matchPackagePatterns": [ @@ -14,7 +16,8 @@ "patch" ], "groupName": "all non-major dependencies", - "groupSlug": "all-minor-patch" + "groupSlug": "all-minor-patch", + "automerge": true } ] } diff --git a/scripts/configure b/scripts/configure index d7f6afadfd..45b261581a 100755 --- a/scripts/configure +++ b/scripts/configure @@ -14,8 +14,8 @@ import sys from rich.prompt import Confirm, Prompt default_seed_repo = "https://github.com/khuedoan/homelab" -default_domain = "khuedoan.com" -default_timezone = "Asia/Ho_Chi_Minh" +default_domain = "jupiter.mein.nl" +default_timezone = "Europe/Amsterdam" default_terraform_workspace = "khuedoan" diff --git a/scripts/hacks b/scripts/hacks index 9cbd48a029..2a1079d980 100755 --- a/scripts/hacks +++ b/scripts/hacks @@ -13,7 +13,7 @@ import sys from rich.console import Console from kubernetes import client, config -# https://git.khuedoan.com/user/settings/applications +# https://git.jupiter.mein.nl/user/settings/applications # Doing this properly inside the cluster requires: # - Kubernetes service account # - Vault Kubernetes auth diff --git a/scripts/take-screenshots b/scripts/take-screenshots index 81808ae86a..48674522e3 100755 --- a/scripts/take-screenshots +++ 
b/scripts/take-screenshots @@ -15,31 +15,31 @@ from selenium import webdriver apps = [ { 'name': 'home', - 'url': 'https://home.khuedoan.com' + 'url': 'https://home.jupiter.mein.nl' }, { 'name': 'gitea', - 'url': 'https://git.khuedoan.com/ops/homelab' + 'url': 'https://git.jupiter.mein.nl/ops/homelab' }, { 'name': 'argocd', - 'url': 'https://argocd.khuedoan.com/applications/root' + 'url': 'https://argocd.jupiter.mein.nl/applications/root' }, { 'name': 'vault', - 'url': 'https://vault.khuedoan.com/ui/vault/secrets' + 'url': 'https://vault.jupiter.mein.nl/ui/vault/secrets' }, { 'name': 'tekton', - 'url': 'https://tekton.khuedoan.com/#/namespaces/tekton-pipelines/pipelineruns/homelab?pipelineTask=external&step=plan' + 'url': 'https://tekton.jupiter.mein.nl/#/namespaces/tekton-pipelines/pipelineruns/homelab?pipelineTask=external&step=plan' }, { 'name': 'matrix', - 'url': 'https://chat.khuedoan.com/#/room/#random:matrix.khuedoan.com' + 'url': 'https://chat.jupiter.mein.nl/#/room/#random:matrix.jupiter.mein.nl' }, { 'name': 'grafana', - 'url': 'https://grafana.khuedoan.com/d/efa86fd1d0c121a26444b636a3f509a8/kubernetes-compute-resources-cluster' # wtf is this ID + 'url': 'https://grafana.jupiter.mein.nl/d/efa86fd1d0c121a26444b636a3f509a8/kubernetes-compute-resources-cluster' # wtf is this ID }, ] diff --git a/system/cert-manager/Chart.yaml b/system/cert-manager/Chart.yaml index 373bf29d14..ec927e363a 100644 --- a/system/cert-manager/Chart.yaml +++ b/system/cert-manager/Chart.yaml @@ -3,5 +3,5 @@ name: cert-manager version: 0.0.0 dependencies: - name: cert-manager - version: v1.7.1 + version: v1.10.0 repository: https://charts.jetstack.io diff --git a/system/cert-manager/values.yaml b/system/cert-manager/values.yaml index 9ee893f608..a4119703d4 100644 --- a/system/cert-manager/values.yaml +++ b/system/cert-manager/values.yaml @@ -4,3 +4,6 @@ cert-manager: enabled: true servicemonitor: enabled: true + +issuer: + email: mail@jupiter.mein.nl diff --git 
a/system/cloudflared/Chart.yaml b/system/cloudflared/Chart.yaml index b14d611d44..f6bae20473 100644 --- a/system/cloudflared/Chart.yaml +++ b/system/cloudflared/Chart.yaml @@ -3,5 +3,5 @@ name: cloudflared version: 0.0.0 dependencies: - name: cloudflared - version: 0.3.3 + version: 0.4.3 repository: https://khuedoan.github.io/charts diff --git a/system/cloudflared/values.yaml b/system/cloudflared/values.yaml index ea08b8c94f..a5d887c0b6 100644 --- a/system/cloudflared/values.yaml +++ b/system/cloudflared/values.yaml @@ -1,11 +1,17 @@ cloudflared: + image: + repository: cloudflare/cloudflared + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "2022.10.3" + credentials: existingSecret: cloudflared-credentials config: tunnel: homelab ingress: - - hostname: '*.khuedoan.com' - service: https://ingress-nginx-controller.ingress-nginx + - hostname: '*.mein.nl' + service: https://ingress-nginx-controller.ingress-nginx:443 originRequest: noTLSVerify: true - service: http_status:404 diff --git a/system/datadog/Chart.yaml b/system/datadog/Chart.yaml new file mode 100644 index 0000000000..0feccf6148 --- /dev/null +++ b/system/datadog/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +name: datadog +version: 0.0.1 +dependencies: + - name: datadog + version: v2.30.20 + repository: https://helm.datadoghq.com + + + + diff --git a/system/datadog/values.yaml b/system/datadog/values.yaml new file mode 100644 index 0000000000..7c78785901 --- /dev/null +++ b/system/datadog/values.yaml @@ -0,0 +1,1507 @@ +datadog: + ## Default values for Datadog Agent + ## See Datadog helm documentation to learn more: + ## https://docs.datadoghq.com/agent/kubernetes/helm/ + + # nameOverride -- Override name of app + nameOverride: # "" + + # fullnameOverride -- Override the full qualified app name + fullnameOverride: # "" + + # targetSystem -- Target OS for this deployment (possible values: linux, windows) + targetSystem: "linux" + + # registry -- Registry to 
use for all Agent images (default gcr.io) + ## Currently we offer Datadog Agent images on: + ## GCR - use gcr.io/datadoghq (default) + ## DockerHub - use docker.io/datadog + ## AWS - use public.ecr.aws/datadog + registry: gcr.io/datadoghq + + datadog: + # datadog.apiKey -- Your Datadog API key + # ref: https://app.datadoghq.com/account/settings#agent/kubernetes + apiKey: # + + # datadog.apiKeyExistingSecret -- Use existing Secret which stores API key instead of creating a new one. The value should be set with the `api-key` key inside the secret. + ## If set, this parameter takes precedence over "apiKey". + apiKeyExistingSecret: "datadog-secret" # + + # datadog.appKey -- Datadog APP key required to use metricsProvider + ## If you are using clusterAgent.metricsProvider.enabled = true, you must set + ## a Datadog application key for read access to your metrics. + appKey: # + + # datadog.appKeyExistingSecret -- Use existing Secret which stores APP key instead of creating a new one. The value should be set with the `app-key` key inside the secret. + ## If set, this parameter takes precedence over "appKey". + appKeyExistingSecret: #"datadog-secret" # + + ## Configure the secret backend feature https://docs.datadoghq.com/agent/guide/secrets-management + ## Examples: https://docs.datadoghq.com/agent/guide/secrets-management/#setup-examples-1 + secretBackend: + # datadog.secretBackend.command -- Configure the secret backend command, path to the secret backend binary. + ## Note: If the command value is "/readsecret_multiple_providers.sh" the agents will have permissions to get secret objects. 
+ ## Read more about "/readsecret_multiple_providers.sh": https://docs.datadoghq.com/agent/guide/secrets-management/#script-for-reading-from-multiple-secret-providers-readsecret_multiple_providerssh + command: # "/readsecret.sh" or "/readsecret_multiple_providers.sh" or any custom binary path + + # datadog.secretBackend.arguments -- Configure the secret backend command arguments (space-separated strings). + arguments: # "/etc/secret-volume" or any other custom arguments + + # datadog.secretBackend.timeout -- Configure the secret backend command timeout in seconds. + timeout: # 30 + + # datadog.securityContext -- Allows you to overwrite the default PodSecurityContext on the Daemonset or Deployment + securityContext: + runAsUser: 0 + # seLinuxOptions: + # user: "system_u" + # role: "system_r" + # type: "spc_t" + # level: "s0" + + # datadog.hostVolumeMountPropagation -- Allow to specify the `mountPropagation` value on all volumeMounts using HostPath + ## ref: https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation + hostVolumeMountPropagation: None + + # datadog.clusterName -- Set a unique cluster name to allow scoping hosts and Cluster Checks easily + ## The name must be unique and must be dot-separated tokens with the following restrictions: + ## * Lowercase letters, numbers, and hyphens only. + ## * Must start with a letter. + ## * Must end with a number or a letter. + ## * Overall length should not be higher than 80 characters. + ## Compared to the rules of GKE, dots are allowed whereas they are not allowed on GKE: + ## https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#Cluster.FIELDS.name + clusterName: "jupiter.mein.nl" # + + # datadog.site -- The site of the Datadog intake to send Agent data to + ## Set to 'datadoghq.eu' to send data to the EU site. 
+ site: datadoghq.eu # datadoghq.com + + # datadog.dd_url -- The host of the Datadog intake server to send Agent data to, only set this option if you need the Agent to send data to a custom URL + ## Overrides the site setting defined in "site". + dd_url: # https://app.datadoghq.com + + # datadog.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, off + logLevel: INFO + + # datadog.kubeStateMetricsEnabled -- If true, deploys the kube-state-metrics deployment + ## ref: https://github.com/kubernetes/kube-state-metrics/tree/kube-state-metrics-helm-chart-2.13.2/charts/kube-state-metrics + kubeStateMetricsEnabled: true + + kubeStateMetricsNetworkPolicy: + # datadog.kubeStateMetricsNetworkPolicy.create -- If true, create a NetworkPolicy for kube state metrics + create: false + + kubeStateMetricsCore: + # datadog.kubeStateMetricsCore.enabled -- Enable the kubernetes_state_core check in the Cluster Agent (Requires Cluster Agent 1.12.0+) + ## ref: https://docs.datadoghq.com/integrations/kubernetes_state_core + enabled: false + + # datadog.kubeStateMetricsCore.ignoreLegacyKSMCheck -- Disable the auto-configuration of legacy kubernetes_state check (taken into account only when datadog.kubeStateMetricsCore.enabled is true) + ## Disabling this field is not recommended as it results in enabling both checks, it can be useful though during the migration phase. + ## Migration guide: https://docs.datadoghq.com/integrations/kubernetes_state_core/?tab=helm#migration-from-kubernetes_state-to-kubernetes_state_core + ignoreLegacyKSMCheck: true + + # datadog.kubeStateMetricsCore.collectSecretMetrics -- Enable watching secret objects and collecting their corresponding metrics kubernetes_state.secret.* + ## Configuring this field will change the default kubernetes_state_core check configuration and the RBACs granted to Datadog Cluster Agent to run the kubernetes_state_core check. 
+ collectSecretMetrics: true + + # datadog.kubeStateMetricsCore.useClusterCheckRunners -- For large clusters where the Kubernetes State Metrics Check Core needs to be distributed on dedicated workers. + ## Configuring this field will create a separate deployment which will run Cluster Checks, including Kubernetes State Metrics Core. + ## ref: https://docs.datadoghq.com/agent/cluster_agent/clusterchecksrunner?tab=helm + useClusterCheckRunners: false + + # datadog.kubeStateMetricsCore.labelsAsTags -- Extra labels to collect from resources and to turn into datadog tag. + ## It has the following structure: + ## labelsAsTags: + ## : # can be pod, deployment, node, etc. + ## : # where is the kubernetes label and is the datadog tag + ## : + ## : + ## : + ## + ## Warning: the label must match the transformation done by kube-state-metrics, + ## for example tags.datadoghq.com/version becomes label_tags_datadoghq_com_version. + labelsAsTags: {} + # pod: + # app: app + # node: + # zone: zone + # team: team + + ## Manage Cluster checks feature + ## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/ + ## Autodiscovery via Kube Service annotations is automatically enabled + clusterChecks: + # datadog.clusterChecks.enabled -- Enable the Cluster Checks feature on both the cluster-agents and the daemonset + enabled: true + # datadog.clusterChecks.shareProcessNamespace -- Set the process namespace sharing on the cluster checks agent + shareProcessNamespace: false + + # datadog.nodeLabelsAsTags -- Provide a mapping of Kubernetes Node Labels to Datadog Tags + nodeLabelsAsTags: {} + # beta.kubernetes.io/instance-type: aws-instance-type + # kubernetes.io/role: kube_role + # : + + # datadog.podLabelsAsTags -- Provide a mapping of Kubernetes Labels to Datadog Tags + podLabelsAsTags: {} + # app: kube_app + # release: helm_release + # : + + # datadog.podAnnotationsAsTags -- Provide a mapping of Kubernetes Annotations to Datadog Tags + podAnnotationsAsTags: {} + # 
iam.amazonaws.com/role: kube_iamrole + # : + + # datadog.namespaceLabelsAsTags -- Provide a mapping of Kubernetes Namespace Labels to Datadog Tags + namespaceLabelsAsTags: {} + # env: environment + # : + + # datadog.tags -- List of static tags to attach to every metric, event and service check collected by this Agent. + ## Learn more about tagging: https://docs.datadoghq.com/tagging/ + tags: [] + # - ":" + # - ":" + + # datadog.checksCardinality -- Sets the tag cardinality for the checks run by the Agent. + ## https://docs.datadoghq.com/getting_started/tagging/assigning_tags/?tab=containerizedenvironments#environment-variables + checksCardinality: # low, orchestrator or high (not set by default to avoid overriding existing DD_CHECKS_TAG_CARDINALITY configurations, the default value in the Agent is low) + + # kubelet configuration + kubelet: + # datadog.kubelet.host -- Override kubelet IP + host: + valueFrom: + fieldRef: + fieldPath: status.hostIP + # datadog.kubelet.tlsVerify -- Toggle kubelet TLS verification + # @default -- true + tlsVerify: # false + # datadog.kubelet.hostCAPath -- Path (on host) where the Kubelet CA certificate is stored + # @default -- None (no mount from host) + hostCAPath: + # datadog.kubelet.agentCAPath -- Path (inside Agent containers) where the Kubelet CA certificate is stored + # @default -- /var/run/host-kubelet-ca.crt if hostCAPath else /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + agentCAPath: + + # datadog.expvarPort -- Specify the port to expose pprof and expvar to not interfere with the agentmetrics port from the cluster-agent, which defaults to 5000 + expvarPort: 6000 + + ## dogstatsd configuration + ## ref: https://docs.datadoghq.com/agent/kubernetes/dogstatsd/ + ## To emit custom metrics from your Kubernetes application, use DogStatsD. + dogstatsd: + # datadog.dogstatsd.port -- Override the Agent DogStatsD port + ## Note: Make sure your client is sending to the same UDP port. 
+ port: 8125 + + # datadog.dogstatsd.originDetection -- Enable origin detection for container tagging + ## https://docs.datadoghq.com/developers/dogstatsd/unix_socket/#using-origin-detection-for-container-tagging + originDetection: false + + # datadog.dogstatsd.tags -- List of static tags to attach to every custom metric, event and service check collected by Dogstatsd. + ## Learn more about tagging: https://docs.datadoghq.com/tagging/ + tags: [] + # - ":" + # - ":" + + # datadog.dogstatsd.tagCardinality -- Sets the tag cardinality relative to the origin detection + ## https://docs.datadoghq.com/developers/dogstatsd/unix_socket/#using-origin-detection-for-container-tagging + tagCardinality: low + + # datadog.dogstatsd.useSocketVolume -- Enable dogstatsd over Unix Domain Socket with an HostVolume + ## ref: https://docs.datadoghq.com/developers/dogstatsd/unix_socket/ + useSocketVolume: true + + # datadog.dogstatsd.socketPath -- Path to the DogStatsD socket + socketPath: /var/run/datadog/dsd.socket + + # datadog.dogstatsd.hostSocketPath -- Host path to the DogStatsD socket + hostSocketPath: /var/run/datadog/ + + # datadog.dogstatsd.useHostPort -- Sets the hostPort to the same value of the container port + ## Needs to be used for sending custom metrics. + ## The ports need to be available on all hosts. + ## + ## WARNING: Make sure that hosts using this are properly firewalled otherwise + ## metrics and traces are accepted from any host able to connect to this host. + useHostPort: true + + # datadog.dogstatsd.useHostPID -- Run the agent in the host's PID namespace + ## This is required for Dogstatsd origin detection to work. 
+ ## See https://docs.datadoghq.com/developers/dogstatsd/unix_socket/ + useHostPID: false + + # datadog.dogstatsd.nonLocalTraffic -- Enable this to make each node accept non-local statsd traffic (from outside of the pod) + ## ref: https://github.com/DataDog/docker-dd-agent#environment-variables + nonLocalTraffic: true + + # datadog.collectEvents -- Enables this to start event collection from the kubernetes API + ## ref: https://docs.datadoghq.com/agent/kubernetes/#event-collection + collectEvents: true + + # datadog.leaderElection -- Enables leader election mechanism for event collection + leaderElection: true + + # datadog.leaderLeaseDuration -- Set the lease time for leader election in second + leaderLeaseDuration: # 60 + + ## Enable logs agent and provide custom configs + logs: + # datadog.logs.enabled -- Enables this to activate Datadog Agent log collection + ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup + enabled: false + + # datadog.logs.containerCollectAll -- Enable this to allow log collection for all containers + ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup + containerCollectAll: false + + # datadog.logs.containerCollectUsingFiles -- Collect logs from files in /var/log/pods instead of using container runtime API + ## It's usually the most efficient way of collecting logs. + ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup + containerCollectUsingFiles: true + + # datadog.logs.autoMultiLineDetection -- Allows the Agent to detect common multi-line patterns automatically. 
+ ## ref: https://docs.datadoghq.com/agent/logs/advanced_log_collection/?tab=configurationfile#automatic-multi-line-aggregation + autoMultiLineDetection: false + + ## Enable apm agent and provide custom configs + apm: + # datadog.apm.socketEnabled -- Enable APM over Socket (Unix Socket or windows named pipe) + ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/ + socketEnabled: true + + # datadog.apm.portEnabled -- Enable APM over TCP communication (port 8126 by default) + ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/ + portEnabled: false + + # datadog.apm.enabled -- Enable this to enable APM and tracing, on port 8126 + # DEPRECATED. Use datadog.apm.portEnabled instead + ## ref: https://github.com/DataDog/docker-dd-agent#tracing-from-the-host + enabled: false + + # datadog.apm.port -- Override the trace Agent port + ## Note: Make sure your client is sending to the same UDP port. + port: 8126 + + # datadog.apm.useSocketVolume -- Enable APM over Unix Domain Socket + # DEPRECATED. Use datadog.apm.socketEnabled instead + ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/ + useSocketVolume: false + + # datadog.apm.socketPath -- Path to the trace-agent socket + socketPath: /var/run/datadog/apm.socket + + # datadog.apm.hostSocketPath -- Host path to the trace-agent socket + hostSocketPath: /var/run/datadog/ + + # datadog.envFrom -- Set environment variables for all Agents directly from configMaps and/or secrets + ## envFrom to pass configmaps or secrets as environment + envFrom: [] + # - configMapRef: + # name: + # - secretRef: + # name: + + # datadog.env -- Set environment variables for all Agents + ## The Datadog Agent supports many environment variables. 
+ ## ref: https://docs.datadoghq.com/agent/docker/?tab=standard#environment-variables + env: [] + # - name: + # value: + + # datadog.confd -- Provide additional check configurations (static and Autodiscovery) + ## Each key becomes a file in /conf.d + ## ref: https://github.com/DataDog/datadog-agent/tree/main/Dockerfiles/agent#optional-volumes + ## ref: https://docs.datadoghq.com/agent/autodiscovery/ + confd: + disk.yaml: |- + init_config: + + instances: + - use_mount: false + file_system_blacklist: + - autofs$ + mount_point_blacklist: + - /proc/sys/fs/binfmt_misc + - /host/proc/sys/fs/binfmt_misc + + # datadog.checksd -- Provide additional custom checks as python code + ## Each key becomes a file in /checks.d + ## ref: https://github.com/DataDog/datadog-agent/tree/main/Dockerfiles/agent#optional-volumes + checksd: {} + # service.py: |- + + # datadog.dockerSocketPath -- Path to the docker socket + dockerSocketPath: # /var/run/docker.sock + + # datadog.criSocketPath -- Path to the container runtime socket (if different from Docker) + criSocketPath: # /var/run/containerd/containerd.sock + + # Configure how the agent interact with the host's container runtime + containerRuntimeSupport: + # datadog.containerRuntimeSupport.enabled -- Set this to false to disable agent access to container runtime. + enabled: true + + ## Enable process agent and provide custom configs + processAgent: + # datadog.processAgent.enabled -- Set this to true to enable live process monitoring agent + ## Note: /etc/passwd is automatically mounted to allow username resolution. 
+ ## ref: https://docs.datadoghq.com/graphing/infrastructure/process/#kubernetes-daemonset + enabled: true + + # datadog.processAgent.processCollection -- Set this to true to enable process collection in process monitoring agent + ## Requires processAgent.enabled to be set to true to have any effect + processCollection: true + + # datadog.processAgent.stripProcessArguments -- Set this to scrub all arguments from collected processes + ## Requires processAgent.enabled and processAgent.processCollection to be set to true to have any effect + ## ref: https://docs.datadoghq.com/infrastructure/process/?tab=linuxwindows#process-arguments-scrubbing + stripProcessArguments: false + + # datadog.processAgent.processDiscovery -- Enables or disables autodiscovery of integrations + processDiscovery: false + + ## Enable systemProbe agent and provide custom configs + systemProbe: + # datadog.systemProbe.debugPort -- Specify the port to expose pprof and expvar for system-probe agent + debugPort: 0 + + # datadog.systemProbe.enableConntrack -- Enable the system-probe agent to connect to the netlink/conntrack subsystem to add NAT information to connection data + ## Ref: http://conntrack-tools.netfilter.org/ + enableConntrack: true + + # datadog.systemProbe.seccomp -- Apply an ad-hoc seccomp profile to the system-probe agent to restrict its privileges + ## Note that this will break `kubectl exec … -c system-probe -- /bin/bash` + seccomp: localhost/system-probe + + # datadog.systemProbe.seccompRoot -- Specify the seccomp profile root directory + seccompRoot: /var/lib/kubelet/seccomp + + # datadog.systemProbe.bpfDebug -- Enable logging for kernel debug + bpfDebug: false + + # datadog.systemProbe.apparmor -- Specify a apparmor profile for system-probe + apparmor: unconfined + + # datadog.systemProbe.enableTCPQueueLength -- Enable the TCP queue length eBPF-based check + enableTCPQueueLength: false + + # datadog.systemProbe.enableOOMKill -- Enable the OOM kill eBPF-based check + 
enableOOMKill: false + + # datadog.systemProbe.enableRuntimeCompiler -- Enable the runtime compiler for eBPF probes + enableRuntimeCompiler: false + + # datadog.systemProbe.mountPackageManagementDirs -- Enables mounting of specific package management directories when runtime compilation is enabled + mountPackageManagementDirs: [] + ## For runtime compilation to be able to download kernel headers, the host's package management folders + ## must be mounted to the /host directory. For example, for Ubuntu & Debian the following mount would be necessary: + # - name: "apt-config-dir" + # hostPath: /etc/apt + # mountPath: /host/etc/apt + ## If this list is empty, then all necessary package management directories (for all supported OSs) will be mounted. + + # datadog.systemProbe.osReleasePath -- Specify the path to your os-release file if you don't want to attempt mounting all `/etc/*-release` file by default + osReleasePath: + + # datadog.systemProbe.runtimeCompilationAssetDir -- Specify a directory for runtime compilation assets to live in + runtimeCompilationAssetDir: /var/tmp/datadog-agent/system-probe + + # datadog.systemProbe.collectDNSStats -- Enable DNS stat collection + collectDNSStats: true + + # datadog.systemProbe.maxTrackedConnections -- the maximum number of tracked connections + maxTrackedConnections: 131072 + + # datadog.systemProbe.conntrackMaxStateSize -- the maximum size of the userspace conntrack cache + conntrackMaxStateSize: 131072 # 2 * maxTrackedConnections by default, per https://github.com/DataDog/datadog-agent/blob/d1c5de31e1bba72dfac459aed5ff9562c3fdcc20/pkg/process/config/config.go#L229 + + # datadog.systemProbe.conntrackInitTimeout -- the time to wait for conntrack to initialize before failing + conntrackInitTimeout: 10s + + orchestratorExplorer: + # datadog.orchestratorExplorer.enabled -- Set this to false to disable the orchestrator explorer + ## This requires processAgent.enabled and clusterAgent.enabled to be set to true + ## ref: TODO - 
add doc link + enabled: true + + # datadog.orchestratorExplorer.container_scrubbing -- Enable the scrubbing of containers in the kubernetes resource YAML for sensitive information + ## The container scrubbing is taking significant resources during data collection. + ## If you notice that the cluster-agent uses too much CPU in larger clusters + ## turning this option off will improve the situation. + container_scrubbing: + enabled: true + + helmCheck: + # datadog.helmCheck.enabled -- Set this to true to enable the Helm check (Requires Agent 7.35.0+ and Cluster Agent 1.19.0+) + # This requires clusterAgent.enabled to be set to true + enabled: false + + # datadog.helmCheck.collectEvents -- Set this to true to enable event collection in the Helm Check (Requires Agent 7.36.0+ and Cluster Agent 1.20.0+) + # This requires datadog.HelmCheck.enabled to be set to true + collectEvents: false + + networkMonitoring: + # datadog.networkMonitoring.enabled -- Enable network performance monitoring + enabled: false + + ## Universal Service Monitoring is currently in private beta. + ## See https://www.datadoghq.com/blog/universal-service-monitoring-datadog/ for more details and private beta signup. 
+ serviceMonitoring: + # datadog.serviceMonitoring.enabled -- Enable Universal Service Monitoring + enabled: false + + ## Enable security agent and provide custom configs + securityAgent: + compliance: + # datadog.securityAgent.compliance.enabled -- Set to true to enable Cloud Security Posture Management (CSPM) + enabled: false + + # datadog.securityAgent.compliance.configMap -- Contains CSPM compliance benchmarks that will be used + configMap: + + # datadog.securityAgent.compliance.checkInterval -- Compliance check run interval + checkInterval: 20m + + runtime: + # datadog.securityAgent.runtime.enabled -- Set to true to enable Cloud Workload Security (CWS) + enabled: false + + policies: + # datadog.securityAgent.runtime.policies.configMap -- Contains CWS policies that will be used + configMap: + + syscallMonitor: + # datadog.securityAgent.runtime.syscallMonitor.enabled -- Set to true to enable the Syscall monitoring (recommended for troubleshooting only) + enabled: false + + ## Manage NetworkPolicy + networkPolicy: + # datadog.networkPolicy.create -- If true, create NetworkPolicy for all the components + create: false + + # datadog.networkPolicy.flavor -- Flavor of the network policy to use. + # Can be: + # * kubernetes for networking.k8s.io/v1/NetworkPolicy + # * cilium for cilium.io/v2/CiliumNetworkPolicy + flavor: kubernetes + + cilium: + # datadog.networkPolicy.cilium.dnsSelector -- Cilium selector of the DNS server entity + # @default -- kube-dns in namespace kube-system + dnsSelector: + toEndpoints: + - matchLabels: + "k8s:io.kubernetes.pod.namespace": kube-system + "k8s:k8s-app": kube-dns + + ## Configure prometheus scraping autodiscovery + ## ref: https://docs.datadoghq.com/agent/kubernetes/prometheus/ + prometheusScrape: + # datadog.prometheusScrape.enabled -- Enable autodiscovering pods and services exposing prometheus metrics. + enabled: false + # datadog.prometheusScrape.serviceEndpoints -- Enable generating dedicated checks for service endpoints. 
serviceEndpoints: false + # datadog.prometheusScrape.additionalConfigs -- Allows adding advanced openmetrics check configurations with custom discovery rules. (Requires Agent version 7.27+) + additionalConfigs: + [] + # - + # autodiscovery: + # kubernetes_annotations: + # include: + # custom_include_label: 'true' + # exclude: + # custom_exclude_label: 'true' + # kubernetes_container_names: + # - my-app + # configurations: + # - send_distribution_buckets: true + # timeout: 5 + + # datadog.ignoreAutoConfig -- List of integration to ignore auto_conf.yaml. + ## ref: https://docs.datadoghq.com/agent/faq/auto_conf/ + ignoreAutoConfig: [] + # - redisdb + # - kubernetes_state + + # datadog.containerExclude -- Exclude containers from the Agent + # Autodiscovery, as a space-separated list + ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#exclude-containers + containerExclude: # "image:datadog/agent" + + # datadog.containerInclude -- Include containers in the Agent Autodiscovery, + # as a space-separated list. If a container matches an include rule, it’s + # always included in the Autodiscovery + ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#include-containers + containerInclude: + + # datadog.containerExcludeLogs -- Exclude logs from the Agent Autodiscovery, + # as a space-separated list + containerExcludeLogs: + + # datadog.containerIncludeLogs -- Include logs in the Agent Autodiscovery, as + # a space-separated list + containerIncludeLogs: + + # datadog.containerExcludeMetrics -- Exclude metrics from the Agent + # Autodiscovery, as a space-separated list + containerExcludeMetrics: + + # datadog.containerIncludeMetrics -- Include metrics in the Agent + # Autodiscovery, as a space-separated list + containerIncludeMetrics: + + # datadog.excludePauseContainer -- Exclude pause containers from the Agent + # Autodiscovery. 
+ ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#pause-containers + excludePauseContainer: true + + ## This is the Datadog Cluster Agent implementation that handles cluster-wide + ## metrics more cleanly, separates concerns for better rbac, and implements + ## the external metrics API so you can autoscale HPAs based on datadog metrics + ## ref: https://docs.datadoghq.com/agent/kubernetes/cluster/ + clusterAgent: + # clusterAgent.enabled -- Set this to false to disable Datadog Cluster Agent + enabled: true + + # clusterAgent.shareProcessNamespace -- Set the process namespace sharing on the Datadog Cluster Agent + shareProcessNamespace: false + + ## Define the Datadog Cluster-Agent image to work with + image: + # clusterAgent.image.name -- Cluster Agent image name to use (relative to `registry`) + name: cluster-agent + + # clusterAgent.image.tag -- Cluster Agent image tag to use + tag: 1.18.0 + + # clusterAgent.image.repository -- Override default registry + image.name for Cluster Agent + repository: + + # clusterAgent.image.pullPolicy -- Cluster Agent image pullPolicy + pullPolicy: IfNotPresent + + # clusterAgent.image.pullSecrets -- Cluster Agent repository pullSecret (ex: specify docker registry credentials) + ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + pullSecrets: [] + # - name: "" + + # clusterAgent.securityContext -- Allows you to overwrite the default PodSecurityContext on the cluster-agent pods. + securityContext: {} + + containers: + clusterAgent: + # clusterAgent.containers.clusterAgent.securityContext -- Specify securityContext on the cluster-agent container. 
+ securityContext: {} + + # clusterAgent.command -- Command to run in the Cluster Agent container as entrypoint + command: [] + + # clusterAgent.token -- Cluster Agent token is a preshared key between node agents and cluster agent (autogenerated if empty, needs to be at least 32 characters a-zA-z) + token: "" + + # clusterAgent.tokenExistingSecret -- Existing secret name to use for Cluster Agent token + tokenExistingSecret: "" + + # clusterAgent.replicas -- Specify the of cluster agent replicas, if > 1 it allow the cluster agent to work in HA mode. + replicas: 1 + + ## Provide Cluster Agent Deployment pod(s) RBAC configuration + rbac: + # clusterAgent.rbac.create -- If true, create & use RBAC resources + create: true + + # clusterAgent.rbac.serviceAccountName -- Specify a preexisting ServiceAccount to use if clusterAgent.rbac.create is false + serviceAccountName: default + + # clusterAgent.rbac.serviceAccountAnnotations -- Annotations to add to the ServiceAccount if clusterAgent.rbac.create is true + serviceAccountAnnotations: {} + + ## Provide Cluster Agent pod security configuration + podSecurity: + podSecurityPolicy: + # clusterAgent.podSecurity.podSecurityPolicy.create -- If true, create a PodSecurityPolicy resource for Cluster Agent pods + create: false + securityContextConstraints: + # clusterAgent.podSecurity.securityContextConstraints.create -- If true, create a SCC resource for Cluster Agent pods + create: false + + # Enable the metricsProvider to be able to scale based on metrics in Datadog + metricsProvider: + # clusterAgent.metricsProvider.enabled -- Set this to true to enable Metrics Provider + enabled: false + + # clusterAgent.metricsProvider.wpaController -- Enable informer and controller of the watermark pod autoscaler + ## NOTE: You need to install the `WatermarkPodAutoscaler` CRD before + wpaController: false + + # clusterAgent.metricsProvider.useDatadogMetrics -- Enable usage of DatadogMetric CRD to autoscale on arbitrary Datadog queries + ## 
NOTE: It will install DatadogMetrics CRD automatically (it may conflict with previous installations) + useDatadogMetrics: false + + # clusterAgent.metricsProvider.createReaderRbac -- Create `external-metrics-reader` RBAC automatically (to allow HPA to read data from Cluster Agent) + createReaderRbac: true + + # clusterAgent.metricsProvider.aggregator -- Define the aggregator the cluster agent will use to process the metrics. The options are (avg, min, max, sum) + aggregator: avg + + ## Configuration for the service for the cluster-agent metrics server + service: + # clusterAgent.metricsProvider.service.type -- Set type of cluster-agent metrics server service + type: ClusterIP + + # clusterAgent.metricsProvider.service.port -- Set port of cluster-agent metrics server service (Kubernetes >= 1.15) + port: 8443 + + # clusterAgent.metricsProvider.endpoint -- Override the external metrics provider endpoint. If not set, the cluster-agent defaults to `datadog.site` + endpoint: # https://api.datadoghq.com + + # clusterAgent.env -- Set environment variables specific to Cluster Agent + ## The Cluster-Agent supports many additional environment variables + ## ref: https://docs.datadoghq.com/agent/cluster_agent/commands/#cluster-agent-options + env: [] + + # clusterAgent.envFrom -- Set environment variables specific to Cluster Agent from configMaps and/or secrets + ## The Cluster-Agent supports many additional environment variables + ## ref: https://docs.datadoghq.com/agent/cluster_agent/commands/#cluster-agent-options + envFrom: [] + # - configMapRef: + # name: + # - secretRef: + # name: + + admissionController: + # clusterAgent.admissionController.enabled -- Enable the admissionController to be able to inject APM/Dogstatsd config and standard tags (env, service, version) automatically into your pods + enabled: false + + # clusterAgent.admissionController.mutateUnlabelled -- Enable injecting config without having the pod label 'admission.datadoghq.com/enabled="true"' + 
mutateUnlabelled: false + + # clusterAgent.confd -- Provide additional cluster check configurations. Each key will become a file in /conf.d. + ## ref: https://docs.datadoghq.com/agent/autodiscovery/ + confd: {} + # mysql.yaml: |- + # cluster_check: true + # instances: + # - host: + # port: 3306 + # username: datadog + # password: + + # clusterAgent.advancedConfd -- Provide additional cluster check configurations. Each key is an integration containing several config files. + ## ref: https://docs.datadoghq.com/agent/autodiscovery/ + advancedConfd: {} + # mysql.d: + # 1.yaml: |- + # cluster_check: true + # instances: + # - host: + # port: 3306 + # username: datadog + # password: + # 2.yaml: |- + # cluster_check: true + # instances: + # - host: + # port: 3306 + # username: datadog + # password: + + # clusterAgent.resources -- Datadog cluster-agent resource requests and limits. + resources: {} + # requests: + # cpu: 200m + # memory: 256Mi + # limits: + # cpu: 200m + # memory: 256Mi + + # clusterAgent.priorityClassName -- Name of the priorityClass to apply to the Cluster Agent + priorityClassName: # system-cluster-critical + + # clusterAgent.nodeSelector -- Allow the Cluster Agent Deployment to be scheduled on selected nodes + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + + # clusterAgent.tolerations -- Allow the Cluster Agent Deployment to schedule on tainted nodes ((requires Kubernetes >= 1.6)) + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + tolerations: [] + + # clusterAgent.affinity -- Allow the Cluster Agent Deployment to schedule using affinity rules + ## By default, Cluster Agent Deployment Pods are forced to run on different Nodes. 
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + affinity: {} + + # clusterAgent.healthPort -- Port number to use in the Cluster Agent for the healthz endpoint + healthPort: 5556 + + # clusterAgent.livenessProbe -- Override default Cluster Agent liveness probe settings + # @default -- Every 15s / 6 KO / 1 OK + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + + # clusterAgent.readinessProbe -- Override default Cluster Agent readiness probe settings + # @default -- Every 15s / 6 KO / 1 OK + readinessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + + # clusterAgent.strategy -- Allow the Cluster Agent deployment to perform a rolling update on helm update + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + + # clusterAgent.deploymentAnnotations -- Annotations to add to the cluster-agents's deployment + deploymentAnnotations: {} + # key: "value" + + # clusterAgent.podAnnotations -- Annotations to add to the cluster-agents's pod(s) + podAnnotations: {} + # key: "value" + + # clusterAgent.useHostNetwork -- Bind ports on the hostNetwork + ## Useful for CNI networking where hostPort might + ## not be supported. The ports need to be available on all hosts. It can be + ## used for custom metrics instead of a service endpoint. + ## + ## WARNING: Make sure that hosts using this are properly firewalled otherwise + ## metrics and traces are accepted from any host able to connect to this host. 
+ # + useHostNetwork: false + + # clusterAgent.dnsConfig -- Specify dns configuration options for datadog cluster agent containers e.g ndots + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + dnsConfig: {} + # options: + # - name: ndots + # value: "1" + + # clusterAgent.volumes -- Specify additional volumes to mount in the cluster-agent container + volumes: [] + # - hostPath: + # path: + # name: + + # clusterAgent.volumeMounts -- Specify additional volumes to mount in the cluster-agent container + volumeMounts: [] + # - name: + # mountPath: + # readOnly: true + + # clusterAgent.datadog_cluster_yaml -- Specify custom contents for the datadog cluster agent config (datadog-cluster.yaml) + datadog_cluster_yaml: {} + + # clusterAgent.createPodDisruptionBudget -- Create pod disruption budget for Cluster Agent deployments + createPodDisruptionBudget: false + + networkPolicy: + # clusterAgent.networkPolicy.create -- If true, create a NetworkPolicy for the cluster agent. + # DEPRECATED. 
Use datadog.networkPolicy.create instead + create: false + + # clusterAgent.additionalLabels -- Adds labels to the Cluster Agent deployment and pods + additionalLabels: + {} + # key: "value" + + ## This section lets you configure the agents deployed by this chart to connect to a Cluster Agent + ## deployed independently + existingClusterAgent: + # existingClusterAgent.join -- set this to true if you want the agents deployed by this chart to + # connect to a Cluster Agent deployed independently + join: false + + # existingClusterAgent.tokenSecretName -- Existing secret name to use for external Cluster Agent token + tokenSecretName: # + + # existingClusterAgent.serviceName -- Existing service name to use for reaching the external Cluster Agent + serviceName: # + + # existingClusterAgent.clusterchecksEnabled -- set this to false if you don’t want the agents to run the cluster checks of the joined external cluster agent + clusterchecksEnabled: true + + agents: + # agents.enabled -- You should keep Datadog DaemonSet enabled! + ## The exceptional case could be a situation when you need to run + ## single Datadog pod per every namespace, but you do not need to + ## re-create a DaemonSet for every non-default namespace install. + ## Note: StatsD and DogStatsD work over UDP, so you may not + ## get guaranteed delivery of the metrics in Datadog-per-namespace setup! 
+ # + enabled: true + + # agents.shareProcessNamespace -- Set the process namespace sharing on the Datadog Daemonset + shareProcessNamespace: false + + ## Define the Datadog image to work with + image: + # agents.image.name -- Datadog Agent image name to use (relative to `registry`) + ## use "dogstatsd" for Standalone Datadog Agent DogStatsD 7 + name: agent + + # agents.image.tag -- Define the Agent version to use + tag: 7.34.0 + + # agents.image.tagSuffix -- Suffix to append to Agent tag + ## Ex: + ## jmx to enable jmx fetch collection + ## servercore to get Windows images based on servercore + tagSuffix: "" + + # agents.image.repository -- Override default registry + image.name for Agent + repository: + + # agents.image.doNotCheckTag -- Skip the version<>chart compatibility check + ## By default, the version passed in agents.image.tag is checked + ## for compatibility with the version of the chart. + ## This boolean permits to completely skip this check. + ## This is useful, for example, for custom tags that are not + ## respecting semantic versioning + doNotCheckTag: # false + + # agents.image.pullPolicy -- Datadog Agent image pull policy + pullPolicy: IfNotPresent + + # agents.image.pullSecrets -- Datadog Agent repository pullSecret (ex: specify docker registry credentials) + ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + pullSecrets: [] + # - name: "" + + ## Provide Daemonset RBAC configuration + rbac: + # agents.rbac.create -- If true, create & use RBAC resources + create: true + + # agents.rbac.serviceAccountName -- Specify a preexisting ServiceAccount to use if agents.rbac.create is false + serviceAccountName: default + + # agents.rbac.serviceAccountAnnotations -- Annotations to add to the ServiceAccount if agents.rbac.create is true + serviceAccountAnnotations: {} + + ## Provide Daemonset PodSecurityPolicy configuration + podSecurity: + podSecurityPolicy: + # agents.podSecurity.podSecurityPolicy.create 
-- If true, create a PodSecurityPolicy resource for Agent pods + create: false + + securityContextConstraints: + # agents.podSecurity.securityContextConstraints.create -- If true, create a SecurityContextConstraints resource for Agent pods + create: false + + # agents.podSecurity.seLinuxContext -- Provide seLinuxContext configuration for PSP/SCC + # @default -- Must run as spc_t + seLinuxContext: + rule: MustRunAs + seLinuxOptions: + user: system_u + role: system_r + type: spc_t + level: s0 + + # agents.podSecurity.privileged -- If true, Allow to run privileged containers + privileged: false + + # agents.podSecurity.capabilities -- Allowed capabilities + ## capabilities must contain all agents.containers.*.securityContext.capabilities. + capabilities: + - SYS_ADMIN + - SYS_RESOURCE + - SYS_PTRACE + - NET_ADMIN + - NET_BROADCAST + - NET_RAW + - IPC_LOCK + - CHOWN + - AUDIT_CONTROL + - AUDIT_READ + + # agents.podSecurity.allowedUnsafeSysctls -- Allowed unsafe sysctls + allowedUnsafeSysctls: [] + + # agents.podSecurity.volumes -- Allowed volumes types + volumes: + - configMap + - downwardAPI + - emptyDir + - hostPath + - secret + + # agents.podSecurity.seccompProfiles -- Allowed seccomp profiles + seccompProfiles: + - "runtime/default" + - "localhost/system-probe" + + apparmor: + # agents.podSecurity.apparmor.enabled -- If true, enable apparmor enforcement + ## see: https://kubernetes.io/docs/tutorials/clusters/apparmor/ + enabled: true + + # agents.podSecurity.apparmorProfiles -- Allowed apparmor profiles + apparmorProfiles: + - "runtime/default" + - "unconfined" + + # agents.podSecurity.defaultApparmor -- Default AppArmor profile for all containers but system-probe + defaultApparmor: runtime/default + + containers: + agent: + # agents.containers.agent.env -- Additional environment variables for the agent container + env: [] + + # agents.containers.agent.envFrom -- Set environment variables specific to agent container from configMaps and/or secrets + envFrom: [] + #
- configMapRef: + # name: + # - secretRef: + # name: + + # agents.containers.agent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off + ## If not set, fall back to the value of datadog.logLevel. + logLevel: # INFO + + # agents.containers.agent.resources -- Resource requests and limits for the agent container. + resources: {} + # requests: + # cpu: 200m + # memory: 256Mi + # limits: + # cpu: 200m + # memory: 256Mi + + # agents.containers.agent.healthPort -- Port number to use in the node agent for the healthz endpoint + healthPort: 5555 + + # agents.containers.agent.livenessProbe -- Override default agent liveness probe settings + # @default -- Every 15s / 6 KO / 1 OK + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + + # agents.containers.agent.readinessProbe -- Override default agent readiness probe settings + # @default -- Every 15s / 6 KO / 1 OK + readinessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + + # agents.containers.agent.securityContext -- Allows you to overwrite the default container SecurityContext for the agent container. + securityContext: {} + + # agents.containers.agent.ports -- Allows to specify extra ports (hostPorts for instance) for this container + ports: [] + + processAgent: + # agents.containers.processAgent.env -- Additional environment variables for the process-agent container + env: [] + + # agents.containers.processAgent.envFrom -- Set environment variables specific to process-agent from configMaps and/or secrets + envFrom: [] + # - configMapRef: + # name: + # - secretRef: + # name: + + # agents.containers.processAgent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off + ## If not set, fall back to the value of datadog.logLevel. 
+ logLevel: # INFO + + # agents.containers.processAgent.resources -- Resource requests and limits for the process-agent container + resources: {} + # requests: + # cpu: 100m + # memory: 200Mi + # limits: + # cpu: 100m + # memory: 200Mi + + # agents.containers.processAgent.securityContext -- Allows you to overwrite the default container SecurityContext for the process-agent container. + securityContext: {} + + # agents.containers.processAgent.ports -- Allows to specify extra ports (hostPorts for instance) for this container + ports: [] + + traceAgent: + # agents.containers.traceAgent.env -- Additional environment variables for the trace-agent container + env: + + # agents.containers.traceAgent.envFrom -- Set environment variables specific to trace-agent from configMaps and/or secrets + envFrom: [] + # - configMapRef: + # name: + # - secretRef: + # name: + + # agents.containers.traceAgent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off + logLevel: # INFO + + # agents.containers.traceAgent.resources -- Resource requests and limits for the trace-agent container + resources: {} + # requests: + # cpu: 100m + # memory: 200Mi + # limits: + # cpu: 100m + # memory: 200Mi + + # agents.containers.traceAgent.livenessProbe -- Override default agent liveness probe settings + # @default -- Every 15s + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 5 + + # agents.containers.traceAgent.securityContext -- Allows you to overwrite the default container SecurityContext for the trace-agent container. 
+ securityContext: {} + + # agents.containers.traceAgent.ports -- Allows to specify extra ports (hostPorts for instance) for this container + ports: [] + + systemProbe: + # agents.containers.systemProbe.env -- Additional environment variables for the system-probe container + env: [] + + # agents.containers.systemProbe.envFrom -- Set environment variables specific to system-probe from configMaps and/or secrets + envFrom: [] + # - configMapRef: + # name: + # - secretRef: + # name: + + # agents.containers.systemProbe.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off. + ## If not set, fall back to the value of datadog.logLevel. + logLevel: # INFO + + # agents.containers.systemProbe.resources -- Resource requests and limits for the system-probe container + resources: {} + # requests: + # cpu: 100m + # memory: 200Mi + # limits: + # cpu: 100m + # memory: 200Mi + + # agents.containers.systemProbe.securityContext -- Allows you to overwrite the default container SecurityContext for the system-probe container. + ## agents.podSecurity.capabilities must reflect the changed made in securityContext.capabilities. 
+ securityContext: + privileged: false + capabilities: + add: + [ + "SYS_ADMIN", + "SYS_RESOURCE", + "SYS_PTRACE", + "NET_ADMIN", + "NET_BROADCAST", + "NET_RAW", + "IPC_LOCK", + "CHOWN", + ] + + # agents.containers.systemProbe.ports -- Allows to specify extra ports (hostPorts for instance) for this container + ports: [] + + securityAgent: + # agents.containers.securityAgent.env -- Additional environment variables for the security-agent container + env: + + # agents.containers.securityAgent.envFrom -- Set environment variables specific to security-agent from configMaps and/or secrets + envFrom: [] + # - configMapRef: + # name: + # - secretRef: + # name: + + # agents.containers.securityAgent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off + ## If not set, fall back to the value of datadog.logLevel. + logLevel: # INFO + + # agents.containers.securityAgent.resources -- Resource requests and limits for the security-agent container + resources: {} + # requests: + # cpu: 100m + # memory: 200Mi + # limits: + # cpu: 100m + # memory: 200Mi + + # agents.containers.securityAgent.ports -- Allows to specify extra ports (hostPorts for instance) for this container + ports: [] + + initContainers: + # agents.containers.initContainers.resources -- Resource requests and limits for the init containers + resources: {} + # requests: + # cpu: 100m + # memory: 200Mi + # limits: + # cpu: 100m + # memory: 200Mi + + # agents.volumes -- Specify additional volumes to mount in the dd-agent container + volumes: [] + # - hostPath: + # path: + # name: + + # agents.volumeMounts -- Specify additional volumes to mount in all containers of the agent pod + volumeMounts: [] + # - name: + # mountPath: + # readOnly: true + + # agents.useHostNetwork -- Bind ports on the hostNetwork + ## Useful for CNI networking where hostPort might + ## not be supported. The ports need to be available on all hosts. 
It can be + ## used for custom metrics instead of a service endpoint. + ## + ## WARNING: Make sure that hosts using this are properly firewalled otherwise + ## metrics and traces are accepted from any host able to connect to this host. + useHostNetwork: false + + # agents.dnsConfig -- specify dns configuration options for datadog cluster agent containers e.g ndots + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + dnsConfig: {} + # options: + # - name: ndots + # value: "1" + + # agents.daemonsetAnnotations -- Annotations to add to the DaemonSet + daemonsetAnnotations: {} + # key: "value" + + # agents.podAnnotations -- Annotations to add to the DaemonSet's Pods + podAnnotations: {} + # key: "value" + + # agents.tolerations -- Allow the DaemonSet to schedule on tainted nodes (requires Kubernetes >= 1.6) + tolerations: [] + + # agents.nodeSelector -- Allow the DaemonSet to schedule on selected nodes + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + + # agents.affinity -- Allow the DaemonSet to schedule using affinity rules + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + affinity: {} + + # agents.updateStrategy -- Allow the DaemonSet to perform a rolling update on helm update + ## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/ + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: "10%" + + # agents.priorityClassCreate -- Creates a priorityClass for the Datadog Agent's Daemonset pods. + priorityClassCreate: false + + # agents.priorityClassName -- Sets PriorityClassName if defined + priorityClassName: + + # agents.priorityClassValue -- Value used to specify the priority of the scheduling of Datadog Agent's Daemonset pods. + ## The PriorityClass uses PreemptLowerPriority.
+ priorityClassValue: 1000000000 + + # agents.podLabels -- Sets podLabels if defined + # Note: These labels are also used as label selectors so they are immutable. + podLabels: {} + + # agents.additionalLabels -- Adds labels to the Agent daemonset and pods + additionalLabels: + {} + # key: "value" + + # agents.useConfigMap -- Configures a configmap to provide the agent configuration. Use this in combination with the `agents.customAgentConfig` parameter. + useConfigMap: # false + + # agents.customAgentConfig -- Specify custom contents for the datadog agent config (datadog.yaml) + ## ref: https://docs.datadoghq.com/agent/guide/agent-configuration-files/?tab=agentv6 + ## ref: https://github.com/DataDog/datadog-agent/blob/main/pkg/config/config_template.yaml + ## Note the `agents.useConfigMap` needs to be set to `true` for this parameter to be taken into account. + customAgentConfig: {} + # # Autodiscovery for Kubernetes + # listeners: + # - name: kubelet + # config_providers: + # - name: kubelet + # polling: true + # # needed to support legacy docker label config templates + # - name: docker + # polling: true + # + # # Enable java cgroup handling. Only one of those options should be enabled, + # # depending on the agent version you are using along that chart. + # + # # agent version < 6.15 + # # jmx_use_cgroup_memory_limit: true + # + # # agent version >= 6.15 + # # jmx_use_container_support: true + + networkPolicy: + # agents.networkPolicy.create -- If true, create a NetworkPolicy for the agents. + # DEPRECATED. Use datadog.networkPolicy.create instead + create: false + + localService: + # agents.localService.overrideName -- Name of the internal traffic service to target the agent running on the local node + overrideName: "" + + # agents.localService.forceLocalServiceEnabled -- Force the creation of the internal traffic policy service to target the agent running on the local node. 
+ # By default, the internal traffic service is created only on Kubernetes 1.22+ where the feature became beta and enabled by default. + # This option allows to force the creation of the internal traffic service on kubernetes 1.21 where the feature was alpha and required a feature gate to be explicitly enabled. + forceLocalServiceEnabled: false + + clusterChecksRunner: + # clusterChecksRunner.enabled -- If true, deploys agent dedicated for running the Cluster Checks instead of running in the Daemonset's agents. + ## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/ + enabled: false + + ## Define the Datadog image to work with. + image: + # clusterChecksRunner.image.name -- Datadog Agent image name to use (relative to `registry`) + name: agent + + # clusterChecksRunner.image.tag -- Define the Agent version to use + tag: 7.34.0 + + # clusterChecksRunner.image.tagSuffix -- Suffix to append to Agent tag + ## Ex: + ## jmx to enable jmx fetch collection + ## servercore to get Windows images based on servercore + tagSuffix: "" + + # clusterChecksRunner.image.repository -- Override default registry + image.name for Cluster Check Runners + repository: + + # clusterChecksRunner.image.pullPolicy -- Datadog Agent image pull policy + pullPolicy: IfNotPresent + + # clusterChecksRunner.image.pullSecrets -- Datadog Agent repository pullSecret (ex: specify docker registry credentials) + ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + pullSecrets: [] + # - name: "" + + # clusterChecksRunner.createPodDisruptionBudget -- Create the pod disruption budget to apply to the cluster checks agents + createPodDisruptionBudget: false + + # Provide Cluster Checks Deployment pods RBAC configuration + rbac: + # clusterChecksRunner.rbac.create -- If true, create & use RBAC resources + create: true + + # clusterChecksRunner.rbac.dedicated -- If true, use a dedicated RBAC resource for the cluster checks agent(s) + dedicated: false 
+ + # clusterChecksRunner.rbac.serviceAccountAnnotations -- Annotations to add to the ServiceAccount if clusterChecksRunner.rbac.dedicated is true + serviceAccountAnnotations: {} + + # clusterChecksRunner.rbac.serviceAccountName -- Specify a preexisting ServiceAccount to use if clusterChecksRunner.rbac.create is false + serviceAccountName: default + + # clusterChecksRunner.replicas -- Number of Cluster Checks Runner instances + ## If you want to deploy the clusterChecks agent in HA, keep at least clusterChecksRunner.replicas set to 2. + ## And increase the clusterChecksRunner.replicas according to the number of Cluster Checks. + replicas: 2 + + # clusterChecksRunner.resources -- Datadog clusterchecks-agent resource requests and limits. + resources: {} + # requests: + # cpu: 200m + # memory: 500Mi + # limits: + # cpu: 200m + # memory: 500Mi + + # clusterChecksRunner.affinity -- Allow the ClusterChecks Deployment to schedule using affinity rules. + ## By default, ClusterChecks Deployment Pods are preferred to run on different Nodes. 
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + affinity: {} + + # clusterChecksRunner.strategy -- Allow the ClusterChecks deployment to perform a rolling update on helm update + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + + # clusterChecksRunner.dnsConfig -- specify dns configuration options for datadog cluster agent containers e.g ndots + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + dnsConfig: {} + # options: + # - name: ndots + # value: "1" + + # clusterChecksRunner.priorityClassName -- Name of the priorityClass to apply to the Cluster checks runners + priorityClassName: # system-cluster-critical + + # clusterChecksRunner.nodeSelector -- Allow the ClusterChecks Deployment to schedule on selected nodes + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: {} + + # clusterChecksRunner.tolerations -- Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + # + tolerations: [] + + # clusterChecksRunner.healthPort -- Port number to use in the Cluster Checks Runner for the healthz endpoint + healthPort: 5557 + + # clusterChecksRunner.livenessProbe -- Override default agent liveness probe settings + # @default -- Every 15s / 6 KO / 1 OK + ## In case of issues with the probe, you can disable it with the + ## following values, to allow easier investigating: + # + # livenessProbe: + # exec: + # command: ["/bin/true"] + # + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + + # clusterChecksRunner.readinessProbe -- Override default agent readiness probe settings + # @default -- Every 15s / 6 KO / 1 OK + ## In case of issues with the probe, you can disable it with the + ## 
following values, to allow easier investigating: + # + # readinessProbe: + # exec: + # command: ["/bin/true"] + # + readinessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + + # clusterChecksRunner.deploymentAnnotations -- Annotations to add to the cluster-checks-runner's Deployment + deploymentAnnotations: {} + # key: "value" + + # clusterChecksRunner.podAnnotations -- Annotations to add to the cluster-checks-runner's pod(s) + podAnnotations: {} + # key: "value" + + # clusterChecksRunner.env -- Environment variables specific to Cluster Checks Runner + ## ref: https://github.com/DataDog/datadog-agent/tree/main/Dockerfiles/agent#environment-variables + env: [] + # - name: + # value: + + # clusterChecksRunner.envFrom -- Set environment variables specific to Cluster Checks Runner from configMaps and/or secrets + ## envFrom to pass configmaps or secrets as environment + ## ref: https://github.com/DataDog/datadog-agent/tree/main/Dockerfiles/agent#environment-variables + envFrom: [] + # - configMapRef: + # name: + # - secretRef: + # name: + + # clusterChecksRunner.volumes -- Specify additional volumes to mount in the cluster checks container + volumes: [] + # - hostPath: + # path: + # name: + + # clusterChecksRunner.volumeMounts -- Specify additional volumes to mount in the cluster checks container + volumeMounts: [] + # - name: + # mountPath: + # readOnly: true + + networkPolicy: + # clusterChecksRunner.networkPolicy.create -- If true, create a NetworkPolicy for the cluster checks runners. + # DEPRECATED. Use datadog.networkPolicy.create instead + create: false + + # clusterChecksRunner.additionalLabels -- Adds labels to the cluster checks runner deployment and pods + additionalLabels: + {} + # key: "value" + + # clusterChecksRunner.securityContext -- Allows you to overwrite the default PodSecurityContext on the clusterchecks pods. 
+ securityContext: {} + + # clusterChecksRunner.ports -- Allows to specify extra ports (hostPorts for instance) for this container + ports: [] + + datadog-crds: + crds: + # datadog-crds.crds.datadogMetrics -- Set to true to deploy the DatadogMetrics CRD + datadogMetrics: true + + kube-state-metrics: + rbac: + # kube-state-metrics.rbac.create -- If true, create & use RBAC resources + create: true + + serviceAccount: + # kube-state-metrics.serviceAccount.create -- If true, create ServiceAccount, require rbac kube-state-metrics.rbac.create true + create: true + + # kube-state-metrics.serviceAccount.name -- The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + + # kube-state-metrics.resources -- Resource requests and limits for the kube-state-metrics container. + resources: {} + # requests: + # cpu: 200m + # memory: 256Mi + # limits: + # cpu: 200m + # memory: 256Mi + + # kube-state-metrics.nodeSelector -- Node selector for KSM. KSM only supports Linux. + nodeSelector: + kubernetes.io/os: linux + + # # kube-state-metrics.image -- Override default image information for the kube-state-metrics container. + # image: + # # kube-state-metrics.repository -- Override default image registry for the kube-state-metrics container. + # repository: k8s.gcr.io/kube-state-metrics/kube-state-metrics + # # kube-state-metrics.tag -- Override default image tag for the kube-state-metrics container. + # tag: v1.9.8 + # # kube-state-metrics.pullPolicy -- Override default image pullPolicy for the kube-state-metrics container. + # pullPolicy: IfNotPresent + + providers: + gke: + # providers.gke.autopilot -- Enables Datadog Agent deployment on GKE Autopilot + autopilot: false + + eks: + ec2: + # providers.eks.ec2.useHostnameFromFile -- Use hostname from EC2 filesystem instead of fetching from metadata endpoint. 
+ ## When deploying to EC2-backed EKS infrastructure, there are situations where the + ## IMDS metadata endpoint is not accessible to containers. This flag mounts the host's + ## `/var/lib/cloud/data/instance-id` and uses that for Agent's hostname instead. + useHostnameFromFile: false diff --git a/system/descheduler/Chart.yaml b/system/descheduler/Chart.yaml index 866417de25..e2c8eb0a40 100644 --- a/system/descheduler/Chart.yaml +++ b/system/descheduler/Chart.yaml @@ -3,5 +3,5 @@ name: descheduler version: 0.0.0 dependencies: - name: descheduler - version: 0.23.2 + version: 0.25.2 repository: https://kubernetes-sigs.github.io/descheduler/ diff --git a/system/external-dns/Chart.yaml b/system/external-dns/Chart.yaml index af442b2831..09b460be76 100644 --- a/system/external-dns/Chart.yaml +++ b/system/external-dns/Chart.yaml @@ -3,5 +3,5 @@ name: external-dns version: 0.0.0 dependencies: - name: external-dns - version: 1.7.1 + version: 1.11.0 repository: https://kubernetes-sigs.github.io/external-dns/ diff --git a/system/ingress-nginx/Chart.yaml b/system/ingress-nginx/Chart.yaml index 6f4911d9e8..f847d4921e 100644 --- a/system/ingress-nginx/Chart.yaml +++ b/system/ingress-nginx/Chart.yaml @@ -3,5 +3,5 @@ name: ingress-nginx version: 0.0.0 dependencies: - name: ingress-nginx - version: 4.0.18 + version: 4.3.0 repository: https://kubernetes.github.io/ingress-nginx diff --git a/system/ingress-nginx/values.yaml b/system/ingress-nginx/values.yaml index 75be6ef188..f11ead332b 100644 --- a/system/ingress-nginx/values.yaml +++ b/system/ingress-nginx/values.yaml @@ -1,5 +1,7 @@ ingress-nginx: controller: + config: + #whitelist-source-range: 213.34.1.38/32, 37.97.253.87/32, 192.168.1.0/24, 10.42.0.0/18 admissionWebhooks: timeoutSeconds: 30 metrics: diff --git a/system/ingress-traefik/Chart.yaml b/system/ingress-traefik/Chart.yaml new file mode 100644 index 0000000000..ce2f5f1030 --- /dev/null +++ b/system/ingress-traefik/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name:
ingress-traefik +version: 0.0.0 +dependencies: + - name: traefik + version: 17.0.5 + repository: https://helm.traefik.io/traefik diff --git a/system/ingress-traefik/values.yaml b/system/ingress-traefik/values.yaml new file mode 100644 index 0000000000..3a897b3d3e --- /dev/null +++ b/system/ingress-traefik/values.yaml @@ -0,0 +1,545 @@ +traefik: + # Default values for Traefik + image: + name: traefik + # defaults to appVersion + tag: "" + pullPolicy: IfNotPresent + + # + # Configure the deployment + # + deployment: + enabled: true + # Can be either Deployment or DaemonSet + kind: Deployment + # Number of pods of the deployment (only applies when kind == Deployment) + replicas: 2 + # Number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10) + # revisionHistoryLimit: 1 + # Amount of time (in seconds) before Kubernetes will send the SIGKILL signal if Traefik does not shut down + terminationGracePeriodSeconds: 60 + # The minimum number of seconds Traefik needs to be up and running before the DaemonSet/Deployment controller considers it available + minReadySeconds: 0 + # Additional deployment annotations (e.g. for jaeger-operator sidecar injection) + annotations: {} + # Additional deployment labels (e.g. for filtering deployment by custom labels) + labels: {} + # Additional pod annotations (e.g. for mesh injection or prometheus scraping) + podAnnotations: {} + # Additional Pod labels (e.g. for filtering Pod by custom labels) + podLabels: {} + # Additional containers (e.g.
for metric offloading sidecars) + additionalContainers: + [] + # https://docs.datadoghq.com/developers/dogstatsd/unix_socket/?tab=host + # - name: socat-proxy + # image: alpine/socat:1.0.5 + # args: ["-s", "-u", "udp-recv:8125", "unix-sendto:/socket/socket"] + # volumeMounts: + # - name: dsdsocket + # mountPath: /socket + # Additional volumes available for use with initContainers and additionalContainers + additionalVolumes: + [] + # - name: dsdsocket + # hostPath: + # path: /var/run/statsd-exporter + # Additional initContainers (e.g. for setting file permission as shown below) + initContainers: + [] + # The "volume-permissions" init container is required if you run into permission issues. + # Related issue: https://github.com/traefik/traefik/issues/6972 + # - name: volume-permissions + # image: busybox:1.31.1 + # command: ["sh", "-c", "chmod -Rv 600 /data/*"] + # volumeMounts: + # - name: data + # mountPath: /data + # Use process namespace sharing + shareProcessNamespace: false + # Custom pod DNS policy. Apply if `hostNetwork: true` + # dnsPolicy: ClusterFirstWithHostNet + # Additional imagePullSecrets + imagePullSecrets: + [] + # - name: myRegistryKeySecretName + + # Pod disruption budget + podDisruptionBudget: + enabled: false + # maxUnavailable: 1 + # maxUnavailable: 33% + # minAvailable: 0 + # minAvailable: 25% + + # Use ingressClass. Ignored if Traefik version < 2.3 / kubernetes < 1.18.x + ingressClass: + # true is not unit-testable yet, pending https://github.com/rancher/helm-unittest/pull/12 + enabled: true + isDefaultClass: false + # Use to force a networking.k8s.io API Version for certain CI/CD applications. E.g. 
"v1beta1" + fallbackApiVersion: "" + + # Activate Pilot integration + pilot: + enabled: false + token: "" + # Toggle Pilot Dashboard + # dashboard: false + + # Enable experimental features + experimental: + http3: + enabled: false + plugins: + enabled: false + kubernetesGateway: + enabled: false + # certificate: + # group: "core" + # kind: "Secret" + # name: "mysecret" + # By default, Gateway would be created to the Namespace you are deploying Traefik to. + # You may create that Gateway in another namespace, setting its name below: + # namespace: default + + # Create an IngressRoute for the dashboard + ingressRoute: + dashboard: + enabled: true + # Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class) + annotations: + kubernetes.io/ingress.class: "nginx" + # Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels) + labels: {} + + rollingUpdate: + maxUnavailable: 1 + maxSurge: 1 + + # Customize liveness and readiness probe values. + readinessProbe: + failureThreshold: 1 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + + # + # Configure providers + # + providers: + kubernetesCRD: + enabled: true + allowCrossNamespace: false + allowExternalNameServices: false + allowEmptyServices: false + # ingressClass: traefik-internal + # labelSelector: environment=production,method=traefik + namespaces: + [] + # - "default" + + kubernetesIngress: + enabled: true + allowExternalNameServices: false + allowEmptyServices: false + # ingressClass: traefik-internal + # labelSelector: environment=production,method=traefik + namespaces: + [] + # - "default" + # IP used for Kubernetes Ingress endpoints + publishedService: + enabled: false + # Published Kubernetes Service to copy status from. 
Format: namespace/servicename + # By default this Traefik service + # pathOverride: "" + + # + # Add volumes to the traefik pod. The volume name will be passed to tpl. + # This can be used to mount a cert pair or a configmap that holds a config.toml file. + # After the volume has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg: + # additionalArguments: + # - "--providers.file.filename=/config/dynamic.toml" + # - "--ping" + # - "--ping.entrypoint=web" + volumes: [] + # - name: public-cert + # mountPath: "/certs" + # type: secret + # - name: '{{ printf "%s-configs" .Release.Name }}' + # mountPath: "/config" + # type: configMap + + # Additional volumeMounts to add to the Traefik container + additionalVolumeMounts: + [] + # For instance when using a logshipper for access logs + # - name: traefik-logs + # mountPath: /var/log/traefik + + # Logs + # https://docs.traefik.io/observability/logs/ + logs: + # Traefik logs concern everything that happens to Traefik itself (startup, configuration, events, shutdown, and so on). + general: + # By default, the logs use a text format (common), but you can + # also ask for the json format in the format option + # format: json + # By default, the level is set to ERROR. Alternative logging levels are DEBUG, PANIC, FATAL, ERROR, WARN, and INFO. + level: INFO + access: + # To enable access logs + enabled: true + # By default, logs are written using the Common Log Format (CLF). + # To write logs in JSON, use json in the format option. + # If the given format is unsupported, the default (CLF) is used instead. + # format: json + # To write the logs in an asynchronous fashion, specify a bufferingSize option. + # This option represents the number of log lines Traefik will keep in memory before writing + # them to the selected output. In some cases, this option can greatly help performances. 
+ # bufferingSize: 100 + # Filtering https://docs.traefik.io/observability/access-logs/#filtering + filters: + {} + # statuscodes: "200,300-302" + # retryattempts: true + # minduration: 10ms + # Fields + # https://docs.traefik.io/observability/access-logs/#limiting-the-fieldsincluding-headers + fields: + general: + defaultmode: keep + names: + {} + # Examples: + # ClientUsername: drop + headers: + defaultmode: drop + names: + {} + # Examples: + # User-Agent: redact + # Authorization: drop + # Content-Type: keep + + metrics: + # datadog: + # address: 127.0.0.1:8125 + # influxdb: + # address: localhost:8089 + # protocol: udp + prometheus: + entryPoint: metrics + # addRoutersLabels: true + # statsd: + # address: localhost:8125 + + tracing: + {} + # instana: + # enabled: true + # datadog: + # localAgentHostPort: 127.0.0.1:8126 + # debug: false + # globalTag: "" + # prioritySampling: false + + globalArguments: + - "--global.checknewversion" + - "--global.sendanonymoususage" + + # + # Configure Traefik static configuration + # Additional arguments to be passed at Traefik's binary + # All available options available on https://docs.traefik.io/reference/static-configuration/cli/ + ## Use curly braces to pass values: `helm install --set="additionalArguments={--providers.kubernetesingress.ingressclass=traefik-internal,--log.level=DEBUG}"` + additionalArguments: [] + # - "--providers.kubernetesingress.ingressclass=traefik-internal" + # - "--log.level=DEBUG" + + # Environment variables to be passed to Traefik's binary + env: [] + # - name: SOME_VAR + # value: some-var-value + # - name: SOME_VAR_FROM_CONFIG_MAP + # valueFrom: + # configMapRef: + # name: configmap-name + # key: config-key + # - name: SOME_SECRET + # valueFrom: + # secretKeyRef: + # name: secret-name + # key: secret-key + + envFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # Configure ports + ports: + # The name of this one can't be changed as it is used for the 
readiness and + # liveness probes, but you can adjust its config to your liking + traefik: + port: 9000 + # Use hostPort if set. + # hostPort: 9000 + # + # Use hostIP if set. If not set, Kubernetes will default to 0.0.0.0, which + # means it's listening on all your interfaces and all your IPs. You may want + # to set this value if you need traefik to listen on specific interface + # only. + # hostIP: 192.168.100.10 + + # Override the liveness/readiness port. This is useful to integrate traefik + # with an external Load Balancer that performs healthchecks. + # healthchecksPort: 9000 + + # Defines whether the port is exposed if service.type is LoadBalancer or + # NodePort. + # + # You SHOULD NOT expose the traefik port on production deployments. + # If you want to access it from outside of your cluster, + # use `kubectl port-forward` or create a secure ingress + expose: false + # The exposed port for this service + exposedPort: 9000 + # The port protocol (TCP/UDP) + protocol: TCP + web: + port: 8000 + # hostPort: 8000 + expose: true + exposedPort: 80 + # The port protocol (TCP/UDP) + protocol: TCP + # Use nodeport if set. This is useful if you have configured Traefik in a + # LoadBalancer + # nodePort: 32080 + # Port Redirections + # Added in 2.2, you can make permanent redirects via entrypoints. + # https://docs.traefik.io/routing/entrypoints/#redirection + # redirectTo: websecure + websecure: + port: 8443 + # hostPort: 8443 + expose: true + exposedPort: 443 + # The port protocol (TCP/UDP) + protocol: TCP + # nodePort: 32443 + # Enable HTTP/3. + # Requires enabling experimental http3 feature and tls. + # Note that you cannot have a UDP entrypoint with the same port. 
+ # http3: true + # Set TLS at the entrypoint + # https://doc.traefik.io/traefik/routing/entrypoints/#tls + tls: + enabled: true + # this is the name of a TLSOption definition + options: "" + certResolver: "" + domains: [] + # - main: example.com + # sans: + # - foo.example.com + # - bar.example.com + metrics: + port: 9100 + # hostPort: 9100 + # Defines whether the port is exposed if service.type is LoadBalancer or + # NodePort. + # + # You may not want to expose the metrics port on production deployments. + # If you want to access it from outside of your cluster, + # use `kubectl port-forward` or create a secure ingress + expose: false + # The exposed port for this service + exposedPort: 9100 + # The port protocol (TCP/UDP) + protocol: TCP + + # TLS Options are created as TLSOption CRDs + # https://doc.traefik.io/traefik/https/tls/#tls-options + # Example: + # tlsOptions: + # default: + # sniStrict: true + # preferServerCipherSuites: true + # foobar: + # curvePreferences: + # - CurveP521 + # - CurveP384 + tlsOptions: {} + + # Options for the main traefik service, where the entrypoints traffic comes + # from. + service: + enabled: true + type: LoadBalancer + # Additional annotations applied to both TCP and UDP services (e.g. for cloud provider specific config) + annotations: + metallb.universe.tf/address-pool: traefik + # Additional annotations for TCP service only + annotationsTCP: + metallb.universe.tf/address-pool: traefik + # Additional annotations for UDP service only + annotationsUDP: + metallb.universe.tf/address-pool: traefik + + # Additional service labels (e.g. for filtering Service by custom labels) + labels: {} + # Additional entries here will be added to the service spec. + # Cannot contain type, selector or ports entries. 
+ spec: + {} + # externalTrafficPolicy: Cluster + # loadBalancerIP: "1.2.3.4" + # clusterIP: "2.3.4.5" + loadBalancerSourceRanges: + - 192.168.0.1/32 + # - 172.16.0.0/16 + externalIPs: + [] + # - 1.2.3.4 + # One of SingleStack, PreferDualStack, or RequireDualStack. + # ipFamilyPolicy: SingleStack + # List of IP families (e.g. IPv4 and/or IPv6). + # ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + # ipFamilies: + # - IPv4 + # - IPv6 + + ## Create HorizontalPodAutoscaler object. + ## + autoscaling: + enabled: false + # minReplicas: 1 + # maxReplicas: 10 + # metrics: + # - type: Resource + # resource: + # name: cpu + # targetAverageUtilization: 60 + # - type: Resource + # resource: + # name: memory + # targetAverageUtilization: 60 + + # Enable persistence using Persistent Volume Claims + # ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + # After the pvc has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg: + # additionalArguments: + # - "--certificatesresolvers.le.acme.storage=/data/acme.json" + # It will persist TLS certificates. + persistence: + enabled: false + name: data + # existingClaim: "" + accessMode: ReadWriteOnce + size: 128Mi + # storageClass: "" + path: /data + annotations: {} + # subPath: "" # only mount a subpath of the Volume into the pod + + certResolvers: {} + # letsencrypt: + # # for challenge options cf. https://doc.traefik.io/traefik/https/acme/ + # email: email@example.com + # dnsChallenge: + # # also add the provider's required configuration under env + # # or expand then from secrets/configmaps with envfrom + # # cf. https://doc.traefik.io/traefik/https/acme/#providers + # provider: digitalocean + # # add futher options for the dns challenge as needed + # # cf. 
https://doc.traefik.io/traefik/https/acme/#dnschallenge + # delayBeforeCheck: 30 + # resolvers: + # - 1.1.1.1 + # - 8.8.8.8 + # tlsChallenge: true + # httpChallenge: + # entryPoint: "web" + # # match the path to persistence + # storage: /data/acme.json + + # If hostNetwork is true, runs traefik in the host network namespace + # To prevent unschedulabel pods due to port collisions, if hostNetwork=true + # and replicas>1, a pod anti-affinity is recommended and will be set if the + # affinity is left as default. + hostNetwork: false + + # Whether Role Based Access Control objects like roles and rolebindings should be created + rbac: + enabled: true + + # If set to false, installs ClusterRole and ClusterRoleBinding so Traefik can be used across namespaces. + # If set to true, installs namespace-specific Role and RoleBinding and requires provider configuration be set to that same namespace + namespaced: false + + # Enable to create a PodSecurityPolicy and assign it to the Service Account via RoleBinding or ClusterRoleBinding + podSecurityPolicy: + enabled: false + + # The service account the pods will use to interact with the Kubernetes API + serviceAccount: + # If set, an existing service account is used + # If not set, a service account is created automatically using the fullname template + name: "" + + # Additional serviceAccount annotations (e.g. for oidc authentication) + serviceAccountAnnotations: {} + + resources: + {} + # requests: + # cpu: "100m" + # memory: "50Mi" + # limits: + # cpu: "300m" + # memory: "150Mi" + affinity: {} + # # This example pod anti-affinity forces the scheduler to put traefik pods + # # on nodes where no other traefik pods are scheduled. + # # It should be used when hostNetwork: true to prevent port conflicts + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - {{ template "traefik.name" . 
}} + # topologyKey: kubernetes.io/hostname + nodeSelector: {} + tolerations: [] + + # Pods can have priority. + # Priority indicates the importance of a Pod relative to other Pods. + priorityClassName: "" + + # Set the container security context + # To run the container with ports below 1024 this will need to be adjust to run as root + securityContext: + capabilities: + drop: [ALL] + readOnlyRootFilesystem: true + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 + + podSecurityContext: + fsGroup: 65532 diff --git a/system/k8up-operator/schedule.yaml b/system/k8up-operator/schedule.yaml index 244c32e1cc..042842d6a7 100644 --- a/system/k8up-operator/schedule.yaml +++ b/system/k8up-operator/schedule.yaml @@ -6,7 +6,7 @@ spec: backend: s3: endpoint: s3.us-west-002.backblazeb2.com - bucket: khuedoan-homelab-backup + bucket: infratron-homelab accessKeyIDSecretRef: name: backblaze-credentials key: application-key-id @@ -17,7 +17,7 @@ spec: name: backblaze-credentials key: repo-password backup: - schedule: '0 8 * * *' + schedule: '*/5 * * * *' failedJobsHistoryLimit: 2 successfulJobsHistoryLimit: 2 # TODO diff --git a/system/kured/Chart.yaml b/system/kured/Chart.yaml deleted file mode 100644 index ed52c94f1e..0000000000 --- a/system/kured/Chart.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v2 -name: kured -version: 0.0.0 -dependencies: - - name: kured - version: 2.12.0 - repository: https://weaveworks.github.io/kured diff --git a/system/loki/Chart.yaml b/system/loki/Chart.yaml index e532189833..7fc43aa20d 100644 --- a/system/loki/Chart.yaml +++ b/system/loki/Chart.yaml @@ -3,5 +3,5 @@ name: loki version: 0.0.0 dependencies: - name: loki-stack - version: 2.6.1 - repository: https://grafana.github.io/helm-charts + version: 2.8.3 + repository: https://grafana.github.io/helm-charts \ No newline at end of file diff --git a/system/loki/values.yaml b/system/loki/values.yaml index fe3126cb1e..1edd09bc4d 100644 --- a/system/loki/values.yaml +++ b/system/loki/values.yaml 
@@ -1,4 +1,4 @@ loki-stack: loki: serviceMonitor: - enabled: true + enabled: true \ No newline at end of file diff --git a/system/longhorn-system/Chart.yaml b/system/longhorn-system/Chart.yaml index d29698e5fa..fe551d7974 100644 --- a/system/longhorn-system/Chart.yaml +++ b/system/longhorn-system/Chart.yaml @@ -3,5 +3,5 @@ name: longhorn version: 0.0.0 dependencies: - name: longhorn - version: 1.3.0 + version: 1.3.2 repository: https://charts.longhorn.io diff --git a/system/longhorn-system/values.yaml b/system/longhorn-system/values.yaml index ec3ba4eef1..b15dc3e73b 100644 --- a/system/longhorn-system/values.yaml +++ b/system/longhorn-system/values.yaml @@ -1,6 +1,29 @@ longhorn: defaultSettings: nodeDownPodDeletionPolicy: delete-both-statefulset-and-deployment-pod + systemManagedComponentsNodeSelector: "storage:longhorn" persistence: # If you have three or more nodes for storage, use 3; otherwise use 2 - defaultClassReplicaCount: 2 # TODO run DR test to see if we actually need 3 + defaultClassReplicaCount: 3 # TODO run DR test to see if we actually need 3 + guaranteedEngineManagerCPU: 15 + guaranteedReplicaManagerCPU: 15 + replicaSoftAntiAffinity: true + + longhornManager: + nodeSelector: + storage: "longhorn" + longhornDriver: + nodeSelector: + storage: "longhorn" + longhornUI: + nodeSelector: + storage: "longhorn" + + ingress: + enabled: true + ingressClassName: nginx + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + host: longhorn.jupiter.mein.nl + tls: true + tlsSecret: longhorn-tls-certificate diff --git a/system/metallb-system/Chart.yaml b/system/metallb-system/Chart.yaml index c3b22b2a96..c66b89bda2 100644 --- a/system/metallb-system/Chart.yaml +++ b/system/metallb-system/Chart.yaml @@ -3,5 +3,5 @@ name: metallb version: 0.0.0 dependencies: - name: metallb - version: 0.12.1 - repository: https://metallb.github.io/metallb + version: 4.1.10 + repository: https://charts.bitnami.com/bitnami diff --git a/system/metallb-system/values.yaml 
b/system/metallb-system/values.yaml index 468a8567ff..b3054866af 100644 --- a/system/metallb-system/values.yaml +++ b/system/metallb-system/values.yaml @@ -1,2 +1,7 @@ metallb: existingConfigMap: metallb-config + + speaker: + image: + repository: metallb/speaker + tag: main diff --git a/system/monitoring-system/Chart.yaml b/system/monitoring-system/Chart.yaml index bd6007f99b..ecee0c473b 100644 --- a/system/monitoring-system/Chart.yaml +++ b/system/monitoring-system/Chart.yaml @@ -3,5 +3,5 @@ name: kube-prometheus-stack version: 0.0.0 dependencies: - name: kube-prometheus-stack - version: 19.3.0 # TODO upgrade https://github.com/prometheus-community/helm-charts/issues/1500 + version: 41.7.0 # TODO upgrade https://github.com/prometheus-community/helm-charts/issues/1500 repository: https://prometheus-community.github.io/helm-charts diff --git a/system/monitoring-system/values.yaml b/system/monitoring-system/values.yaml index b0aed36a97..68840a3b2c 100644 --- a/system/monitoring-system/values.yaml +++ b/system/monitoring-system/values.yaml @@ -9,7 +9,7 @@ kube-prometheus-stack: hajimari.io/appName: Grafana hajimari.io/icon: chart-bar hosts: - - &host grafana.khuedoan.com + - &host grafana.jupiter.mein.nl tls: - secretName: grafana-general-tls hosts: @@ -21,7 +21,7 @@ kube-prometheus-stack: envFromSecret: grafana-secrets grafana.ini: server: - root_url: https://grafana.khuedoan.com + root_url: https://grafana.jupiter.mein.nl auth.generic_oauth: enabled: true allow_sign_up: true @@ -29,9 +29,9 @@ kube-prometheus-stack: client_id: grafana-sso client_secret: $__env{GRAFANA_SSO_CLIENT_SECRET} scopes: openid profile email groups - auth_url: https://dex.khuedoan.com/auth - token_url: https://dex.khuedoan.com/token - api_url: https://dex.khuedoan.com/userinfo + auth_url: https://dex.jupiter.mein.nl/auth + token_url: https://dex.jupiter.mein.nl/token + api_url: https://dex.jupiter.mein.nl/userinfo prometheus: prometheusSpec: diff --git a/system/nfs-system/Chart.yaml 
b/system/nfs-system/Chart.yaml new file mode 100644 index 0000000000..49a901d251 --- /dev/null +++ b/system/nfs-system/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: nfs-subdir-external-provisioner +version: 0.0.0 +dependencies: + - name: nfs-subdir-external-provisioner + version: 4.0.2 + repository: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner diff --git a/system/nfs-system/values.yaml b/system/nfs-system/values.yaml new file mode 100644 index 0000000000..bcd20aa91a --- /dev/null +++ b/system/nfs-system/values.yaml @@ -0,0 +1,112 @@ +nfs-subdir-external-provisioner: + replicaCount: 1 + strategyType: Recreate + + image: + repository: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner + tag: v4.0.2 + pullPolicy: IfNotPresent + imagePullSecrets: [] + + nfs: + server: 192.168.1.8 + path: /pool0/nfs + mountOptions: + volumeName: nfs-subdir-external-provisioner-root + # Reclaim policy for the main nfs volume + reclaimPolicy: Retain + + # For creating the StorageClass automatically: + storageClass: + create: true + + # Set a provisioner name. If unset, a name will be generated. + # provisionerName: + + # Set StorageClass as the default StorageClass + # Ignored if storageClass.create is false + defaultClass: false + + # Set a StorageClass name + # Ignored if storageClass.create is false + name: nfs + + # Allow volume to be expanded dynamically + allowVolumeExpansion: true + + # Method used to reclaim an obsoleted volume + reclaimPolicy: Delete + + # When set to false your PVs will not be archived by the provisioner upon deletion of the PVC. + archiveOnDelete: true + + # If it exists and has 'delete' value, delete the directory. If it exists and has 'retain' value, save the directory. + # Overrides archiveOnDelete. + # Ignored if value not set. + onDelete: + + # Specifies a template for creating a directory path via PVC metadata's such as labels, annotations, name or namespace. + # Ignored if value not set. 
+ pathPattern: + + # Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany + accessModes: ReadWriteOnce + + # Set volume bindinng mode - Immediate or WaitForFirstConsumer + volumeBindingMode: Immediate + + # Storage class annotations + annotations: {} + + leaderElection: + # When set to false leader election will be disabled + enabled: true + + ## For RBAC support: + rbac: + # Specifies whether RBAC resources should be created + create: true + + # If true, create & use Pod Security Policy resources + # https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + podSecurityPolicy: + enabled: false + + # Deployment pod annotations + podAnnotations: {} + + ## Set pod priorityClassName + # priorityClassName: "" + + podSecurityContext: {} + + securityContext: {} + + serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + + # Annotations to add to the service account + annotations: {} + + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + + resources: + {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + # Additional labels for any resource created + labels: {} diff --git a/system/system-upgrade/k3s/agent.yaml b/system/system-upgrade/k3s/agent.yaml deleted file mode 100644 index bd27216012..0000000000 --- a/system/system-upgrade/k3s/agent.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: upgrade.cattle.io/v1 -kind: Plan -metadata: - name: k3s-agent - namespace: system-upgrade - labels: - k3s-upgrade: agent -spec: - nodeSelector: - matchExpressions: - - key: node-role.kubernetes.io/control-plane - operator: DoesNotExist - serviceAccountName: system-upgrade - prepare: - image: rancher/k3s-upgrade - args: - - prepare - - k3s-server - concurrency: 1 - cordon: true - drain: - force: true - skipWaitForDeleteTimeout: 300 # Honor 
PodDisruptionBudgets - upgrade: - image: rancher/k3s-upgrade - channel: https://update.k3s.io/v1-release/channels/v1.23 diff --git a/system/system-upgrade/k3s/kustomization.yaml b/system/system-upgrade/k3s/kustomization.yaml deleted file mode 100644 index 688a489e0a..0000000000 --- a/system/system-upgrade/k3s/kustomization.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: - - server.yaml - - agent.yaml - -commonAnnotations: - # TODO https://github.com/rancher/system-upgrade-controller/issues/172 - argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true diff --git a/system/system-upgrade/k3s/server.yaml b/system/system-upgrade/k3s/server.yaml deleted file mode 100644 index f8d6d00b5c..0000000000 --- a/system/system-upgrade/k3s/server.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: upgrade.cattle.io/v1 -kind: Plan -metadata: - name: k3s-server - namespace: system-upgrade - labels: - k3s-upgrade: server -spec: - nodeSelector: - matchExpressions: - - key: node-role.kubernetes.io/control-plane - operator: Exists - serviceAccountName: system-upgrade - concurrency: 1 - cordon: true - drain: - force: true - skipWaitForDeleteTimeout: 300 # Honor PodDisruptionBudgets - upgrade: - image: rancher/k3s-upgrade - channel: https://update.k3s.io/v1-release/channels/v1.23 diff --git a/system/system-upgrade/kustomization.yaml b/system/system-upgrade/kustomization.yaml deleted file mode 100644 index 4bb6075308..0000000000 --- a/system/system-upgrade/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: - - https://github.com/rancher/system-upgrade-controller/releases/download/v0.8.1/system-upgrade-controller.yaml - - k3s diff --git a/test/go.mod b/test/go.mod index e31abaf012..78f8331fc8 100644 --- a/test/go.mod +++ b/test/go.mod @@ -1,8 +1,8 @@ -module git.khuedoan.com/ops/homelab +module git.jupiter.mein.nl/ops/homelab -go 1.18 +go 
1.19 -require github.com/gruntwork-io/terratest v0.40.18 +require github.com/gruntwork-io/terratest v0.40.24 require ( cloud.google.com/go v0.83.0 // indirect diff --git a/test/go.sum b/test/go.sum index deca07a877..40ab471a31 100644 --- a/test/go.sum +++ b/test/go.sum @@ -214,6 +214,8 @@ github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3i github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gruntwork-io/go-commons v0.8.0 h1:k/yypwrPqSeYHevLlEDmvmgQzcyTwrlZGRaxEM6G0ro= github.com/gruntwork-io/go-commons v0.8.0/go.mod h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78= +github.com/gruntwork-io/terratest v0.40.24 h1:vxVi714rX+joBLrxBVnbMzSYQ2srIfXzjqvImHl6Rtk= +github.com/gruntwork-io/terratest v0.40.24/go.mod h1:JGeIGgLbxbG9/Oqm06z6YXVr76CfomdmLkV564qov+8= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= diff --git a/upstream
b/upstream new file mode 160000 index 0000000000..686fea5764 --- /dev/null +++ b/upstream @@ -0,0 +1 @@ +Subproject commit 686fea57645ab65df8b71c54814dbc01827d48a6