From 77dc1415513e8e5b2d8211b0df968fa4297ab3e6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Petr=20=C5=98eho=C5=99?=
Date: Thu, 25 Apr 2024 14:13:10 +0200
Subject: [PATCH] feat: add rook-ceph

---
 .../main/apps/storage/kustomization.yaml      |   3 +-
 kubernetes/main/apps/storage/openebs/ks.yaml  |   4 +-
 .../storage/rook-ceph/app/helmrelease.yaml    | 133 ++++++++++
 .../storage/rook-ceph/app/kustomization.yaml  |   6 +
 .../cluster/cephobjectstoreuser.yaml          |  10 +
 .../rook-ceph/cluster/helmrelease.yaml        | 228 ++++++++++++++++++
 .../rook-ceph/cluster/kustomization.yaml      |   6 +
 .../main/apps/storage/rook-ceph/ks.yaml       |  46 ++++
 .../apps/storage/snapshot-controller/ks.yaml  |   4 +-
 .../flux/repositories/helm/kustomization.yaml |   1 +
 .../flux/repositories/helm/rook-ceph.yaml     |   9 +
 11 files changed, 445 insertions(+), 5 deletions(-)
 create mode 100644 kubernetes/main/apps/storage/rook-ceph/app/helmrelease.yaml
 create mode 100644 kubernetes/main/apps/storage/rook-ceph/app/kustomization.yaml
 create mode 100644 kubernetes/main/apps/storage/rook-ceph/cluster/cephobjectstoreuser.yaml
 create mode 100644 kubernetes/main/apps/storage/rook-ceph/cluster/helmrelease.yaml
 create mode 100644 kubernetes/main/apps/storage/rook-ceph/cluster/kustomization.yaml
 create mode 100644 kubernetes/main/apps/storage/rook-ceph/ks.yaml
 create mode 100644 kubernetes/main/flux/repositories/helm/rook-ceph.yaml

diff --git a/kubernetes/main/apps/storage/kustomization.yaml b/kubernetes/main/apps/storage/kustomization.yaml
index cb494746..58daad8c 100644
--- a/kubernetes/main/apps/storage/kustomization.yaml
+++ b/kubernetes/main/apps/storage/kustomization.yaml
@@ -3,5 +3,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
   - ./namespace.yaml
-  - ./openebs/ks.yaml
   - ./snapshot-controller/ks.yaml
+  - ./openebs/ks.yaml
+  - ./rook-ceph/ks.yaml
diff --git a/kubernetes/main/apps/storage/openebs/ks.yaml b/kubernetes/main/apps/storage/openebs/ks.yaml
index 27cf346b..a1718e52 100644
--- a/kubernetes/main/apps/storage/openebs/ks.yaml
+++ b/kubernetes/main/apps/storage/openebs/ks.yaml
@@ -12,11 +12,11 @@ spec:
   dependsOn:
     - name: snapshot-controller
   path: ./kubernetes/main/apps/storage/openebs/app
-  prune: true
+  prune: false # should never be deleted
   sourceRef:
     kind: GitRepository
     name: home-ops
-  wait: false
+  wait: true
   interval: 30m
   retryInterval: 1m
   timeout: 5m
diff --git a/kubernetes/main/apps/storage/rook-ceph/app/helmrelease.yaml b/kubernetes/main/apps/storage/rook-ceph/app/helmrelease.yaml
new file mode 100644
index 00000000..de94ee6d
--- /dev/null
+++ b/kubernetes/main/apps/storage/rook-ceph/app/helmrelease.yaml
@@ -0,0 +1,133 @@
+---
+# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json
+apiVersion: helm.toolkit.fluxcd.io/v2beta2
+kind: HelmRelease
+metadata:
+  name: rook-ceph-operator
+spec:
+  interval: 30m
+  chart:
+    spec:
+      chart: rook-ceph
+      version: v1.14.2
+      sourceRef:
+        kind: HelmRepository
+        name: rook-ceph
+        namespace: flux-system
+  install:
+    remediation:
+      retries: 3
+  upgrade:
+    cleanupOnFail: true
+    remediation:
+      retries: 3
+  dependsOn:
+    - name: snapshot-controller
+  values:
+    csi:
+      enableLiveness: true
+      serviceMonitor:
+        enabled: true
+      csiRBDProvisionerResource: |
+        - name : csi-provisioner
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 25m
+            limits:
+              memory: 256Mi
+        - name : csi-resizer
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 25m
+            limits:
+              memory: 256Mi
+        - name : csi-attacher
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 25m
+            limits:
+              memory: 256Mi
+        - name : csi-snapshotter
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 25m
+            limits:
+              memory: 256Mi
+        - name : csi-rbdplugin
+          resource:
+            requests:
+              memory: 512Mi
+              cpu: 50m
+            limits:
+              memory: 1Gi
+        - name : csi-omap-generator
+          resource:
+            requests:
+              memory: 512Mi
+              cpu: 50m
+            limits:
+              memory: 1Gi
+        - name : liveness-prometheus
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 25m
+            limits:
+              memory: 256Mi
+
+      csiCephFSProvisionerResource: |
+        - name : csi-provisioner
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 25m
+            limits:
+              memory: 256Mi
+        - name : csi-resizer
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 25m
+            limits:
+              memory: 256Mi
+        - name : csi-attacher
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 25m
+            limits:
+              memory: 256Mi
+        - name : csi-snapshotter
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 25m
+            limits:
+              memory: 256Mi
+        - name : csi-cephfsplugin
+          resource:
+            requests:
+              memory: 512Mi
+              cpu: 50m
+            limits:
+              memory: 1Gi
+        - name : liveness-prometheus
+          resource:
+            requests:
+              memory: 128Mi
+              cpu: 25m
+            limits:
+              memory: 256Mi
+    monitoring:
+      enabled: true
+    resources:
+      limits:
+        memory: 512Mi
+      requests:
+        memory: 128Mi
+        cpu: 100m
+
diff --git a/kubernetes/main/apps/storage/rook-ceph/app/kustomization.yaml b/kubernetes/main/apps/storage/rook-ceph/app/kustomization.yaml
new file mode 100644
index 00000000..17cbc72b
--- /dev/null
+++ b/kubernetes/main/apps/storage/rook-ceph/app/kustomization.yaml
@@ -0,0 +1,6 @@
+---
+# yaml-language-server: $schema=https://json.schemastore.org/kustomization
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - ./helmrelease.yaml
diff --git a/kubernetes/main/apps/storage/rook-ceph/cluster/cephobjectstoreuser.yaml b/kubernetes/main/apps/storage/rook-ceph/cluster/cephobjectstoreuser.yaml
new file mode 100644
index 00000000..319f79e3
--- /dev/null
+++ b/kubernetes/main/apps/storage/rook-ceph/cluster/cephobjectstoreuser.yaml
@@ -0,0 +1,10 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/ceph.rook.io/cephobjectstoreuser_v1.json
+apiVersion: ceph.rook.io/v1
+kind: CephObjectStoreUser
+metadata:
+  name: rook-ceph-cluster-admin
+spec:
+  # Ref: https://rook.io/docs/rook/v1.13/Storage-Configuration/Object-Storage-RGW/object-storage/
+  store: ceph-objectstore
+  displayName: Cluster Admin
diff --git a/kubernetes/main/apps/storage/rook-ceph/cluster/helmrelease.yaml b/kubernetes/main/apps/storage/rook-ceph/cluster/helmrelease.yaml
new file mode 100644
index 00000000..4d506fa0
--- /dev/null
+++ b/kubernetes/main/apps/storage/rook-ceph/cluster/helmrelease.yaml
@@ -0,0 +1,228 @@
+---
+# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json
+apiVersion: helm.toolkit.fluxcd.io/v2beta2
+kind: HelmRelease
+metadata:
+  name: rook-ceph-cluster
+spec:
+  interval: 30m
+  chart:
+    spec:
+      chart: rook-ceph-cluster
+      version: v1.14.2
+      sourceRef:
+        kind: HelmRepository
+        name: rook-ceph
+        namespace: flux-system
+  install:
+    remediation:
+      retries: 3
+  upgrade:
+    cleanupOnFail: true
+    remediation:
+      strategy: rollback
+      retries: 3
+  dependsOn:
+    - name: multus
+      namespace: kube-system
+    - name: rook-ceph-operator
+      namespace: storage
+  values:
+    toolbox:
+      enabled: true
+
+    operatorNamespace: storage
+
+    monitoring:
+      enabled: true
+      createPrometheusRules: true
+
+    configOverride: |
+      [global]
+      bdev_enable_discard = true
+      bdev_async_discard = true
+
+    cephClusterSpec:
+      crashCollector:
+        disable: false
+
+      dashboard:
+        enabled: true
+        urlPrefix: /
+        ssl: false
+
+      network:
+        provider: host
+        addressRanges:
+          public:
+            - "192.168.1.0/24"
+          cluster:
+            - "10.49.0.0/24"
+
+      storage:
+        useAllNodes: true
+        useAllDevices: true
+        deviceFilter: nvme0n1
+        config:
+          osdsPerDevice: "1"
+
+      resources:
+        mgr:
+          requests:
+            cpu: "125m"
+            memory: "512Mi"
+          limits:
+            memory: "2Gi"
+        mon:
+          requests:
+            cpu: "49m"
+            memory: "512Mi"
+          limits:
+            memory: "1Gi"
+        osd:
+          requests:
+            cpu: "442m"
+            memory: "2Gi"
+          limits:
+            memory: "6Gi"
+        mgr-sidecar:
+          requests:
+            cpu: "49m"
+            memory: "128Mi"
+          limits:
+            memory: "256Mi"
+        crashcollector:
+          requests:
+            cpu: "15m"
+            memory: "64Mi"
+          limits:
+            memory: "64Mi"
+        logcollector:
+          requests:
+            cpu: "100m"
+            memory: "100Mi"
+          limits:
+            memory: "1Gi"
+        cleanup:
+          requests:
+            cpu: "250m"
+            memory: "100Mi"
+          limits:
+            memory: "1Gi"
+
+    cephBlockPools:
+      - name: ceph-blockpool
+        spec:
+          failureDomain: host
+          replicated:
+            size: 3
+        storageClass:
+          enabled: true
+          name: ceph-block
+          isDefault: true
+          reclaimPolicy: Delete
+          allowVolumeExpansion: true
+          volumeBindingMode: "Immediate"
+          mountOptions: []
+          allowedTopologies: []
+          parameters:
+            imageFormat: "2"
+            imageFeatures: layering
+            csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+            csi.storage.k8s.io/provisioner-secret-namespace: storage
+            csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+            csi.storage.k8s.io/controller-expand-secret-namespace: storage
+            csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+            csi.storage.k8s.io/node-stage-secret-namespace: storage
+            csi.storage.k8s.io/fstype: ext4
+
+    cephBlockPoolsVolumeSnapshotClass:
+      enabled: true
+      name: csi-ceph-blockpool
+      isDefault: false
+      deletionPolicy: Delete
+
+    cephFileSystems:
+      - name: ceph-filesystem
+        spec:
+          metadataPool:
+            replicated:
+              size: 3
+          dataPools:
+            - failureDomain: host
+              replicated:
+                size: 3
+              name: data0
+          metadataServer:
+            activeCount: 1
+            activeStandby: true
+            priorityClassName: system-cluster-critical
+            resources:
+              requests:
+                cpu: "35m"
+                memory: "1Gi"
+              limits:
+                memory: "3Gi"
+        storageClass:
+          enabled: true
+          isDefault: false
+          name: ceph-filesystem
+          reclaimPolicy: Delete
+          allowVolumeExpansion: true
+          mountOptions: []
+          parameters:
+            csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
+            csi.storage.k8s.io/provisioner-secret-namespace: storage
+            csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
+            csi.storage.k8s.io/controller-expand-secret-namespace: storage
+            csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
+            csi.storage.k8s.io/node-stage-secret-namespace: storage
+            csi.storage.k8s.io/fstype: ext4
+
+    cephFileSystemVolumeSnapshotClass:
+      enabled: true
+      name: csi-ceph-filesystem
+      isDefault: false
+      deletionPolicy: Delete
+
+    cephObjectStores:
+      - name: ceph-objectstore
+        spec:
+          metadataPool:
+            failureDomain: host
+            replicated:
+              size: 3
+          dataPool:
+            failureDomain: host
+            erasureCoded:
+              dataChunks: 2
+              codingChunks: 1
+          preservePoolsOnDelete: true
+          priorityClassName: system-cluster-critical
+          gateway:
+            port: 80
+            resources:
+              requests:
+                cpu: 100m
+                memory: 256Mi
+              limits:
+                memory: 2Gi
+            instances: 1
+          healthCheck:
+            bucket:
+              interval: 60s
+        storageClass:
+          enabled: true
+          name: ceph-bucket
+          reclaimPolicy: Delete
+          parameters: []
+
+    ingress:
+      dashboard:
+        ingressClassName: "internal-nginx"
+        host:
+          name: &host rook.${SECRET_DOMAIN}
+          path: "/"
+        tls:
+          - hosts:
+              - *host
diff --git a/kubernetes/main/apps/storage/rook-ceph/cluster/kustomization.yaml b/kubernetes/main/apps/storage/rook-ceph/cluster/kustomization.yaml
new file mode 100644
index 00000000..17cbc72b
--- /dev/null
+++ b/kubernetes/main/apps/storage/rook-ceph/cluster/kustomization.yaml
@@ -0,0 +1,6 @@
+---
+# yaml-language-server: $schema=https://json.schemastore.org/kustomization
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - ./helmrelease.yaml
diff --git a/kubernetes/main/apps/storage/rook-ceph/ks.yaml b/kubernetes/main/apps/storage/rook-ceph/ks.yaml
new file mode 100644
index 00000000..acd9c99a
--- /dev/null
+++ b/kubernetes/main/apps/storage/rook-ceph/ks.yaml
@@ -0,0 +1,46 @@
+---
+# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
+apiVersion: kustomize.toolkit.fluxcd.io/v1
+kind: Kustomization
+metadata:
+  name: &app rook-ceph
+  namespace: flux-system
+spec:
+  targetNamespace: storage
+  commonMetadata:
+    labels:
+      app.kubernetes.io/name: *app
+  path: "./kubernetes/main/apps/storage/rook-ceph/app"
+  prune: false # should never be deleted
+  sourceRef:
+    kind: GitRepository
+    name: home-ops
+  wait: true
+  interval: 30m
+  retryInterval: 1m
+  timeout: 5m
+---
+# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
+apiVersion: kustomize.toolkit.fluxcd.io/v1
+kind: Kustomization
+metadata:
+  name: &app rook-ceph-cluster
+  namespace: flux-system
+spec:
+  targetNamespace: storage
+  commonMetadata:
+    labels:
+      app.kubernetes.io/name: *app
+  dependsOn:
+    - name: multus
+    - name: rook-ceph
+    - name: snapshot-controller
+  path: "./kubernetes/main/apps/storage/rook-ceph/cluster"
+  prune: false # should never be deleted
+  sourceRef:
+    kind: GitRepository
+    name: home-ops
+  wait: true
+  interval: 30m
+  retryInterval: 1m
+  timeout: 5m
diff --git a/kubernetes/main/apps/storage/snapshot-controller/ks.yaml b/kubernetes/main/apps/storage/snapshot-controller/ks.yaml
index 232bbd21..1520bd83 100644
--- a/kubernetes/main/apps/storage/snapshot-controller/ks.yaml
+++ b/kubernetes/main/apps/storage/snapshot-controller/ks.yaml
@@ -13,11 +13,11 @@ spec:
   dependsOn:
     - name: cert-manager
   path: "./kubernetes/main/apps/storage/snapshot-controller/app"
-  prune: true
+  prune: false # should never be deleted
   sourceRef:
     kind: GitRepository
     name: home-ops
-  wait: false
+  wait: true
   interval: 30m
   retryInterval: 1m
   timeout: 5m
diff --git a/kubernetes/main/flux/repositories/helm/kustomization.yaml b/kubernetes/main/flux/repositories/helm/kustomization.yaml
index 6b3bdb1f..68edf794 100644
--- a/kubernetes/main/flux/repositories/helm/kustomization.yaml
+++ b/kubernetes/main/flux/repositories/helm/kustomization.yaml
@@ -12,5 +12,6 @@ resources:
   - ./metrics-server.yaml
   - ./openebs-zfs-localpv.yaml
   - ./openebs.yaml
+  - ./rook-ceph.yaml
   - ./piraeus.yaml
   - ./stakater.yaml
diff --git a/kubernetes/main/flux/repositories/helm/rook-ceph.yaml b/kubernetes/main/flux/repositories/helm/rook-ceph.yaml
new file mode 100644
index 00000000..5a87d971
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/rook-ceph.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmRepository
+metadata:
+  name: rook-ceph
+  namespace: flux-system
+spec:
+  interval: 1h
+  url: https://charts.rook.io/release
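
Note (not part of the patch above): a minimal sketch of how a workload could consume the default `ceph-block` StorageClass that the rook-ceph-cluster chart creates from `cephBlockPools`. Only `storageClassName: ceph-block` comes from this change; the PVC name, namespace, and size are illustrative placeholders.

---
# Illustrative example only: a PersistentVolumeClaim bound to the ceph-block class
# defined in cephBlockPools above. Metadata values are hypothetical placeholders.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-ceph-block-pvc  # placeholder, not defined in this patch
  namespace: default            # placeholder namespace
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ceph-block  # RBD-backed default class from the patch
  resources:
    requests:
      storage: 10Gi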