diff --git a/addons/clickhouse/configs/00_default_overrides.xml.tpl b/addons/clickhouse/configs/00_default_overrides.xml.tpl
index 680583f86..fafe9a571 100644
--- a/addons/clickhouse/configs/00_default_overrides.xml.tpl
+++ b/addons/clickhouse/configs/00_default_overrides.xml.tpl
@@ -68,9 +68,9 @@
     {{- if eq (index $ "TLS_ENABLED") "true" -}}
-    {{- $CA_FILE := /etc/pki/tls/ca.pem -}}
-    {{- $CERT_FILE := /etc/pki/tls/cert.pem -}}
-    {{- $KEY_FILE := /etc/pki/tls/key.pem }}
+    {{- $CA_FILE := "/etc/pki/tls/ca.pem" -}}
+    {{- $CERT_FILE := "/etc/pki/tls/cert.pem" -}}
+    {{- $KEY_FILE := "/etc/pki/tls/key.pem" }}
     prometheus
diff --git a/addons/clickhouse/configs/ch_keeper_00_default_overrides.xml.tpl b/addons/clickhouse/configs/ch_keeper_00_default_overrides.xml.tpl
index 1ac45d3a7..49a2aab09 100644
--- a/addons/clickhouse/configs/ch_keeper_00_default_overrides.xml.tpl
+++ b/addons/clickhouse/configs/ch_keeper_00_default_overrides.xml.tpl
@@ -56,9 +56,9 @@
     {{- if eq (index $ "TLS_ENABLED") "true" -}}
-    {{- $CA_FILE := /etc/pki/tls/ca.pem -}}
-    {{- $CERT_FILE := /etc/pki/tls/cert.pem -}}
-    {{- $KEY_FILE := /etc/pki/tls/key.pem -}}
+    {{- $CA_FILE := "/etc/pki/tls/ca.pem" -}}
+    {{- $CERT_FILE := "/etc/pki/tls/cert.pem" -}}
+    {{- $KEY_FILE := "/etc/pki/tls/key.pem" -}}
     prometheus
diff --git a/addons/clickhouse/configs/client.xml.tpl b/addons/clickhouse/configs/client.xml.tpl
index 492df57c4..0a736540d 100644
--- a/addons/clickhouse/configs/client.xml.tpl
+++ b/addons/clickhouse/configs/client.xml.tpl
@@ -2,9 +2,9 @@
     admin
     {{- if eq (index $ "TLS_ENABLED") "true" -}}
-    {{- $CA_FILE := /etc/pki/tls/ca.pem -}}
-    {{- $CERT_FILE := /etc/pki/tls/cert.pem -}}
-    {{- $KEY_FILE := /etc/pki/tls/key.pem }}
+    {{- $CA_FILE := "/etc/pki/tls/ca.pem" -}}
+    {{- $CERT_FILE := "/etc/pki/tls/cert.pem" -}}
+    {{- $KEY_FILE := "/etc/pki/tls/key.pem" }}
     true
diff --git a/addons/clickhouse/templates/cmpd-ch-keeper.yaml b/addons/clickhouse/templates/cmpd-ch-keeper.yaml
index ea1e1f2dd..027eba3b2 100644
--- a/addons/clickhouse/templates/cmpd-ch-keeper.yaml
+++ b/addons/clickhouse/templates/cmpd-ch-keeper.yaml
@@ -160,7 +160,7 @@ spec:
   volumes:
     - name: data
   tls:
-    volumeName: tls 
+    volumeName: tls
     mountPath: /etc/pki/tls
     caFile: ca.pem
     certFile: cert.pem
diff --git a/addons/clickhouse/templates/cmpd-clickhouse.yaml b/addons/clickhouse/templates/cmpd-clickhouse.yaml
index fecc8ebe6..324a85c98 100644
--- a/addons/clickhouse/templates/cmpd-clickhouse.yaml
+++ b/addons/clickhouse/templates/cmpd-clickhouse.yaml
@@ -173,7 +173,7 @@ spec:
       targetPort: tcp-secure
       port: 9440
   tls:
-    volumeName: tls 
+    volumeName: tls
     mountPath: /etc/pki/tls
     caFile: ca.pem
     certFile: cert.pem
diff --git a/examples/clickhouse/README.md b/examples/clickhouse/README.md
index 942ddc629..3eecd2e76 100644
--- a/examples/clickhouse/README.md
+++ b/examples/clickhouse/README.md
@@ -2,87 +2,279 @@
 ClickHouse is an open-source column-oriented OLAP database management system. Use it to boost your database performance while providing linear scalability and hardware efficiency.
 
+There are two key components in the ClickHouse cluster:
+
+- ClickHouse Server: processes queries and manages data storage.
+- ClickHouse Keeper: monitors the health of the ClickHouse servers and performs failover operations when necessary, serving as an alternative to ZooKeeper.
+
+## Features In KubeBlocks
+
+### Lifecycle Management
+
+| Topology | Horizontal scaling | Vertical scaling | Expand volume | Restart | Stop/Start | Configure | Expose | Switchover |
+|--------------------|--------------------|------------------|---------------|---------|------------|-----------|--------|------------|
+| standalone/cluster | Yes                | Yes              | Yes           | Yes     | Yes        | Yes       | No     | N/A        |
+
+### Backup and Restore
+
+No backup and restore methods are provided for this addon yet.
+
+### Versions
+
+| Major Versions | Description |
+|----------------|-------------|
+| 24             | 24.8.3      |
+
 ## Prerequisites
 
-This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the kubectl command line tool and helm somewhere in your path. Please see the [getting started](https://kubernetes.io/docs/setup/) and [Installing Helm](https://helm.sh/docs/intro/install/) for installation instructions for your platform.
+- Kubernetes cluster >= v1.21
+- `kubectl` installed, refer to [K8s Install Tools](https://kubernetes.io/docs/tasks/tools/)
+- Helm, refer to [Installing Helm](https://helm.sh/docs/intro/install/)
+- KubeBlocks installed and running, refer to [Install KubeBlocks](../docs/prerequisites.md)
+- ClickHouse Addon enabled, refer to [Install Addons](../docs/install-addon.md)
+
+## Examples
+
+### Create
+
+#### Standalone Mode
+
+Create a ClickHouse cluster with only the ClickHouse server:
 
-Also, this example requires kubeblocks installed and running. Here is the steps to install kubeblocks, please replace "`$kb_version`" with the version you want to use.
 ```bash
-# Add Helm repo
-helm repo add kubeblocks https://apecloud.github.io/helm-charts
-# If github is not accessible or very slow for you, please use following repo instead
-helm repo add kubeblocks https://jihulab.com/api/v4/projects/85949/packages/helm/stable
+kubectl apply -f examples/clickhouse/cluster-standalone.yaml
+```
+
+This creates a single ClickHouse server pod with the default configuration.
+
+To connect to the ClickHouse server, use the following command:
 
-# Update helm repo
-helm repo update
+```bash
+clickhouse-client --host <host> --port 9000 --user admin --password <password>
+```
 
-# Get the versions of KubeBlocks and select the one you want to use
-helm search repo kubeblocks/kubeblocks --versions
-# If you want to obtain the development versions of KubeBlocks, Please add the '--devel' parameter as the following command
-helm search repo kubeblocks/kubeblocks --versions --devel
+> [!NOTE]
+> You may find the password in the secret `<clusterName>-clickhouse-account-admin`.
 
-# Create dependent CRDs
-kubectl create -f https://github.com/apecloud/kubeblocks/releases/download/v$kb_version/kubeblocks_crds.yaml
-# If github is not accessible or very slow for you, please use following command instead
-kubectl create -f https://jihulab.com/api/v4/projects/98723/packages/generic/kubeblocks/v$kb_version/kubeblocks_crds.yaml
+For example, you can get the password with the following command:
 
-# Install KubeBlocks
-helm install kubeblocks kubeblocks/kubeblocks --namespace kb-system --create-namespace --version="$kb_version"
+```bash
+kubectl get secrets clickhouse-cluster-clickhouse-account-admin -n default -oyaml | yq .data.password -r | base64 -d
 ```
 
-## Examples
+where `clickhouse-cluster-clickhouse-account-admin` is the secret name, following the pattern `<clusterName>-<componentName>-account-<accountName>`, and `password` is the key of the secret.
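+
+For a quick connectivity check, you can port-forward the ClickHouse service and log in with the retrieved password. This is a minimal sketch; it assumes the Service name follows the `<clusterName>-clickhouse` pattern (here `clickhouse-cluster-clickhouse`), so adjust the names to your environment:
+
+```bash
+# retrieve the admin password from the generated secret
+PASSWORD=$(kubectl get secrets clickhouse-cluster-clickhouse-account-admin -n default -o jsonpath='{.data.password}' | base64 -d)
+# forward local port 9000 to the ClickHouse service (service name assumed)
+kubectl port-forward svc/clickhouse-cluster-clickhouse 9000:9000 -n default &
+# connect with clickhouse-client and run a smoke-test query
+clickhouse-client --host 127.0.0.1 --port 9000 --user admin --password "$PASSWORD" --query "SELECT version()"
+```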
+
+#### Cluster Mode
+
+Create a ClickHouse cluster with ClickHouse servers and ch-keeper:
 
-### [Create](cluster.yaml)
-Create a clickhouse cluster with specified cluster definition
 ```bash
 kubectl apply -f examples/clickhouse/cluster.yaml
 ```
-Create a single node clickhouse cluster with specified cluster definition
+This example also shows how to override the default accounts' passwords, in either of two ways.
+
+Option 1. Override the `passwordConfig` rule used to generate the password:
+
+```yaml
+  - name: ch-keeper
+    replicas: 1
+    # Overrides system accounts defined in referenced ComponentDefinition.
+    systemAccounts:
+      - name: admin # name of the system account
+        passwordConfig: # config rule to generate password
+          length: 10
+          numDigits: 5
+          numSymbols: 0
+          letterCase: MixedCases
+          seed: clickhouse-cluster
+```
+
+Option 2. Reference a secret that stores the account's password:
+
+```yaml
+  - name: clickhouse
+    replicas: 2
+    # Overrides system accounts defined in referenced ComponentDefinition.
+    systemAccounts:
+      - name: admin # name of the system account
+        secretRef:
+          name: udf-account-info
+          namespace: default
+```
+
+Make sure the secret `udf-account-info` exists in the same namespace as the cluster and has the following data:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: udf-account-info
+type: Opaque
+data:
+  password: <base64-encoded-password> # required
+```
+
+#### Cluster Mode with TLS Enabled
+
+To create a ClickHouse cluster with the default configuration and TLS enabled:
+
 ```bash
-kubectl apply -f examples/clickhouse/cluster-single-node.yaml
+kubectl apply -f examples/clickhouse/cluster-tls.yaml
 ```
 
-### [Horizontal scaling](horizontalscale.yaml)
-Horizontal scaling out or in specified components replicas in the cluster
+Compared to the default configuration, the only difference is the `tls` and `issuer` fields in the `cluster-tls.yaml` file:
+
+```yaml
+tls: true # enable tls
+issuer: # set issuer information
+  name: KubeBlocks
+```
+
+To connect to the ClickHouse server over TLS, use the following command:
+
+```bash
+clickhouse-client --host <host> --port 9440 --secure --user admin --password <password>
+```
+
+#### Cluster with Multiple Shards
+
+> [!WARNING]
+> The sharding mode is an experimental feature at the moment.
+
+Create a ClickHouse cluster with ch-keeper and multiple shards of ClickHouse servers:
+
 ```bash
-kubectl apply -f examples/clickhouse/horizontalscale.yaml
+kubectl apply -f examples/clickhouse/cluster-sharding.yaml
+```
+
+This example creates a ClickHouse cluster with 3 shards, each of which has 2 replicas.
+
+### Horizontal scaling
+
+#### [Scale-out](scale-out.yaml)
+
+Scale the ClickHouse cluster out by adding ONE more replica:
+
+```bash
+kubectl apply -f examples/clickhouse/scale-out.yaml
+```
+
+#### [Scale-in](scale-in.yaml)
+
+Scale the ClickHouse cluster in by removing ONE replica:
+
+```bash
+kubectl apply -f examples/clickhouse/scale-in.yaml
+```
+
+#### Scale-in/out using Cluster API
+
+Alternatively, you can update the `replicas` field in the `spec.componentSpecs` section to your desired non-zero number.
+
+```yaml
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: clickhouse-cluster
+  namespace: default
+spec:
+  componentSpecs:
+    - name: clickhouse
+      replicas: 2 # Update `replicas` to 1 for scaling in, and to 3 for scaling out
+```
 
 ### [Vertical scaling](verticalscale.yaml)
+
-Vertical scaling up or down specified components requests and limits cpu or memory resource in the cluster
+Vertically scale the CPU and memory requests and limits of the specified Components in the cluster:
+
 ```bash
 kubectl apply -f examples/clickhouse/verticalscale.yaml
 ```
 
 ### [Expand volume](volumeexpand.yaml)
+
+> [!NOTE]
+> Make sure the storage class you use supports volume expansion.
+
+Check the storage class with the following command:
+
+```bash
+kubectl get storageclass
+```
+
+If the `ALLOWVOLUMEEXPANSION` column is `true`, the storage class supports volume expansion.
+
-Increase size of volume storage with the specified components in the cluster
+Increase the size of the volume storage for the specified Components in the cluster:
+
 ```bash
 kubectl apply -f examples/clickhouse/volumeexpand.yaml
 ```
 
 ### [Restart](restart.yaml)
+
 Restart the specified components in the cluster
+
 ```bash
 kubectl apply -f examples/clickhouse/restart.yaml
 ```
 
 ### [Stop](stop.yaml)
+
 Stop the cluster and release all the pods of the cluster, but the storage will be reserved
+
 ```bash
 kubectl apply -f examples/clickhouse/stop.yaml
 ```
 
 ### [Start](start.yaml)
+
 Start the stopped cluster
+
 ```bash
 kubectl apply -f examples/clickhouse/start.yaml
 ```
 
+### Observability
+
+There are various ways to monitor the cluster. Here we use Prometheus and Grafana to demonstrate one of them.
+
+#### Installing the Prometheus Operator
+
+Skip this step if the Prometheus Operator is already installed. Otherwise, follow the steps in [How to install the Prometheus Operator](../docs/install-prometheus.md) to install it.
+
+#### Create PodMonitor
+
+Apply the `PodMonitor` file to monitor the cluster:
+
+```bash
+kubectl apply -f examples/clickhouse/pod-monitor.yaml
+```
+
+It sets the metrics endpoints as follows:
+
+```yaml
+  podMetricsEndpoints:
+    - path: /metrics
+      port: http-metrics
+      scheme: http
+```
+
+> [!NOTE]
+> Make sure the labels are set correctly in the `PodMonitor` file to match the dashboard.
+
 ### Delete
+
-If you want to delete the cluster and all its resource, you can modify the termination policy and then delete the cluster
+If you want to delete the cluster and all its resources, you can modify the termination policy and then delete the cluster:
+
 ```bash
 kubectl patch cluster clickhouse-cluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge"
+
 kubectl delete cluster clickhouse-cluster
+
+# delete the secret udf-account-info if it exists
+# kubectl delete secret udf-account-info
 ```
+
diff --git a/examples/clickhouse/cluster-sharding.yaml b/examples/clickhouse/cluster-sharding.yaml
new file mode 100644
index 000000000..e72491199
--- /dev/null
+++ b/examples/clickhouse/cluster-sharding.yaml
@@ -0,0 +1,48 @@
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: cluster-sharding
+  namespace: default
+spec:
+  terminationPolicy: Delete
+  componentSpecs:
+    - name: ch-keeper # creates the ClickHouse Keeper component
+      componentDef: clickhouse-keeper-24
+      replicas: 1
+      resources:
+        limits:
+          cpu: "1"
+          memory: "2Gi"
+        requests:
+          cpu: "1"
+          memory: "2Gi"
+      volumeClaimTemplates:
+        - name: data
+          spec:
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 10Gi
+  shardings:
+    - name: shard
+      shards: 3 # create 3 shards
+      template:
+        name: clickhouse # each shard is a clickhouse component with 2 replicas
+        componentDef: clickhouse-24
+        replicas: 2
+        resources:
+          limits:
+            cpu: "1"
+            memory: "2Gi"
+          requests:
+            cpu: "1"
+            memory: "2Gi"
+        volumeClaimTemplates:
+          - name: data
+            spec:
+              accessModes:
+                - ReadWriteOnce
+              resources:
+                requests:
+                  storage: 20Gi
\ No newline at end of file
diff --git a/examples/clickhouse/cluster-single-node.yaml b/examples/clickhouse/cluster-standalone.yaml
similarity index 86%
rename from examples/clickhouse/cluster-single-node.yaml
rename to examples/clickhouse/cluster-standalone.yaml
index 50b505946..8c7adba78 100644
--- a/examples/clickhouse/cluster-single-node.yaml
+++ b/examples/clickhouse/cluster-standalone.yaml
@@ -1,7 +1,7 @@
 apiVersion: apps.kubeblocks.io/v1
 kind: Cluster
 metadata:
-  name: clickhouse-cluster
+  name: clickhouse-standalone
   namespace: default
 spec:
   # Specifies the name of the ClusterDef to use when creating a Cluster.
@@ -12,7 +12,6 @@ spec:
   topology: standalone
   # Specifies the behavior when a Cluster is deleted.
   # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact.
-  # - `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), allowing for data preservation while stopping other operations.
-  # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data.
+  # - `Delete`: Deletes Cluster resources like Pods and Services, and removes PVCs as well, leading to a thorough cleanup while removing all persistent data.
   # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss.
   terminationPolicy: Delete
@@ -21,8 +20,6 @@ spec:
   # ClusterComponentSpec defines the specifications for a Component in a Cluster.
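+  # With the `standalone` topology, only the `clickhouse` component is declared;
+  # no ch-keeper component is needed for a single-node cluster.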
   componentSpecs:
     - name: clickhouse
-      componentDef: clickhouse-24
-      serviceAccountName: kb-clickhouse-cluster
       replicas: 1
       resources:
         limits:
diff --git a/examples/clickhouse/cluster-tls.yaml b/examples/clickhouse/cluster-tls.yaml
new file mode 100644
index 000000000..1e221ea9b
--- /dev/null
+++ b/examples/clickhouse/cluster-tls.yaml
@@ -0,0 +1,67 @@
+---
+# Source: clickhouse-cluster/templates/cluster.yaml
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: cluster-tls
+  namespace: default
+spec:
+  terminationPolicy: Delete
+  clusterDef: clickhouse
+  topology: cluster
+  componentSpecs:
+    - name: ch-keeper
+      replicas: 1
+      resources:
+        limits:
+          cpu: "1"
+          memory: "2Gi"
+        requests:
+          cpu: "1"
+          memory: "2Gi"
+      systemAccounts:
+        - name: admin
+          passwordConfig:
+            length: 10
+            numDigits: 5
+            numSymbols: 0
+            letterCase: MixedCases
+            seed: cluster-tls
+      volumeClaimTemplates:
+        - name: data
+          spec:
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 10Gi
+    - name: clickhouse
+      replicas: 2
+      systemAccounts:
+        - name: admin
+          passwordConfig:
+            length: 10
+            numDigits: 5
+            numSymbols: 0
+            letterCase: MixedCases
+            seed: cluster-tls
+      resources:
+        limits:
+          cpu: "1"
+          memory: "2Gi"
+        requests:
+          cpu: "1"
+          memory: "2Gi"
+      volumeClaimTemplates:
+        - name: data
+          spec:
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 20Gi
+      tls: true # set TLS to true
+      issuer: # if TLS is true, this field is required
+        name: KubeBlocks # set issuer to one of [KubeBlocks, UserProvided]
+        # name: UserProvided # set issuer to one of [KubeBlocks, UserProvided]
+        # secretRef: secret-name # if name=UserProvided, you must reference the secret that contains the user-provided certificates
diff --git a/examples/clickhouse/cluster.yaml b/examples/clickhouse/cluster.yaml
index 5a4fe25e2..586f93d10 100644
--- a/examples/clickhouse/cluster.yaml
+++ b/examples/clickhouse/cluster.yaml
@@ -6,13 +6,12 @@ metadata:
 spec:
   # Specifies the name of the ClusterDef to use when creating a Cluster.
   clusterDef: clickhouse
-  # Specifies the clickhouse cluster topology defined in ClusterDefinition.Spec.topologies, support standalone, cluster
+  # Specifies the clickhouse cluster topology defined in ClusterDefinition.Spec.topologies.
   # - `standalone`: single clickhouse instance
   # - `cluster`: clickhouse with ClickHouse Keeper as coordinator
   topology: cluster
   # Specifies the behavior when a Cluster is deleted.
   # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact.
-  # - `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), allowing for data preservation while stopping other operations.
-  # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data.
+  # - `Delete`: Deletes Cluster resources like Pods and Services, and removes PVCs as well, leading to a thorough cleanup while removing all persistent data.
   # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss.
   terminationPolicy: Delete
@@ -21,17 +20,13 @@ spec:
   # ClusterComponentSpec defines the specifications for a Component in a Cluster.
   componentSpecs:
     - name: clickhouse
-      componentDef: clickhouse-24
-      serviceAccountName: kb-clickhouse-cluster
       replicas: 2
+      # Overrides system accounts defined in referenced ComponentDefinition.
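+      # The referenced Secret must store the password under the `password` key;
+      # see the `udf-account-info` Secret defined at the end of this manifest.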
       systemAccounts:
-        - name: admin
-          passwordConfig:
-            length: 10
-            numDigits: 5
-            numSymbols: 0
-            letterCase: MixedCases
-            seed: clickhouse-cluster
+        - name: admin # name of the system account
+          secretRef:
+            name: udf-account-info
+            namespace: default
       resources:
         limits:
           cpu: '0.5'
@@ -48,12 +43,11 @@ spec:
           requests:
             storage: 20Gi
     - name: ch-keeper
-      componentDef: clickhouse-keeper-24
-      serviceAccountName: kb-clickhouse-cluster
       replicas: 1
+      # Overrides system accounts defined in referenced ComponentDefinition.
       systemAccounts:
-        - name: admin
-          passwordConfig:
+        - name: admin # name of the system account
+          passwordConfig: # config rule to generate password
             length: 10
             numDigits: 5
             numSymbols: 0
             letterCase: MixedCases
@@ -74,3 +68,12 @@ spec:
         resources:
           requests:
             storage: 20Gi
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: udf-account-info
+  namespace: default # optional
+type: Opaque
+data:
+  password: cGFzc3dvcmQxMjM= # 'password123' in base64
\ No newline at end of file
diff --git a/examples/clickhouse/pod-monitor.yaml b/examples/clickhouse/pod-monitor.yaml
new file mode 100644
index 000000000..ef9d612d3
--- /dev/null
+++ b/examples/clickhouse/pod-monitor.yaml
@@ -0,0 +1,29 @@
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+  name: clickhouse-pod-monitor
+  namespace: monitoring # Note: this is the namespace of the Prometheus Operator
+  labels: # these labels must match `prometheus.spec.podMonitorSelector`
+    release: prometheus
+spec:
+  jobLabel: kubeblocks-service
+  # defines the labels which are transferred from the
+  # associated Kubernetes `Pod` object onto the ingested metrics;
+  # set the labels w.r.t. your own needs
+  podTargetLabels:
+    - app.kubernetes.io/instance
+    - app.kubernetes.io/managed-by
+    - apps.kubeblocks.io/component-name
+    - apps.kubeblocks.io/pod-name
+  podMetricsEndpoints:
+    - path: /metrics
+      port: http-metrics
+      scheme: http
+  namespaceSelector:
+    matchNames:
+      - default
+  selector:
+    matchLabels:
+      app.kubernetes.io/instance: clickhouse-cluster # set cluster name
+      apps.kubeblocks.io/component-name: clickhouse
\ No newline at end of file
diff --git a/examples/clickhouse/restart.yaml b/examples/clickhouse/restart.yaml
index 59dcf3a9e..ed61ec8b8 100644
--- a/examples/clickhouse/restart.yaml
+++ b/examples/clickhouse/restart.yaml
@@ -11,5 +11,5 @@ spec:
   restart:
     # Specifies the name of the Component.
     # - clickhouse
-    # - clickhouse-keeper
+    # - ch-keeper
   - componentName: clickhouse
diff --git a/examples/clickhouse/horizontalscale.yaml b/examples/clickhouse/scale-in.yaml
similarity index 63%
rename from examples/clickhouse/horizontalscale.yaml
rename to examples/clickhouse/scale-in.yaml
index d06579b4e..904883816 100644
--- a/examples/clickhouse/horizontalscale.yaml
+++ b/examples/clickhouse/scale-in.yaml
@@ -1,7 +1,7 @@
 apiVersion: operations.kubeblocks.io/v1alpha1
 kind: OpsRequest
 metadata:
-  name: clickhouse-horizontalscaling
+  name: ch-scale-in
   namespace: default
 spec:
   # Specifies the name of the Cluster resource that this operation is targeting.
@@ -9,9 +9,12 @@ spec:
   type: HorizontalScaling
   # Lists HorizontalScaling objects, each specifying scaling requirements for a Component, including desired total replica counts, configurations for new instances, modifications for existing instances, and instance downscaling options
   horizontalScaling:
+    # Specifies the name of the Component.
+    # - clickhouse
+    # - ch-keeper
   - componentName: clickhouse
-    # Specifies the number of total replicas.
-    scaleOut:
-      instances:
-      - name: clickhouse # Name of the instance
-        replicaChanges: 1
\ No newline at end of file
+    # Specifies the replica changes for scaling in the component
+    scaleIn:
+      # Specifies the replica changes for the component:
+      # remove one replica from the current component
+      replicaChanges: 1
\ No newline at end of file
diff --git a/examples/clickhouse/scale-out.yaml b/examples/clickhouse/scale-out.yaml
new file mode 100644
index 000000000..bde288b70
--- /dev/null
+++ b/examples/clickhouse/scale-out.yaml
@@ -0,0 +1,21 @@
+apiVersion: operations.kubeblocks.io/v1alpha1
+kind: OpsRequest
+metadata:
+  name: ch-scale-out
+  namespace: default
+spec:
+  # Specifies the name of the Cluster resource that this operation is targeting.
+  clusterName: clickhouse-cluster
+  type: HorizontalScaling
+  # Lists HorizontalScaling objects, each specifying scaling requirements for a Component, including desired total replica counts, configurations for new instances, modifications for existing instances, and instance downscaling options
+  horizontalScaling:
+    # Specifies the name of the Component.
+    # - clickhouse
+    # - ch-keeper
+  - componentName: clickhouse
+    # Specifies the replica changes for scaling out the component
+    scaleOut:
+      # Specifies the replica changes for the component:
+      # add one more replica to the current component
+      replicaChanges: 1
\ No newline at end of file
diff --git a/examples/clickhouse/verticalscale.yaml b/examples/clickhouse/verticalscale.yaml
index fc82de08f..b8d3b04bc 100644
--- a/examples/clickhouse/verticalscale.yaml
+++ b/examples/clickhouse/verticalscale.yaml
@@ -9,8 +9,9 @@ spec:
   type: VerticalScaling
   # Lists VerticalScaling objects, each specifying a component and its desired compute resources for vertical scaling.
   verticalScaling:
+    # Specifies the name of the Component.
     # - clickhouse
-    # - clickhouse-keeper
+    # - ch-keeper
   - componentName: clickhouse
     # VerticalScaling refers to the process of adjusting the compute resources (e.g., CPU, memory) allocated to a Component. It defines the parameters required for the operation.
     requests:
diff --git a/examples/clickhouse/volumeexpand.yaml b/examples/clickhouse/volumeexpand.yaml
index 633528a08..d13c16ce7 100644
--- a/examples/clickhouse/volumeexpand.yaml
+++ b/examples/clickhouse/volumeexpand.yaml
@@ -7,11 +7,11 @@ spec:
   # Specifies the name of the Cluster resource that this operation is targeting.
   clusterName: clickhouse-cluster
   type: VolumeExpansion
-  # Lists VolumeExpansion objects, each specifying a component and its corresponding volumeClaimTemplates that requires storage expansion. 
+  # Lists VolumeExpansion objects, each specifying a component and its corresponding volumeClaimTemplates that requires storage expansion.
   volumeExpansion:
     # Specifies the name of the Component.
     # - clickhouse
-    # - clickhouse-keeper
+    # - ch-keeper
   - componentName: clickhouse
     # volumeClaimTemplates specifies the storage size and volumeClaimTemplate name.
     volumeClaimTemplates:
diff --git a/examples/milvus/cluster-standalone.yaml b/examples/milvus/cluster-standalone.yaml
index dab96cea2..bffab0e98 100644
--- a/examples/milvus/cluster-standalone.yaml
+++ b/examples/milvus/cluster-standalone.yaml
@@ -75,4 +75,4 @@ spec:
       - ReadWriteOnce
       resources:
         requests:
-          storage: 10Gi
\ No newline at end of file
+          storage: 10Gi