From d9632429ada829e0bb14e8bfea8125d77ab43960 Mon Sep 17 00:00:00 2001 From: Shanshan Date: Wed, 18 Dec 2024 19:41:40 +0800 Subject: [PATCH] chore: updae elasticsearch examples (#1339) --- addons/elasticsearch/scripts/member-leave.sh | 2 +- examples/elasticsearch/README.md | 389 ++++++++++++++++-- .../elasticsearch/cluster-multi-node.yaml | 82 ++++ .../cluster-single-node-with-sp.yaml | 47 +++ .../elasticsearch/cluster-single-node.yaml | 51 ++- examples/elasticsearch/cluster.yaml | 99 ----- examples/elasticsearch/expose-disable.yaml | 12 +- examples/elasticsearch/expose-enable.yaml | 20 +- examples/elasticsearch/pod-monitor.yaml | 19 + examples/elasticsearch/restart.yaml | 6 +- .../{horizontalscale.yaml => scale-in.yaml} | 13 +- examples/elasticsearch/scale-out.yaml | 22 + examples/elasticsearch/start.yaml | 4 +- examples/elasticsearch/stop.yaml | 4 +- examples/elasticsearch/verticalscale.yaml | 7 +- examples/elasticsearch/volumeexpand.yaml | 10 +- examples/rabbitmq/README.md | 1 - 17 files changed, 588 insertions(+), 200 deletions(-) create mode 100644 examples/elasticsearch/cluster-multi-node.yaml create mode 100644 examples/elasticsearch/cluster-single-node-with-sp.yaml delete mode 100644 examples/elasticsearch/cluster.yaml create mode 100644 examples/elasticsearch/pod-monitor.yaml rename examples/elasticsearch/{horizontalscale.yaml => scale-in.yaml} (64%) create mode 100644 examples/elasticsearch/scale-out.yaml diff --git a/addons/elasticsearch/scripts/member-leave.sh b/addons/elasticsearch/scripts/member-leave.sh index 07961fc70..56633278b 100644 --- a/addons/elasticsearch/scripts/member-leave.sh +++ b/addons/elasticsearch/scripts/member-leave.sh @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env sh # shellcheck disable=SC2034 ut_mode="false" diff --git a/examples/elasticsearch/README.md b/examples/elasticsearch/README.md index 3e6d16501..de52b6cf4 100644 --- a/examples/elasticsearch/README.md +++ b/examples/elasticsearch/README.md @@ -1,100 +1,405 @@ # 
Elasticsearch Elasticsearch is a distributed, RESTful search engine optimized for speed and relevance on production-scale workloads. +Each Elasticsearch cluster consists of one or more nodes, and each node in a cluster has a role and communicates with other nodes to share data and responsibilities. A node can assume multiple roles up to your requirements. Types of roles inlcude [^1]: + +- master +- data +- data_content +- data_hot +- data_warm +- data_cold +- data_frozen +- ingest +- ml +- remote_cluster_client +- transform + ## Prerequisites -This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the kubectl command line tool and helm somewhere in your path. Please see the [getting started](https://kubernetes.io/docs/setup/) and [Installing Helm](https://helm.sh/docs/intro/install/) for installation instructions for your platform. +- Kubernetes cluster >= v1.21 +- `kubectl` installed, refer to [K8s Install Tools](https://kubernetes.io/docs/tasks/tools/) +- Helm, refer to [Installing Helm](https://helm.sh/docs/intro/install/) +- KubeBlocks installed and running, refer to [Install Kubeblocks](../docs/prerequisites.md) +- Elasticsearch Addon Enabled, refer to [Install Addons](../docs/install-addon.md) + +## Features In KubeBlocks + +### Lifecycle Management + +| Horizontal
scaling | Vertical
scaling | Expand
volume | Restart | Stop/Start | Configure | Expose | Switchover | +|------------------------|-----------------------|-------------------|-----------|------------|-----------|--------|------------| +| Yes | Yes | Yes | Yes | Yes | No | Yes | N/A | + +### Versions + +| Major Versions | Description | +|---------------|-------------| +| 7.x | 7.7.1,7.8.1,7.10.1 | +| 8.x | 8.1.3, 8.8.2 | + +## Examples + +### Create + +#### Create a Single-Node Cluster + +A Single-Node Cluster is a cluster with only one node and this node assume all roles. It is suitable for development and testing purposes. -Also, this example requires kubeblocks installed and running. Here is the steps to install kubeblocks, please replace "`$kb_version`" with the version you want to use. ```bash -# Add Helm repo -helm repo add kubeblocks https://apecloud.github.io/helm-charts -# If github is not accessible or very slow for you, please use following repo instead -helm repo add kubeblocks https://jihulab.com/api/v4/projects/85949/packages/helm/stable +kubectl apply -f examples/elasticsearch/cluster-single-node.yaml +``` -# Update helm repo -helm repo update +The annotation `kubeblocks.io/extra-env: '{"mode":"single-node"}'` is used to specify the mode of the Elasticsearch cluster. 
-# Get the versions of KubeBlocks and select the one you want to use -helm search repo kubeblocks/kubeblocks --versions -# If you want to obtain the development versions of KubeBlocks, Please add the '--devel' parameter as the following command -helm search repo kubeblocks/kubeblocks --versions --devel +To check the role of the node, you may log in to the pod and run the following command: -# Create dependent CRDs -kubectl create -f https://github.com/apecloud/kubeblocks/releases/download/v$kb_version/kubeblocks_crds.yaml -# If github is not accessible or very slow for you, please use following command instead -kubectl create -f https://jihulab.com/api/v4/projects/98723/packages/generic/kubeblocks/v$kb_version/kubeblocks_crds.yaml +```bash +curl -X GET "http://localhost:9200/_cat/nodes?v&h=name,ip,role" +``` -# Install KubeBlocks -helm install kubeblocks kubeblocks/kubeblocks --namespace kb-system --create-namespace --version="$kb_version" +And the expected output is as follows: + +```text +name ip role +es-single-node-mdit-0 12.345.678 cdfhilmrstw ``` -Enable elasticsearch + +The role is `cdfhilmrstw`. Please frefer to [Elasticsearch Nodes](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html) for more information about the roles. + +#### Create a Multi-Node Cluster + +Create a elasticsearch cluster with multiple nodes and each node assume specified roles. 
+ ```bash -# Add Helm repo -helm repo add kubeblocks-addons https://apecloud.github.io/helm-charts -# If github is not accessible or very slow for you, please use following repo instead -helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packages/helm/stable -# Update helm repo -helm repo update +kubectl apply -f examples/elasticsearch/cluster-multi-node.yaml +``` -# Enable elasticsearch -helm upgrade -i kb-addon-elasticsearch kubeblocks-addons/elasticsearch --version $kb_version -n kb-system -``` +There are four components specified in this cluster, i.e 'master', 'data', 'ingest', and 'tranfrom', and each component has differnt roles. Roles are specified in the annotation: -## Examples +```yaml + annotations: + kubeblocks.io/extra-env: '{"master-roles":"master", "data-roles": "data", "ingest-roles": "ingest", "transform-roles": "transform"}' +``` + +where `-roles` is a comma-separated list of roles that each node will assume. The roles are `master`, `data`, `ingest`, and `transform` in this example. + +> [!NOTE] +> Roles will take effect only when `mode` is set to `multi-node`, or the `mode` is not set. + +If you want to create a cluster with more roles, you can add more components and specify the roles in the annotation. + +- set annotation with new roles. + +```yaml + annotations: + kubeblocks.io/extra-env: '{"master-roles":"master", "data-roles": "data", "ingest-roles": "ingest", "transform-roles": "transform", "-roles": "role1,role2"}' +``` + +where `newCmp` is the name of the new component, and `role1` and `role2` are the roles that each node in the new component will assume (chosen from the list of roles mentioned above). 
+ +- add the new component to the `spec.componentSpecs` field: + +```yaml +spec: + terminationPolicy: Delete + componentSpecs: + - name: master + - name: data + - name: ingest + - name: transform + - name: # set the name to your preferred one + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + replicas: 3 +``` + +### Horizontal scaling + +#### [Scale-out](scale-out.yaml) + +Horizontal scaling out elasticsearch cluster by adding ONE `MASTER` replica: -### [Create](cluster.yaml) -Create a elasticsearch cluster with specified cluster definition ```bash -kubectl apply -f examples/elasticsearch/cluster.yaml +kubectl apply -f examples/elasticsearch/scale-out.yaml ``` -Create a single node elasticsearch cluster with specified cluster definition +#### [Scale-in](scale-in.yaml) + +Horizontal scaling in elasticsearch cluster by deleting ONE `MASTER` replica: + ```bash -kubectl apply -f examples/elasticsearch/cluster-single-node.yaml +kubectl apply -f examples/elasticsearch/scale-in.yaml ``` -### [Horizontal scaling](horizontalscale.yaml) -Horizontal scaling out or in specified components replicas in the cluster +On scaling in, the pod with the highest ordinal number (if not otherwise specified) will be deleted. And it will be cleared from voting configuration exclusions of this cluster before deletion, to make sure the cluster is healthy. + +After scaling in, you can check the cluster health by running the following command: + ```bash -kubectl apply -f examples/elasticsearch/horizontalscale.yaml +curl -X GET "http://:9200/_cluster/health?pretty" # replace with the actual endpoint +``` + +> [!IMPORTANT] +> Make sure there are at least ONE replica for each component +> If you want to scale in the last replica, may be you should consider to `STOP` the cluster. + +#### Scale-in/out using Cluster API + +Alternatively, you can update the `replicas` field in the `spec.componentSpecs.replicas` section to your desired non-zero number. 
+ +```yaml +spec: + terminationPolicy: Delete + componentSpecs: + - name: master + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + replicas: 3 # Update `replicas` to your need. ``` ### [Vertical scaling](verticalscale.yaml) -Vertical scaling up or down specified components requests and limits cpu or memory resource in the cluster + +Vertical scaling involves increasing or decreasing resources to an existing database cluster. +Resources that can be scaled include:, CPU cores/processing power and Memory (RAM). + +To vertical scaling up or down specified component, you can apply the following yaml file: + ```bash kubectl apply -f examples/elasticsearch/verticalscale.yaml ``` +#### Scale-up/down using Cluster API + +Alternatively, you may update `spec.componentSpecs.resources` field to the desired resources for vertical scale. + +```yaml +spec: + terminationPolicy: Delete + componentSpecs: + - name: master + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + resources: + requests: + cpu: "1" # Update the resources to your need. + memory: "2Gi" # Update the resources to your need. + limits: + cpu: "2" # Update the resources to your need. + memory: "4Gi" # Update the resources to your need. +``` + ### [Expand volume](volumeexpand.yaml) -Increase size of volume storage with the specified components in the cluster + +Volume expansion is the ability to increase the size of a Persistent Volume Claim (PVC) after it's created. It is introduced in Kubernetes v1.11 and goes GA in Kubernetes v1.24. It allows Kubernetes users to simply edit their PersistentVolumeClaim objects without requiring any downtime at all if possible. + +> [!NOTE] +> Make sure the storage class you use supports volume expansion. + +Check the storage class with following command: + +```bash +kubectl get storageclass +``` + +If the `ALLOWVOLUMEEXPANSION` column is `true`, the storage class supports volume expansion. 
+ +To increase size of volume storage with the specified components in the cluster + ```bash kubectl apply -f examples/elasticsearch/volumeexpand.yaml ``` +After the operation, you will see the volume size of the specified component is increased to `30Gi` in this case. Once you've done the change, check the `status.conditions` field of the PVC to see if the resize has completed. + +#### Volume expansion using Cluster API + +Alternatively, you may update the `spec.componentSpecs.volumeClaimTemplates.spec.resources.requests.storage` field to the desired size. + +```yaml +spec: + terminationPolicy: Delete + componentSpecs: + - name: master + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + volumeClaimTemplates: + - name: data + spec: + storageClassName: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 30Gi # specify new size, and make sure it is larger than the current size +``` + ### [Restart](restart.yaml) -Restart the specified components in the cluster + +Restart the specified component `data` in the cluster. If not specified, all components will be restarted. + ```bash kubectl apply -f examples/elasticsearch/restart.yaml ``` ### [Stop](stop.yaml) + Stop the cluster and release all the pods of the cluster, but the storage will be reserved + ```bash kubectl apply -f examples/elasticsearch/stop.yaml ``` +#### Stop using Cluster API + +Alternatively, you may stop ONE component by setting the `spec.componentSpecs.stop` field to `true`. + +```yaml +spec: + terminationPolicy: Delete + componentSpecs: + - name: master + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + stop: true # set stop `true` to stop the component + replicas: 3 +``` + ### [Start](start.yaml) + Start the stopped cluster + ```bash kubectl apply -f examples/elasticsearch/start.yaml ``` +#### Start using Cluster API + +Alternatively, you may start the cluster by setting the `spec.componentSpecs.stop` field to `true`. 
+ +```yaml +spec: + terminationPolicy: Delete + componentSpecs: + - name: master + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + stop: false # set to `false` (or remove this field) to start the component + replicas: 3 +``` + +### Expose + +It is recommended to access the Elasticsearch cluster from within the Kubernetes cluster using Kibaba or other tools. However, if you need to access the Elasticsearch cluster from outside the Kubernetes cluster, you can expose the Elasticsearch service using a `LoadBalancer` service type. + +#### [Enable](expose-enable.yaml) + +```bash +kubectl apply -f examples/elasticsearch/expose-enable.yaml +``` + +In this example, a service with type `LoadBalancer` will be created to expose the Elasticsearch cluster. You can access the cluster using the `external IP` of the service. + +#### [Disable](expose-disable.yaml) + +```bash +kubectl apply -f examples/elasticsearch/expose-disable.yaml +``` + +#### Expose SVC using Cluster API + +Alternatively, you may expose service by updating `spec.services` + +```yaml +spec: + # append service to the list + services: + # add annotation for cloud loadbalancer if + # services.spec.type is LoadBalancer + # here we use annotation for alibaba cloud for example + - annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: internet + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-charge-type: "" + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small + componentSelector: master + name: master-internet + serviceName: master-internet + spec: + ports: + - name: es-http + nodePort: 32751 + port: 9200 + protocol: TCP + targetPort: es-http + type: LoadBalancer +``` + +If the service is of type `LoadBalancer`, please add annotations for cloud loadbalancer depending on the cloud provider you are using. 
Here list annotations for some cloud providers: + +```yaml +# alibaba cloud +service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "internet" # or "intranet" + +# aws +service.beta.kubernetes.io/aws-load-balancer-type: nlb # Use Network Load Balancer +service.beta.kubernetes.io/aws-load-balancer-internal: "true" # or "false" for internet + +# azure +service.beta.kubernetes.io/azure-load-balancer-internal: "true" # or "false" for internet + +# gcp +networking.gke.io/load-balancer-type: "Internal" # for internal access +cloud.google.com/l4-rbs: "enabled" # for internet +``` + +Please consult your cloud provider for more accurate and update-to-date information. + +### Observability + +There are various ways to monitor the cluster. Here we use Prometheus and Grafana to demonstrate how to monitor the cluster. + +#### Installing the Prometheus Operator + +You may skip this step if you have already installed the Prometheus Operator. +Or you can follow the steps in [How to install the Prometheus Operator](../docs/install-prometheus.md) to install the Prometheus Operator. + +##### Step 1. Create PodMonitor + +Apply the `PodMonitor` file to monitor the cluster: + +```bash +kubectl apply -f examples/elasticsearch/pod-monitor.yaml +``` + +It set up the PodMonitor to scrape the metrics (port `9114`) from the Elasticsearch cluster. + +```yaml + - path: /metrics + port: metrics + scheme: http +``` + +##### Step 2. Access the Grafana Dashboard + +Login to the Grafana dashboard and import the dashboard. +You can import the dashboard provided by Grafana or create your own dashboard, e.g. +- https://grafana.com/grafana/dashboards/2322-elasticsearch/ + + +> [!Note] +> Make sure the labels are set correctly in the `PodMonitor` file to match the dashboard. 
+ + ### Delete + If you want to delete the cluster and all its resource, you can modify the termination policy and then delete the cluster ```bash -kubectl patch cluster elasticsearch-cluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" +kubectl patch cluster es-multinode -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge" -kubectl delete cluster elasticsearch-cluster +kubectl delete cluster es-multinode ``` + +## References + +[^1]: Elasticsearch Nodes, https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html diff --git a/examples/elasticsearch/cluster-multi-node.yaml b/examples/elasticsearch/cluster-multi-node.yaml new file mode 100644 index 000000000..ac0b98df9 --- /dev/null +++ b/examples/elasticsearch/cluster-multi-node.yaml @@ -0,0 +1,82 @@ +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: es-multinode + namespace: default + annotations: + kubeblocks.io/extra-env: '{"master-roles":"master", "data-roles": "data", "ingest-roles": "ingest", "transform-roles": "transform"}' +spec: + terminationPolicy: Delete + componentSpecs: + - name: master + componentDef: elasticsearch-8-1.0.0 + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 2Gi + requests: + cpu: '0.5' + memory: 2Gi + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: data + componentDef: elasticsearch-8-1.0.0 + replicas: 3 + resources: + limits: + cpu: '0.5' + memory: 2Gi + requests: + cpu: '0.5' + memory: 2Gi + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: ingest + componentDef: elasticsearch-8-1.0.0 + replicas: 1 + resources: + limits: + cpu: '0.5' + memory: 2Gi + requests: + cpu: '0.5' + memory: 2Gi + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + - name: transform + componentDef: 
elasticsearch-8-1.0.0 + replicas: 1 + resources: + limits: + cpu: '0.5' + memory: 2Gi + requests: + cpu: '0.5' + memory: 2Gi + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi diff --git a/examples/elasticsearch/cluster-single-node-with-sp.yaml b/examples/elasticsearch/cluster-single-node-with-sp.yaml new file mode 100644 index 000000000..b85839659 --- /dev/null +++ b/examples/elasticsearch/cluster-single-node-with-sp.yaml @@ -0,0 +1,47 @@ +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: elasticsearch-cluster + namespace: default + annotations: + kubeblocks.io/extra-env: '{"mode":"single-node"}' +spec: + terminationPolicy: Delete + componentSpecs: + - name: mdit + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + schedulingPolicy: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/instance: es-cluster + apps.kubeblocks.io/component-name: mdit + topologyKey: kubernetes.io/hostname + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/instance: es-cluster + apps.kubeblocks.io/component-name: mdit + topologyKey: kubernetes.io/hostname + replicas: 1 + disableExporter: false + resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "1" + memory: "2Gi" + volumeClaimTemplates: + - name: data # ref clusterDefinition components.containers.volumeMounts.name + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi \ No newline at end of file diff --git a/examples/elasticsearch/cluster-single-node.yaml b/examples/elasticsearch/cluster-single-node.yaml index 2102292e7..525bc7d59 100644 --- a/examples/elasticsearch/cluster-single-node.yaml +++ b/examples/elasticsearch/cluster-single-node.yaml @@ -1,36 +1,31 @@ apiVersion: apps.kubeblocks.io/v1 kind: Cluster metadata: - name: 
elasticsearch-cluster + name: es-singlenode namespace: default annotations: - kubeblocks.io/extra-env: '{"elasticsearch-roles":"master,data,ingest,transform"}' + # kubeblokcs.io/extra-env is an reserved annotation + # use 'mode=single-node' to indicate this cluster starts in single-node type. + kubeblocks.io/extra-env: '{"mode":"single-node"}' spec: - # Specifies the behavior when a Cluster is deleted. - # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. - # - `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), allowing for data preservation while stopping other operations. - # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. - # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. terminationPolicy: Delete - # Specifies a list of ClusterComponentSpec objects used to define the individual components that make up a Cluster. This field allows for detailed configuration of each component within the Cluster. Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a cluster. ClusterComponentSpec defines the specifications for a Component in a Cluster. 
componentSpecs: - - name: elasticsearch - componentDef: elasticsearch-8.8 - disableExporter: true - serviceAccountName: kb-elasticsearch-cluster - replicas: 1 - resources: - limits: - cpu: '0.5' - memory: 2Gi - requests: - cpu: '0.5' - memory: 2Gi - volumeClaimTemplates: - - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi + - name: mdit + componentDef: elasticsearch-8 + serviceVersion: 8.8.2 + replicas: 1 + resources: + limits: + cpu: "1" + memory: "2Gi" + requests: + cpu: "1" + memory: "2Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi \ No newline at end of file diff --git a/examples/elasticsearch/cluster.yaml b/examples/elasticsearch/cluster.yaml deleted file mode 100644 index d8ff441c0..000000000 --- a/examples/elasticsearch/cluster.yaml +++ /dev/null @@ -1,99 +0,0 @@ -apiVersion: apps.kubeblocks.io/v1 -kind: Cluster -metadata: - name: elasticsearch-cluster - namespace: default - annotations: - kubeblocks.io/extra-env: '{"master-roles":"master", "data-roles": "data", "ingest-roles": "ingest", "transform-roles": "transform"}' -spec: - # Specifies the behavior when a Cluster is deleted. - # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. - # - `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), allowing for data preservation while stopping other operations. - # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. - # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. 
- terminationPolicy: Delete - # Specifies a list of ClusterComponentSpec objects used to define the individual components that make up a Cluster. This field allows for detailed configuration of each component within the Cluster. - # Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a cluster. - # ClusterComponentSpec defines the specifications for a Component in a Cluster. - componentSpecs: - - name: master - componentDef: elasticsearch-8.8 - disableExporter: true - serviceAccountName: kb-elasticsearch-cluster - replicas: 1 - resources: - limits: - cpu: '0.5' - memory: 2Gi - requests: - cpu: '0.5' - memory: 2Gi - volumeClaimTemplates: - - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - - name: data - componentDef: elasticsearch-8.8 - serviceAccountName: kb-elasticsearch-cluster - disableExporter: true - replicas: 1 - resources: - limits: - cpu: '0.5' - memory: 2Gi - requests: - cpu: '0.5' - memory: 2Gi - volumeClaimTemplates: - - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - - name: ingest - componentDef: elasticsearch-8.8 - serviceAccountName: kb-elasticsearch-cluster - disableExporter: true - replicas: 1 - resources: - limits: - cpu: '0.5' - memory: 2Gi - requests: - cpu: '0.5' - memory: 2Gi - volumeClaimTemplates: - - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - services: null - - name: transform - componentDef: elasticsearch-8.8 - serviceAccountName: kb-elasticsearch-cluster - disableExporter: true - replicas: 1 - resources: - limits: - cpu: '0.5' - memory: 2Gi - requests: - cpu: '0.5' - memory: 2Gi - volumeClaimTemplates: - - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi diff --git a/examples/elasticsearch/expose-disable.yaml b/examples/elasticsearch/expose-disable.yaml index be0c30a12..c5c7137e8 100644 --- 
a/examples/elasticsearch/expose-disable.yaml +++ b/examples/elasticsearch/expose-disable.yaml @@ -1,15 +1,19 @@ -apiVersion: apps.kubeblocks.io/v1alpha1 +apiVersion: operations.kubeblocks.io/v1alpha1 kind: OpsRequest metadata: name: es-expose-disable namespace: default spec: # Specifies the name of the Cluster resource that this operation is targeting. - clusterName: es-cluster + clusterName: es-multinode # Lists Expose objects, each specifying a Component and its services to be exposed. expose: - # Specifies the name of the Component. - - componentName: mdit + # Specifies the name of the Component. + # - master + # - data + # - ingest + # - transform + - componentName: master # Specifies a list of OpsService. When an OpsService is exposed, a corresponding ClusterService will be added to `cluster.spec.services`. services: - name: internet diff --git a/examples/elasticsearch/expose-enable.yaml b/examples/elasticsearch/expose-enable.yaml index 74cac55f3..d9f4c4c38 100644 --- a/examples/elasticsearch/expose-enable.yaml +++ b/examples/elasticsearch/expose-enable.yaml @@ -1,19 +1,31 @@ -apiVersion: apps.kubeblocks.io/v1alpha1 +apiVersion: operations.kubeblocks.io/v1alpha1 kind: OpsRequest metadata: name: es-expose-enable namespace: default spec: # Specifies the name of the Cluster resource that this operation is targeting. - clusterName: es-cluster + clusterName: es-multinode # Lists Expose objects, each specifying a Component and its services to be exposed. expose: - # Specifies the name of the Component. - - componentName: mdit + # Specifies the name of the Component. + # - master + # - data + # - ingest + # - transform + - componentName: master # Specifies a list of OpsService. When an OpsService is exposed, a corresponding ClusterService will be added to `cluster.spec.services`. services: - name: internet + # Determines how the Service is exposed. Defaults to 'ClusterIP'. + # Valid options are `ClusterIP`, `NodePort`, and `LoadBalancer`. 
serviceType: LoadBalancer + # Contains cloud provider related parameters if ServiceType is LoadBalancer. + # Following is an example for Aliyun ACK, please adjust the following annotations as needed. + annotations: + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: internet + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-charge-type: "" + service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec: slb.s1.small ports: - name: es-http port: 9200 diff --git a/examples/elasticsearch/pod-monitor.yaml b/examples/elasticsearch/pod-monitor.yaml new file mode 100644 index 000000000..c94453006 --- /dev/null +++ b/examples/elasticsearch/pod-monitor.yaml @@ -0,0 +1,19 @@ + +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: es-cluster-pod-monitor + namespace: monitoring # Note: this is namespace for prometheus operator + labels: # this is labels set in `prometheus.spec.podMonitorSelector` + release: prometheus +spec: + podMetricsEndpoints: + - path: /metrics + port: metrics + scheme: http + namespaceSelector: + matchNames: + - default + selector: + matchLabels: + app.kubernetes.io/instance: es-multinode \ No newline at end of file diff --git a/examples/elasticsearch/restart.yaml b/examples/elasticsearch/restart.yaml index 0c448346a..3e53c1c11 100644 --- a/examples/elasticsearch/restart.yaml +++ b/examples/elasticsearch/restart.yaml @@ -1,15 +1,15 @@ -apiVersion: apps.kubeblocks.io/v1alpha1 +apiVersion: operations.kubeblocks.io/v1alpha1 kind: OpsRequest metadata: name: elasticsearch-restart namespace: default spec: # Specifies the name of the Cluster resource that this operation is targeting. - clusterName: elasticsearch-cluster + clusterName: es-multinode type: Restart # Lists Components to be restarted. ComponentOps specifies the Component to be operated on. restart: - # Specifies the name of the Component. + # Specifies the name of the Component.If not specified, ALL Components will be restarted. 
# - master # - data # - ingest diff --git a/examples/elasticsearch/horizontalscale.yaml b/examples/elasticsearch/scale-in.yaml similarity index 64% rename from examples/elasticsearch/horizontalscale.yaml rename to examples/elasticsearch/scale-in.yaml index 660ea3a79..a23dc20e3 100644 --- a/examples/elasticsearch/horizontalscale.yaml +++ b/examples/elasticsearch/scale-in.yaml @@ -1,11 +1,11 @@ -apiVersion: apps.kubeblocks.io/v1alpha1 +apiVersion: operations.kubeblocks.io/v1alpha1 kind: OpsRequest metadata: - name: elasticsearch-horizontalscaling + name: es-scale-in namespace: default spec: # Specifies the name of the Cluster resource that this operation is targeting. - clusterName: elasticsearch-cluster + clusterName: es-multinode type: HorizontalScaling # Lists HorizontalScaling objects, each specifying scaling requirements for a Component, including desired total replica counts, configurations for new instances, modifications for existing instances, and instance downscaling options horizontalScaling: @@ -15,5 +15,8 @@ spec: # - ingest # - transform - componentName: master - # Specifies the number of total replicas. - replicas: 2 + # Specifies the replica changes for scaling in components + scaleIn: + # Specifies the replica changes for the component. + # add one more replica to current component + replicaChanges: 1 diff --git a/examples/elasticsearch/scale-out.yaml b/examples/elasticsearch/scale-out.yaml new file mode 100644 index 000000000..8866a3271 --- /dev/null +++ b/examples/elasticsearch/scale-out.yaml @@ -0,0 +1,22 @@ +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: es-scale-out + namespace: default +spec: + # Specifies the name of the Cluster resource that this operation is targeting. 
+ clusterName: es-multinode + type: HorizontalScaling + # Lists HorizontalScaling objects, each specifying scaling requirements for a Component, including desired total replica counts, configurations for new instances, modifications for existing instances, and instance downscaling options + horizontalScaling: + # Specifies the name of the Component. + # - master + # - data + # - ingest + # - transform + - componentName: master + # Specifies the replica changes for scaling out components + scaleOut: + # Specifies the replica changes for the component. + # add one more replica to current component + replicaChanges: 1 diff --git a/examples/elasticsearch/start.yaml b/examples/elasticsearch/start.yaml index d67752e86..3261b6f9f 100644 --- a/examples/elasticsearch/start.yaml +++ b/examples/elasticsearch/start.yaml @@ -1,9 +1,9 @@ -apiVersion: apps.kubeblocks.io/v1alpha1 +apiVersion: operations.kubeblocks.io/v1alpha1 kind: OpsRequest metadata: name: elasticsearch-start namespace: default spec: # Specifies the name of the Cluster resource that this operation is targeting. - clusterName: elasticsearch-cluster + clusterName: es-multinode type: Start diff --git a/examples/elasticsearch/stop.yaml b/examples/elasticsearch/stop.yaml index 7c0d56ea2..59d6d1967 100644 --- a/examples/elasticsearch/stop.yaml +++ b/examples/elasticsearch/stop.yaml @@ -1,9 +1,9 @@ -apiVersion: apps.kubeblocks.io/v1alpha1 +apiVersion: operations.kubeblocks.io/v1alpha1 kind: OpsRequest metadata: name: elasticsearch-stop namespace: default spec: # Specifies the name of the Cluster resource that this operation is targeting. 
- clusterName: elasticsearch-cluster + clusterName: es-multinode type: Stop diff --git a/examples/elasticsearch/verticalscale.yaml b/examples/elasticsearch/verticalscale.yaml index 994e7dbdc..2c14a9134 100644 --- a/examples/elasticsearch/verticalscale.yaml +++ b/examples/elasticsearch/verticalscale.yaml @@ -1,19 +1,20 @@ -apiVersion: apps.kubeblocks.io/v1alpha1 +apiVersion: operations.kubeblocks.io/v1alpha1 kind: OpsRequest metadata: name: elasticsearch-verticalscaling namespace: default spec: # Specifies the name of the Cluster resource that this operation is targeting. - clusterName: elasticsearch-cluster + clusterName: es-multinode type: VerticalScaling # Lists VerticalScaling objects, each specifying a component and its desired compute resources for vertical scaling. verticalScaling: + # Specifies the name of the Component. # - master # - data # - ingest # - transform - - componentName: ingest + - componentName: master # VerticalScaling refers to the process of adjusting the compute resources (e.g., CPU, memory) allocated to a Component. It defines the parameters required for the operation. requests: cpu: '1' diff --git a/examples/elasticsearch/volumeexpand.yaml b/examples/elasticsearch/volumeexpand.yaml index 0080c68e8..94cc29f6f 100644 --- a/examples/elasticsearch/volumeexpand.yaml +++ b/examples/elasticsearch/volumeexpand.yaml @@ -1,24 +1,22 @@ -apiVersion: apps.kubeblocks.io/v1alpha1 +apiVersion: operations.kubeblocks.io/v1alpha1 kind: OpsRequest metadata: name: elasticsearch-volumeexpansion namespace: default spec: # Specifies the name of the Cluster resource that this operation is targeting. - clusterName: elasticsearch-cluster + clusterName: es-multinode type: VolumeExpansion - # Lists VolumeExpansion objects, each specifying a component and its corresponding volumeClaimTemplates that requires storage expansion. + # Lists VolumeExpansion objects, each specifying a component and its corresponding volumeClaimTemplates that requires storage expansion. 
volumeExpansion: # Specifies the name of the Component. # - master # - data # - ingest # - transform - - componentName: transform + - componentName: data # volumeClaimTemplates specifies the storage size and volumeClaimTemplate name. volumeClaimTemplates: # A reference to the volumeClaimTemplate name from the cluster components. - # - datanode, datanode - # - etcd, etcd-storage - name: data storage: 30Gi diff --git a/examples/rabbitmq/README.md b/examples/rabbitmq/README.md index b4e6f58b8..8f1e0fe53 100644 --- a/examples/rabbitmq/README.md +++ b/examples/rabbitmq/README.md @@ -322,7 +322,6 @@ kubectl port-forward svc/rabbitmq-cluster-rabbitmq 15672:15672 Then log in to the RabbitMQ Management console at `http://:/` with the user and password. - The user and password can be found in the cluster secrets named after `--account-`. In this case, the secret name is `rabbitmq-cluster-rabbitmq-account-root`. ```bash