From 9f7ad3e3d5a8113e1f488fb8d8fb2544391b124c Mon Sep 17 00:00:00 2001 From: Shanshan Date: Mon, 9 Dec 2024 17:58:00 +0800 Subject: [PATCH] chore: update pulsar examples --- addons-cluster/pulsar/templates/_helpers.tpl | 2 +- addons-cluster/pulsar/templates/cluster.yaml | 10 +- addons/pulsar/templates/cmpd-zookeeper-2.yaml | 9 + addons/pulsar/templates/cmpd-zookeeper-3.yaml | 9 + examples/docs/install-addon.md | 7 +- examples/pulsar/README.md | 324 +++++++++++++++--- examples/pulsar/cluster-basic.yaml | 105 ++++++ examples/pulsar/cluster-cmpd.yaml | 136 -------- examples/pulsar/cluster-enhanced.yaml | 119 +++++++ examples/pulsar/cluster-node-port.yaml | 97 ++++++ .../pulsar/cluster-service-descriptor.yaml | 230 ++++--------- examples/pulsar/cluster-service-refer.yaml | 83 +++++ .../pulsar/cluster-zookeeper-separate.yaml | 167 --------- examples/pulsar/cluster.yaml | 142 -------- examples/pulsar/configure.yaml | 14 +- examples/pulsar/restart.yaml | 12 +- .../{horizontalscale.yaml => scale-in.yaml} | 21 +- examples/pulsar/scale-out.yaml | 23 ++ examples/pulsar/start.yaml | 6 +- examples/pulsar/stop.yaml | 13 +- examples/pulsar/verticalscale.yaml | 14 +- examples/pulsar/zookeeper-cluster.yaml | 37 ++ .../pulsar/zookeeper-service-descriptor.yaml | 17 + 23 files changed, 896 insertions(+), 701 deletions(-) create mode 100644 examples/pulsar/cluster-basic.yaml delete mode 100644 examples/pulsar/cluster-cmpd.yaml create mode 100644 examples/pulsar/cluster-enhanced.yaml create mode 100644 examples/pulsar/cluster-node-port.yaml create mode 100644 examples/pulsar/cluster-service-refer.yaml delete mode 100644 examples/pulsar/cluster-zookeeper-separate.yaml delete mode 100644 examples/pulsar/cluster.yaml rename examples/pulsar/{horizontalscale.yaml => scale-in.yaml} (58%) create mode 100644 examples/pulsar/scale-out.yaml create mode 100644 examples/pulsar/zookeeper-cluster.yaml create mode 100644 examples/pulsar/zookeeper-service-descriptor.yaml diff --git a/addons-cluster/pulsar/templates/_helpers.tpl b/addons-cluster/pulsar/templates/_helpers.tpl index 27f094390..262554686 100644 --- a/addons-cluster/pulsar/templates/_helpers.tpl +++ b/addons-cluster/pulsar/templates/_helpers.tpl @@ -101,7 +101,7 @@ serviceRefs: {{- end}} {{- end}} {{- if .Values.serviceReference.zookeeper.serviceDescriptor }} - serviceDescriptor: {{.Values.serviceReference.zookeeper.serviceDescriptor}} + serviceDescriptor: {{.Values.serviceReference.zookeeper.serviceDescriptor}} {{- end }} {{- end }} {{- end}} diff --git a/addons-cluster/pulsar/templates/cluster.yaml b/addons-cluster/pulsar/templates/cluster.yaml index 8cab71e29..4253dfd4d 100644 --- a/addons-cluster/pulsar/templates/cluster.yaml +++ b/addons-cluster/pulsar/templates/cluster.yaml @@ -7,11 +7,6 @@ metadata: {{- if .Values.commonAnnotations }} annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- if eq .Values.version "3.0.2" }} - ## Todo: use cluster api to control the rendering logic of service in component definition - {{- include "pulsar-cluster.brokerAddrFeatureGate" . 
| nindent 4 }} - "kubeblocks.io/extra-env": '{"KB_PULSAR_BROKER_NODEPORT": "{{ $.Values.nodePortEnabled }}"}' - {{- end }} {{- end }} spec: terminationPolicy: {{ $.Values.terminationPolicy }} @@ -117,6 +112,11 @@ spec: componentDef: pulsar-broker serviceVersion: {{ .Values.version }} {{- end }} + {{- if eq .Values.version "3.0.2" }} + env: + - name: KB_PULSAR_BROKER_NODEPORT + value: "{{ $.Values.nodePortEnabled }}" + {{- end }} {{ include "pulsar-zookeeper-ref" . | nindent 6 }} {{- if .Values.nodePortEnabled }} services: diff --git a/addons/pulsar/templates/cmpd-zookeeper-2.yaml b/addons/pulsar/templates/cmpd-zookeeper-2.yaml index 6749a5744..88ea0b3ae 100644 --- a/addons/pulsar/templates/cmpd-zookeeper-2.yaml +++ b/addons/pulsar/templates/cmpd-zookeeper-2.yaml @@ -12,6 +12,15 @@ spec: serviceKind: pulsar serviceVersion: {{ .Values.defaultServiceVersion.zookeeper.major2 }} updateStrategy: BestEffortParallel + services: + - name: zookeeper + serviceName: zookeeper + spec: + type: ClusterIP + ports: + - name: client + port: 2181 + targetPort: 2181 configs: - name: zookeeper-env templateRef: {{ include "pulsar.zookeeperEnvTplName" . }} diff --git a/addons/pulsar/templates/cmpd-zookeeper-3.yaml b/addons/pulsar/templates/cmpd-zookeeper-3.yaml index e0520dc95..c0a6aea5d 100644 --- a/addons/pulsar/templates/cmpd-zookeeper-3.yaml +++ b/addons/pulsar/templates/cmpd-zookeeper-3.yaml @@ -12,6 +12,15 @@ spec: serviceKind: pulsar serviceVersion: {{ .Values.defaultServiceVersion.zookeeper.major3 }} updateStrategy: BestEffortParallel + services: + - name: zookeeper + serviceName: zookeeper + spec: + type: ClusterIP + ports: + - name: client + port: 2181 + targetPort: 2181 configs: - name: zookeeper-env templateRef: {{ include "pulsar.zookeeperEnvTplName" . }} diff --git a/examples/docs/install-addon.md b/examples/docs/install-addon.md index 80b2e6088..7989bcab0 100644 --- a/examples/docs/install-addon.md +++ b/examples/docs/install-addon.md @@ -13,9 +13,12 @@ helm repo add kubeblocks-addons https://jihulab.com/api/v4/projects/150246/packa # Update helm repo helm repo update # Search versions of the Addon -helm search repo kubeblocks/{addonName} --versions +helm search repo kubeblocks-addons/{addonName} --versions # Install the version you want (replace $version with the one you need) -helm upgrade -i mysql kubeblocks-addons/{addonName} --version $version -n kb-system +helm upgrade -i {release-name} kubeblocks-addons/{addonName} --version $version -n kb-system +## e.g. +## helm upgrade -i kb-addon-pulsar kubeblocks-addons/pulsar --version 0.9.1 -n kb-system +## helm upgrade -i kb-addon-zookeeper kubeblocks-addons/zookeeper --version 0.9.1 -n kb-system ``` ## Using kbcli diff --git a/examples/pulsar/README.md b/examples/pulsar/README.md index 8b25a1138..71a73f9d4 100644 --- a/examples/pulsar/README.md +++ b/examples/pulsar/README.md @@ -1,115 +1,343 @@ # Pulsar ApacheĀ® Pulsarā„¢ is an open-source, distributed messaging and streaming platform built for the cloud. +Pulsar's architecture is designed to provide scalability, reliability, and flexibility. It consists of several key components: -## Prerequisites +- Brokers: These are stateless components responsible for handling incoming messages from producers, dispatching messages to consumers, and managing communication with the configuration store for coordination tasks. They also interface with Bookkeeper instances (bookies) for message storage and rely on a cluster-specific Zookeeper cluster for certain tasks. 
-This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the kubectl command line tool and helm somewhere in your path. Please see the [getting started](https://kubernetes.io/docs/setup/) and [Installing Helm](https://helm.sh/docs/intro/install/) for installation instructions for your platform. +- Apache Bookkeeper (aka bookies): It handles the persistent storage of messages. Bookkeeper is a distributed write-ahead log (WAL) system that provides several advantages, including the ability to handle many independent logs (ledgers), efficient storage for sequential data, and guarantees read consistency even in the presence of system failures. -Also, this example requires kubeblocks installed and running. Here is the steps to install kubeblocks, please replace "`$kb_version`" with the version you want to use. -```bash -# Add Helm repo -helm repo add kubeblocks https://apecloud.github.io/helm-charts -# If github is not accessible or very slow for you, please use following repo instead -helm repo add kubeblocks https://jihulab.com/api/v4/projects/85949/packages/helm/stable +- Zookeeper: Pulsar uses Zookeeper clusters for coordination tasks between Pulsar clusters and for cluster-level configuration and coordination. -# Update helm repo -helm repo update +Optional components include: -# Get the versions of KubeBlocks and select the one you want to use -helm search repo kubeblocks/kubeblocks --versions -# If you want to obtain the development versions of KubeBlocks, Please add the '--devel' parameter as the following command -helm search repo kubeblocks/kubeblocks --versions --devel +- Pulsar Proxy: It is an optional gateway. It is typically used in scenarios where direct access to brokers is restricted due to network policies or security requirements. The proxy helps in managing client connections and forwarding requests to the appropriate brokers, providing an additional layer of security and simplifying network configurations. -# Create dependent CRDs -kubectl create -f https://github.com/apecloud/kubeblocks/releases/download/v$kb_version/kubeblocks_crds.yaml -# If github is not accessible or very slow for you, please use following command instead -kubectl create -f https://jihulab.com/api/v4/projects/98723/packages/generic/kubeblocks/v$kb_version/kubeblocks_crds.yaml +- Bookies Recovery: It is an optional component that helps in recovering data from failed bookies. It is used in scenarios where a bookie fails and data needs to be recovered from other bookies in the cluster. -# Install KubeBlocks -helm install kubeblocks kubeblocks/kubeblocks --namespace kb-system --create-namespace --version="$kb_version" -``` - +## Features In KubeBlocks + +### Lifecycle Management + +| Topology | Horizontal
scaling | Vertical
scaling | Expand
volume | Restart | Stop/Start | Configure | Expose | Switchover |
+|------------------|------------------------|-----------------------|-------------------|---------|------------|-----------|--------|------------|
+| Basic/Enhanced   | Yes                    | Yes                   | Yes               | Yes     | Yes        | Yes       | Yes    | No         |
+
+- Basic Mode: Includes the core Pulsar components: brokers, bookies, and Zookeeper.
+- Enhanced Mode: Adds the optional Pulsar Proxy and Bookies Recovery components on top of Basic mode.
+
+### Backup and Restore
+
+| Feature | Method | Description |
+|-------------|--------|------------|
+
+### Versions
+
+| Versions |
+|----------|
+| 2.11.2, 3.0.2 |
+
+## Prerequisites
+
+- Kubernetes cluster >= v1.21
+- `kubectl` installed, refer to [K8s Install Tools](https://kubernetes.io/docs/tasks/tools/)
+- Helm, refer to [Installing Helm](https://helm.sh/docs/intro/install/)
+- KubeBlocks installed and running, refer to [Install KubeBlocks](../docs/prerequisites.md)
+- Pulsar Addon enabled, refer to [Install Addons](../docs/install-addon.md)
 
 ## Examples
 
-### [Create](cluster.yaml)
-Create a pulsar cluster with specified cluster definition.
+### Create
+
+#### Basic Mode
+
+Create a Pulsar cluster in `Basic` mode.
+
 ```bash
-kubectl apply -f examples/pulsar/cluster.yaml
+kubectl apply -f examples/pulsar/cluster-basic.yaml
 ```
-Create a pulsar cluster with specified `serviceRefs.serviceDescriptor`, when referencing a service provided by external sources.
+
+A cluster with one broker, four bookies, and one zookeeper replica will be created.
+The Zookeeper component is created first; the Broker and Bookies components are created once the Zookeeper component is `Running`.
+
+#### Enhanced Mode
+
+Create a Pulsar cluster in `Enhanced` mode.
+
 ```bash
-kubectl apply -f examples/pulsar/cluster-service-descriptor.yaml
+kubectl apply -f examples/pulsar/cluster-enhanced.yaml
 ```
-Create a pulsar cluster with specified `serviceRefs.cluster`, when zookeeper service is provided by another cluster created by the same kubeblocks.
+A cluster with one broker, four bookies, one bookies-recovery replica, three proxies, and one zookeeper replica will be created.
+
+These components are created in the following order: Zookeeper and Bookies Recovery first, then Bookies and Broker, and finally Proxy.
+
+### Horizontal scaling
+
+> [!IMPORTANT]
+> Please check how many replicas are allowed for each component in the cluster before scaling out/in.
+
+A Pulsar cluster can scale to handle hundreds of brokers, depending on the workload and the resources available.
+Suggested practices are:
+
+- Start with a small number of brokers (3-5) for most deployments
+- Scale horizontally as needed based on metrics
+- Monitor performance and resource utilization
+
+Here is an example of scale-out and scale-in operations for the Broker component.
+
+#### [Scale-out](scale-out.yaml)
+
+Scale out the Broker component by adding ONE more replica:
+
 ```bash
-kubectl apply -f examples/pulsar/cluster-zookeeper-separate.yaml
+kubectl apply -f examples/pulsar/scale-out.yaml
 ```
 
-Starting from kubeblocks 0.9.0, we introduced a more flexible cluster creation method based on components, allowing customization of cluster topology, functionalities and scale according to specific requirements.
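+After the OpsRequest is applied, you can watch its progress before moving on (a quick check; the OpsRequest name comes from `scale-out.yaml` and assumes the `default` namespace used throughout these examples):
+
+```bash
+kubectl get opsrequest pulsar-broker-scale-out -n default -w
+```
+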
+#### [Scale-in](scale-in.yaml)
+
+Scale in the Broker component by removing ONE replica:
+
 ```bash
-kubectl apply -f examples/pulsar/cluster-cmpd.yaml
+kubectl apply -f examples/pulsar/scale-in.yaml
 ```
 
-### [Horizontal scaling](horizontalscale.yaml)
-Horizontal scaling out or in specified components replicas in the cluster
-```bash
-kubectl apply -f examples/pulsar/horizontalscale.yaml
+#### Scale-in/out using Cluster API
+
+Alternatively, you can update the `replicas` field under `spec.componentSpecs` to your desired non-zero number.
+
+```yaml
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: pulsar-basic-cluster
+  namespace: default
+spec:
+  terminationPolicy: Delete
+  componentSpecs:
+    - name: broker
+      componentDef: pulsar-broker
+      serviceVersion: 3.0.2
+      replicas: 1 # update to your desired number
+  ...
 ```
 
 ### [Vertical scaling](verticalscale.yaml)
+
 Vertical scaling up or down specified components requests and limits cpu or memory resource in the cluster
+
 ```bash
 kubectl apply -f examples/pulsar/verticalscale.yaml
 ```
 
-### [Expand volume](volumeexpand.yaml)
-Increase size of volume storage with the specified components in the cluster
-```bash
-kubectl apply -f examples/pulsar/volumeexpand.yaml
+#### Scale-up/down using Cluster API
+
+Alternatively, you may update the `spec.componentSpecs.resources` field to the desired resources for vertical scaling.
+
+```yaml
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: pulsar-basic-cluster
+  namespace: default
+spec:
+  terminationPolicy: Delete
+  componentSpecs:
+    - name: broker
+      componentDef: pulsar-broker
+      serviceVersion: 3.0.2
+      resources:
+        requests:
+          cpu: "1"      # Update the resources to your need.
+          memory: "2Gi" # Update the resources to your need.
+        limits:
+          cpu: "2"      # Update the resources to your need.
+          memory: "4Gi" # Update the resources to your need.
 ```
 
 ### [Restart](restart.yaml)
+
 Restart the specified components in the cluster
+
 ```bash
 kubectl apply -f examples/pulsar/restart.yaml
 ```
 
 ### [Stop](stop.yaml)
-Stop the cluster and release all the pods of the cluster, but the storage will be reserved
+
+Stop the cluster and release all the pods of the cluster, but the storage will be reserved.
+
+You may stop specific components or the entire cluster. Here is an example of stopping the whole cluster.
+
 ```bash
 kubectl apply -f examples/pulsar/stop.yaml
 ```
 
 ### [Start](start.yaml)
+
 Start the stopped cluster
+
 ```bash
 kubectl apply -f examples/pulsar/start.yaml
 ```
 
-### [Configure](configure.yaml)
+### [Reconfigure](configure.yaml)
+
 Configure parameters with the specified components in the cluster
+
 ```bash
 kubectl apply -f examples/pulsar/configure.yaml
 ```
 
+It sets the `lostBookieRecoveryDelay` parameter of the bookies component to `1000`.
+
+> [!WARNING]
+> As `lostBookieRecoveryDelay` is defined as a static parameter, all bookies replicas will be restarted to make sure the reconfiguration takes effect.
+
 ### Delete
+
 If you want to delete the cluster and all its resource, you can modify the termination policy and then delete the cluster
+
 ```bash
-kubectl patch cluster pulsar-cluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge"
+kubectl patch cluster pulsar-basic-cluster -p '{"spec":{"terminationPolicy":"WipeOut"}}' --type="merge"
 
-kubectl delete cluster pulsar-cluster
+kubectl delete cluster pulsar-basic-cluster
 ```
 
-Delete `serviceRefs.serviceDescriptor`.
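+To confirm the deletion has finished, you can check that the Cluster object is gone (a simple sanity check, assuming the `default` namespace used throughout these examples):
+
+```bash
+kubectl get cluster pulsar-basic-cluster -n default
+# once the deletion completes, this should report that the cluster is not found
+```
+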
+## Appendix
 
-```bash
-kubectl delete ServiceDescriptor pulsar-cluster-zookeeper-service
+Here are some interesting features we developed for Pulsar.
+
+### 1. Reuse an existing ZK Cluster through service reference
+
+As mentioned earlier, a Pulsar Cluster needs three components: Broker, Bookies, and Zookeeper. Since Zookeeper (ZK) is a widely used component, you may already have one or more ZK clusters before creating a Pulsar Cluster and may not want to create another one. For such cases, KubeBlocks provides the `Service Reference` API to refer to a service provided by either an internal (KubeBlocks) cluster or an external source.
+
+#### 1.1 Refer to an Internal (to KubeBlocks) Zookeeper Cluster
+
+Suppose you already have a ZK Cluster named `zk-cluster` managed by KubeBlocks; if not, you can create one:
+
+```bash
+kubectl create -f examples/pulsar/zookeeper-cluster.yaml
+```
+
+To create a Pulsar Cluster that refers to this existing ZK Cluster, apply:
+
+```bash
+kubectl create -f examples/pulsar/cluster-service-refer.yaml
 ```
 
-Delete `serviceRefs.cluster`.
+The key changes are: we add the `serviceRefs` API to express such an inter-cluster service reference for each component, and we no longer need to specify the Zookeeper component in the Pulsar Cluster.
+
+```yaml
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: pulsar-service-ref
+  namespace: default
+spec:
+  terminationPolicy: Delete
+  componentSpecs:
+    - name: proxy
+      componentDef: pulsar-proxy
+      serviceVersion: 3.0.2
+      # Defines a list of ServiceRef for a Component, enabling access to both
+      # external services and
+      # Services provided by other Clusters.
+      serviceRefs:
+        - name: pulsarZookeeper # identifier of the service reference declaration, defined in `componentDefinition.spec.serviceRefDeclarations[*].name`
+          namespace: default # Specifies the namespace of the referenced Cluster
+          clusterServiceSelector: # References a service provided by another KubeBlocks Cluster
+            cluster: zk-cluster # Cluster Name
+            service:
+              component: zookeeper # Component Name
+              service: zookeeper # service name defined in Zookeeper ComponentDefinition
+              port: "2881" # port
+      replicas: 3
+  ...
+```
+
+#### 1.2 Refer to an External (to KubeBlocks) Zookeeper Cluster
+
+Create a `ServiceDescriptor` for the external Zookeeper Cluster, specifying its endpoint and port.
+
+```bash
+kubectl apply -f examples/pulsar/zookeeper-service-descriptor.yaml
+```
+
+Create a Pulsar cluster with `serviceRefs.serviceDescriptor` specified, referencing the service provided by the external source.
+
 ```bash
-kubectl delete cluster zookeeperp-cluster
+kubectl apply -f examples/pulsar/cluster-service-descriptor.yaml
 ```
+
+The key change is that we set `serviceRefs.serviceDescriptor` to express the inter-cluster service reference.
+
+```yaml
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: pulsar-service-descriptor
+  namespace: default
+spec:
+  terminationPolicy: Delete
+  componentSpecs:
+    - name: proxy
+      componentDef: pulsar-proxy
+      serviceVersion: 3.0.2
+      # Defines a list of ServiceRef for a Component, enabling access to both
+      # external services and
+      # Services provided by other Clusters.
+      serviceRefs:
+        - name: pulsarZookeeper # identifier of the service reference declaration, defined in `componentDefinition.spec.serviceRefDeclarations[*].name`
+          namespace: default # Specifies the namespace of the referenced ServiceDescriptor
+          serviceDescriptor: zookeeper-sd # ServiceDescriptor Name
+  ...
+```
+
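+If the reference does not resolve as expected, first verify that the `ServiceDescriptor` exists and points to the endpoint you intend (a quick sanity check; the name and namespace come from `zookeeper-service-descriptor.yaml`):
+
+```bash
+kubectl get servicedescriptor zookeeper-sd -n default
+```
+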
+### 2. Enable NodePort for Pulsar
+
+By default, Pulsar does not expose any service to the external network. If you want to expose the service to the external network, you can enable NodePort services.
+
+```bash
+kubectl apply -f examples/pulsar/cluster-node-port.yaml
+```
+
+The key differences are:
+
+1. set the service type to `NodePort` (default is `ClusterIP`)
+2. set the env `KB_PULSAR_BROKER_NODEPORT` to `true`; it will set up the advertised listener to use the NodePort service.
+
+```yaml
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: pulsar-node-port
+  namespace: default
+spec:
+  terminationPolicy: Delete
+  services:
+    - name: broker-bootstrap
+      serviceName: broker-bootstrap
+      componentSelector: broker
+      spec:
+        type: NodePort # set svc type to NodePort
+        ports:
+          - name: pulsar
+            port: 6650
+            targetPort: 6650
+    - name: zookeeper
+  componentSpecs:
+    - name: broker
+      componentDef: pulsar-broker
+      serviceVersion: 3.0.2
+      env:
+        - name: KB_PULSAR_BROKER_NODEPORT # set KB_PULSAR_BROKER_NODEPORT to true
+          value: "true"
+      services:
+        - name: advertised-listener
+          serviceType: NodePort # set svc type to NodePort
+          podService: true
+    - name: bookies
+    - name: zookeeper
+  ...
+```
\ No newline at end of file
diff --git a/examples/pulsar/cluster-basic.yaml b/examples/pulsar/cluster-basic.yaml
new file mode 100644
index 000000000..909d45530
--- /dev/null
+++ b/examples/pulsar/cluster-basic.yaml
@@ -0,0 +1,105 @@
+apiVersion: apps.kubeblocks.io/v1
+kind: Cluster
+metadata:
+  name: pulsar-basic-cluster
+  namespace: default
+spec:
+  # Specifies the behavior when a Cluster is deleted.
+  # Valid options are: [DoNotTerminate, Delete, WipeOut] (`Halt` is deprecated since KB 0.9)
+  # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact.
+  # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data.
+  # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss.
+  terminationPolicy: Delete
+  # Specifies the name of the ClusterDefinition to use when creating a Cluster.
+  # Note: DO NOT UPDATE THIS FIELD
+  # The value must be `pulsar` to create a Pulsar Cluster
+  clusterDef: pulsar
+  # Specifies the name of the ClusterTopology to be used when creating the
+  # Cluster.
+  topology: pulsar-basic-cluster
+  # Defines a list of additional Services that are exposed by a Cluster.
+  services:
+    - name: broker-bootstrap
+      serviceName: broker-bootstrap
+      componentSelector: broker
+      spec:
+        type: ClusterIP
+        ports:
+          - name: pulsar
+            port: 6650
+            targetPort: 6650
+          - name: http
+            port: 80
+            targetPort: 8080
+          - name: kafka-client
+            port: 9092
+            targetPort: 9092
+    - name: zookeeper
+      serviceName: zookeeper
+      componentSelector: zookeeper
+      spec:
+        type: ClusterIP
+        ports:
+          - name: client
+            port: 2181
+            targetPort: 2181
+  componentSpecs:
+    - name: broker
+      # ServiceVersion specifies the version of the Service expected to be
+      # provisioned by this Component.
+ # Valid options are: [2.11.2,3.0.2] + serviceVersion: 3.0.2 + replicas: 1 + env: + - name: KB_PULSAR_BROKER_NODEPORT + value: "false" + resources: + limits: + cpu: "1" + memory: "512Mi" + requests: + cpu: "200m" + memory: "512Mi" + - name: bookies + serviceVersion: 3.0.2 + replicas: 4 + resources: + limits: + cpu: "1" + memory: "512Mi" + requests: + cpu: "200m" + memory: "512Mi" + volumeClaimTemplates: + - name: ledgers + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + - name: journal + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + - name: zookeeper + serviceVersion: 3.0.2 + replicas: 1 + resources: + limits: + cpu: "1" + memory: "512Mi" + requests: + cpu: "100m" + memory: "512Mi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi \ No newline at end of file diff --git a/examples/pulsar/cluster-cmpd.yaml b/examples/pulsar/cluster-cmpd.yaml deleted file mode 100644 index 298156352..000000000 --- a/examples/pulsar/cluster-cmpd.yaml +++ /dev/null @@ -1,136 +0,0 @@ -apiVersion: apps.kubeblocks.io/v1 -kind: Cluster -metadata: - name: pulsar-cluster - namespace: default - annotations: - # KB_PULSAR_BROKER_NODEPORT: enable NodePort services - "kubeblocks.io/extra-env": '{"KB_PULSAR_BROKER_NODEPORT": "false"}' -spec: - # Specifies the behavior when a Cluster is deleted. - # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. - # - `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), allowing for data preservation while stopping other operations. - # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. - # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. - terminationPolicy: Delete - # Defines the list of services that are exposed by a Cluster. This field allows selected components, either from `componentSpecs` or `shardingSpecs`, to be exposed as cluster-level services. Services defined here can be referenced by other clusters using the ServiceRefClusterSelector. - services: - - name: proxy - serviceName: proxy - componentSelector: pulsar-proxy - spec: - # - ClusterIP - # - NodePort - type: ClusterIP - ports: - - name: pulsar - port: 6650 - targetPort: 6650 - - name: http - port: 80 - targetPort: 8080 - - name: broker-bootstrap - serviceName: broker-bootstrap - componentSelector: pulsar-broker - spec: - # - ClusterIP - # - NodePort - type: ClusterIP - ports: - - name: pulsar - port: 6650 - targetPort: 6650 - - name: http - port: 80 - targetPort: 8080 - - name: kafka-client - port: 9092 - targetPort: 9092 - # Specifies a list of ClusterComponentSpec objects used to define the individual components that make up a Cluster. This field allows for detailed configuration of each component within the Cluster. - # Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a cluster. - # ClusterComponentSpec defines the specifications for a Component in a Cluster. - componentSpecs: - # Specifies the name of the Component. This name is also part of the Service DNS name and must comply with the IANA service naming rule. 
- - name: pulsar-broker - componentDef: pulsar-broker - disableExporter: true - serviceAccountName: kb-pulsar-cluster - replicas: 1 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - volumeClaimTemplates: - - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - - name: pulsar-proxy - componentDef: pulsar-proxy - replicas: 1 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - - name: bookies - componentDef: pulsar-bookkeeper - replicas: 3 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - volumeClaimTemplates: - - name: journal - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - - name: ledgers - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - - name: bookies-recovery - componentDef: pulsar-bookies-recovery - replicas: 1 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - - name: zookeeper - componentDef: pulsar-zookeeper - replicas: 3 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - volumeClaimTemplates: - - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi diff --git a/examples/pulsar/cluster-enhanced.yaml b/examples/pulsar/cluster-enhanced.yaml new file mode 100644 index 000000000..88cfa1d45 --- /dev/null +++ b/examples/pulsar/cluster-enhanced.yaml @@ -0,0 +1,119 @@ +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pulsar-enhanced-cluster + namespace: default +spec: + # Specifies the behavior when a Cluster is deleted. + # Valid options are: [DoNotTerminate, Delete, WipeOut] (`Halt` is deprecated since KB 0.9) + # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. + # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. + # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. + terminationPolicy: Delete + # Specifies the name of the ClusterDefinition to use when creating a Cluster. + # Note: DO NOT UPDATE THIS FIELD + # The value must be `pulsar` to create a Pulsar Cluster + clusterDef: pulsar + # Specifies the name of the ClusterTopology to be used when creating the + # Cluster. + topology: pulsar-enhanced-cluster + # Defines a list of additional Services that are exposed by a Cluster. 
+ services: + - name: broker-bootstrap + serviceName: broker-bootstrap + componentSelector: broker + spec: + type: ClusterIP + ports: + - name: pulsar + port: 6650 + targetPort: 6650 + - name: http + port: 80 + targetPort: 8080 + - name: kafka-client + port: 9092 + targetPort: 9092 + - name: zookeeper + serviceName: zookeeper + componentSelector: zookeeper + spec: + type: ClusterIP + ports: + - name: client + port: 2181 + targetPort: 2181 + componentSpecs: + - name: proxy + serviceVersion: 3.0.2 + replicas: 3 + resources: + limits: + cpu: + memory: "512Mi" + requests: + cpu: "200m" + memory: "512Mi" + - name: bookies-recovery + serviceVersion: 3.0.2 + replicas: 1 + resources: + limits: + cpu: + memory: "512Mi" + requests: + cpu: "200m" + memory: "512Mi" + - name: broker + serviceVersion: 3.0.2 + replicas: 1 + resources: + limits: + cpu: + memory: "512Mi" + requests: + cpu: "200m" + memory: "512Mi" + - name: bookies + serviceVersion: 3.0.2 + replicas: 4 + resources: + limits: + cpu: + memory: "512Mi" + requests: + cpu: "200m" + memory: "512Mi" + volumeClaimTemplates: + - name: ledgers + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + - name: journal + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + - name: zookeeper + serviceVersion: 3.0.2 + replicas: 1 + resources: + limits: + cpu: + memory: "512Mi" + requests: + cpu: "100m" + memory: "512Mi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi diff --git a/examples/pulsar/cluster-node-port.yaml b/examples/pulsar/cluster-node-port.yaml new file mode 100644 index 000000000..6ff13ca3c --- /dev/null +++ b/examples/pulsar/cluster-node-port.yaml @@ -0,0 +1,97 @@ +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pulsar-node-port + namespace: default +spec: + terminationPolicy: Delete + clusterDef: pulsar + topology: pulsar-basic-cluster + services: + - name: broker-bootstrap + serviceName: broker-bootstrap + componentSelector: broker + spec: + type: NodePort # set svc type to NodePort + ports: + - name: pulsar + port: 6650 + targetPort: 6650 + - name: http + port: 80 + targetPort: 8080 + - name: kafka-client + port: 9092 + targetPort: 9092 + - name: zookeeper + serviceName: zookeeper + componentSelector: zookeeper + spec: + type: ClusterIP + ports: + - name: client + port: 2181 + targetPort: 2181 + componentSpecs: + - name: broker + serviceVersion: 3.0.2 + replicas: 1 + env: + # set to TRUE to set-up advertised listeners for both + # Pulsar and Kafka protocols in a Kubernetes NodePort environment. 
+ - name: KB_PULSAR_BROKER_NODEPORT + value: "true" + services: + - name: advertised-listener + serviceType: NodePort # set svc type to NodePort + podService: true + resources: + limits: + cpu: "1" + memory: "512Mi" + requests: + cpu: "200m" + memory: "512Mi" + - name: bookies + serviceVersion: 3.0.2 + replicas: 4 + resources: + limits: + cpu: "1" + memory: "512Mi" + requests: + cpu: "200m" + memory: "512Mi" + volumeClaimTemplates: + - name: ledgers + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + - name: journal + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + - name: zookeeper + serviceVersion: 3.0.2 + replicas: 1 + resources: + limits: + cpu: "1" + memory: "512Mi" + requests: + cpu: "100m" + memory: "512Mi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi \ No newline at end of file diff --git a/examples/pulsar/cluster-service-descriptor.yaml b/examples/pulsar/cluster-service-descriptor.yaml index 143f4e9c4..73e2d5db9 100644 --- a/examples/pulsar/cluster-service-descriptor.yaml +++ b/examples/pulsar/cluster-service-descriptor.yaml @@ -1,174 +1,72 @@ apiVersion: apps.kubeblocks.io/v1 kind: Cluster metadata: - name: pulsar-cluster + name: pulsar-service-descriptor namespace: default - annotations: - # KB_PULSAR_BROKER_NODEPORT: enable NodePort services - "kubeblocks.io/extra-env": '{"KB_PULSAR_BROKER_NODEPORT": "false"}' spec: - # Specifies the name of the ClusterDefinition to use when creating a Cluster. - clusterDefinitionRef: pulsar - # Refers to the ClusterVersion name. Deprecated since v0.9, use ComponentVersion instead. This field is maintained for backward compatibility and its use is discouraged. Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. - clusterVersionRef: pulsar-3.0.2 - # Specifies the behavior when a Cluster is deleted. - # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. - # - `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), allowing for data preservation while stopping other operations. - # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. - # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. terminationPolicy: Delete services: - - name: proxy - serviceName: proxy - componentSelector: pulsar-proxy - spec: - # - ClusterIP - # - NodePort - type: ClusterIP - ports: - - name: pulsar - port: 6650 - targetPort: 6650 - - name: http - port: 80 - targetPort: 8080 - - name: broker-bootstrap - serviceName: broker-bootstrap - componentSelector: pulsar-broker - spec: - # - ClusterIP - # - NodePort - type: ClusterIP - ports: - - name: pulsar - port: 6650 - targetPort: 6650 - - name: http - port: 80 - targetPort: 8080 - - name: kafka-client - port: 9092 - targetPort: 9092 - # Specifies a list of ClusterComponentSpec objects used to define the individual components that make up a Cluster. This field allows for detailed configuration of each component within the Cluster. 
Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a cluster. ClusterComponentSpec defines the specifications for a Component in a Cluster. - componentSpecs: - - name: pulsar-broker - componentDefRef: pulsar-broker - serviceRefs: - - name: pulsarZookeeper - namespace: default - serviceDescriptor: pulsar-cluster-zookeeper-service - disableExporter: true - serviceAccountName: kb-pulsar-cluster - replicas: 1 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - volumeClaimTemplates: - - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - - name: pulsar-proxy - componentDefRef: pulsar-proxy - serviceRefs: - - name: pulsarZookeeper - namespace: default - serviceDescriptor: pulsar-cluster-zookeeper-service - disableExporter: true - replicas: 1 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - - name: bookies - componentDefRef: bookies - serviceRefs: - - name: pulsarZookeeper - namespace: default - serviceDescriptor: pulsar-cluster-zookeeper-service - disableExporter: true - replicas: 3 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - volumeClaimTemplates: - - name: journal - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - - name: ledgers + - name: broker-bootstrap + serviceName: broker-bootstrap + componentSelector: broker spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - - name: bookies-recovery - componentDefRef: bookies-recovery - serviceRefs: - - name: pulsarZookeeper - namespace: default - serviceDescriptor: pulsar-cluster-zookeeper-service - disableExporter: true - replicas: 1 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - - name: zookeeper - componentDefRef: zookeeper - disableExporter: true - replicas: 3 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - volumeClaimTemplates: - - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi ---- -apiVersion: apps.kubeblocks.io/v1 -kind: ServiceDescriptor -metadata: - name: pulsar-cluster-zookeeper-service - namespace: default -spec: - # Specifies the type or nature of the service. - # Should represent a well-known application cluster type, such as {mysql, redis, zookeeper}. - serviceKind: zookeeper - # Represents the version of the service reference. - serviceVersion: 3.8.5 - # Represents the endpoint of the service connection credential. - endpoint: - value: "pulsar-cluster-zookeeper" - # Represents the port of the service connection credential. 
- port: - value: "2181" + type: ClusterIP + ports: + - name: pulsar + port: 6650 + targetPort: 6650 + - name: http + port: 80 + targetPort: 8080 + - name: kafka-client + port: 9092 + targetPort: 9092 + componentSpecs: + - name: broker + componentDef: pulsar-broker + serviceVersion: 3.0.2 + env: + - name: KB_PULSAR_BROKER_NODEPORT + value: "false" + serviceRefs: + - name: pulsarZookeeper + namespace: default + serviceDescriptor: zookeeper-sd + replicas: 1 + resources: + limits: + cpu: + memory: "512Mi" + requests: + cpu: "200m" + memory: "512Mi" + - name: bookies + componentDef: pulsar-bookkeeper + serviceVersion: 3.0.2 + serviceRefs: + - name: pulsarZookeeper + namespace: default + serviceDescriptor: zookeeper-sd + replicas: 4 + resources: + limits: + cpu: + memory: "512Mi" + requests: + cpu: "200m" + memory: "512Mi" + volumeClaimTemplates: + - name: ledgers + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + - name: journal + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi \ No newline at end of file diff --git a/examples/pulsar/cluster-service-refer.yaml b/examples/pulsar/cluster-service-refer.yaml new file mode 100644 index 000000000..0796f0e8f --- /dev/null +++ b/examples/pulsar/cluster-service-refer.yaml @@ -0,0 +1,83 @@ +# Source: pulsar-cluster/templates/cluster.yaml +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: pulsar-service-ref + namespace: default +spec: + terminationPolicy: Delete + services: + - name: broker-bootstrap + serviceName: broker-bootstrap + componentSelector: broker + spec: + type: ClusterIP + ports: + - name: pulsar + port: 6650 + targetPort: 6650 + - name: http + port: 80 + targetPort: 8080 + - name: kafka-client + port: 9092 + targetPort: 9092 + componentSpecs: + - name: broker + componentDef: pulsar-broker + serviceVersion: 3.0.2 + env: + - name: KB_PULSAR_BROKER_NODEPORT + value: "false" + serviceRefs: + - name: pulsarZookeeper + namespace: default + clusterServiceSelector: + cluster: zk-cluster + service: + component: zookeeper + service: zookeeper + port: "2881" + replicas: 1 + resources: + limits: + cpu: + memory: "512Mi" + requests: + cpu: "200m" + memory: "512Mi" + - name: bookies + componentDef: pulsar-bookkeeper + serviceVersion: 3.0.2 + serviceRefs: + - name: pulsarZookeeper + namespace: default + clusterServiceSelector: + cluster: zk-cluster + service: + component: zookeeper + service: zookeeper + port: "2881" + replicas: 4 + resources: + limits: + cpu: + memory: "512Mi" + requests: + cpu: "200m" + memory: "512Mi" + volumeClaimTemplates: + - name: ledgers + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + - name: journal + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi \ No newline at end of file diff --git a/examples/pulsar/cluster-zookeeper-separate.yaml b/examples/pulsar/cluster-zookeeper-separate.yaml deleted file mode 100644 index b0e7404ad..000000000 --- a/examples/pulsar/cluster-zookeeper-separate.yaml +++ /dev/null @@ -1,167 +0,0 @@ -apiVersion: apps.kubeblocks.io/v1 -kind: Cluster -metadata: - name: pulsar-cluster - namespace: default - annotations: - # KB_PULSAR_BROKER_NODEPORT: enable NodePort services - "kubeblocks.io/extra-env": '{"KB_PULSAR_BROKER_NODEPORT": "false"}' -spec: - # Specifies the name of the ClusterDefinition to use when creating a Cluster. - clusterDefinitionRef: pulsar - # Refers to the ClusterVersion name. Deprecated since v0.9, use ComponentVersion instead. 
This field is maintained for backward compatibility and its use is discouraged. Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. - clusterVersionRef: pulsar-3.0.2 - # Specifies the behavior when a Cluster is deleted. - # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. - # - `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), allowing for data preservation while stopping other operations. - # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. - # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. - terminationPolicy: Delete - services: - - name: proxy - serviceName: proxy - componentSelector: pulsar-proxy - spec: - # - ClusterIP - # - NodePort - type: ClusterIP - ports: - - name: pulsar - port: 6650 - targetPort: 6650 - - name: http - port: 80 - targetPort: 8080 - - name: broker-bootstrap - serviceName: broker-bootstrap - componentSelector: pulsar-broker - spec: - # - ClusterIP - # - NodePort - type: ClusterIP - ports: - - name: pulsar - port: 6650 - targetPort: 6650 - - name: http - port: 80 - targetPort: 8080 - - name: kafka-client - port: 9092 - targetPort: 9092 - # Specifies a list of ClusterComponentSpec objects used to define the individual components that make up a Cluster. This field allows for detailed configuration of each component within the Cluster. Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a cluster. ClusterComponentSpec defines the specifications for a Component in a Cluster. 
- componentSpecs: - - name: pulsar-broker - componentDefRef: pulsar-broker - serviceRefs: - - name: pulsarZookeeper - namespace: default - cluster: zookeeperp-cluster - disableExporter: true - serviceAccountName: kb-pulsar-cluster - replicas: 3 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - volumeClaimTemplates: - - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - - name: pulsar-proxy - componentDefRef: pulsar-proxy - serviceRefs: - - name: pulsarZookeeper - namespace: default - cluster: zookeeperp-cluster - disableExporter: false - replicas: 1 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - - name: bookies - componentDefRef: bookies - serviceRefs: - - name: pulsarZookeeper - namespace: default - cluster: zookeeperp-cluster - disableExporter: false - replicas: 3 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - volumeClaimTemplates: - - name: journal - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - - name: ledgers - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - - name: bookies-recovery - componentDefRef: bookies-recovery - serviceRefs: - - name: pulsarZookeeper - namespace: default - cluster: zookeeperp-cluster - disableExporter: false - replicas: 1 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi ---- -apiVersion: apps.kubeblocks.io/v1 -kind: Cluster -metadata: - name: zookeeperp-cluster - namespace: default -spec: - clusterDefinitionRef: pulsar-zookeeper - clusterVersionRef: pulsar-3.0.2 - terminationPolicy: WipeOut - componentSpecs: - - name: zookeeper - componentDefRef: zookeeper - serviceAccountName: kb-zookeeperp-cluster - replicas: 3 - resources: - limits: - cpu: "0.5" - memory: "0.5Gi" - requests: - cpu: "0.5" - memory: "0.5Gi" - volumeClaimTemplates: - - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi diff --git a/examples/pulsar/cluster.yaml b/examples/pulsar/cluster.yaml deleted file mode 100644 index 20acce895..000000000 --- a/examples/pulsar/cluster.yaml +++ /dev/null @@ -1,142 +0,0 @@ -apiVersion: apps.kubeblocks.io/v1 -kind: Cluster -metadata: - name: pulsar-cluster - namespace: default - annotations: - # KB_PULSAR_BROKER_NODEPORT: enable NodePort services - "kubeblocks.io/extra-env": '{"KB_PULSAR_BROKER_NODEPORT": "false"}' -spec: - # Specifies the name of the ClusterDefinition to use when creating a Cluster. - clusterDefinitionRef: pulsar - # Refers to the ClusterVersion name. Deprecated since v0.9, use ComponentVersion instead. This field is maintained for backward compatibility and its use is discouraged. Existing usage should be updated to the current preferred approach to avoid compatibility issues in future releases. - clusterVersionRef: pulsar-3.0.2 - # Specifies the behavior when a Cluster is deleted. - # - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. - # - `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), allowing for data preservation while stopping other operations. - # - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. 
- # - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. - terminationPolicy: Delete - services: - - name: proxy - serviceName: proxy - componentSelector: pulsar-proxy - spec: - # - ClusterIP - # - NodePort - type: ClusterIP - ports: - - name: pulsar - port: 6650 - targetPort: 6650 - - name: http - port: 80 - targetPort: 8080 - - name: broker-bootstrap - serviceName: broker-bootstrap - componentSelector: pulsar-broker - spec: - # - ClusterIP - # - NodePort - type: ClusterIP - ports: - - name: pulsar - port: 6650 - targetPort: 6650 - - name: http - port: 80 - targetPort: 8080 - - name: kafka-client - port: 9092 - targetPort: 9092 - # Specifies a list of ClusterComponentSpec objects used to define the individual components that make up a Cluster. This field allows for detailed configuration of each component within the Cluster. - # Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a cluster. - # ClusterComponentSpec defines the specifications for a Component in a Cluster. - componentSpecs: - - name: pulsar-broker - componentDefRef: pulsar-broker - disableExporter: true - serviceAccountName: kb-pulsar-cluster - replicas: 1 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - volumeClaimTemplates: - - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - - name: pulsar-proxy - componentDefRef: pulsar-proxy - disableExporter: true - replicas: 1 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - - name: bookies - componentDefRef: bookies - disableExporter: true - replicas: 3 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - volumeClaimTemplates: - - name: journal - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - - name: ledgers - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - - name: bookies-recovery - componentDefRef: bookies-recovery - disableExporter: true - replicas: 1 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - - name: zookeeper - componentDefRef: zookeeper - disableExporter: true - replicas: 3 - resources: - limits: - cpu: '0.5' - memory: 0.5Gi - requests: - cpu: '0.5' - memory: 0.5Gi - volumeClaimTemplates: - - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi diff --git a/examples/pulsar/configure.yaml b/examples/pulsar/configure.yaml index 1de867e22..fc947091b 100644 --- a/examples/pulsar/configure.yaml +++ b/examples/pulsar/configure.yaml @@ -1,22 +1,22 @@ -apiVersion: apps.kubeblocks.io/v1alpha1 +apiVersion: operations.kubeblocks.io/v1alpha1 kind: OpsRequest metadata: name: pulsar-reconfiguring namespace: default spec: # Specifies the name of the Cluster resource that this operation is targeting. - clusterName: pulsar-cluster + clusterName: pulsar-basic-cluster # Instructs the system to bypass pre-checks (including cluster state checks and customized pre-conditions hooks) and immediately execute the opsRequest, except for the opsRequest of 'Start' type, which will still undergo pre-checks even if `force` is true. Note: Once set, the `force` field is immutable and cannot be updated. 
force: false # Specifies a component and its configuration updates. This field is deprecated and replaced by `reconfigures`. - reconfigure: + reconfigures: # Specifies the name of the Component. - # - pulsar-broker - # - pulsar-proxy - # - bookies + # - proxy # - bookies-recovery + # - broker + # - bookies # - zookeeper - componentName: bookies + - componentName: bookies # Contains a list of ConfigurationItem objects, specifying the Component's configuration template name, upgrade policy, and parameter key-value pairs to be updated. configurations: # Sets the parameters to be updated. It should contain at least one item. diff --git a/examples/pulsar/restart.yaml b/examples/pulsar/restart.yaml index 9a721aa4a..c69de4d69 100644 --- a/examples/pulsar/restart.yaml +++ b/examples/pulsar/restart.yaml @@ -1,18 +1,18 @@ -apiVersion: apps.kubeblocks.io/v1alpha1 +apiVersion: operations.kubeblocks.io/v1alpha1 kind: OpsRequest metadata: name: pulsar-restart namespace: default spec: # Specifies the name of the Cluster resource that this operation is targeting. - clusterName: pulsar-cluster + clusterName: pulsar-basic-cluster type: Restart # Lists Components to be restarted. ComponentOps specifies the Component to be operated on. restart: # Specifies the name of the Component. - # - pulsar-broker - # - pulsar-proxy - # - bookies + # - proxy # - bookies-recovery + # - broker + # - bookies # - zookeeper - - componentName: bookies + - componentName: broker diff --git a/examples/pulsar/horizontalscale.yaml b/examples/pulsar/scale-in.yaml similarity index 58% rename from examples/pulsar/horizontalscale.yaml rename to examples/pulsar/scale-in.yaml index b3633c0c5..5f75f302a 100644 --- a/examples/pulsar/horizontalscale.yaml +++ b/examples/pulsar/scale-in.yaml @@ -1,20 +1,23 @@ -apiVersion: apps.kubeblocks.io/v1alpha1 +apiVersion: operations.kubeblocks.io/v1alpha1 kind: OpsRequest metadata: - name: pulsar-horizontalscaling-out + name: pulsar-broker-scale-in namespace: default spec: # Specifies the name of the Cluster resource that this operation is targeting. - clusterName: pulsar-cluster + clusterName: pulsar-basic-cluster type: HorizontalScaling # Lists HorizontalScaling objects, each specifying scaling requirements for a Component, including desired total replica counts, configurations for new instances, modifications for existing instances, and instance downscaling options horizontalScaling: # Specifies the name of the Component. - # - pulsar-broker - # - pulsar-proxy - # - bookies + # - proxy # - bookies-recovery + # - broker + # - bookies # - zookeeper - - componentName: pulsar-proxy - # Specifies the number of total replicas. - replicas: 2 + - componentName: broker + # Specifies the replica changes for scaling in components + scaleIn: + # Specifies the replica changes for the component. + # add one more replica to current component + replicaChanges: 1 diff --git a/examples/pulsar/scale-out.yaml b/examples/pulsar/scale-out.yaml new file mode 100644 index 000000000..d1f6cac44 --- /dev/null +++ b/examples/pulsar/scale-out.yaml @@ -0,0 +1,23 @@ +apiVersion: operations.kubeblocks.io/v1alpha1 +kind: OpsRequest +metadata: + name: pulsar-broker-scale-out + namespace: default +spec: + # Specifies the name of the Cluster resource that this operation is targeting. 
+ clusterName: pulsar-basic-cluster + type: HorizontalScaling + # Lists HorizontalScaling objects, each specifying scaling requirements for a Component, including desired total replica counts, configurations for new instances, modifications for existing instances, and instance downscaling options + horizontalScaling: + # Specifies the name of the Component. + # - proxy + # - bookies-recovery + # - broker + # - bookies + # - zookeeper + - componentName: broker + # Specifies the replica changes for scaling in components + scaleOut: + # Specifies the replica changes for the component. + # add one more replica to current component + replicaChanges: 1 diff --git a/examples/pulsar/start.yaml b/examples/pulsar/start.yaml index d36d2180d..645a69272 100644 --- a/examples/pulsar/start.yaml +++ b/examples/pulsar/start.yaml @@ -1,9 +1,9 @@ -apiVersion: apps.kubeblocks.io/v1alpha1 +apiVersion: operations.kubeblocks.io/v1alpha1 kind: OpsRequest metadata: name: pulsar-start namespace: default spec: # Specifies the name of the Cluster resource that this operation is targeting. - clusterName: pulsar-cluster - type: Start + clusterName: pulsar-basic-cluster + type: Start \ No newline at end of file diff --git a/examples/pulsar/stop.yaml b/examples/pulsar/stop.yaml index 938e7f99c..8d3520b95 100644 --- a/examples/pulsar/stop.yaml +++ b/examples/pulsar/stop.yaml @@ -1,9 +1,18 @@ -apiVersion: apps.kubeblocks.io/v1alpha1 +apiVersion: operations.kubeblocks.io/v1alpha1 kind: OpsRequest metadata: name: pulsar-stop namespace: default spec: # Specifies the name of the Cluster resource that this operation is targeting. - clusterName: pulsar-cluster + clusterName: pulsar-basic-cluster type: Stop + # Lists Components to be stopped. ComponentOps specifies the Component to be operated on. + # stop: + # Specifies the name of the Component. + # - proxy + # - bookies-recovery + # - broker + # - bookies + # - zookeeper + # - componentName: broker diff --git a/examples/pulsar/verticalscale.yaml b/examples/pulsar/verticalscale.yaml index 5df4220ae..4e44dac93 100644 --- a/examples/pulsar/verticalscale.yaml +++ b/examples/pulsar/verticalscale.yaml @@ -1,20 +1,20 @@ -apiVersion: apps.kubeblocks.io/v1alpha1 +apiVersion: operations.kubeblocks.io/v1alpha1 kind: OpsRequest metadata: name: pulsar-verticalscaling namespace: default spec: # Specifies the name of the Cluster resource that this operation is targeting. - clusterName: pulsar-cluster + clusterName: pulsar-basic-cluster type: VerticalScaling - # Lists VerticalScaling objects, each specifying a component and its desired compute resources for vertical scaling. + # Lists VerticalScaling objects, each specifying a component and its desired compute resources for vertical scaling. verticalScaling: - # - pulsar-broker - # - pulsar-proxy - # - bookies + # - proxy # - bookies-recovery + # - broker + # - bookies # - zookeeper - - componentName: bookies + - componentName: broker # VerticalScaling refers to the process of adjusting the compute resources (e.g., CPU, memory) allocated to a Component. It defines the parameters required for the operation. 
requests: cpu: '1' diff --git a/examples/pulsar/zookeeper-cluster.yaml b/examples/pulsar/zookeeper-cluster.yaml new file mode 100644 index 000000000..de5e77cdd --- /dev/null +++ b/examples/pulsar/zookeeper-cluster.yaml @@ -0,0 +1,37 @@ +apiVersion: apps.kubeblocks.io/v1 +kind: Cluster +metadata: + name: zk-cluster + namespace: default +spec: + terminationPolicy: WipeOut + services: + - componentSelector: zookeeper + name: myzk + serviceName: myzk + spec: + ports: + - name: client + port: 2181 + protocol: TCP + targetPort: 2181 + type: ClusterIP + componentSpecs: + - name: zookeeper + componentDef: pulsar-zookeeper-3 + replicas: 1 + resources: + limits: + cpu: "0.5" + memory: "0.5Gi" + requests: + cpu: "0.5" + memory: "0.5Gi" + volumeClaimTemplates: + - name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi diff --git a/examples/pulsar/zookeeper-service-descriptor.yaml b/examples/pulsar/zookeeper-service-descriptor.yaml new file mode 100644 index 000000000..6f8382997 --- /dev/null +++ b/examples/pulsar/zookeeper-service-descriptor.yaml @@ -0,0 +1,17 @@ +apiVersion: apps.kubeblocks.io/v1 +kind: ServiceDescriptor +metadata: + name: zookeeper-sd + namespace: default +spec: + # Specifies the type or nature of the service. + # Should represent a well-known application cluster type, such as {mysql, redis, zookeeper}. + serviceKind: zookeeper + # Represents the version of the service reference. + serviceVersion: 3.8.5 + # Represents the endpoint of the service connection credential. + endpoint: + value: "zk-cluster-zookeeper-zookeeper" + # Represents the port of the service connection credential. + port: + value: "2181" \ No newline at end of file