diff --git a/docs/examples/elasticsearch/restart.yaml b/docs/examples/elasticsearch/restart.yaml new file mode 100644 index 0000000000..154d6b2340 --- /dev/null +++ b/docs/examples/elasticsearch/restart.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: restart + namespace: demo +spec: + type: Restart + databaseRef: + name: es-quickstart + timeout: 3m + apply: Always \ No newline at end of file diff --git a/docs/examples/elasticsearch/scalling/horizontal/Elasticsearch-hscale-down-Topology.yaml b/docs/examples/elasticsearch/scalling/horizontal/Elasticsearch-hscale-down-Topology.yaml new file mode 100644 index 0000000000..62dd2a1135 --- /dev/null +++ b/docs/examples/elasticsearch/scalling/horizontal/Elasticsearch-hscale-down-Topology.yaml @@ -0,0 +1,14 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: esops-hscale-down-topology + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: es-hscale-topology + horizontalScaling: + topology: + master: 2 + ingest: 2 + data: 2 \ No newline at end of file diff --git a/docs/examples/elasticsearch/scalling/horizontal/Elasticsearch-hscale-down-combined.yaml b/docs/examples/elasticsearch/scalling/horizontal/Elasticsearch-hscale-down-combined.yaml new file mode 100644 index 0000000000..b977a3e58b --- /dev/null +++ b/docs/examples/elasticsearch/scalling/horizontal/Elasticsearch-hscale-down-combined.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: esops-hscale-down-combined + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: es + horizontalScaling: + node: 2 \ No newline at end of file diff --git a/docs/examples/elasticsearch/scalling/horizontal/Elasticsearch-hscale-up-Topology.yaml b/docs/examples/elasticsearch/scalling/horizontal/Elasticsearch-hscale-up-Topology.yaml new file mode 100644 index 0000000000..fae53ea350 --- /dev/null +++ b/docs/examples/elasticsearch/scalling/horizontal/Elasticsearch-hscale-up-Topology.yaml @@ -0,0 +1,14 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: esops-hscale-up-topology + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: es-hscale-topology + horizontalScaling: + topology: + master: 3 + ingest: 3 + data: 3 \ No newline at end of file diff --git a/docs/examples/elasticsearch/scalling/horizontal/Elasticsearch-hscale-up-combined.yaml b/docs/examples/elasticsearch/scalling/horizontal/Elasticsearch-hscale-up-combined.yaml new file mode 100644 index 0000000000..66b5d466df --- /dev/null +++ b/docs/examples/elasticsearch/scalling/horizontal/Elasticsearch-hscale-up-combined.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: es-combined + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: es + horizontalScaling: + node: 3 \ No newline at end of file diff --git a/docs/examples/elasticsearch/scalling/horizontal/topology.yaml b/docs/examples/elasticsearch/scalling/horizontal/topology.yaml new file mode 100644 index 0000000000..a11ee86494 --- /dev/null +++ b/docs/examples/elasticsearch/scalling/horizontal/topology.yaml @@ -0,0 +1,37 @@ +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: es-cluster + namespace: demo +spec: + enableSSL: true + version: xpack-8.11.1 + storageType: Durable + topology: + master: + replicas: 3 + storage: + storageClassName: "standard" + accessModes: + - 
ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 3 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + ingest: + replicas: 3 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/docs/examples/elasticsearch/scalling/vertical/Elasticsearch-vertical-scaling-combined.yaml b/docs/examples/elasticsearch/scalling/vertical/Elasticsearch-vertical-scaling-combined.yaml new file mode 100644 index 0000000000..24cf3201bd --- /dev/null +++ b/docs/examples/elasticsearch/scalling/vertical/Elasticsearch-vertical-scaling-combined.yaml @@ -0,0 +1,28 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: vscale-topology + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: es-cluster + verticalScaling: + master: + resources: + limits: + cpu: 750m + memory: 800Mi + data: + resources: + requests: + cpu: 760m + memory: 900Mi + ingest: + resources: + limits: + cpu: 900m + memory: 1.2Gi + requests: + cpu: 800m + memory: 1Gi \ No newline at end of file diff --git a/docs/examples/elasticsearch/scalling/vertical/Elasticsearch-vertical-scaling-topology.yaml b/docs/examples/elasticsearch/scalling/vertical/Elasticsearch-vertical-scaling-topology.yaml new file mode 100644 index 0000000000..7b0132522f --- /dev/null +++ b/docs/examples/elasticsearch/scalling/vertical/Elasticsearch-vertical-scaling-topology.yaml @@ -0,0 +1,18 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: vscale-combined + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: es-combined + verticalScaling: + node: + resources: + limits: + cpu: 1500m + memory: 2Gi + requests: + cpu: 600m + memory: 2Gi diff --git a/docs/examples/elasticsearch/update-version/elasticsearch.yaml b/docs/examples/elasticsearch/update-version/elasticsearch.yaml new file mode 100644 index 0000000000..5308b788bc --- /dev/null +++ b/docs/examples/elasticsearch/update-version/elasticsearch.yaml @@ -0,0 +1,19 @@ +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: es-demo + namespace: demo +spec: + deletionPolicy: Delete + enableSSL: true + replicas: 3 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + version: xpack-9.1.3 + #ghcr.io/kubedb/kubedb-provisioner:v0.59.0 diff --git a/docs/examples/elasticsearch/update-version/update-version.yaml b/docs/examples/elasticsearch/update-version/update-version.yaml new file mode 100644 index 0000000000..29230f23d7 --- /dev/null +++ b/docs/examples/elasticsearch/update-version/update-version.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: es-demo-update + namespace: demo +spec: + type: UpdateVersion + databaseRef: + name: es-demo + updateVersion: + targetVersion: xpack-9.1.4 \ No newline at end of file diff --git a/docs/examples/elasticsearch/volume-expantion/elasticsearch-volume-expansion-combined.yaml b/docs/examples/elasticsearch/volume-expantion/elasticsearch-volume-expansion-combined.yaml new file mode 100644 index 0000000000..646a9378e8 --- /dev/null +++ b/docs/examples/elasticsearch/volume-expantion/elasticsearch-volume-expansion-combined.yaml @@ -0,0 +1,12 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: es-volume-expansion-combined + 
namespace: demo +spec: + type: VolumeExpansion + databaseRef: + name: es-combined + volumeExpansion: + mode: "Online" + node: 4Gi \ No newline at end of file diff --git a/docs/examples/elasticsearch/volume-expantion/elasticsearch-volume-expansion-topology.yaml b/docs/examples/elasticsearch/volume-expantion/elasticsearch-volume-expansion-topology.yaml new file mode 100644 index 0000000000..191463551a --- /dev/null +++ b/docs/examples/elasticsearch/volume-expantion/elasticsearch-volume-expansion-topology.yaml @@ -0,0 +1,14 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: volume-expansion-topology + namespace: demo +spec: + type: VolumeExpansion + databaseRef: + name: es-cluster + volumeExpansion: + mode: "Online" + master: 5Gi + data: 5Gi + ingest: 4Gi \ No newline at end of file diff --git a/docs/examples/elasticsearch/volume-expantion/volume-expansion-topo-data.yaml b/docs/examples/elasticsearch/volume-expantion/volume-expansion-topo-data.yaml new file mode 100644 index 0000000000..de94dfe303 --- /dev/null +++ b/docs/examples/elasticsearch/volume-expantion/volume-expansion-topo-data.yaml @@ -0,0 +1,12 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: volume-expansion-data-nodes + namespace: demo +spec: + type: VolumeExpansion + databaseRef: + name: es-cluster + volumeExpansion: + mode: "Online" + data: 5Gi \ No newline at end of file diff --git a/docs/guides/elasticsearch/autoscaler/_index.md b/docs/guides/elasticsearch/autoscaler/_index.md index 0e47d380fe..72148b6188 100644 --- a/docs/guides/elasticsearch/autoscaler/_index.md +++ b/docs/guides/elasticsearch/autoscaler/_index.md @@ -5,6 +5,6 @@ menu: identifier: es-auto-scaling name: Autoscaling parent: es-elasticsearch-guides - weight: 44 + weight: 145 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/elasticsearch/backup/_index.md b/docs/guides/elasticsearch/backup/_index.md index 5e46daaaff..c4815c3707 100644 --- a/docs/guides/elasticsearch/backup/_index.md +++ b/docs/guides/elasticsearch/backup/_index.md @@ -5,6 +5,6 @@ menu: identifier: guides-es-backup name: Backup & Restore parent: es-elasticsearch-guides - weight: 40 + weight: 85 menu_name: docs_{{ .version }} --- \ No newline at end of file diff --git a/docs/guides/elasticsearch/cli/cli.md b/docs/guides/elasticsearch/cli/cli.md index 540a6b911d..6bc76310c9 100644 --- a/docs/guides/elasticsearch/cli/cli.md +++ b/docs/guides/elasticsearch/cli/cli.md @@ -5,7 +5,7 @@ menu: identifier: es-cli-cli name: Quickstart parent: es-cli-elasticsearch - weight: 100 + weight: 155 menu_name: docs_{{ .version }} section_menu_id: guides --- diff --git a/docs/guides/elasticsearch/clustering/_index.md b/docs/guides/elasticsearch/clustering/_index.md index 8e6d3df87d..b5326e9baa 100755 --- a/docs/guides/elasticsearch/clustering/_index.md +++ b/docs/guides/elasticsearch/clustering/_index.md @@ -5,6 +5,6 @@ menu: identifier: es-clustering-elasticsearch name: Clustering parent: es-elasticsearch-guides - weight: 25 + weight: 35 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/elasticsearch/concepts/_index.md b/docs/guides/elasticsearch/concepts/_index.md index ee9c9f11d8..c3766b5a30 100755 --- a/docs/guides/elasticsearch/concepts/_index.md +++ b/docs/guides/elasticsearch/concepts/_index.md @@ -5,6 +5,6 @@ menu: identifier: es-concepts-elasticsearch name: Concepts parent: es-elasticsearch-guides - weight: 20 + weight: 25 menu_name: docs_{{ .version }} --- diff --git 
a/docs/guides/elasticsearch/configuration/_index.md b/docs/guides/elasticsearch/configuration/_index.md index b66e9c446c..cc4320e12e 100755 --- a/docs/guides/elasticsearch/configuration/_index.md +++ b/docs/guides/elasticsearch/configuration/_index.md @@ -5,6 +5,6 @@ menu: identifier: es-configuration name: Custom Configuration parent: es-elasticsearch-guides - weight: 30 + weight: 45 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/elasticsearch/custom-rbac/_index.md b/docs/guides/elasticsearch/custom-rbac/_index.md index 0a63f6c384..6a27ce6316 100755 --- a/docs/guides/elasticsearch/custom-rbac/_index.md +++ b/docs/guides/elasticsearch/custom-rbac/_index.md @@ -5,6 +5,6 @@ menu: identifier: es-custom-rbac name: Custom RBAC parent: es-elasticsearch-guides - weight: 31 + weight: 55 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/elasticsearch/elasticsearch-dashboard/_index.md b/docs/guides/elasticsearch/elasticsearch-dashboard/_index.md index defd392ffe..4941abfbc2 100644 --- a/docs/guides/elasticsearch/elasticsearch-dashboard/_index.md +++ b/docs/guides/elasticsearch/elasticsearch-dashboard/_index.md @@ -5,6 +5,6 @@ menu: identifier: es-dashboard name: Elasticsearch Dashboard parent: es-elasticsearch-guides - weight: 32 + weight: 65 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/elasticsearch/monitoring/_index.md b/docs/guides/elasticsearch/monitoring/_index.md index c4206a8016..5f11444f55 100755 --- a/docs/guides/elasticsearch/monitoring/_index.md +++ b/docs/guides/elasticsearch/monitoring/_index.md @@ -5,6 +5,6 @@ menu: identifier: es-monitoring-elasticsearch name: Monitoring parent: es-elasticsearch-guides - weight: 50 + weight: 135 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/elasticsearch/plugins-backup/_index.md b/docs/guides/elasticsearch/plugins-backup/_index.md index 2b97dfb7cd..1dc860c7cb 100644 --- a/docs/guides/elasticsearch/plugins-backup/_index.md +++ b/docs/guides/elasticsearch/plugins-backup/_index.md @@ -5,6 +5,6 @@ menu: identifier: guides-es-plugins-backup name: Snapshot & Restore (Repository Plugins) parent: es-elasticsearch-guides - weight: 41 + weight: 155 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/elasticsearch/plugins/_index.md b/docs/guides/elasticsearch/plugins/_index.md index efa9f0d86b..daf12a425f 100755 --- a/docs/guides/elasticsearch/plugins/_index.md +++ b/docs/guides/elasticsearch/plugins/_index.md @@ -5,6 +5,6 @@ menu: identifier: es-plugin-elasticsearch name: Extensions & Plugins parent: es-elasticsearch-guides - weight: 60 + weight: 165 menu_name: docs_{{ .version }} --- \ No newline at end of file diff --git a/docs/guides/elasticsearch/private-registry/_index.md b/docs/guides/elasticsearch/private-registry/_index.md index d072bcd97a..b6431d51c6 100755 --- a/docs/guides/elasticsearch/private-registry/_index.md +++ b/docs/guides/elasticsearch/private-registry/_index.md @@ -5,6 +5,6 @@ menu: identifier: es-private-registry-elasticsearch name: Private Registry parent: es-elasticsearch-guides - weight: 35 + weight: 75 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/elasticsearch/reconfigure_tls/_index.md b/docs/guides/elasticsearch/reconfigure_tls/_index.md new file mode 100644 index 0000000000..e7111e587a --- /dev/null +++ b/docs/guides/elasticsearch/reconfigure_tls/_index.md @@ -0,0 +1,11 @@ +--- +title: Elasticsearch Reconfigure TLS/SSL +menu: + docs_{{ .version }}: + identifier: es-reconfigure-tls-elasticsearch + name: Reconfigure TLS/SSL + parent: es-elasticsearch-guides + 
weight: 110 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- diff --git a/docs/guides/elasticsearch/reconfigure_tls/elasticsearch.md b/docs/guides/elasticsearch/reconfigure_tls/elasticsearch.md new file mode 100644 index 0000000000..70df9f5c9b --- /dev/null +++ b/docs/guides/elasticsearch/reconfigure_tls/elasticsearch.md @@ -0,0 +1,1014 @@ +--- +title: Reconfigure Elasticsearch TLS/SSL Encryption +menu: + docs_{{ .version }}: + identifier: es-reconfigure-tls + name: Reconfigure Elasticsearch TLS/SSL Encryption + parent: es-reconfigure-tls-elasticsearch + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure Elasticsearch TLS/SSL (Transport Encryption) + +KubeDB supports reconfigure i.e. add, remove, update and rotation of TLS/SSL certificates for existing Elasticsearch database via a ElasticsearchOpsRequest. This tutorial will show you how to use KubeDB to reconfigure TLS/SSL encryption. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install [`cert-manger`](https://cert-manager.io/docs/installation/) v1.0.0 or later to your cluster to manage your SSL/TLS certificates. + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + + ```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/Elasticsearch](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/Elasticsearch) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Add TLS to a Elasticsearch database + +Here, We are going to create a Elasticsearch without TLS and then reconfigure the database to use TLS. + +### Deploy Elasticsearch without TLS + +In this section, we are going to deploy a Elasticsearch topology cluster without TLS. In the next few sections we will reconfigure TLS using `ElasticsearchOpsRequest` CRD. Below is the YAML of the `Elasticsearch` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: es-demo + namespace: demo +spec: + deletionPolicy: WipeOut + enableSSL: true + replicas: 3 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: local-path + storageType: Durable + version: xpack-8.11.1 + + +``` + +Let's create the `Elasticsearch` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/Elasticsearch/reconfigure-tls/Elasticsearch.yaml +Elasticsearch.kubedb.com/es-demo created +``` + +Now, wait until `es-demo` has status `Ready`. i.e, + +```bash +$ kubectl get es -n demo -w +NAME VERSION STATUS AGE +es-demo xpack-8.11.1 Ready 26h + +``` + +Now, we can exec one hazelcast pod and verify configuration that the TLS is disabled. 
+```bash +$ kubectl exec -n demo es-demo-0 -- \ + cat /usr/share/elasticsearch/config/elasticsearch.yml | grep -A 2 -i xpack.security + +Defaulted container "elasticsearch" out of: elasticsearch, init-sysctl (init), config-merger (init) +xpack.security.enabled: true + +xpack.security.transport.ssl.enabled: true +xpack.security.transport.ssl.verification_mode: certificate +xpack.security.transport.ssl.key: certs/transport/tls.key +xpack.security.transport.ssl.certificate: certs/transport/tls.crt +xpack.security.transport.ssl.certificate_authorities: [ "certs/transport/ca.crt" ] + +xpack.security.http.ssl.enabled: false + +``` +Here, transport TLS is enabled but HTTP TLS is disabled. So, internal node to node communication is encrypted but communication from client to node is not encrypted. + +### Create Issuer/ ClusterIssuer + +Now, We are going to create an example `Issuer` that will be used to enable SSL/TLS in Elasticsearch. Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`. + +- Start off by generating a ca certificates using openssl. + +```bash +$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=ca/O=kubedb" +Generating a RSA private key +................+++++ +........................+++++ +writing new private key to './ca.key' +----- +``` + +- Now we are going to create a ca-secret using the certificate files that we have just generated. + +```bash +$ kubectl create secret tls es-ca \ + --cert=ca.crt \ + --key=ca.key \ + --namespace=demo +secret/es-ca created +``` + +Now, Let's create an `Issuer` using the `Elasticsearch-ca` secret that we have just created. The `YAML` file looks like this: + +```yaml +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: es-issuer + namespace: demo +spec: + ca: + secretName: es-ca +``` + +Let's apply the `YAML` file: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/Elasticsearch/reconfigure-tls/Elasticsearch-issuer.yaml +issuer.cert-manager.io/es-issuer created +``` + +### Create ElasticsearchOpsRequest + +In order to add TLS to the Elasticsearch, we have to create a `ElasticsearchOpsRequest` CRO with our created issuer. Below is the YAML of the `ElasticsearchOpsRequest` CRO that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: add-tls + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: es-demo + tls: + issuerRef: + apiGroup: "cert-manager.io" + kind: Issuer + name: es-issuer + certificates: + - alias: http + subject: + organizations: + - kubedb.com + emailAddresses: + - abc@kubedb.com +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing reconfigure TLS operation on `es-demo` cluster. +- `spec.type` specifies that we are performing `ReconfigureTLS` on Elasticsearch. +- `spec.tls.issuerRef` specifies the issuer name, kind and api group. +- `spec.tls.certificates` specifies the certificates. You can learn more about this field from [here](/docs/guides/elasticsearch/concepts/Elasticsearch.md#spectls). + +Let's create the `ElasticsearchOpsRequest` CR we have shown above, + +> **Note:** For combined Elasticsearch, you just need to refer Elasticsearch combined object in `databaseRef` field. To learn more about combined Elasticsearch, please visit [here](/docs/guides/elasticsearch/clustering/combined-cluster/index.md). 
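+
+Before applying, it can help to confirm that the `Issuer` we created is actually ready to sign certificates; cert-manager will only issue the new HTTP certificates once the issuer reports `Ready`. A quick sanity check:
+
+```bash
+# The Issuer should show READY=True before the ops request is applied.
+$ kubectl get issuer -n demo es-issuer
+```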
+ +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/Elasticsearch/reconfigure-tls/Elasticsearch-add-tls.yaml +Elasticsearchopsrequest.ops.kubedb.com/add-tls created +``` + +#### Verify TLS Enabled Successfully + +Let's wait for `ElasticsearchOpsRequest` to be `Successful`. Run the following command to watch `ElasticsearchOpsRequest` CRO, + +```bash +$ kubectl get Elasticsearchopsrequest -n demo +NAME TYPE STATUS AGE +add-tls ReconfigureTLS Successful 73m +``` + +We can see from the above output that the `ElasticsearchOpsRequest` has succeeded. If we describe the `ElasticsearchOpsRequest` we will get an overview of the steps that were followed. + +```bash +$ kubectl describe Elasticsearchopsrequest -n demo add-tls +Name: add-tls +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ElasticsearchOpsRequest +Metadata: + Creation Timestamp: 2025-11-28T05:16:12Z + Generation: 1 + Resource Version: 884868 + UID: 2fa3b86a-4cfa-4e51-8cde-c5d7508c3eb0 +Spec: + Apply: IfReady + Database Ref: + Name: es-demo + Tls: + Certificates: + Alias: http + Email Addresses: + abc@kubedb.com + Subject: + Organizations: + kubedb.com + Issuer Ref: + API Group: cert-manager.io + Kind: Issuer + Name: es-issuer + Type: ReconfigureTLS +Status: + Conditions: + Last Transition Time: 2025-11-28T05:16:12Z + Message: Elasticsearch ops request is reconfiguring TLS + Observed Generation: 1 + Reason: ReconfigureTLS + Status: True + Type: ReconfigureTLS + Last Transition Time: 2025-11-28T05:16:20Z + Message: get certificate; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetCertificate + Last Transition Time: 2025-11-28T05:16:20Z + Message: ready condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: ReadyCondition + Last Transition Time: 2025-11-28T05:16:20Z + Message: issue condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IssueCondition + Last Transition Time: 2025-11-28T05:16:20Z + Message: Successfully synced all certificates + Observed Generation: 1 + Reason: CertificateSynced + Status: True + Type: CertificateSynced + Last Transition Time: 2025-11-28T05:16:32Z + Message: pod exists; ConditionStatus:True; PodName:es-demo-0 + Observed Generation: 1 + Status: True + Type: PodExists--es-demo-0 + Last Transition Time: 2025-11-28T05:16:32Z + Message: create es client; ConditionStatus:True; PodName:es-demo-0 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-demo-0 + Last Transition Time: 2025-11-28T05:16:32Z + Message: evict pod; ConditionStatus:True; PodName:es-demo-0 + Observed Generation: 1 + Status: True + Type: EvictPod--es-demo-0 + Last Transition Time: 2025-11-28T05:17:42Z + Message: create es client; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreateEsClient + Last Transition Time: 2025-11-28T05:16:57Z + Message: pod exists; ConditionStatus:True; PodName:es-demo-1 + Observed Generation: 1 + Status: True + Type: PodExists--es-demo-1 + Last Transition Time: 2025-11-28T05:16:57Z + Message: create es client; ConditionStatus:True; PodName:es-demo-1 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-demo-1 + Last Transition Time: 2025-11-28T05:16:57Z + Message: evict pod; ConditionStatus:True; PodName:es-demo-1 + Observed Generation: 1 + Status: True + Type: EvictPod--es-demo-1 + Last Transition Time: 2025-11-28T05:17:22Z + Message: pod exists; ConditionStatus:True; PodName:es-demo-2 + Observed Generation: 
1 + Status: True + Type: PodExists--es-demo-2 + Last Transition Time: 2025-11-28T05:17:22Z + Message: create es client; ConditionStatus:True; PodName:es-demo-2 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-demo-2 + Last Transition Time: 2025-11-28T05:17:22Z + Message: evict pod; ConditionStatus:True; PodName:es-demo-2 + Observed Generation: 1 + Status: True + Type: EvictPod--es-demo-2 + Last Transition Time: 2025-11-28T05:17:47Z + Message: Successfully restarted all the nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2025-11-28T05:17:51Z + Message: Successfully reconfigured TLS + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: +``` + +Now, Let's exec into a Elasticsearch broker pod and verify the configuration that the TLS is enabled. + +```bash +$ kubectl exec -n demo es-demo-0 -- \ + cat /usr/share/elasticsearch/config/elasticsearch.yml | grep -A 2 -i xpack.security + +Defaulted container "elasticsearch" out of: elasticsearch, init-sysctl (init), config-merger (init) +xpack.security.enabled: true + +xpack.security.transport.ssl.enabled: true +xpack.security.transport.ssl.verification_mode: certificate +xpack.security.transport.ssl.key: certs/transport/tls.key +xpack.security.transport.ssl.certificate: certs/transport/tls.crt +xpack.security.transport.ssl.certificate_authorities: [ "certs/transport/ca.crt" ] + +xpack.security.http.ssl.enabled: true +xpack.security.http.ssl.key: certs/http/tls.key +xpack.security.http.ssl.certificate: certs/http/tls.crt +xpack.security.http.ssl.certificate_authorities: [ "certs/http/ca.crt" ] + +``` + +We can see from the above output that, `xpack.security.http.ssl.enabled: true` which means TLS is enabled for HTTP communication. + +## Rotate Certificate + +Now we are going to rotate the certificate of this cluster. First let's check the current expiration date of the certificate. + +```bash +$ kubectl exec -n demo es-demo-0 -- /bin/sh -c '\ + openssl s_client -connect localhost:9200 -showcerts < /dev/null 2>/dev/null | \ + sed -ne "/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p" > /tmp/server.crt && \ + openssl x509 -in /tmp/server.crt -noout -enddate' +Defaulted container "elasticsearch" out of: elasticsearch, init-sysctl (init), config-merger (init) +notAfter=Feb 26 05:16:15 2026 GMT + +``` + +So, the certificate will expire on this time `Feb 26 05:16:17 2026 GMT`. + +### Create ElasticsearchOpsRequest + +Now we are going to increase it using a ElasticsearchOpsRequest. Below is the yaml of the ops request that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: esops-rotate + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: es-demo + tls: + rotateCertificates: true +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing reconfigure TLS operation on `es-demo`. +- `spec.type` specifies that we are performing `ReconfigureTLS` on our cluster. +- `spec.tls.rotateCertificates` specifies that we want to rotate the certificate of this Elasticsearch cluster. 
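+
+Under the hood, the operator adds an issuing condition to the cert-manager `Certificate` objects that back the cluster's TLS secrets, cert-manager re-issues them, and the nodes are then restarted with the fresh certificates. If you want to watch the rotation, here is a minimal sketch; no particular certificate name is assumed, so list them first and plug a real name into the second command:
+
+```bash
+# List the cert-manager Certificate objects created for the es-demo cluster.
+$ kubectl get certificate -n demo
+
+# After the ops request completes, the "Not After" timestamp of each certificate
+# should move forward. Replace <cert-name> with a name from the list above.
+$ kubectl describe certificate -n demo <cert-name> | grep "Not After"
+```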
+ +Let's create the `ElasticsearchOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/Elasticsearch/reconfigure-tls/esops-rotate.yaml +Elasticsearchopsrequest.ops.kubedb.com/esops-rotate created +``` + +#### Verify Certificate Rotated Successfully + +Let's wait for `ElasticsearchOpsRequest` to be `Successful`. Run the following command to watch `ElasticsearchOpsRequest` CRO, + +```bash +$ kubectl get Elasticsearchopsrequest -n demo esops-rotate +NAME TYPE STATUS AGE +esops-rotate ReconfigureTLS Successful 85m + +``` + +We can see from the above output that the `ElasticsearchOpsRequest` has succeeded. If we describe the `ElasticsearchOpsRequest` we will get an overview of the steps that were followed. + +```bash +$ kubectl describe Elasticsearchopsrequest -n demo esops-rotate +Name: esops-rotate +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ElasticsearchOpsRequest +Metadata: + Creation Timestamp: 2025-11-28T07:02:38Z + Generation: 1 + Resource Version: 893511 + UID: 43503dc9-ddeb-4569-a8a9-b10a96feeb60 +Spec: + Apply: IfReady + Database Ref: + Name: es-demo + Tls: + Rotate Certificates: true + Type: ReconfigureTLS +Status: + Conditions: + Last Transition Time: 2025-11-28T07:02:38Z + Message: Elasticsearch ops request is reconfiguring TLS + Observed Generation: 1 + Reason: ReconfigureTLS + Status: True + Type: ReconfigureTLS + Last Transition Time: 2025-11-28T07:02:41Z + Message: successfully add issuing condition to all the certificates + Observed Generation: 1 + Reason: IssueCertificatesSucceeded + Status: True + Type: IssueCertificatesSucceeded + Last Transition Time: 2025-11-28T07:02:46Z + Message: get certificate; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetCertificate + Last Transition Time: 2025-11-28T07:02:46Z + Message: ready condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: ReadyCondition + Last Transition Time: 2025-11-28T07:02:47Z + Message: issue condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IssueCondition + Last Transition Time: 2025-11-28T07:02:47Z + Message: Successfully synced all certificates + Observed Generation: 1 + Reason: CertificateSynced + Status: True + Type: CertificateSynced + Last Transition Time: 2025-11-28T07:02:56Z + Message: pod exists; ConditionStatus:True; PodName:es-demo-0 + Observed Generation: 1 + Status: True + Type: PodExists--es-demo-0 + Last Transition Time: 2025-11-28T07:02:56Z + Message: create es client; ConditionStatus:True; PodName:es-demo-0 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-demo-0 + Last Transition Time: 2025-11-28T07:02:56Z + Message: evict pod; ConditionStatus:True; PodName:es-demo-0 + Observed Generation: 1 + Status: True + Type: EvictPod--es-demo-0 + Last Transition Time: 2025-11-28T07:04:06Z + Message: create es client; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreateEsClient + Last Transition Time: 2025-11-28T07:03:21Z + Message: pod exists; ConditionStatus:True; PodName:es-demo-1 + Observed Generation: 1 + Status: True + Type: PodExists--es-demo-1 + Last Transition Time: 2025-11-28T07:03:21Z + Message: create es client; ConditionStatus:True; PodName:es-demo-1 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-demo-1 + Last Transition Time: 2025-11-28T07:03:21Z + Message: evict pod; ConditionStatus:True; PodName:es-demo-1 + Observed Generation: 
1 + Status: True + Type: EvictPod--es-demo-1 + Last Transition Time: 2025-11-28T07:03:46Z + Message: pod exists; ConditionStatus:True; PodName:es-demo-2 + Observed Generation: 1 + Status: True + Type: PodExists--es-demo-2 + Last Transition Time: 2025-11-28T07:03:46Z + Message: create es client; ConditionStatus:True; PodName:es-demo-2 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-demo-2 + Last Transition Time: 2025-11-28T07:03:46Z + Message: evict pod; ConditionStatus:True; PodName:es-demo-2 + Observed Generation: 1 + Status: True + Type: EvictPod--es-demo-2 + Last Transition Time: 2025-11-28T07:04:11Z + Message: Successfully restarted all the nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2025-11-28T07:04:15Z + Message: Successfully reconfigured TLS + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: +``` + + + +As we can see from the above output, the certificate has been rotated successfully. + +## Change Issuer/ClusterIssuer + +Now, we are going to change the issuer of this database. + +- Let's create a new ca certificate and key using a different subject `CN=ca-update,O=kubedb-updated`. + +```bash +$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=ca-updated/O=kubedb-updated" +.+........+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*........+.....+......+...+.+..............+....+..+.+...+......+.....+.........+............+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*.......+........+.......+...+......+.....+..........+..+.........+......+....+...+..+....+..+.......+............+...+..+...+.+............+..+................+.....+................+.....+.+........+.+.....+.........................+........+......+....+...........+.+....................+.+..+......+......+...+...+...+......+.+...+.........+.....+.......+...+..+.............+.....+.+..............+......+.+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +..+........+...+...............+...+....+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*...+...+...+...................+.....+.+......+.....+.........+....+...+.....+...+.......+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*....+...+..+............+....+..+...+..........+.........+......+.........+...........+....+..+.+..+.......+.....+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +``` + +- Now we are going to create a new ca-secret using the certificate files that we have just generated. + +```bash +$ kubectl create secret tls es-new-ca \ + --cert=ca.crt \ + --key=ca.key \ + --namespace=demo +secret/es-new-ca created + +``` + +Now, Let's create a new `Issuer` using the `mongo-new-ca` secret that we have just created. The `YAML` file looks like this: + +```yaml +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: es-new-issuer + namespace: demo +spec: + ca: + secretName: es-new-ca +``` + +Let's apply the `YAML` file: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/Elasticsearch/reconfigure-tls/Elasticsearch-new-issuer.yaml +issuer.cert-manager.io/es-new-issuer created +``` + +### Create ElasticsearchOpsRequest + +In order to use the new issuer to issue new certificates, we have to create a `ElasticsearchOpsRequest` CRO with the newly created issuer. 
Below is the YAML of the `ElasticsearchOpsRequest` CRO that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: esops-update-issuer + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: es-demo + tls: + issuerRef: + name: es-new-issuer + kind: Issuer + apiGroup: "cert-manager.io" +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing reconfigure TLS operation on `es-demo` cluster. +- `spec.type` specifies that we are performing `ReconfigureTLS` on our Elasticsearch. +- `spec.tls.issuerRef` specifies the issuer name, kind and api group. + +Let's create the `ElasticsearchOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/Elasticsearch/reconfigure-tls/Elasticsearch-update-tls-issuer.yaml +Elasticsearchpsrequest.ops.kubedb.com/esops-update-issuer created +``` + +#### Verify Issuer is changed successfully + +Let's wait for `ElasticsearchOpsRequest` to be `Successful`. Run the following command to watch `ElasticsearchOpsRequest` CRO, + +```bash +$ kubectl get Elasticsearchopsrequests -n demo esops-update-issuer +NAME TYPE STATUS AGE +esops-update-issuer ReconfigureTLS Successful 6m28s +``` + +We can see from the above output that the `ElasticsearchOpsRequest` has succeeded. If we describe the `ElasticsearchOpsRequest` we will get an overview of the steps that were followed. + +```bash +$ kubectl describe Elasticsearchopsrequest -n demo esops-update-issuer +Name: esops-update-issuer +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ElasticsearchOpsRequest +Metadata: + Creation Timestamp: 2025-11-28T09:32:41Z + Generation: 1 + Resource Version: 905680 + UID: 9abdfdc1-2c7e-4d1d-b226-029c0e6d99fc +Spec: + Apply: IfReady + Database Ref: + Name: es-demo + Tls: + Issuer Ref: + API Group: cert-manager.io + Kind: Issuer + Name: es-new-issuer + Type: ReconfigureTLS +Status: + Conditions: + Last Transition Time: 2025-11-28T09:32:41Z + Message: Elasticsearch ops request is reconfiguring TLS + Observed Generation: 1 + Reason: ReconfigureTLS + Status: True + Type: ReconfigureTLS + Last Transition Time: 2025-11-28T09:32:49Z + Message: get certificate; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetCertificate + Last Transition Time: 2025-11-28T09:32:49Z + Message: ready condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: ReadyCondition + Last Transition Time: 2025-11-28T09:32:49Z + Message: issue condition; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IssueCondition + Last Transition Time: 2025-11-28T09:32:49Z + Message: Successfully synced all certificates + Observed Generation: 1 + Reason: CertificateSynced + Status: True + Type: CertificateSynced + Last Transition Time: 2025-11-28T09:33:00Z + Message: pod exists; ConditionStatus:True; PodName:es-demo-0 + Observed Generation: 1 + Status: True + Type: PodExists--es-demo-0 + Last Transition Time: 2025-11-28T09:33:00Z + Message: create es client; ConditionStatus:True; PodName:es-demo-0 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-demo-0 + Last Transition Time: 2025-11-28T09:33:00Z + Message: evict pod; ConditionStatus:True; PodName:es-demo-0 + Observed Generation: 1 + Status: True + Type: EvictPod--es-demo-0 + Last Transition Time: 2025-11-28T09:35:31Z + Message: create es client; ConditionStatus:True + Observed Generation: 1 + 
Status: True + Type: CreateEsClient + Last Transition Time: 2025-11-28T09:33:25Z + Message: pod exists; ConditionStatus:True; PodName:es-demo-1 + Observed Generation: 1 + Status: True + Type: PodExists--es-demo-1 + Last Transition Time: 2025-11-28T09:33:25Z + Message: create es client; ConditionStatus:True; PodName:es-demo-1 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-demo-1 + Last Transition Time: 2025-11-28T09:33:25Z + Message: evict pod; ConditionStatus:True; PodName:es-demo-1 + Observed Generation: 1 + Status: True + Type: EvictPod--es-demo-1 + Last Transition Time: 2025-11-28T09:33:50Z + Message: pod exists; ConditionStatus:True; PodName:es-demo-2 + Observed Generation: 1 + Status: True + Type: PodExists--es-demo-2 + Last Transition Time: 2025-11-28T09:33:50Z + Message: create es client; ConditionStatus:True; PodName:es-demo-2 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-demo-2 + Last Transition Time: 2025-11-28T09:33:50Z + Message: evict pod; ConditionStatus:True; PodName:es-demo-2 + Observed Generation: 1 + Status: True + Type: EvictPod--es-demo-2 + Last Transition Time: 2025-11-28T09:34:15Z + Message: Successfully restarted all the nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2025-11-28T09:34:21Z + Message: Successfully reconfigured TLS + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 6m47s KubeDB Ops-manager Operator Pausing Elasticsearch demo/es-demo + Warning get certificate; ConditionStatus:True 6m39s KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning ready condition; ConditionStatus:True 6m39s KubeDB Ops-manager Operator ready condition; ConditionStatus:True + Warning issue condition; ConditionStatus:True 6m39s KubeDB Ops-manager Operator issue condition; ConditionStatus:True + Warning get certificate; ConditionStatus:True 6m39s KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning ready condition; ConditionStatus:True 6m39s KubeDB Ops-manager Operator ready condition; ConditionStatus:True + Warning issue condition; ConditionStatus:True 6m39s KubeDB Ops-manager Operator issue condition; ConditionStatus:True + Warning get certificate; ConditionStatus:True 6m39s KubeDB Ops-manager Operator get certificate; ConditionStatus:True + Warning ready condition; ConditionStatus:True 6m39s KubeDB Ops-manager Operator ready condition; ConditionStatus:True + Warning issue condition; ConditionStatus:True 6m39s KubeDB Ops-manager Operator issue condition; ConditionStatus:True + Normal CertificateSynced 6m39s KubeDB Ops-manager Operator Successfully synced all certificates + Warning pod exists; ConditionStatus:True; PodName:es-demo-0 6m28s KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-demo-0 + Warning create es client; ConditionStatus:True; PodName:es-demo-0 6m28s KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-demo-0 + Warning evict pod; ConditionStatus:True; PodName:es-demo-0 6m28s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-demo-0 + Warning create es client; ConditionStatus:False 6m23s KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 6m8s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning 
pod exists; ConditionStatus:True; PodName:es-demo-1 6m3s KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-demo-1 + Warning create es client; ConditionStatus:True; PodName:es-demo-1 6m3s KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-demo-1 + Warning evict pod; ConditionStatus:True; PodName:es-demo-1 6m3s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-demo-1 + Warning create es client; ConditionStatus:False 5m58s KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 5m43s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-demo-2 5m38s KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-demo-2 + Warning create es client; ConditionStatus:True; PodName:es-demo-2 5m38s KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-demo-2 + Warning evict pod; ConditionStatus:True; PodName:es-demo-2 5m38s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-demo-2 + Warning create es client; ConditionStatus:False 5m33s KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 5m18s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Normal RestartNodes 5m13s KubeDB Ops-manager Operator Successfully restarted all the nodes + Normal ResumeDatabase 5m7s KubeDB Ops-manager Operator Resuming Elasticsearch demo/es-demo + Normal ResumeDatabase 5m7s KubeDB Ops-manager Operator Successfully resumed Elasticsearch demo/es-demo + Normal Successful 5m7s KubeDB Ops-manager Operator Successfully Reconfigured TLS + Warning pod exists; ConditionStatus:True; PodName:es-demo-0 5m7s KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-demo-0 + Warning create es client; ConditionStatus:True; PodName:es-demo-0 5m7s KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-demo-0 + Warning evict pod; ConditionStatus:True; PodName:es-demo-0 5m7s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-demo-0 + Warning create es client; ConditionStatus:False 5m2s KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 4m47s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-demo-1 4m42s KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-demo-1 + Warning create es client; ConditionStatus:True; PodName:es-demo-1 4m42s KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-demo-1 + Warning evict pod; ConditionStatus:True; PodName:es-demo-1 4m42s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-demo-1 + Warning create es client; ConditionStatus:False 4m37s KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 4m22s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-demo-2 4m17s KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-demo-2 + Warning create es client; ConditionStatus:True; PodName:es-demo-2 4m17s KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-demo-2 + Warning evict pod; ConditionStatus:True; PodName:es-demo-2 4m17s KubeDB Ops-manager 
Operator evict pod; ConditionStatus:True; PodName:es-demo-2 + Warning create es client; ConditionStatus:False 4m12s KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 3m57s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Normal RestartNodes 3m52s KubeDB Ops-manager Operator Successfully restarted all the nodes + +``` + +Now, Let's exec into a Elasticsearch node and find out the ca subject to see if it matches the one we have provided. + +```bash +$ kubectl exec -it -n demo es-demo-0 -- bash +elasticsearch@es-demo-0:~$ openssl x509 -in /usr/share/elasticsearch/config/certs/http/..2025_11_28_09_34_24.3912740802/tls.crt -noout -issuer +issuer=CN = ca-updated, O = kubedb-updated +elasticsearch@es-demo-0:~$ openssl x509 -in /usr/share/elasticsearch/config/certs/transport/..2025_11_28_09_34_24.2105953641/tls.crt -noout -issuer +issuer=CN = ca-updated, O = kubedb-updated + +``` + +We can see from the above output that, the subject name matches the subject name of the new ca certificate that we have created. So, the issuer is changed successfully. + +## Remove TLS from the Database + +Now, we are going to remove TLS from this database using a ElasticsearchOpsRequest. + +### Create ElasticsearchOpsRequest + +Below is the YAML of the `ElasticsearchOpsRequest` CRO that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: esops-remove + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: es-demo + tls: + remove: true +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing reconfigure TLS operation on `es-demo` cluster. +- `spec.type` specifies that we are performing `ReconfigureTLS` on Elasticsearch. +- `spec.tls.remove` specifies that we want to remove tls from this cluster. + +Let's create the `ElasticsearchOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/Elasticsearch/reconfigure-tls/esops-remove.yaml +Elasticsearchopsrequest.ops.kubedb.com/esops-remove created +``` + +#### Verify TLS Removed Successfully + +Let's wait for `ElasticsearchOpsRequest` to be `Successful`. Run the following command to watch `ElasticsearchOpsRequest` CRO, + +```bash +$ kubectl get Elasticsearchopsrequest -n demo esops-remove +NAME TYPE STATUS AGE +esops-remove ReconfigureTLS Successful 3m16s + +``` + +We can see from the above output that the `ElasticsearchOpsRequest` has succeeded. If we describe the `ElasticsearchOpsRequest` we will get an overview of the steps that were followed. 
+ +```bash +$ kubectl describe Elasticsearchopsrequest -n demo esops-remove +Name: esops-remove +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ElasticsearchOpsRequest +Metadata: + Creation Timestamp: 2025-11-28T10:42:00Z + Generation: 1 + Resource Version: 911280 + UID: 7eefbe63-1fcc-4ca3-bb5d-65ec22d7fd9a +Spec: + Apply: IfReady + Database Ref: + Name: es-demo + Tls: + Remove: true + Type: ReconfigureTLS +Status: + Conditions: + Last Transition Time: 2025-11-28T10:42:00Z + Message: Elasticsearch ops request is reconfiguring TLS + Observed Generation: 1 + Reason: ReconfigureTLS + Status: True + Type: ReconfigureTLS + Last Transition Time: 2025-11-28T10:42:14Z + Message: pod exists; ConditionStatus:True; PodName:es-demo-0 + Observed Generation: 1 + Status: True + Type: PodExists--es-demo-0 + Last Transition Time: 2025-11-28T10:42:14Z + Message: create es client; ConditionStatus:True; PodName:es-demo-0 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-demo-0 + Last Transition Time: 2025-11-28T10:42:14Z + Message: evict pod; ConditionStatus:True; PodName:es-demo-0 + Observed Generation: 1 + Status: True + Type: EvictPod--es-demo-0 + Last Transition Time: 2025-11-28T10:43:24Z + Message: create es client; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreateEsClient + Last Transition Time: 2025-11-28T10:42:34Z + Message: pod exists; ConditionStatus:True; PodName:es-demo-1 + Observed Generation: 1 + Status: True + Type: PodExists--es-demo-1 + Last Transition Time: 2025-11-28T10:42:34Z + Message: create es client; ConditionStatus:True; PodName:es-demo-1 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-demo-1 + Last Transition Time: 2025-11-28T10:42:34Z + Message: evict pod; ConditionStatus:True; PodName:es-demo-1 + Observed Generation: 1 + Status: True + Type: EvictPod--es-demo-1 + Last Transition Time: 2025-11-28T10:43:09Z + Message: pod exists; ConditionStatus:True; PodName:es-demo-2 + Observed Generation: 1 + Status: True + Type: PodExists--es-demo-2 + Last Transition Time: 2025-11-28T10:43:09Z + Message: create es client; ConditionStatus:True; PodName:es-demo-2 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-demo-2 + Last Transition Time: 2025-11-28T10:43:09Z + Message: evict pod; ConditionStatus:True; PodName:es-demo-2 + Observed Generation: 1 + Status: True + Type: EvictPod--es-demo-2 + Last Transition Time: 2025-11-28T10:43:29Z + Message: Successfully restarted all the nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2025-11-28T10:43:33Z + Message: Successfully reconfigured TLS + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 3m43s KubeDB Ops-manager Operator Pausing Elasticsearch demo/es-demo + Warning pod exists; ConditionStatus:True; PodName:es-demo-0 3m29s KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-demo-0 + Warning create es client; ConditionStatus:True; PodName:es-demo-0 3m29s KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-demo-0 + Warning evict pod; ConditionStatus:True; PodName:es-demo-0 3m29s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-demo-0 + Warning create es client; ConditionStatus:False 3m24s KubeDB Ops-manager Operator create es client; 
ConditionStatus:False + Warning create es client; ConditionStatus:True 3m14s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-demo-1 3m9s KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-demo-1 + Warning create es client; ConditionStatus:True; PodName:es-demo-1 3m9s KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-demo-1 + Warning evict pod; ConditionStatus:True; PodName:es-demo-1 3m9s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-demo-1 + Warning create es client; ConditionStatus:False 3m4s KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 2m39s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-demo-2 2m34s KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-demo-2 + Warning create es client; ConditionStatus:True; PodName:es-demo-2 2m34s KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-demo-2 + Warning evict pod; ConditionStatus:True; PodName:es-demo-2 2m34s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-demo-2 + Warning create es client; ConditionStatus:False 2m29s KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 2m19s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Normal RestartNodes 2m14s KubeDB Ops-manager Operator Successfully restarted all the nodes + Normal ResumeDatabase 2m10s KubeDB Ops-manager Operator Resuming Elasticsearch demo/es-demo + Normal ResumeDatabase 2m10s KubeDB Ops-manager Operator Successfully resumed Elasticsearch demo/es-demo + Normal Successful 2m10s KubeDB Ops-manager Operator Successfully Reconfigured TLS + +``` + +Now, Let's exec into one of the broker node and find out that TLS is disabled or not. + +```bash +$ kubectl exec -n demo es-demo-0 -- \ + cat /usr/share/elasticsearch/config/elasticsearch.yml | grep -A 2 -i xpack.security + +Defaulted container "elasticsearch" out of: elasticsearch, init-sysctl (init), config-merger (init) +xpack.security.enabled: true + +xpack.security.transport.ssl.enabled: true +xpack.security.transport.ssl.verification_mode: certificate +xpack.security.transport.ssl.key: certs/transport/tls.key +xpack.security.transport.ssl.certificate: certs/transport/tls.crt +xpack.security.transport.ssl.certificate_authorities: [ "certs/transport/ca.crt" ] + +xpack.security.http.ssl.enabled: false + +``` + +So, we can see from the above that, `xpack.security.http.ssl.enabled` is set to `false` which means TLS is disabled for HTTP layer. Also, the transport layer TLS settings are removed from the `elasticsearch.yml` file. + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete elasticsearchopsrequest -n demo add-tls esops-remove esops-rotate esops-update-issuer +kubectl delete Elasticsearch -n demo es-demo +kubectl delete issuer -n demo es-issuer es-new-issuer +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Elasticsearch object](/docs/guides/elasticsearch/concepts/elasticsearch.md). +- Different Elasticsearch topology clustering modes [here](/docs/guides/elasticsearch/clustering/_index.md). 
+- Monitor your Elasticsearch database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/elasticsearch/monitoring/using-prometheus-operator.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). + diff --git a/docs/guides/elasticsearch/reconfigure_tls/es-tls.png b/docs/guides/elasticsearch/reconfigure_tls/es-tls.png new file mode 100644 index 0000000000..e149322f9c Binary files /dev/null and b/docs/guides/elasticsearch/reconfigure_tls/es-tls.png differ diff --git a/docs/guides/elasticsearch/reconfigure_tls/overview.md b/docs/guides/elasticsearch/reconfigure_tls/overview.md new file mode 100644 index 0000000000..82cc42d4ae --- /dev/null +++ b/docs/guides/elasticsearch/reconfigure_tls/overview.md @@ -0,0 +1,54 @@ +--- +title: Reconfiguring TLS/SSL Overview +menu: + docs_{{ .version }}: + identifier: es-reconfigure-tls-overview + name: Overview + parent: es-reconfigure-tls-elasticsearch + weight: 5 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfiguring TLS of Elasticsearch + +This guide will give an overview on how KubeDB Ops-manager operator reconfigures TLS configuration i.e. add TLS, remove TLS, update issuer/cluster issuer or Certificates and rotate the certificates of `Elasticsearch`. + +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: +- [Elasticsearch](/docs/guides/Elasticsearch/concepts/elasticsearch.md) +- [ElasticsearchOpsRequest](/docs/guides/Elasticsearch/concepts/elasticsearch-ops-request.md) + +## How Reconfiguring Elasticsearch TLS Configuration Process Works + +The following diagram shows how KubeDB Ops-manager operator reconfigures TLS of a `Elasticsearch`. Open the image in a new tab to see the enlarged version. + +
+<figure align="center">
+  <img alt="Reconfiguring TLS process of Elasticsearch" src="/docs/guides/elasticsearch/reconfigure_tls/es-tls.png">
+<figcaption align="center">Fig: Reconfiguring TLS process of Elasticsearch</figcaption>
+</figure>
+
+ +The Reconfiguring Elasticsearch TLS process consists of the following steps: + +1. At first, a user creates a `Elasticsearch` Custom Resource Object (CRO). + +2. `KubeDB` Provisioner operator watches the `Elasticsearch` CRO. + +3. When the operator finds a `Elasticsearch` CR, it creates required number of `PetSets` and related necessary stuff like secrets, services, etc. + +4. Then, in order to reconfigure the TLS configuration of the `Elasticsearch` database the user creates a `ElasticsearchOpsRequest` CR with desired information. + +5. `KubeDB` Ops-manager operator watches the `ElasticsearchOpsRequest` CR. + +6. When it finds a `ElasticsearchOpsRequest` CR, it pauses the `Elasticsearch` object which is referred from the `ElasticsearchOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `Elasticsearch` object during the reconfiguring TLS process. + +7. Then the `KubeDB` Ops-manager operator will add, remove, update or rotate TLS configuration based on the Ops Request yaml. + +8. Then the `KubeDB` Ops-manager operator will restart all the Pods of the database so that they restart with the new TLS configuration defined in the `ElasticsearchOpsRequest` CR. + +9. After the successful reconfiguring of the `Elasticsearch` TLS, the `KubeDB` Ops-manager operator resumes the `Elasticsearch` object so that the `KubeDB` Provisioner operator resumes its usual operations. + +In the next docs, we are going to show a step by step guide on reconfiguring TLS configuration of a Elasticsearch database using `ElasticsearchOpsRequest` CRD. \ No newline at end of file diff --git a/docs/guides/elasticsearch/restart/index.md b/docs/guides/elasticsearch/restart/index.md new file mode 100644 index 0000000000..464e673333 --- /dev/null +++ b/docs/guides/elasticsearch/restart/index.md @@ -0,0 +1,345 @@ +--- +title: Elasticsearch Restart +menu: + docs_{{ .version }}: + identifier: es-restart-elasticsearch + name: Restart + parent: es-elasticsearch-guides + weight: 115 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- +> New to KubeDB? Please start [here](/docs/README.md). + +# Restart Elasticsearch + +KubeDB supports restarting an Elasticsearch database using a `ElasticsearchOpsRequest`. Restarting can be +useful if some pods are stuck in a certain state or not functioning correctly. + +This guide will demonstrate how to restart an Elasticsearch cluster using an OpsRequest. + +--- + +## Before You Begin + +- You need a running Kubernetes cluster and a properly configured `kubectl` command-line tool. If you don’t +have a cluster, you can create one using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install the KubeDB CLI on your workstation and the KubeDB operator in your cluster by following the [installation steps](/docs/setup/README.md). + +- For better isolation, this tutorial uses a separate namespace called `demo`: + +```bash +kubectl create ns demo +namespace/demo created +``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/Elasticsearch](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/Elasticsearch) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Deploy Elasticsearch + +In this section, we are going to deploy a Elasticsearch database using KubeDB. 
+ +```yaml +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: es + namespace: demo +spec: + version: xpack-8.2.3 + enableSSL: true + replicas: 3 + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + deletionPolicy: WipeOut +``` + +Let's create the `Elasticsearch` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/elasticsearch/quickstart/overview/elasticsearch/yamls/elasticsearch-v1.yaml +Elasticsearch.kubedb.com/es created +``` +let's wait until all pods are in the `Running` state, + +```shell +kubectl get pods -n demo +NAME READY STATUS RESTARTS AGE +es-0 2/2 Running 0 6m28s +es-1 2/2 Running 0 6m28s +es-2 2/2 Running 0 6m28s +``` + +### Populate Data + +To connect to our Elasticsearch cluster, let's port-forward the Elasticsearch service to local machine: + +```bash +$ kubectl port-forward -n demo svc/sample-es 9200 +Forwarding from 127.0.0.1:9200 -> 9200 +Forwarding from [::1]:9200 -> 9200 +``` + +Keep it like that and switch to another terminal window: + +```bash +$ export ELASTIC_USER=$(kubectl get secret -n demo es-demo -o jsonpath='{.data.username}' | base64 -d) + +$ export ELASTIC_PASSWORD=$(kubectl get secret -n demo es-demo -o jsonpath='{.data.password}' | base64 -d) + +$ curl -XGET -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/_cluster/health?pretty" +{ + "cluster_name" : "sample-es", + "status" : "green", + "timed_out" : false, + "number_of_nodes" : 3, + "number_of_data_nodes" : 3, + "active_primary_shards" : 1, + "active_shards" : 2, + "relocating_shards" : 0, + "initializing_shards" : 0, + "unassigned_shards" : 0, + "delayed_unassigned_shards" : 0, + "number_of_pending_tasks" : 0, + "number_of_in_flight_fetch" : 0, + "task_max_waiting_in_queue_millis" : 0, + "active_shards_percent_as_number" : 100.0 +} +``` + +So, our cluster status is green. Let's create some indices with dummy data: + +```bash +$ curl -XPOST -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/products/_doc?pretty" -H 'Content-Type: application/json' -d ' +{ + "name": "KubeDB", + "vendor": "AppsCode Inc.", + "description": "Database Operator for Kubernetes" +} +' + +$ curl -XPOST -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/companies/_doc?pretty" -H 'Content-Type: application/json' -d ' +{ + "name": "AppsCode Inc.", + "mission": "Accelerate the transition to Containers by building a Kubernetes-native Data Platform", + "products": ["KubeDB", "Stash", "KubeVault", "Kubeform", "ByteBuilders"] +} +' +``` + +Now, let’s verify that the indexes have been created successfully. + +```bash +$ curl -XGET -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/_cat/indices?v&s=index&pretty" +health status index uuid pri rep docs.count docs.deleted store.size pri.store.size +green open .geoip_databases oiaZfJA8Q5CihQon0oR8hA 1 1 42 0 81.6mb 40.8mb +green open companies GuGisWJ8Tkqnq8vhREQ2-A 1 1 1 0 11.5kb 5.7kb +green open products wyu-fImDRr-Hk_GXVF7cDw 1 1 1 0 10.6kb 5.3kb +``` + + +# Apply Restart opsRequest + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: restart + namespace: demo +spec: + type: Restart + databaseRef: + name: es + timeout: 3m + apply: Always +``` + +Here, + +- `spec.type` specifies the type of operation (Restart in this case). + +- `spec.databaseRef` references the Elasticsearch database. 
The OpsRequest must be created in the same namespace as the database. + +- `spec.timeout` the maximum time the operator will wait for the operation to finish before marking it as failed. + +- `spec.apply` determines whether to always apply the operation (Always) or if the database phase is ready (IfReady). + +Let's create the `ElasticsearchOpsRequest` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/elasticsearch/restart/yamls/restart.yaml +ElasticsearchOpsRequest.ops.kubedb.com/restart created +``` + +In a Elasticsearch cluster, all pods act as primary nodes. When you apply a restart OpsRequest, the KubeDB operator will restart the pods sequentially, one by one, to maintain cluster availability. + +Let's watch the rolling restart process with: +```shell +NAME READY STATUS RESTARTS AGE +es-0 2/2 Terminating 0 56m +es-1 2/2 Running 0 55m +es-2 2/2 Running 0 54m +``` + +```shell +NAME READY STATUS RESTARTS AGE +es-0 2/2 Running 0 112s +es-1 2/2 Terminating 0 55m +es-2 2/2 Running 0 56m + +``` +```shell +NAME READY STATUS RESTARTS AGE +es-0 2/2 Running 0 112s +es-1 2/2 Running 0 42s +es-2 2/2 Terminating 0 56m + +``` + +```shell +$ kubectl get Elasticsearchopsrequest -n demo +NAME TYPE STATUS AGE +restart Restart Successful 64m + +$ kubectl get Elasticsearchopsrequest -n demo restart -oyaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"ops.kubedb.com/v1alpha1","kind":"ElasticsearchOpsRequest","metadata":{"annotations":{},"name":"restart","namespace":"demo"},"spec":{"apply":"Always","databaseRef":{"name":"es-quickstart"},"timeout":"3m","type":"Restart"}} + creationTimestamp: "2025-11-11T05:02:36Z" + generation: 1 + name: restart + namespace: demo + resourceVersion: "749630" + uid: 52fe9376-cef4-4171-9ca7-8a0d1be902fb +spec: + apply: Always + databaseRef: + name: es-quickstart + timeout: 3m + type: Restart +status: + conditions: + - lastTransitionTime: "2025-11-11T05:02:36Z" + message: Elasticsearch ops request is restarting nodes + observedGeneration: 1 + reason: Restart + status: "True" + type: Restart + - lastTransitionTime: "2025-11-11T05:02:44Z" + message: pod exists; ConditionStatus:True; PodName:es-quickstart-0 + observedGeneration: 1 + status: "True" + type: PodExists--es-quickstart-0 + - lastTransitionTime: "2025-11-11T05:02:44Z" + message: create es client; ConditionStatus:True; PodName:es-quickstart-0 + observedGeneration: 1 + status: "True" + type: CreateEsClient--es-quickstart-0 + - lastTransitionTime: "2025-11-11T05:02:44Z" + message: evict pod; ConditionStatus:True; PodName:es-quickstart-0 + observedGeneration: 1 + status: "True" + type: EvictPod--es-quickstart-0 + - lastTransitionTime: "2025-11-11T05:03:55Z" + message: create es client; ConditionStatus:True + observedGeneration: 1 + status: "True" + type: CreateEsClient + - lastTransitionTime: "2025-11-11T05:03:09Z" + message: pod exists; ConditionStatus:True; PodName:es-quickstart-1 + observedGeneration: 1 + status: "True" + type: PodExists--es-quickstart-1 + - lastTransitionTime: "2025-11-11T05:03:09Z" + message: create es client; ConditionStatus:True; PodName:es-quickstart-1 + observedGeneration: 1 + status: "True" + type: CreateEsClient--es-quickstart-1 + - lastTransitionTime: "2025-11-11T05:03:09Z" + message: evict pod; ConditionStatus:True; PodName:es-quickstart-1 + observedGeneration: 1 + status: "True" + type: 
EvictPod--es-quickstart-1 + - lastTransitionTime: "2025-11-11T05:03:34Z" + message: pod exists; ConditionStatus:True; PodName:es-quickstart-2 + observedGeneration: 1 + status: "True" + type: PodExists--es-quickstart-2 + - lastTransitionTime: "2025-11-11T05:03:34Z" + message: create es client; ConditionStatus:True; PodName:es-quickstart-2 + observedGeneration: 1 + status: "True" + type: CreateEsClient--es-quickstart-2 + - lastTransitionTime: "2025-11-11T05:03:34Z" + message: evict pod; ConditionStatus:True; PodName:es-quickstart-2 + observedGeneration: 1 + status: "True" + type: EvictPod--es-quickstart-2 + - lastTransitionTime: "2025-11-11T05:03:59Z" + message: Successfully restarted all nodes + observedGeneration: 1 + reason: RestartNodes + status: "True" + type: RestartNodes + - lastTransitionTime: "2025-11-11T05:03:59Z" + message: Successfully completed the modification process. + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful + +``` +**Verify Data Persistence** + +After the restart, reconnect to the database and verify that the previously created database still exists: +Let's port-forward the port `9200` to local machine: + +```bash +$ kubectl port-forward -n demo svc/es-demo 9200 +Forwarding from 127.0.0.1:9200 -> 9200 +Forwarding from [::1]:9200 -> 9200 + +``` + + +Now let's check the data persistencyof our Elasticsearch database. + +```bash +$ curl -XGET -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/_cat/indices?v&s=index&pretty" +health status index uuid pri rep docs.count docs.deleted store.size pri.store.size dataset.size +green open companies 02UKouHARfuMs2lZXMkVQQ 1 1 1 0 13.6kb 6.8kb 6.8kb +green open kubedb-system 2Fr26ppkSyy7uJrkfIhzvg 1 1 1 6 433.3kb 191.1kb 191.1kb +green open products XxAYeIKOSLaOqp2rczCwFg 1 1 1 0 12.4kb 6.2kb 6.2kb + +``` + +As you can see, the previously created indices `companies` and `products` are still present after the restart, confirming data persistence after the restart operation. + + +## Cleaning up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete Elasticsearchopsrequest -n demo restart +kubectl delete Elasticsearch -n demo es +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Elasticsearch object](/docs/guides/elasticsearch/concepts/elasticsearch/index.md). +- Detail concepts of [ElasticsearchopsRequest object](/docs/guides/elasticsearch/concepts/elasticsearch-ops-request/index.md). +- Want to hack on KubeDB? 
Check our [contribution guidelines](/docs/CONTRIBUTING.md) diff --git a/docs/guides/elasticsearch/rotateauth/_index.md b/docs/guides/elasticsearch/rotateauth/_index.md index 608378e4d2..4b985eccb3 100644 --- a/docs/guides/elasticsearch/rotateauth/_index.md +++ b/docs/guides/elasticsearch/rotateauth/_index.md @@ -5,7 +5,7 @@ menu: identifier: es-rotateauth-elasticsearch name: Rotate Authentication parent: es-elasticsearch-guides - weight: 45 + weight: 125 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/elasticsearch/scaling/_index.md b/docs/guides/elasticsearch/scaling/_index.md new file mode 100644 index 0000000000..9a04cb916c --- /dev/null +++ b/docs/guides/elasticsearch/scaling/_index.md @@ -0,0 +1,10 @@ +--- +title: Elasticsearch Scalling +menu: + docs_{{ .version }}: + identifier: es-scalling-elasticsearch + name: Scalling + parent: es-elasticsearch-guides + weight: 105 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/elasticsearch/scaling/horizontal/_index.md b/docs/guides/elasticsearch/scaling/horizontal/_index.md new file mode 100644 index 0000000000..d4644f51ea --- /dev/null +++ b/docs/guides/elasticsearch/scaling/horizontal/_index.md @@ -0,0 +1,10 @@ +--- +title: Elasticsearch Horizontal Scaling +menu: + docs_{{ .version }}: + identifier: es-horizontal-scalling-elasticsearch + name: Horizontal Scaling + parent: es-scalling-elasticsearch + weight: 10 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/elasticsearch/scaling/horizontal/combined.md b/docs/guides/elasticsearch/scaling/horizontal/combined.md new file mode 100644 index 0000000000..605e1ce7b8 --- /dev/null +++ b/docs/guides/elasticsearch/scaling/horizontal/combined.md @@ -0,0 +1,484 @@ +--- +title: Horizontal Scaling Combined Elasticsearch +menu: + docs_{{ .version }}: + identifier: es-horizontal-scaling-combined + name: Combined Cluster + parent: es-horizontal-scalling-elasticsearch + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Horizontal Scale Elasticsearch Combined Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to scale the Elasticsearch combined cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md) + - [Combined](/docs/guides/elasticsearch/clustering/combined-cluster/index.md) + - [ElasticsearchOpsRequest](/docs/guides/elasticsearch/concepts/elasticsearch-ops-request/index.md) + - [Horizontal Scaling Overview](/docs/guides/elasticsearch/scaling/horizontal/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/Elasticsearch](/docs/examples/elasticsearch) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. 
+ +## Apply Horizontal Scaling on Combined Cluster + +Here, we are going to deploy a `Elasticsearch` combined cluster using a supported version by `KubeDB` operator. Then we are going to apply horizontal scaling on it. + +### Prepare Elasticsearch Combined cluster + +Now, we are going to deploy a `Elasticsearch` combined cluster with version `xpack-9.1.4`. + +### Deploy Elasticsearch combined cluster + +In this section, we are going to deploy a Elasticsearch combined cluster. Then, in the next section we will scale the cluster using `ElasticsearchOpsRequest` CRD. Below is the YAML of the `Elasticsearch` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: es + namespace: demo +spec: + version: xpack-9.1.4 + enableSSL: true + replicas: 2 + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + deletionPolicy: WipeOut +``` + +Let's create the `Elasticsearch` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/overview/quickstart/elasticsearch/yamls/elasticsearch-v1.yaml +Elasticsearch.kubedb.com/es created +``` + +Now, wait until `es` has status `Ready`. i.e, + +```bash +$ kubectl get es -n demo +NAME VERSION STATUS AGE +es xpack-9.1.4 Ready 3m53s +``` + +Let's check the number of replicas has from Elasticsearch object, number of pods the petset have, + +```bash +$ kubectl get elasticsearch -n demo es -o json | jq '.spec.replicas' +2 +$ kubectl get petsets -n demo es -o json | jq '.spec.replicas' +2 + +``` + +We can see from both command that the cluster has 2 replicas. + +Also, we can verify the replicas of the combined from an internal Elasticsearch command by exec into a replica. + +Now lets check the number of replicas, + +```bash +$ kubectl get all,secret,pvc -n demo -l 'app.kubernetes.io/instance=es' +NAME READY STATUS RESTARTS AGE +pod/es-0 1/1 Running 0 5m +pod/es-1 1/1 Running 0 4m54s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/es ClusterIP 10.43.72.228 9200/TCP 5m5s +service/es-master ClusterIP None 9300/TCP 5m5s +service/es-pods ClusterIP None 9200/TCP 5m5s + +NAME TYPE VERSION AGE +appbinding.appcatalog.appscode.com/es kubedb.com/elasticsearch 9.1.4 5m2s + +NAME TYPE DATA AGE +secret/es-apm-system-cred kubernetes.io/basic-auth 2 5m4s +secret/es-auth kubernetes.io/basic-auth 2 5m8s +secret/es-beats-system-cred kubernetes.io/basic-auth 2 5m4s +secret/es-ca-cert kubernetes.io/tls 2 5m9s +secret/es-client-cert kubernetes.io/tls 3 5m8s +secret/es-config Opaque 1 5m8s +secret/es-http-cert kubernetes.io/tls 3 5m8s +secret/es-kibana-system-cred kubernetes.io/basic-auth 2 5m4s +secret/es-logstash-system-cred kubernetes.io/basic-auth 2 5m4s +secret/es-remote-monitoring-user-cred kubernetes.io/basic-auth 2 5m4s +secret/es-transport-cert kubernetes.io/tls 3 5m8s + +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE +persistentvolumeclaim/data-es-0 Bound pvc-7c8cc17d-7427-4411-9262-f213e826540b 1Gi RWO standard 5m5s +persistentvolumeclaim/data-es-1 Bound pvc-f2cf7ac9-b0c2-4c44-93dc-476cc06c25b4 1Gi RWO standard 4m59s + +``` + +We can see from the above output that the Elasticsearch has 2 nodes. + +We are now ready to apply the `ElasticsearchOpsRequest` CR to scale this cluster. + +## Scale Up Replicas + +Here, we are going to scale up the replicas of the combined cluster to meet the desired number of replicas after scaling. 
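+
+Before creating the ops request, you can also confirm the current node count from Elasticsearch itself. The following is an optional, illustrative check (it assumes the auto-generated `es-auth` secret shown in the listing above and that `curl` is available inside the Elasticsearch container); `number_of_nodes` should report `2` at this point:
+
+```bash
+$ export ELASTIC_USER=$(kubectl get secret -n demo es-auth -o jsonpath='{.data.username}' | base64 -d)
+$ export ELASTIC_PASSWORD=$(kubectl get secret -n demo es-auth -o jsonpath='{.data.password}' | base64 -d)
+
+# The credentials are expanded by the local shell before the command is sent to the pod.
+$ kubectl exec -n demo es-0 -- \
+    curl -s -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/_cluster/health?pretty" | grep number_of_nodes
+```
+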
+ +#### Create ElasticsearchOpsRequest + +In order to scale up the replicas of the combined cluster, we have to create a `ElasticsearchOpsRequest` CR with our desired replicas. Below is the YAML of the `ElasticsearchOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: esops-hscale-up-combined + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: es + horizontalScaling: + node: 3 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling operation on `es` cluster. +- `spec.type` specifies that we are performing `HorizontalScaling` on Elasticsearch. +- `spec.horizontalScaling.node` specifies the desired replicas after scaling. + +Let's create the `ElasticsearchOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/scaling/horizontal/Elasticsearch-hscale-up-combined.yaml +Elasticsearchopsrequest.ops.kubedb.com/esops-hscale-up-combined created +``` + +#### Verify Combined cluster replicas scaled up successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `Elasticsearch` object and related `PetSets` and `Pods`. + +Let's wait for `ElasticsearchOpsRequest` to be `Successful`. Run the following command to watch `ElasticsearchOpsRequest` CR, + +```bash +$ kubectl get Elasticsearchopsrequest -n demo +NAME TYPE STATUS AGE +esops-hscale-up-combined HorizontalScaling Successful 2m42s +``` + +We can see from the above output that the `ElasticsearchOpsRequest` has succeeded. If we describe the `ElasticsearchOpsRequest` we will get an overview of the steps that were followed to scale the cluster. + +```bash +$ kubectl describe Elasticsearchopsrequests -n demo esops-hscale-up-combined +Name: esops-hscale-up-combined +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ElasticsearchOpsRequest +Metadata: + Creation Timestamp: 2025-11-13T10:25:18Z + Generation: 1 + Resource Version: 810747 + UID: 29134aef-1379-4e4f-91c8-23b1cf74c784 +Spec: + Apply: IfReady + Database Ref: + Name: es + Horizontal Scaling: + Node: 3 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2025-11-13T10:25:58Z + Message: Elasticsearch ops request is horizontally scaling the nodes. + Observed Generation: 1 + Reason: HorizontalScale + Status: True + Type: HorizontalScale + Last Transition Time: 2025-11-13T10:26:06Z + Message: patch pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPetSet + Last Transition Time: 2025-11-13T10:26:26Z + Message: is node in cluster; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IsNodeInCluster + Last Transition Time: 2025-11-13T10:26:31Z + Message: ScaleUp es nodes + Observed Generation: 1 + Reason: HorizontalScaleCombinedNode + Status: True + Type: HorizontalScaleCombinedNode + Last Transition Time: 2025-11-13T10:26:36Z + Message: successfully updated Elasticsearch CR + Observed Generation: 1 + Reason: UpdateDatabase + Status: True + Type: UpdateDatabase + Last Transition Time: 2025-11-13T10:26:36Z + Message: Successfully Horizontally Scaled. 
+    Observed Generation:   1
+    Reason:                Successful
+    Status:                True
+    Type:                  Successful
+  Observed Generation:     1
+  Phase:                   Successful
+Events:
+  Type     Reason                                     Age    From                         Message
+  ----     ------                                     ----   ----                         -------
+  Normal   PauseDatabase                              2m54s  KubeDB Ops-manager Operator  Pausing Elasticsearch demo/es
+  Warning  patch pet set; ConditionStatus:True        2m46s  KubeDB Ops-manager Operator  patch pet set; ConditionStatus:True
+  Warning  is node in cluster; ConditionStatus:False  2m41s  KubeDB Ops-manager Operator  is node in cluster; ConditionStatus:False
+  Warning  is node in cluster; ConditionStatus:True   2m26s  KubeDB Ops-manager Operator  is node in cluster; ConditionStatus:True
+  Normal   HorizontalScaleCombinedNode                2m21s  KubeDB Ops-manager Operator  ScaleUp es nodes
+  Normal   UpdateDatabase                             2m16s  KubeDB Ops-manager Operator  successfully updated Elasticsearch CR
+  Normal   ResumeDatabase                             2m16s  KubeDB Ops-manager Operator  Resuming Elasticsearch demo/es
+  Normal   ResumeDatabase                             2m16s  KubeDB Ops-manager Operator  Successfully resumed Elasticsearch demo/es
+  Normal   Successful                                 2m16s  KubeDB Ops-manager Operator  Successfully Horizontally Scaled Database
+
+```
+
+Now, we are going to verify the number of replicas this cluster has from the Elasticsearch object and the number of pods the PetSet has,
+
+```bash
+$ kubectl get Elasticsearch -n demo es -o json | jq '.spec.replicas'
+3
+
+$ kubectl get petset -n demo es -o json | jq '.spec.replicas'
+3
+```
+
+From all the above outputs we can see that the combined Elasticsearch cluster now has `3` nodes. That means we have successfully scaled up the replicas of the Elasticsearch combined cluster.
+
+### Scale Down Replicas
+
+Here, we are going to scale down the replicas of the Elasticsearch combined cluster to meet the desired number of replicas after scaling.
+
+#### Create ElasticsearchOpsRequest
+
+In order to scale down the replicas of the Elasticsearch combined cluster, we have to create an `ElasticsearchOpsRequest` CR with our desired replicas. Below is the YAML of the `ElasticsearchOpsRequest` CR that we are going to create,
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: ElasticsearchOpsRequest
+metadata:
+  name: esops-hscale-down-combined
+  namespace: demo
+spec:
+  type: HorizontalScaling
+  databaseRef:
+    name: es
+  horizontalScaling:
+    node: 2
+```
+
+Here,
+
+- `spec.databaseRef.name` specifies that we are performing a horizontal scale-down operation on the `es` cluster.
+- `spec.type` specifies that we are performing `HorizontalScaling` on Elasticsearch.
+- `spec.horizontalScaling.node` specifies the desired replicas after scaling.
+
+Let's create the `ElasticsearchOpsRequest` CR we have shown above,
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/scaling/horizontal/Elasticsearch-hscale-down-combined.yaml
+Elasticsearchopsrequest.ops.kubedb.com/esops-hscale-down-combined created
+```
+
+#### Verify Combined cluster replicas scaled down successfully
+
+If everything goes well, `KubeDB` Ops-manager operator will update the replicas of the `Elasticsearch` object and the related `PetSets` and `Pods`.
+
+Let's wait for `ElasticsearchOpsRequest` to be `Successful`. Run the following command to watch `ElasticsearchOpsRequest` CR,
+
+```bash
+$ kubectl get Elasticsearchopsrequest -n demo
+NAME                         TYPE                STATUS       AGE
+esops-hscale-down-combined   HorizontalScaling   Successful   76s
+```
+
+We can see from the above output that the `ElasticsearchOpsRequest` has succeeded.
If we describe the `ElasticsearchOpsRequest` we will get an overview of the steps that were followed to scale the cluster. + +```bash +$ kubectl describe Elasticsearchopsrequests -n demo esops-hscale-down-combined +Name: esops-hscale-down-combined +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ElasticsearchOpsRequest +Metadata: + Creation Timestamp: 2025-11-13T10:46:22Z + Generation: 1 + Resource Version: 811301 + UID: 558530d7-5d02-4757-b459-476129b411d6 +Spec: + Apply: IfReady + Database Ref: + Name: es + Horizontal Scaling: + Node: 2 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2025-11-13T10:46:22Z + Message: Elasticsearch ops request is horizontally scaling the nodes. + Observed Generation: 1 + Reason: HorizontalScale + Status: True + Type: HorizontalScale + Last Transition Time: 2025-11-13T10:46:30Z + Message: create es client; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreateEsClient + Last Transition Time: 2025-11-13T10:46:30Z + Message: get voting config exclusion; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetVotingConfigExclusion + Last Transition Time: 2025-11-13T10:46:31Z + Message: exclude node allocation; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: ExcludeNodeAllocation + Last Transition Time: 2025-11-13T10:46:31Z + Message: get used data nodes; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetUsedDataNodes + Last Transition Time: 2025-11-13T10:46:31Z + Message: move data; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: MoveData + Last Transition Time: 2025-11-13T10:46:31Z + Message: patch pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPetSet + Last Transition Time: 2025-11-13T10:46:35Z + Message: get pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPod + Last Transition Time: 2025-11-13T10:46:35Z + Message: delete pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePvc + Last Transition Time: 2025-11-13T10:46:40Z + Message: get pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPvc + Last Transition Time: 2025-11-13T10:46:45Z + Message: delete voting config exclusion; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeleteVotingConfigExclusion + Last Transition Time: 2025-11-13T10:46:45Z + Message: delete node allocation exclusion; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeleteNodeAllocationExclusion + Last Transition Time: 2025-11-13T10:46:45Z + Message: ScaleDown es nodes + Observed Generation: 1 + Reason: HorizontalScaleCombinedNode + Status: True + Type: HorizontalScaleCombinedNode + Last Transition Time: 2025-11-13T10:46:51Z + Message: successfully updated Elasticsearch CR + Observed Generation: 1 + Reason: UpdateDatabase + Status: True + Type: UpdateDatabase + Last Transition Time: 2025-11-13T10:46:51Z + Message: Successfully Horizontally Scaled. 
+ Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 112s KubeDB Ops-manager Operator Pausing Elasticsearch demo/es + Warning create es client; ConditionStatus:True 104s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning get voting config exclusion; ConditionStatus:True 104s KubeDB Ops-manager Operator get voting config exclusion; ConditionStatus:True + Warning exclude node allocation; ConditionStatus:True 103s KubeDB Ops-manager Operator exclude node allocation; ConditionStatus:True + Warning get used data nodes; ConditionStatus:True 103s KubeDB Ops-manager Operator get used data nodes; ConditionStatus:True + Warning move data; ConditionStatus:True 103s KubeDB Ops-manager Operator move data; ConditionStatus:True + Warning patch pet set; ConditionStatus:True 103s KubeDB Ops-manager Operator patch pet set; ConditionStatus:True + Warning get pod; ConditionStatus:True 99s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 99s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:False 99s KubeDB Ops-manager Operator get pvc; ConditionStatus:False + Warning get pod; ConditionStatus:True 94s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 94s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:True 94s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning create es client; ConditionStatus:True 89s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning delete voting config exclusion; ConditionStatus:True 89s KubeDB Ops-manager Operator delete voting config exclusion; ConditionStatus:True + Warning delete node allocation exclusion; ConditionStatus:True 89s KubeDB Ops-manager Operator delete node allocation exclusion; ConditionStatus:True + Normal HorizontalScaleCombinedNode 89s KubeDB Ops-manager Operator ScaleDown es nodes + Normal UpdateDatabase 83s KubeDB Ops-manager Operator successfully updated Elasticsearch CR + Normal ResumeDatabase 83s KubeDB Ops-manager Operator Resuming Elasticsearch demo/es + Normal ResumeDatabase 83s KubeDB Ops-manager Operator Successfully resumed Elasticsearch demo/es + Normal Successful 83s KubeDB Ops-manager Operator Successfully Horizontally Scaled Database +``` + +Now, we are going to verify the number of replicas this cluster has from the Elasticsearch object, number of pods the petset have, + +```bash +$ kubectl get Elasticsearch -n demo es -o json | jq '.spec.replicas' +2 + +$ kubectl get petset -n demo es -o json | jq '.spec.replicas' +2 +``` + + +From all the above outputs we can see that the replicas of the combined cluster is `2`. That means we have successfully scaled down the replicas of the Elasticsearch combined cluster. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete es -n demo es +kubectl delete Elasticsearchopsrequest -n demo esops-hscale-up-combined esops-hscale-down-combined +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Elasticsearch object](/docs/guides/elasticsearch/concepts/elasticsearch/index.md). +- Different Elasticsearch topology clustering modes [here](/docs/guides/elasticsearch/clustering/_index.md). 
+- Monitor your Elasticsearch with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/elasticsearch/monitoring/using-prometheus-operator.md).
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/elasticsearch/scaling/horizontal/overview.md b/docs/guides/elasticsearch/scaling/horizontal/overview.md
new file mode 100644
index 0000000000..3c4033673f
--- /dev/null
+++ b/docs/guides/elasticsearch/scaling/horizontal/overview.md
@@ -0,0 +1,57 @@
+---
+title: Elasticsearch Horizontal Scaling Overview
+menu:
+  docs_{{ .version }}:
+    identifier: es-horizontal-scalling-overview
+    name: Overview
+    parent: es-horizontal-scalling-elasticsearch
+    weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Elasticsearch Horizontal Scaling
+
+This guide will give an overview of how the KubeDB Ops-manager operator scales the replicas of an `Elasticsearch` cluster up or down, both for combined clusters and for the individual node types (master, data, ingest) of a topology cluster.
+
+## Before You Begin
+
+- You should be familiar with the following `KubeDB` concepts:
+  - [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md)
+  - [ElasticsearchOpsRequest](/docs/guides/elasticsearch/concepts/elasticsearch-ops-request/index.md)
+
+## How Horizontal Scaling Process Works
+
+The following diagram shows how the KubeDB Ops-manager operator scales up or down `Elasticsearch` database components. Open the image in a new tab to see the enlarged version.
+
+[//]: # (
) + +[//]: # (  Horizontal scaling process of Elasticsearch) + +[//]: # (
Fig: Horizontal scaling process of Elasticsearch
) + +[//]: # (
) + +The Horizontal scaling process consists of the following steps: + +1. At first, a user creates a `Elasticsearch` Custom Resource (CR). + +2. `KubeDB` Provisioner operator watches the `Elasticsearch` CR. + +3. When the operator finds a `Elasticsearch` CR, it creates required number of `PetSets` and related necessary stuff like secrets, services, etc. + +4. Then, in order to scale the various components of the `Elasticsearch` cluster, the user creates a `ElasticsearchOpsRequest` CR with desired information. + +5. `KubeDB` Ops-manager operator watches the `ElasticsearchOpsRequest` CR. + +6. When it finds a `ElasticsearchOpsRequest` CR, it halts the `Elasticsearch` object which is referred from the `ElasticsearchOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `Elasticsearch` object during the horizontal scaling process. + +7. Then the `KubeDB` Ops-manager operator will scale the related PetSet Pods to reach the expected number of replicas defined in the `ElasticsearchOpsRequest` CR. + +8. After the successfully scaling the replicas of the related PetSet Pods, the `KubeDB` Ops-manager operator updates the number of replicas in the `Elasticsearch` object to reflect the updated state. + +9. After the successful scaling of the `Elasticsearch` replicas, the `KubeDB` Ops-manager operator resumes the `Elasticsearch` object so that the `KubeDB` Provisioner operator resumes its usual operations. + +In the next docs, we are going to show a step by step guide on horizontal scaling of Elasticsearch cluster using `ElasticsearchOpsRequest` CRD. \ No newline at end of file diff --git a/docs/guides/elasticsearch/scaling/horizontal/topology.md b/docs/guides/elasticsearch/scaling/horizontal/topology.md new file mode 100644 index 0000000000..8080f872e1 --- /dev/null +++ b/docs/guides/elasticsearch/scaling/horizontal/topology.md @@ -0,0 +1,625 @@ +--- +title: Horizontal Scaling Topology Elasticsearch +menu: + docs_{{ .version }}: + identifier: es-horizontal-scaling-Topology + name: Topology Cluster + parent: es-horizontal-scalling-elasticsearch + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Horizontal Scale Elasticsearch Topology Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to scale the Elasticsearch Topology cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md) + - [Topology](/docs/guides/elasticsearch/clustering/topology-cluster/index.md) + - [ElasticsearchOpsRequest](/docs/guides/elasticsearch/concepts/elasticsearch-ops-request/index.md) + - [Horizontal Scaling Overview](/docs/guides/elasticsearch/scaling/horizontal/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. 
+ +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/Elasticsearch](/docs/examples/elasticsearch) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Apply Horizontal Scaling on Topology Cluster + +Here, we are going to deploy a `Elasticsearch` Topology cluster using a supported version by `KubeDB` operator. Then we are going to apply horizontal scaling on it. + +### Prepare Elasticsearch Topology cluster + +Now, we are going to deploy a `Elasticsearch` Topology cluster with version `xpack-8.11.1`. + +### Deploy Elasticsearch Topology cluster + +In this section, we are going to deploy a Elasticsearch Topology cluster. Then, in the next section we will scale the cluster using `ElasticsearchOpsRequest` CRD. Below is the YAML of the `Elasticsearch` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: es-hscale-topology + namespace: demo +spec: + enableSSL: true + version: xpack-8.11.1 + storageType: Durable + topology: + master: + replicas: 3 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 3 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + ingest: + replicas: 3 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` + +Let's create the `Elasticsearch` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/clustering/topology.yaml +Elasticsearch.kubedb.com/es-hscale-topology created +``` + +Now, wait until `es-hscale-topology` has status `Ready`. i.e, + +```bash +$ kubectl get es -n demo +NAME VERSION STATUS AGE +es-hscale-topology xpack-8.11.1 Ready 3m53s +``` + +Let's check the number of replicas has from Elasticsearch object, number of pods the petset have, + +```bash +$ kubectl get elasticsearch -n demo es-hscale-topology -o json | jq '.spec.topology.master.replicas' +3 +$ kubectl get elasticsearch -n demo es-hscale-topology -o json | jq '.spec.topology.ingest.replicas' +3 +$ kubectl get elasticsearch -n demo es-hscale-topology -o json | jq '.spec.topology.data.replicas' +3 +``` + +We can see from both command that the cluster has 3 replicas. + +Also, we can verify the replicas of the Topology from an internal Elasticsearch command by exec into a replica. 
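+
+For example, a quick way to do that is to call the `_cat/nodes` API from inside one of the pods. This is an illustrative sketch (it assumes the auto-generated `es-hscale-topology-auth` secret, visible in the resource listing below, and that `curl` is available inside the Elasticsearch container); every master, data, and ingest replica should show up in the output:
+
+```bash
+$ export ELASTIC_USER=$(kubectl get secret -n demo es-hscale-topology-auth -o jsonpath='{.data.username}' | base64 -d)
+$ export ELASTIC_PASSWORD=$(kubectl get secret -n demo es-hscale-topology-auth -o jsonpath='{.data.password}' | base64 -d)
+
+# The credentials are expanded by the local shell before the command is sent to the pod.
+$ kubectl exec -n demo es-hscale-topology-master-0 -- \
+    curl -s -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/_cat/nodes?v&h=name,node.role"
+```
+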
+ +Now lets check the number of replicas, + +```bash +$ kubectl get all,secret,pvc -n demo -l 'app.kubernetes.io/instance=es-hscale-topology' +NAME READY STATUS RESTARTS AGE +pod/es-hscale-topology-data-0 1/1 Running 0 27m +pod/es-hscale-topology-data-1 1/1 Running 0 25m +pod/es-hscale-topology-data-2 1/1 Running 0 24m +pod/es-hscale-topology-ingest-0 1/1 Running 0 27m +pod/es-hscale-topology-ingest-1 1/1 Running 0 25m +pod/es-hscale-topology-ingest-2 1/1 Running 0 24m +pod/es-hscale-topology-master-0 1/1 Running 0 27m +pod/es-hscale-topology-master-1 1/1 Running 0 25m +pod/es-hscale-topology-master-2 1/1 Running 0 24m + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/es-hscale-topology ClusterIP 10.43.33.118 9200/TCP 27m +service/es-hscale-topology-master ClusterIP None 9300/TCP 27m +service/es-hscale-topology-pods ClusterIP None 9200/TCP 27m + +NAME TYPE VERSION AGE +appbinding.appcatalog.appscode.com/es-hscale-topology kubedb.com/elasticsearch 8.11.1 27m + +NAME TYPE DATA AGE +secret/es-hscale-topology-apm-system-cred kubernetes.io/basic-auth 2 27m +secret/es-hscale-topology-auth kubernetes.io/basic-auth 2 27m +secret/es-hscale-topology-beats-system-cred kubernetes.io/basic-auth 2 27m +secret/es-hscale-topology-ca-cert kubernetes.io/tls 2 27m +secret/es-hscale-topology-client-cert kubernetes.io/tls 3 27m +secret/es-hscale-topology-config Opaque 1 27m +secret/es-hscale-topology-http-cert kubernetes.io/tls 3 27m +secret/es-hscale-topology-kibana-system-cred kubernetes.io/basic-auth 2 27m +secret/es-hscale-topology-logstash-system-cred kubernetes.io/basic-auth 2 27m +secret/es-hscale-topology-remote-monitoring-user-cred kubernetes.io/basic-auth 2 27m +secret/es-hscale-topology-transport-cert kubernetes.io/tls 3 27m + +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE +persistentvolumeclaim/data-es-hscale-topology-data-0 Bound pvc-ce9ce1ec-a2db-43c8-9d40-d158f53f25fe 1Gi RWO standard 27m +persistentvolumeclaim/data-es-hscale-topology-data-1 Bound pvc-babfc22c-1e29-44e3-a094-8fa48876db68 1Gi RWO standard 25m +persistentvolumeclaim/data-es-hscale-topology-data-2 Bound pvc-c0e64663-1cc4-420c-85b9-4f643c76f006 1Gi RWO standard 24m +persistentvolumeclaim/data-es-hscale-topology-ingest-0 Bound pvc-3de6c8f6-17aa-43d8-8c10-8cbd2dc543aa 1Gi RWO standard 27m +persistentvolumeclaim/data-es-hscale-topology-ingest-1 Bound pvc-d990c570-c687-4192-ad2e-bad127b7b5db 1Gi RWO standard 25m +persistentvolumeclaim/data-es-hscale-topology-ingest-2 Bound pvc-4540c342-811a-4b82-970e-0e6d29e80e9b 1Gi RWO standard 24m +persistentvolumeclaim/data-es-hscale-topology-master-0 Bound pvc-902a0ebb-b6fb-4106-8220-f137972a84be 1Gi RWO standard 27m +persistentvolumeclaim/data-es-hscale-topology-master-1 Bound pvc-f97215e6-1a91-4e77-8bfb-78d907828e51 1Gi RWO standard 25m +persistentvolumeclaim/data-es-hscale-topology-master-2 Bound pvc-a9160094-c08e-4d40-b4ea-ec5681f8be30 1Gi RWO standard 24m + +``` + +We can see from the above output that the Elasticsearch has 3 nodes. + +We are now ready to apply the `ElasticsearchOpsRequest` CR to scale this cluster. + + +### Scale Down Replicas + +Here, we are going to scale down the replicas of the Elasticsearch Topology cluster to meet the desired number of replicas after scaling. + +#### Create ElasticsearchOpsRequest + +In order to scale down the replicas of the Elasticsearch Topology cluster, we have to create a `ElasticsearchOpsRequest` CR with our desired replicas. 
Below is the YAML of the `ElasticsearchOpsRequest` CR that we are going to create,
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: ElasticsearchOpsRequest
+metadata:
+  name: esops-hscale-down-topology
+  namespace: demo
+spec:
+  type: HorizontalScaling
+  databaseRef:
+    name: es-hscale-topology
+  horizontalScaling:
+    topology:
+      master: 2
+      ingest: 2
+      data: 2
+```
+
+Here,
+
+- `spec.databaseRef.name` specifies that we are performing a horizontal scale-down operation on the `es-hscale-topology` cluster.
+- `spec.type` specifies that we are performing `HorizontalScaling` on Elasticsearch.
+- `spec.horizontalScaling.topology` specifies the desired number of replicas for each node type of an Elasticsearch running in cluster topology mode (i.e. `Elasticsearch.spec.topology` is `not empty`).
+  - `topology.master` specifies the desired number of master node replicas after scaling.
+  - `topology.data` specifies the desired number of data node replicas after scaling.
+  - `topology.ingest` specifies the desired number of ingest node replicas after scaling.
+
+Let's create the `ElasticsearchOpsRequest` CR we have shown above,
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/scaling/horizontal/Elasticsearch-hscale-down-Topology.yaml
+Elasticsearchopsrequest.ops.kubedb.com/esops-hscale-down-topology created
+```
+
+#### Verify Topology cluster replicas scaled down successfully
+
+If everything goes well, `KubeDB` Ops-manager operator will update the replicas of the `Elasticsearch` object and the related `PetSets` and `Pods`.
+
+Let's wait for `ElasticsearchOpsRequest` to be `Successful`. Run the following command to watch `ElasticsearchOpsRequest` CR,
+
+```bash
+$ kubectl get Elasticsearchopsrequest -n demo
+NAME                         TYPE                STATUS       AGE
+esops-hscale-down-topology   HorizontalScaling   Successful   76s
+```
+
+We can see from the above output that the `ElasticsearchOpsRequest` has succeeded. If we describe the `ElasticsearchOpsRequest` we will get an overview of the steps that were followed to scale the cluster.
+
+```bash
+$ kubectl describe Elasticsearchopsrequests -n demo esops-hscale-down-topology
+Name:         esops-hscale-down-topology
+Namespace:    demo
+Labels:       <none>
+Annotations:  <none>
+API Version:  ops.kubedb.com/v1alpha1
+Kind:         ElasticsearchOpsRequest
+Metadata:
+  Creation Timestamp:  2025-11-17T12:01:29Z
+  Generation:          1
+  Resource Version:    11617
+  UID:                 4b4f9728-b31e-4336-a95c-cf34d97d8b4a
+Spec:
+  Apply:  IfReady
+  Database Ref:
+    Name:  es-hscale-topology
+  Horizontal Scaling:
+    Topology:
+      Data:    2
+      Ingest:  2
+      Master:  2
+  Type:        HorizontalScaling
+Status:
+  Conditions:
+    Last Transition Time:  2025-11-17T12:01:29Z
+    Message:               Elasticsearch ops request is horizontally scaling the nodes.
+ Observed Generation: 1 + Reason: HorizontalScale + Status: True + Type: HorizontalScale + Last Transition Time: 2025-11-17T12:01:37Z + Message: create es client; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreateEsClient + Last Transition Time: 2025-11-17T12:01:37Z + Message: patch pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPetSet + Last Transition Time: 2025-11-17T12:01:42Z + Message: get pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPod + Last Transition Time: 2025-11-17T12:01:42Z + Message: delete pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePvc + Last Transition Time: 2025-11-17T12:02:27Z + Message: get pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPvc + Last Transition Time: 2025-11-17T12:01:52Z + Message: ScaleDown es-hscale-topology-ingest nodes + Observed Generation: 1 + Reason: HorizontalScaleIngestNode + Status: True + Type: HorizontalScaleIngestNode + Last Transition Time: 2025-11-17T12:01:57Z + Message: exclude node allocation; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: ExcludeNodeAllocation + Last Transition Time: 2025-11-17T12:01:57Z + Message: get used data nodes; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetUsedDataNodes + Last Transition Time: 2025-11-17T12:01:57Z + Message: move data; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: MoveData + Last Transition Time: 2025-11-17T12:02:12Z + Message: delete node allocation exclusion; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeleteNodeAllocationExclusion + Last Transition Time: 2025-11-17T12:02:12Z + Message: ScaleDown es-hscale-topology-data nodes + Observed Generation: 1 + Reason: HorizontalScaleDataNode + Status: True + Type: HorizontalScaleDataNode + Last Transition Time: 2025-11-17T12:02:18Z + Message: get voting config exclusion; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetVotingConfigExclusion + Last Transition Time: 2025-11-17T12:02:32Z + Message: delete voting config exclusion; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeleteVotingConfigExclusion + Last Transition Time: 2025-11-17T12:02:32Z + Message: ScaleDown es-hscale-topology-master nodes + Observed Generation: 1 + Reason: HorizontalScaleMasterNode + Status: True + Type: HorizontalScaleMasterNode + Last Transition Time: 2025-11-17T12:02:37Z + Message: successfully updated Elasticsearch CR + Observed Generation: 1 + Reason: UpdateDatabase + Status: True + Type: UpdateDatabase + Last Transition Time: 2025-11-17T12:02:38Z + Message: Successfully Horizontally Scaled. 
+ Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 101s KubeDB Ops-manager Operator Pausing Elasticsearch demo/es-hscale-topology + Warning create es client; ConditionStatus:True 93s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning patch pet set; ConditionStatus:True 93s KubeDB Ops-manager Operator patch pet set; ConditionStatus:True + Warning get pod; ConditionStatus:True 88s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 88s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:False 88s KubeDB Ops-manager Operator get pvc; ConditionStatus:False + Warning get pod; ConditionStatus:True 83s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 83s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:True 83s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning create es client; ConditionStatus:True 78s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Normal HorizontalScaleIngestNode 78s KubeDB Ops-manager Operator ScaleDown es-hscale-topology-ingest nodes + Warning create es client; ConditionStatus:True 73s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning exclude node allocation; ConditionStatus:True 73s KubeDB Ops-manager Operator exclude node allocation; ConditionStatus:True + Warning get used data nodes; ConditionStatus:True 73s KubeDB Ops-manager Operator get used data nodes; ConditionStatus:True + Warning move data; ConditionStatus:True 73s KubeDB Ops-manager Operator move data; ConditionStatus:True + Warning patch pet set; ConditionStatus:True 73s KubeDB Ops-manager Operator patch pet set; ConditionStatus:True + Warning get pod; ConditionStatus:True 68s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 68s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:False 68s KubeDB Ops-manager Operator get pvc; ConditionStatus:False + Warning get pod; ConditionStatus:True 63s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 63s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:True 63s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning create es client; ConditionStatus:True 58s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning delete node allocation exclusion; ConditionStatus:True 58s KubeDB Ops-manager Operator delete node allocation exclusion; ConditionStatus:True + Normal HorizontalScaleDataNode 58s KubeDB Ops-manager Operator ScaleDown es-hscale-topology-data nodes + Warning create es client; ConditionStatus:True 53s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning get voting config exclusion; ConditionStatus:True 52s KubeDB Ops-manager Operator get voting config exclusion; ConditionStatus:True + Warning patch pet set; ConditionStatus:True 52s KubeDB Ops-manager Operator patch pet set; ConditionStatus:True + Warning get pod; ConditionStatus:True 48s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 48s KubeDB Ops-manager Operator delete 
pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:False 48s KubeDB Ops-manager Operator get pvc; ConditionStatus:False + Warning get pod; ConditionStatus:True 43s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning delete pvc; ConditionStatus:True 43s KubeDB Ops-manager Operator delete pvc; ConditionStatus:True + Warning get pvc; ConditionStatus:True 43s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning create es client; ConditionStatus:True 38s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning delete voting config exclusion; ConditionStatus:True 38s KubeDB Ops-manager Operator delete voting config exclusion; ConditionStatus:True + Normal HorizontalScaleMasterNode 38s KubeDB Ops-manager Operator ScaleDown es-hscale-topology-master nodes + Normal UpdateDatabase 33s KubeDB Ops-manager Operator successfully updated Elasticsearch CR + Normal ResumeDatabase 33s KubeDB Ops-manager Operator Resuming Elasticsearch demo/es-hscale-topology + Normal ResumeDatabase 33s KubeDB Ops-manager Operator Successfully resumed Elasticsearch demo/es-hscale-topology + Normal Successful 33s KubeDB Ops-manager Operator Successfully Horizontally Scaled Database +``` + +Now, we are going to verify the number of replicas this cluster has from the Elasticsearch object, number of pods the petset have, + +```bash +$ kubectl get elasticsearch -n demo es-hscale-topology -o json | jq '.spec.topology.master.replicas' +2 +$ kubectl get elasticsearch -n demo es-hscale-topology -o json | jq '.spec.topology.data.replicas' +2 +$ kubectl get elasticsearch -n demo es-hscale-topology -o json | jq '.spec.topology.ingest.replicas' +2 +``` +From all the above outputs we can see that the replicas of the Topology cluster is `2`. That means we have successfully scaled down the replicas of the Elasticsearch Topology cluster. + +Only one node can be scaling down at a time. So we are scaling down the `ingest` node. +```bash +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: esops-ingest-hscale-down-topology + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: es-hscale-topology + horizontalScaling: + topology: + ingest: 2 +``` + + + +## Scale Up Replicas + +Here, we are going to scale up the replicas of the Topology cluster to meet the desired number of replicas after scaling. + +#### Create ElasticsearchOpsRequest + +In order to scale up the replicas of the Topology cluster, we have to create a `ElasticsearchOpsRequest` CR with our desired replicas. Below is the YAML of the `ElasticsearchOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: esops-hscale-up-topology + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: es-hscale-topology + horizontalScaling: + topology: + master: 3 + ingest: 3 + data: 3 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling operation on `es-hscale-topology` cluster. +- `spec.type` specifies that we are performing `HorizontalScaling` on Elasticsearch. +- `verticalScaling.topology` - specifies the desired node resources for different type of node of the Elasticsearch running in cluster topology mode (ie. `Elasticsearch.spec.topology` is `not empty`). + - `topology.master` - specifies the desired resources for the master nodes. 
+ - `topology.data` - specifies the desired number of replicas for the data nodes. + - `topology.ingest` - specifies the desired number of replicas for the ingest nodes. + +Let's create the `ElasticsearchOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/scaling/horizontal/Elasticsearch-hscale-up-Topology.yaml +Elasticsearchopsrequest.ops.kubedb.com/esops-hscale-up-topology created +``` + +#### Verify Topology cluster replicas scaled up successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `Elasticsearch` object and related `PetSets` and `Pods`. + +Let's wait for `ElasticsearchOpsRequest` to be `Successful`. Run the following command to watch `ElasticsearchOpsRequest` CR, + +```bash +$ kubectl get Elasticsearchopsrequest -n demo +NAME TYPE STATUS AGE +esops-hscale-up-topology HorizontalScaling Successful 13m +``` + +We can see from the above output that the `ElasticsearchOpsRequest` has succeeded. If we describe the `ElasticsearchOpsRequest` we will get an overview of the steps that were followed to scale the cluster. + +```bash +$ kubectl describe Elasticsearchopsrequests -n demo esops-hscale-up-topology +Name: esops-hscale-up-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ElasticsearchOpsRequest +Metadata: + Creation Timestamp: 2025-11-17T12:12:44Z + Generation: 1 + Resource Version: 12241 + UID: 5342e779-62bc-4fe1-b91c-21b30c30cd39 +Spec: + Apply: IfReady + Database Ref: + Name: es-hscale-topology + Horizontal Scaling: + Topology: + Data: 3 + Ingest: 3 + Master: 3 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2025-11-17T12:12:44Z + Message: Elasticsearch ops request is horizontally scaling the nodes.
+ Observed Generation: 1 + Reason: HorizontalScale + Status: True + Type: HorizontalScale + Last Transition Time: 2025-11-17T12:12:52Z + Message: patch pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPetSet + Last Transition Time: 2025-11-17T12:13:58Z + Message: is node in cluster; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IsNodeInCluster + Last Transition Time: 2025-11-17T12:13:12Z + Message: ScaleUp es-hscale-topology-ingest nodes + Observed Generation: 1 + Reason: HorizontalScaleIngestNode + Status: True + Type: HorizontalScaleIngestNode + Last Transition Time: 2025-11-17T12:13:37Z + Message: ScaleUp es-hscale-topology-data nodes + Observed Generation: 1 + Reason: HorizontalScaleDataNode + Status: True + Type: HorizontalScaleDataNode + Last Transition Time: 2025-11-17T12:14:02Z + Message: ScaleUp es-hscale-topology-master nodes + Observed Generation: 1 + Reason: HorizontalScaleMasterNode + Status: True + Type: HorizontalScaleMasterNode + Last Transition Time: 2025-11-17T12:14:07Z + Message: successfully updated Elasticsearch CR + Observed Generation: 1 + Reason: UpdateDatabase + Status: True + Type: UpdateDatabase + Last Transition Time: 2025-11-17T12:14:08Z + Message: Successfully Horizontally Scaled. + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 6m15s KubeDB Ops-manager Operator Pausing Elasticsearch demo/es-hscale-topology + Warning patch pet set; ConditionStatus:True 6m7s KubeDB Ops-manager Operator patch pet set; ConditionStatus:True + Warning is node in cluster; ConditionStatus:False 6m2s KubeDB Ops-manager Operator is node in cluster; ConditionStatus:False + Warning is node in cluster; ConditionStatus:True 5m52s KubeDB Ops-manager Operator is node in cluster; ConditionStatus:True + Normal HorizontalScaleIngestNode 5m47s KubeDB Ops-manager Operator ScaleUp es-hscale-topology-ingest nodes + Warning patch pet set; ConditionStatus:True 5m42s KubeDB Ops-manager Operator patch pet set; ConditionStatus:True + Warning is node in cluster; ConditionStatus:False 5m37s KubeDB Ops-manager Operator is node in cluster; ConditionStatus:False + Warning is node in cluster; ConditionStatus:True 5m27s KubeDB Ops-manager Operator is node in cluster; ConditionStatus:True + Normal HorizontalScaleDataNode 5m22s KubeDB Ops-manager Operator ScaleUp es-hscale-topology-data nodes + Warning patch pet set; ConditionStatus:True 5m17s KubeDB Ops-manager Operator patch pet set; ConditionStatus:True + Warning is node in cluster; ConditionStatus:False 5m12s KubeDB Ops-manager Operator is node in cluster; ConditionStatus:False + Warning is node in cluster; ConditionStatus:True 5m1s KubeDB Ops-manager Operator is node in cluster; ConditionStatus:True + Normal HorizontalScaleMasterNode 4m57s KubeDB Ops-manager Operator ScaleUp es-hscale-topology-master nodes + Normal UpdateDatabase 4m52s KubeDB Ops-manager Operator successfully updated Elasticsearch CR + Normal ResumeDatabase 4m52s KubeDB Ops-manager Operator Resuming Elasticsearch demo/es-hscale-topology + Normal ResumeDatabase 4m52s KubeDB Ops-manager Operator Successfully resumed Elasticsearch demo/es-hscale-topology + Normal Successful 4m51s KubeDB Ops-manager Operator Successfully Horizontally Scaled Database +``` + +Now, we are going to verify the number of replicas this cluster has from the Elasticsearch object, number of pods 
the PetSets have, + +```bash +$ kubectl get elasticsearch -n demo es-hscale-topology -o json | jq '.spec.topology.master.replicas' +3 +$ kubectl get elasticsearch -n demo es-hscale-topology -o json | jq '.spec.topology.data.replicas' +3 +$ kubectl get elasticsearch -n demo es-hscale-topology -o json | jq '.spec.topology.ingest.replicas' +3 +``` + +From all the above outputs we can see that the replicas of each node type of the Elasticsearch Topology cluster are `3`. That means we have successfully scaled up the replicas of the Elasticsearch Topology cluster. + +It is also possible to scale up only one node type at a time. For example, the following `ElasticsearchOpsRequest` scales up only the `ingest` nodes. +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: esops-ingest-hscale-up-topology + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: es-hscale-topology + horizontalScaling: + topology: + ingest: 3 +``` + + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete es -n demo es-hscale-topology +kubectl delete Elasticsearchopsrequest -n demo esops-hscale-down-topology esops-hscale-up-topology esops-ingest-hscale-up-topology esops-ingest-hscale-down-topology +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Elasticsearch object](/docs/guides/elasticsearch/concepts/elasticsearch/index.md). +- Different Elasticsearch topology clustering modes [here](/docs/guides/elasticsearch/clustering/_index.md). +- Monitor your Elasticsearch with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/elasticsearch/monitoring/using-prometheus-operator.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/elasticsearch/scaling/vertical/_index.md b/docs/guides/elasticsearch/scaling/vertical/_index.md new file mode 100644 index 0000000000..56102a43f0 --- /dev/null +++ b/docs/guides/elasticsearch/scaling/vertical/_index.md @@ -0,0 +1,10 @@ +--- +title: Elasticsearch Vertical Scaling +menu: + docs_{{ .version }}: + identifier: es-vertical-scalling-elasticsearch + name: Vertical Scaling + parent: es-scalling-elasticsearch + weight: 20 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/elasticsearch/scaling/vertical/combined.md b/docs/guides/elasticsearch/scaling/vertical/combined.md new file mode 100644 index 0000000000..eb45ecf9c8 --- /dev/null +++ b/docs/guides/elasticsearch/scaling/vertical/combined.md @@ -0,0 +1,313 @@ +--- +title: Vertical Scaling Elasticsearch Combined Cluster +menu: + docs_{{ .version }}: + identifier: es-vertical-scaling-combined + name: Combined Cluster + parent: es-vertical-scalling-elasticsearch + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Vertical Scale Elasticsearch Combined Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to update the resources of an Elasticsearch combined cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md).
+ +- You should be familiar with the following `KubeDB` concepts: + - [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md) + - [Combined](/docs/guides/elasticsearch/clustering/combined-cluster/index.md) + - [ElasticsearchOpsRequest](/docs/guides/elasticsearch/concepts/elasticsearch-ops-request/index.md) + - [Vertical Scaling Overview](/docs/guides/elasticsearch/scaling/vertical/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/elasticsearch](/docs/examples/elasticsearch) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Apply Vertical Scaling on Combined Cluster + +Here, we are going to deploy a `Elasticsearch` combined cluster using a supported version by `KubeDB` operator. Then we are going to apply vertical scaling on it. + +### Prepare Elasticsearch Combined Cluster + +Now, we are going to deploy a `Elasticsearch` combined cluster database with version `xpack-8.11.1`. + +### Deploy Elasticsearch Combined Cluster + +In this section, we are going to deploy a Elasticsearch combined cluster. Then, in the next section we will update the resources of the database using `ElasticsearchOpsRequest` CRD. Below is the YAML of the `Elasticsearch` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: es-combined + namespace: demo +spec: + version: xpack-8.11.1 + enableSSL: true + replicas: 1 + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + deletionPolicy: WipeOut + +``` + +Let's create the `Elasticsearch` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/clustering/multi-node-es.yaml +Elasticsearch.kubedb.com/es-combined created +``` + +Now, wait until `es-combined` has status `Ready`. i.e, + +```bash +$ kubectl get elasticsearch -n demo -w +NAME VERSION STATUS AGE +es-combined xpack-8.11.1 Ready 3h17m + +``` + +Let's check the Pod containers resources, + +```bash +$ kubectl get pod -n demo es-combined-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "1536Mi" + }, + "requests": { + "cpu": "500m", + "memory": "1536Mi" + } +} + +``` +This is the default resources of the Elasticsearch combined cluster set by the `KubeDB` operator. + +We are now ready to apply the `ElasticsearchOpsRequest` CR to update the resources of this database. + +### Vertical Scaling + +Here, we are going to update the resources of the combined cluster to meet the desired resources after scaling. + +#### Create ElasticsearchOpsRequest + +In order to update the resources of the database, we have to create a `ElasticsearchOpsRequest` CR with our desired resources. Below is the YAML of the `ElasticsearchOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: vscale-combined + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: es-combined + verticalScaling: + node: + resources: + limits: + cpu: 1500m + memory: 2Gi + requests: + cpu: 600m + memory: 2Gi + +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `es-combined` cluster. 
+- `spec.type` specifies that we are performing `VerticalScaling` on Elasticsearch. +- `spec.VerticalScaling.node` specifies the desired resources after scaling. + +Let's create the `ElasticsearchOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/clustering/topology-es.yaml +``` + +#### Verify Elasticsearch Combined cluster resources updated successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the resources of `Elasticsearch` object and related `PetSets` and `Pods`. + +Let's wait for `ElasticsearchOpsRequest` to be `Successful`. Run the following command to watch `ElasticsearchOpsRequest` CR, + +```bash +$ kubectl get elasticsearchopsrequest -n demo +NAME TYPE STATUS AGE +vscale-combined VerticalScaling Successful 2m38s + +``` + +We can see from the above output that the `ElasticsearchOpsRequest` has succeeded. If we describe the `ElasticsearchOpsRequest` we will get an overview of the steps that were followed to scale the cluster. + +```bash +$ kubectl describe Elasticsearchopsrequest -n demo vscale-combined +Name: vscale-combined +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ElasticsearchOpsRequest +Metadata: + Creation Timestamp: 2025-11-19T08:55:15Z + Generation: 1 + Resource Version: 66012 + UID: bb814c10-12af-438e-9553-5565120bbdb9 +Spec: + Apply: IfReady + Database Ref: + Name: es-combined + Type: VerticalScaling + Vertical Scaling: + Node: + Resources: + Limits: + Cpu: 1500m + Memory: 2Gi + Requests: + Cpu: 600m + Memory: 2Gi +Status: + Conditions: + Last Transition Time: 2025-11-19T08:55:15Z + Message: Elasticsearch ops request is vertically scaling the nodes + Observed Generation: 1 + Reason: VerticalScale + Status: True + Type: VerticalScale + Last Transition Time: 2025-11-19T08:55:27Z + Message: successfully reconciled the Elasticsearch resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2025-11-19T08:55:32Z + Message: pod exists; ConditionStatus:True; PodName:es-combined-0 + Observed Generation: 1 + Status: True + Type: PodExists--es-combined-0 + Last Transition Time: 2025-11-19T08:55:32Z + Message: create es client; ConditionStatus:True; PodName:es-combined-0 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-combined-0 + Last Transition Time: 2025-11-19T08:55:32Z + Message: disable shard allocation; ConditionStatus:True; PodName:es-combined-0 + Observed Generation: 1 + Status: True + Type: DisableShardAllocation--es-combined-0 + Last Transition Time: 2025-11-19T08:55:32Z + Message: evict pod; ConditionStatus:True; PodName:es-combined-0 + Observed Generation: 1 + Status: True + Type: EvictPod--es-combined-0 + Last Transition Time: 2025-11-19T08:55:57Z + Message: create es client; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreateEsClient + Last Transition Time: 2025-11-19T08:55:57Z + Message: re enable shard allocation; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: ReEnableShardAllocation + Last Transition Time: 2025-11-19T08:56:02Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2025-11-19T08:56:07Z + Message: successfully updated Elasticsearch CR + Observed Generation: 1 + Reason: UpdateDatabase + Status: True + Type: UpdateDatabase + Last Transition Time: 
2025-11-19T08:56:07Z + Message: Successfully completed the modification process. + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 2m6s KubeDB Ops-manager Operator Pausing Elasticsearch demo/es-combined + Normal UpdatePetSets 114s KubeDB Ops-manager Operator successfully reconciled the Elasticsearch resources + Warning pod exists; ConditionStatus:True; PodName:es-combined-0 109s KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-combined-0 + Warning create es client; ConditionStatus:True; PodName:es-combined-0 109s KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-combined-0 + Warning disable shard allocation; ConditionStatus:True; PodName:es-combined-0 109s KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-combined-0 + Warning evict pod; ConditionStatus:True; PodName:es-combined-0 109s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-combined-0 + Warning create es client; ConditionStatus:False 104s KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 84s KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 84s KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Normal RestartNodes 79s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal UpdateDatabase 74s KubeDB Ops-manager Operator successfully updated Elasticsearch CR + Normal ResumeDatabase 74s KubeDB Ops-manager Operator Resuming Elasticsearch demo/es-combined + Normal ResumeDatabase 74s KubeDB Ops-manager Operator Successfully resumed Elasticsearch demo/es-combined + Normal Successful 74s KubeDB Ops-manager Operator Successfully Updated Database + +``` + +Now, we are going to verify from one of the Pod YAMLs whether the resources of the combined cluster have been updated to meet the desired state. Let's check, + +```bash +$ kubectl get pod -n demo es-combined-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "1500m", + "memory": "2Gi" + }, + "requests": { + "cpu": "600m", + "memory": "2Gi" + } +} + +``` + +The above output verifies that we have successfully scaled up the resources of the Elasticsearch combined cluster. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete es -n demo es-combined +kubectl delete Elasticsearchopsrequest -n demo vscale-combined +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Elasticsearch object](/docs/guides/elasticsearch/concepts/elasticsearch/index.md). +- Different Elasticsearch topology clustering modes [here](/docs/guides/elasticsearch/clustering/_index.md). +- Monitor your Elasticsearch database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/elasticsearch/monitoring/using-prometheus-operator.md). + +[//]: # (- Monitor your Elasticsearch database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/elasticsearch/monitoring/using-builtin-prometheus.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/elasticsearch/scaling/vertical/overview.md b/docs/guides/elasticsearch/scaling/vertical/overview.md new file mode 100644 index 0000000000..91d482aded --- /dev/null +++ b/docs/guides/elasticsearch/scaling/vertical/overview.md @@ -0,0 +1,54 @@ +--- +title: Elasticsearch Vertical Scaling Overview +menu: + docs_{{ .version }}: + identifier: es-vertical-scalling-overview + name: Overview + parent: es-vertical-scalling-elasticsearch + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Elasticsearch Vertical Scaling + +This guide will give an overview of how the KubeDB Ops-manager operator updates the resources (for example CPU, Memory etc.) of the `Elasticsearch`. + +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md) + - [ElasticsearchOpsRequest](/docs/guides/elasticsearch/concepts/elasticsearch-ops-request/index.md) + +## How Vertical Scaling Process Works + +The following diagram shows how KubeDB Ops-manager operator updates the resources of the `Elasticsearch`. Open the image in a new tab to see the enlarged version. + +<figure align="center"> +  <figcaption align="center">Fig: Vertical scaling process of Elasticsearch</figcaption> +</figure>
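+ +For reference, a minimal `VerticalScaling` ops request that these steps act on looks like the following sketch. The object names and resource values here are illustrative only; the combined and topology guides that follow show complete, tested examples. + +```yaml +# Minimal sketch of a VerticalScaling ElasticsearchOpsRequest (illustrative names and values) +# `node` applies to a combined cluster; topology clusters use master/data/ingest instead +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: vscale-sample + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: es-sample + verticalScaling: + node: + resources: + limits: + cpu: "1" + memory: 2Gi +```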
+ +The vertical scaling process consists of the following steps: + +1. At first, a user creates an `Elasticsearch` Custom Resource (CR). + +2. `KubeDB` Provisioner operator watches the `Elasticsearch` CR. + +3. When the operator finds an `Elasticsearch` CR, it creates the required number of `PetSets` and related necessary stuff like secrets, services, etc. + +4. Then, in order to update the resources (for example `CPU`, `Memory` etc.) of the `Elasticsearch` cluster, the user creates an `ElasticsearchOpsRequest` CR with the desired information. + +5. `KubeDB` Ops-manager operator watches the `ElasticsearchOpsRequest` CR. + +6. When it finds an `ElasticsearchOpsRequest` CR, it halts the `Elasticsearch` object which is referred from the `ElasticsearchOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `Elasticsearch` object during the vertical scaling process. + +7. Then the `KubeDB` Ops-manager operator will update the resources of the PetSet Pods to reach the desired state. + +8. After the successful update of the resources of the PetSet's replicas, the `KubeDB` Ops-manager operator updates the `Elasticsearch` object to reflect the updated state. + +9. After the successful update of the `Elasticsearch` resources, the `KubeDB` Ops-manager operator resumes the `Elasticsearch` object so that the `KubeDB` Provisioner operator resumes its usual operations. + +In the next docs, we are going to show a step-by-step guide on updating the resources of an Elasticsearch database using the `ElasticsearchOpsRequest` CRD. \ No newline at end of file diff --git a/docs/guides/elasticsearch/scaling/vertical/topology.md b/docs/guides/elasticsearch/scaling/vertical/topology.md new file mode 100644 index 0000000000..43d7b6efa9 --- /dev/null +++ b/docs/guides/elasticsearch/scaling/vertical/topology.md @@ -0,0 +1,692 @@ +--- +title: Vertical Scaling Elasticsearch Topology Cluster +menu: + docs_{{ .version }}: + identifier: es-vertical-scaling-topology + name: Topology Cluster + parent: es-vertical-scalling-elasticsearch + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Vertical Scale Elasticsearch Topology Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to update the resources of an Elasticsearch topology cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md) + - [Topology](/docs/guides/elasticsearch/clustering/topology-cluster/index.md) + - [ElasticsearchOpsRequest](/docs/guides/elasticsearch/concepts/elasticsearch-ops-request/index.md) + - [Vertical Scaling Overview](/docs/guides/elasticsearch/scaling/vertical/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/elasticsearch](/docs/examples/elasticsearch) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. 
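+ +The cluster deployed in the next section pins a specific `ElasticsearchVersion` (`xpack-8.11.1`). As an optional sanity check, assuming the KubeDB catalog is installed, you can list the versions supported by your installation before proceeding, + +```bash +# Lists the ElasticsearchVersion custom resources provided by the KubeDB catalog +$ kubectl get elasticsearchversions +```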
+ +## Apply Vertical Scaling on Topology Cluster + +Here, we are going to deploy an `Elasticsearch` topology cluster using a version supported by the `KubeDB` operator. Then we are going to apply vertical scaling on it. + +### Prepare Elasticsearch Topology Cluster + +Now, we are going to deploy an `Elasticsearch` topology cluster database with version `xpack-8.11.1`. + +### Deploy Elasticsearch Topology Cluster + +In this section, we are going to deploy an Elasticsearch topology cluster. Then, in the next section we will update the resources of the database using `ElasticsearchOpsRequest` CRD. Below is the YAML of the `Elasticsearch` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: es-cluster + namespace: demo +spec: + enableSSL: true + version: xpack-8.11.1 + storageType: Durable + topology: + master: + replicas: 3 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 3 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + ingest: + replicas: 3 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` + +Let's create the `Elasticsearch` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/scaling/Elasticsearch-topology.yaml +Elasticsearch.kubedb.com/es-cluster created +``` + +Now, wait until `es-cluster` has status `Ready`. i.e, + +```bash +$ kubectl get es -n demo -w +NAME VERSION STATUS AGE +es-cluster xpack-8.11.1 Ready 53m + +``` + +Let's check the Pod containers resources for the `data`, `ingest` and `master` nodes of the Elasticsearch topology cluster. Run the following commands to get the resources of the `data`, `ingest` and `master` containers of the Elasticsearch topology cluster, + +```bash +$ kubectl get pod -n demo es-cluster-data-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "1536Mi" + }, + "requests": { + "cpu": "500m", + "memory": "1536Mi" + } +} +$ kubectl get pod -n demo es-cluster-ingest-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "1536Mi" + }, + "requests": { + "cpu": "500m", + "memory": "1536Mi" + } +} + +$ kubectl get pod -n demo es-cluster-master-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "1536Mi" + }, + "requests": { + "cpu": "500m", + "memory": "1536Mi" + } +} + +``` +These are the default resources of the Elasticsearch topology cluster set by the `KubeDB` operator. + +We are now ready to apply the `ElasticsearchOpsRequest` CR to update the resources of this database. + +### Vertical Scaling + +Here, we are going to update the resources of the topology cluster to meet the desired resources after scaling. + +#### Create ElasticsearchOpsRequest + +In order to update the resources of the database, we have to create an `ElasticsearchOpsRequest` CR with our desired resources. 
Below is the YAML of the `ElasticsearchOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: vscale-topology + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: es-cluster + verticalScaling: + master: + resources: + limits: + cpu: 750m + memory: 800Mi + data: + resources: + requests: + cpu: 760m + memory: 900Mi + ingest: + resources: + limits: + cpu: 900m + memory: 1.2Gi + requests: + cpu: 800m + memory: 1Gi +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `es-cluster` cluster. +- `spec.type` specifies that we are performing `VerticalScaling` on Elasticsearch. +- `spec.verticalScaling.master`, `spec.verticalScaling.data` and `spec.verticalScaling.ingest` specify the desired resources for the respective node types after scaling. + +Let's create the `ElasticsearchOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/scaling/vertical/Elasticsearch-vertical-scaling-topology.yaml +Elasticsearchopsrequest.ops.kubedb.com/vscale-topology created +``` + +#### Verify Elasticsearch Topology cluster resources updated successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the resources of `Elasticsearch` object and related `PetSets` and `Pods`. + +Let's wait for `ElasticsearchOpsRequest` to be `Successful`. Run the following command to watch `ElasticsearchOpsRequest` CR, + +```bash +$ kubectl get elasticsearchopsrequest -n demo +NAME TYPE STATUS AGE +vscale-topology VerticalScaling Successful 18m + +``` + +We can see from the above output that the `ElasticsearchOpsRequest` has succeeded. If we describe the `ElasticsearchOpsRequest` we will get an overview of the steps that were followed to scale the cluster. 
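+ +Before going through the full describe output below, you can also check just the phase of the ops request with a jsonpath query (an optional shortcut; it uses the same ops request name as above), + +```bash +# Prints only the phase of the ops request, e.g. Successful +$ kubectl get elasticsearchopsrequest -n demo vscale-topology -o jsonpath='{.status.phase}' +```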
+ +```bash +$ kubectl describe Elasticsearchopsrequest -n demo vscale-topology +Name: vscale-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ElasticsearchOpsRequest +Metadata: + Creation Timestamp: 2025-11-19T11:55:28Z + Generation: 1 + Resource Version: 71748 + UID: be8b4117-90d3-4122-8705-993ce8621635 +Spec: + Apply: IfReady + Database Ref: + Name: es-cluster + Type: VerticalScaling + Vertical Scaling: + Data: + Resources: + Requests: + Cpu: 760m + Memory: 900Mi + Ingest: + Resources: + Limits: + Cpu: 900m + Memory: 1.2Gi + Requests: + Cpu: 800m + Memory: 1Gi + Master: + Resources: + Limits: + Cpu: 750m + Memory: 800Mi +Status: + Conditions: + Last Transition Time: 2025-11-19T11:55:29Z + Message: Elasticsearch ops request is vertically scaling the nodes + Observed Generation: 1 + Reason: VerticalScale + Status: True + Type: VerticalScale + Last Transition Time: 2025-11-19T11:55:50Z + Message: successfully reconciled the Elasticsearch resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2025-11-19T11:55:55Z + Message: pod exists; ConditionStatus:True; PodName:es-cluster-ingest-0 + Observed Generation: 1 + Status: True + Type: PodExists--es-cluster-ingest-0 + Last Transition Time: 2025-11-19T11:55:55Z + Message: create es client; ConditionStatus:True; PodName:es-cluster-ingest-0 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-cluster-ingest-0 + Last Transition Time: 2025-11-19T11:55:55Z + Message: disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-0 + Observed Generation: 1 + Status: True + Type: DisableShardAllocation--es-cluster-ingest-0 + Last Transition Time: 2025-11-19T11:56:50Z + Message: evict pod; ConditionStatus:True; PodName:es-cluster-ingest-0 + Observed Generation: 1 + Status: True + Type: EvictPod--es-cluster-ingest-0 + Last Transition Time: 2025-11-19T12:03:25Z + Message: create es client; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreateEsClient + Last Transition Time: 2025-11-19T11:56:35Z + Message: re enable shard allocation; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: ReEnableShardAllocation + Last Transition Time: 2025-11-19T11:56:40Z + Message: pod exists; ConditionStatus:True; PodName:es-cluster-ingest-1 + Observed Generation: 1 + Status: True + Type: PodExists--es-cluster-ingest-1 + Last Transition Time: 2025-11-19T11:56:40Z + Message: create es client; ConditionStatus:True; PodName:es-cluster-ingest-1 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-cluster-ingest-1 + Last Transition Time: 2025-11-19T11:56:40Z + Message: disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-1 + Observed Generation: 1 + Status: True + Type: DisableShardAllocation--es-cluster-ingest-1 + Last Transition Time: 2025-11-19T11:57:35Z + Message: evict pod; ConditionStatus:True; PodName:es-cluster-ingest-1 + Observed Generation: 1 + Status: True + Type: EvictPod--es-cluster-ingest-1 + Last Transition Time: 2025-11-19T11:57:25Z + Message: pod exists; ConditionStatus:True; PodName:es-cluster-ingest-2 + Observed Generation: 1 + Status: True + Type: PodExists--es-cluster-ingest-2 + Last Transition Time: 2025-11-19T11:57:25Z + Message: create es client; ConditionStatus:True; PodName:es-cluster-ingest-2 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-cluster-ingest-2 + Last Transition Time: 2025-11-19T11:57:25Z + Message: disable shard 
allocation; ConditionStatus:True; PodName:es-cluster-ingest-2 + Observed Generation: 1 + Status: True + Type: DisableShardAllocation--es-cluster-ingest-2 + Last Transition Time: 2025-11-19T11:57:25Z + Message: evict pod; ConditionStatus:True; PodName:es-cluster-ingest-2 + Observed Generation: 1 + Status: True + Type: EvictPod--es-cluster-ingest-2 + Last Transition Time: 2025-11-19T11:58:10Z + Message: pod exists; ConditionStatus:True; PodName:es-cluster-data-0 + Observed Generation: 1 + Status: True + Type: PodExists--es-cluster-data-0 + Last Transition Time: 2025-11-19T11:58:10Z + Message: create es client; ConditionStatus:True; PodName:es-cluster-data-0 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-cluster-data-0 + Last Transition Time: 2025-11-19T11:58:10Z + Message: disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-0 + Observed Generation: 1 + Status: True + Type: DisableShardAllocation--es-cluster-data-0 + Last Transition Time: 2025-11-19T11:59:10Z + Message: evict pod; ConditionStatus:True; PodName:es-cluster-data-0 + Observed Generation: 1 + Status: True + Type: EvictPod--es-cluster-data-0 + Last Transition Time: 2025-11-19T11:58:35Z + Message: pod exists; ConditionStatus:True; PodName:es-cluster-data-1 + Observed Generation: 1 + Status: True + Type: PodExists--es-cluster-data-1 + Last Transition Time: 2025-11-19T11:58:35Z + Message: create es client; ConditionStatus:True; PodName:es-cluster-data-1 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-cluster-data-1 + Last Transition Time: 2025-11-19T11:58:35Z + Message: disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-1 + Observed Generation: 1 + Status: True + Type: DisableShardAllocation--es-cluster-data-1 + Last Transition Time: 2025-11-19T11:58:35Z + Message: evict pod; ConditionStatus:True; PodName:es-cluster-data-1 + Observed Generation: 1 + Status: True + Type: EvictPod--es-cluster-data-1 + Last Transition Time: 2025-11-19T11:59:00Z + Message: pod exists; ConditionStatus:True; PodName:es-cluster-data-2 + Observed Generation: 1 + Status: True + Type: PodExists--es-cluster-data-2 + Last Transition Time: 2025-11-19T11:59:00Z + Message: create es client; ConditionStatus:True; PodName:es-cluster-data-2 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-cluster-data-2 + Last Transition Time: 2025-11-19T11:59:00Z + Message: disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-2 + Observed Generation: 1 + Status: True + Type: DisableShardAllocation--es-cluster-data-2 + Last Transition Time: 2025-11-19T11:59:00Z + Message: evict pod; ConditionStatus:True; PodName:es-cluster-data-2 + Observed Generation: 1 + Status: True + Type: EvictPod--es-cluster-data-2 + Last Transition Time: 2025-11-19T11:59:25Z + Message: pod exists; ConditionStatus:True; PodName:es-cluster-master-0 + Observed Generation: 1 + Status: True + Type: PodExists--es-cluster-master-0 + Last Transition Time: 2025-11-19T11:59:25Z + Message: create es client; ConditionStatus:True; PodName:es-cluster-master-0 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-cluster-master-0 + Last Transition Time: 2025-11-19T11:59:25Z + Message: disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-0 + Observed Generation: 1 + Status: True + Type: DisableShardAllocation--es-cluster-master-0 + Last Transition Time: 2025-11-19T12:00:25Z + Message: evict pod; ConditionStatus:True; PodName:es-cluster-master-0 + Observed Generation: 1 + Status: 
True + Type: EvictPod--es-cluster-master-0 + Last Transition Time: 2025-11-19T12:00:15Z + Message: pod exists; ConditionStatus:True; PodName:es-cluster-master-1 + Observed Generation: 1 + Status: True + Type: PodExists--es-cluster-master-1 + Last Transition Time: 2025-11-19T12:00:15Z + Message: create es client; ConditionStatus:True; PodName:es-cluster-master-1 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-cluster-master-1 + Last Transition Time: 2025-11-19T12:00:15Z + Message: disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-1 + Observed Generation: 1 + Status: True + Type: DisableShardAllocation--es-cluster-master-1 + Last Transition Time: 2025-11-19T12:00:15Z + Message: evict pod; ConditionStatus:True; PodName:es-cluster-master-1 + Observed Generation: 1 + Status: True + Type: EvictPod--es-cluster-master-1 + Last Transition Time: 2025-11-19T12:01:05Z + Message: pod exists; ConditionStatus:True; PodName:es-cluster-master-2 + Observed Generation: 1 + Status: True + Type: PodExists--es-cluster-master-2 + Last Transition Time: 2025-11-19T12:01:05Z + Message: create es client; ConditionStatus:True; PodName:es-cluster-master-2 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-cluster-master-2 + Last Transition Time: 2025-11-19T12:01:05Z + Message: disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-2 + Observed Generation: 1 + Status: True + Type: DisableShardAllocation--es-cluster-master-2 + Last Transition Time: 2025-11-19T12:01:05Z + Message: evict pod; ConditionStatus:True; PodName:es-cluster-master-2 + Observed Generation: 1 + Status: True + Type: EvictPod--es-cluster-master-2 + Last Transition Time: 2025-11-19T12:02:10Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2025-11-19T12:02:15Z + Message: successfully updated Elasticsearch CR + Observed Generation: 1 + Reason: UpdateDatabase + Status: True + Type: UpdateDatabase + Last Transition Time: 2025-11-19T12:02:15Z + Message: Successfully completed the modification process. 
+ Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 19m KubeDB Ops-manager Operator Pausing Elasticsearch demo/es-cluster + Normal UpdatePetSets 19m KubeDB Ops-manager Operator successfully reconciled the Elasticsearch resources + Warning pod exists; ConditionStatus:True; PodName:es-cluster-ingest-0 19m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-ingest-0 + Warning create es client; ConditionStatus:True; PodName:es-cluster-ingest-0 19m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-ingest-0 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-0 19m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-0 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-ingest-0 19m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-cluster-ingest-0 + Warning create es client; ConditionStatus:False 19m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Normal UpdatePetSets 19m KubeDB Ops-manager Operator successfully reconciled the Elasticsearch resources + Normal UpdatePetSets 19m KubeDB Ops-manager Operator successfully reconciled the Elasticsearch resources + Normal UpdatePetSets 18m KubeDB Ops-manager Operator successfully reconciled the Elasticsearch resources + Warning create es client; ConditionStatus:True 18m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 18m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-cluster-ingest-1 18m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-ingest-1 + Warning create es client; ConditionStatus:True; PodName:es-cluster-ingest-1 18m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-ingest-1 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-1 18m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-1 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-ingest-1 18m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-cluster-ingest-1 + Warning pod exists; ConditionStatus:True; PodName:es-cluster-ingest-0 18m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-ingest-0 + Warning create es client; ConditionStatus:True; PodName:es-cluster-ingest-0 18m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-ingest-0 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-0 18m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-0 + Warning evict pod; ConditionStatus:False; PodName:es-cluster-ingest-0 18m KubeDB Ops-manager Operator evict pod; ConditionStatus:False; PodName:es-cluster-ingest-0 + Warning create es client; ConditionStatus:False 18m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning pod exists; ConditionStatus:True; PodName:es-cluster-ingest-0 18m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-ingest-0 + Warning create es client; ConditionStatus:True; 
PodName:es-cluster-ingest-0 18m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-ingest-0 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-0 18m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-0 + Warning pod exists; ConditionStatus:True; PodName:es-cluster-ingest-0 18m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-ingest-0 + Warning create es client; ConditionStatus:True; PodName:es-cluster-ingest-0 18m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-ingest-0 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-0 18m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-0 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-ingest-0 18m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-cluster-ingest-0 + Warning create es client; ConditionStatus:False 18m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 17m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 17m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-cluster-ingest-2 17m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-ingest-2 + Warning create es client; ConditionStatus:True; PodName:es-cluster-ingest-2 17m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-ingest-2 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-2 17m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-2 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-ingest-2 17m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-cluster-ingest-2 + Warning create es client; ConditionStatus:True 17m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 17m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning create es client; ConditionStatus:False 17m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning pod exists; ConditionStatus:True; PodName:es-cluster-ingest-1 17m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-ingest-1 + Warning create es client; ConditionStatus:True; PodName:es-cluster-ingest-1 17m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-ingest-1 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-1 17m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-1 + Warning evict pod; ConditionStatus:False; PodName:es-cluster-ingest-1 17m KubeDB Ops-manager Operator evict pod; ConditionStatus:False; PodName:es-cluster-ingest-1 + Warning pod exists; ConditionStatus:True; PodName:es-cluster-ingest-1 17m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-ingest-1 + Warning create es client; ConditionStatus:True; PodName:es-cluster-ingest-1 17m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-ingest-1 + Warning 
disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-1 17m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-1 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-ingest-1 17m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-cluster-ingest-1 + Warning create es client; ConditionStatus:False 17m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 17m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 17m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-cluster-data-0 17m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-data-0 + Warning create es client; ConditionStatus:True; PodName:es-cluster-data-0 17m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-data-0 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-0 17m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-0 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-data-0 17m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-cluster-data-0 + Warning create es client; ConditionStatus:False 17m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 17m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 17m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-cluster-ingest-2 16m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-ingest-2 + Warning create es client; ConditionStatus:True; PodName:es-cluster-ingest-2 16m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-ingest-2 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-2 16m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-ingest-2 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-ingest-2 16m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-cluster-ingest-2 + Warning create es client; ConditionStatus:False 16m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 16m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 16m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-cluster-data-1 16m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-data-1 + Warning create es client; ConditionStatus:True; PodName:es-cluster-data-1 16m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-data-1 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-1 16m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-1 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-data-1 16m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; 
PodName:es-cluster-data-1 + Warning create es client; ConditionStatus:False 16m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 16m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 16m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-cluster-data-2 16m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-data-2 + Warning create es client; ConditionStatus:True; PodName:es-cluster-data-2 16m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-data-2 + Warning create es client; ConditionStatus:True 16m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-2 16m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-2 + Warning re enable shard allocation; ConditionStatus:True 16m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning evict pod; ConditionStatus:True; PodName:es-cluster-data-2 16m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-cluster-data-2 + Warning create es client; ConditionStatus:False 16m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning pod exists; ConditionStatus:True; PodName:es-cluster-data-0 16m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-data-0 + Warning create es client; ConditionStatus:True; PodName:es-cluster-data-0 16m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-data-0 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-0 16m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-0 + Warning evict pod; ConditionStatus:False; PodName:es-cluster-data-0 16m KubeDB Ops-manager Operator evict pod; ConditionStatus:False; PodName:es-cluster-data-0 + Warning pod exists; ConditionStatus:True; PodName:es-cluster-data-0 16m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-data-0 + Warning create es client; ConditionStatus:True; PodName:es-cluster-data-0 16m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-data-0 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-0 16m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-0 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-data-0 16m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-cluster-data-0 + Warning create es client; ConditionStatus:False 16m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 15m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 15m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-cluster-master-0 15m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-master-0 + Warning create es client; ConditionStatus:True; PodName:es-cluster-master-0 15m KubeDB Ops-manager Operator create es client; ConditionStatus:True; 
PodName:es-cluster-master-0 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-0 15m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-0 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-master-0 15m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-cluster-master-0 + Warning create es client; ConditionStatus:False 15m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 15m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 15m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-cluster-data-1 15m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-data-1 + Warning create es client; ConditionStatus:True; PodName:es-cluster-data-1 15m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-data-1 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-1 15m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-1 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-data-1 15m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-cluster-data-1 + Warning create es client; ConditionStatus:False 15m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 15m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 15m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-cluster-data-2 15m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-data-2 + Warning create es client; ConditionStatus:True; PodName:es-cluster-data-2 15m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-data-2 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-2 15m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-data-2 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-data-2 15m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-cluster-data-2 + Warning create es client; ConditionStatus:False 15m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 15m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 15m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-cluster-master-1 15m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-master-1 + Warning create es client; ConditionStatus:True; PodName:es-cluster-master-1 15m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-master-1 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-1 15m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-1 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-master-1 15m KubeDB Ops-manager Operator evict 
pod; ConditionStatus:True; PodName:es-cluster-master-1 + Warning create es client; ConditionStatus:True 15m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 15m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning create es client; ConditionStatus:False 14m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning pod exists; ConditionStatus:True; PodName:es-cluster-master-0 14m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-master-0 + Warning create es client; ConditionStatus:True; PodName:es-cluster-master-0 14m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-master-0 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-0 14m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-0 + Warning evict pod; ConditionStatus:False; PodName:es-cluster-master-0 14m KubeDB Ops-manager Operator evict pod; ConditionStatus:False; PodName:es-cluster-master-0 + Warning pod exists; ConditionStatus:True; PodName:es-cluster-master-0 14m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-master-0 + Warning create es client; ConditionStatus:True; PodName:es-cluster-master-0 14m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-master-0 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-0 14m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-0 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-master-0 14m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-cluster-master-0 + Warning create es client; ConditionStatus:False 14m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 14m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 14m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-cluster-master-2 14m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-master-2 + Warning create es client; ConditionStatus:True; PodName:es-cluster-master-2 14m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-master-2 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-2 14m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-2 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-master-2 14m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-cluster-master-2 + Warning create es client; ConditionStatus:False 14m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 13m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 13m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-cluster-master-1 13m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-master-1 + Warning create es client; ConditionStatus:True; PodName:es-cluster-master-1 
13m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-master-1 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-1 13m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-1 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-master-1 13m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-cluster-master-1 + Warning create es client; ConditionStatus:False 13m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 13m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 13m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Normal RestartNodes 13m KubeDB Ops-manager Operator Successfully restarted all nodes + Normal UpdateDatabase 13m KubeDB Ops-manager Operator successfully updated Elasticsearch CR + Normal ResumeDatabase 13m KubeDB Ops-manager Operator Resuming Elasticsearch demo/es-cluster + Normal ResumeDatabase 13m KubeDB Ops-manager Operator Successfully resumed Elasticsearch demo/es-cluster + Normal Successful 13m KubeDB Ops-manager Operator Successfully Updated Database + Warning create es client; ConditionStatus:True 12m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 12m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-cluster-master-2 12m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-cluster-master-2 + Warning create es client; ConditionStatus:True; PodName:es-cluster-master-2 12m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-cluster-master-2 + Warning disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-2 12m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-cluster-master-2 + Warning evict pod; ConditionStatus:True; PodName:es-cluster-master-2 12m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-cluster-master-2 + Warning create es client; ConditionStatus:False 12m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 11m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 11m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Normal RestartNodes 11m KubeDB Ops-manager Operator Successfully restarted all nodes + +``` +Now, we are going to verify from one of the Pod yaml whether the resources of the topology cluster has updated to meet up the desired state, Let's check, + +```bash +$ kubectl get pod -n demo es-cluster-ingest-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "900m", + "memory": "1288490188800m" + }, + "requests": { + "cpu": "800m", + "memory": "1Gi" + } +} +$ kubectl get pod -n demo es-cluster-data-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "900Mi" + }, + "requests": { + "cpu": "760m", + "memory": "900Mi" + } +} +$ kubectl get pod -n demo es-cluster-master-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "750m", + "memory": "800Mi" + }, + "requests": { + "cpu": "750m", + "memory": "800Mi" + } +} + +``` + +The above output 
verifies that we have successfully scaled up the resources of the Elasticsearch topology cluster.
+
+## Cleaning Up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete es -n demo es-cluster
+kubectl delete Elasticsearchopsrequest -n demo vscale-topology
+kubectl delete ns demo
+```
+
+## Next Steps
+
+- Detail concepts of [Elasticsearch object](/docs/guides/elasticsearch/concepts/elasticsearch/index.md).
+- Different Elasticsearch topology clustering modes [here](/docs/guides/elasticsearch/clustering/_index.md).
+- Monitor your Elasticsearch database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/elasticsearch/monitoring/using-prometheus-operator.md).
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/elasticsearch/update-version/_index.md b/docs/guides/elasticsearch/update-version/_index.md
new file mode 100644
index 0000000000..f06ae940b7
--- /dev/null
+++ b/docs/guides/elasticsearch/update-version/_index.md
@@ -0,0 +1,10 @@
+---
+title: Elasticsearch Update Version
+menu:
+  docs_{{ .version }}:
+    identifier: es-updateversion-elasticsearch
+    name: Update Version
+    parent: es-elasticsearch-guides
+    weight: 95
+menu_name: docs_{{ .version }}
+---
diff --git a/docs/guides/elasticsearch/update-version/elasticsearch.md b/docs/guides/elasticsearch/update-version/elasticsearch.md
new file mode 100644
index 0000000000..9c5979bcaa
--- /dev/null
+++ b/docs/guides/elasticsearch/update-version/elasticsearch.md
@@ -0,0 +1,322 @@
+---
+title: Update Version Elasticsearch
+menu:
+  docs_{{ .version }}:
+    identifier: es-updateversion-Elasticsearch
+    name: Elasticsearch
+    parent: es-updateversion-elasticsearch
+    weight: 30
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Update version of Elasticsearch
+
+This guide will show you how to use the `KubeDB` Ops-manager operator to update the version of an `Elasticsearch` Combined or Topology cluster.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
+
+- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- You should be familiar with the following `KubeDB` concepts:
+  - [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md)
+  - [ElasticsearchOpsRequest](/docs/guides/elasticsearch/concepts/elasticsearch-ops-request/index.md)
+  - [Updating Overview](/docs/guides/elasticsearch/update-version/overview.md)
+
+To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial.
+
+```bash
+$ kubectl create ns demo
+namespace/demo created
+```
+
+> **Note:** YAML files used in this tutorial are stored in the [docs/examples/elasticsearch](/docs/examples/elasticsearch) directory of the [kubedb/docs](https://github.com/kubedb/docs) repository.
+
+## Prepare Elasticsearch
+
+Now, we are going to deploy an `Elasticsearch` cluster with version `xpack-9.1.3`.
+
+### Deploy Elasticsearch
+
+In this section, we are going to deploy an Elasticsearch combined cluster. Then, in the next section, we will update the version using the `ElasticsearchOpsRequest` CRD.
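+
+Before choosing a target version, you can optionally list the Elasticsearch versions supported by your KubeDB installation from the `ElasticsearchVersion` catalog (a quick check; the available versions depend on your installed catalog):
+
+```bash
+$ kubectl get elasticsearchversions
+```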
+Below is the YAML of the `Elasticsearch` CR that we are going to create,
+
+```yaml
+apiVersion: kubedb.com/v1
+kind: Elasticsearch
+metadata:
+  name: es-demo
+  namespace: demo
+spec:
+  deletionPolicy: Delete
+  enableSSL: true
+  replicas: 3
+  storage:
+    accessModes:
+      - ReadWriteOnce
+    resources:
+      requests:
+        storage: 1Gi
+    storageClassName: standard
+  storageType: Durable
+  version: xpack-9.1.3
+```
+
+Let's create the `Elasticsearch` CR we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/update-version/elasticsearch.yaml
+elasticsearch.kubedb.com/es-demo created
+```
+
+Now, wait until `es-demo` has status `Ready`, i.e.,
+
+```bash
+$ kubectl get es -n demo
+NAME      VERSION       STATUS   AGE
+es-demo   xpack-9.1.3   Ready    9m10s
+```
+
+We are now ready to apply the `ElasticsearchOpsRequest` CR to update the version.
+
+### Update Elasticsearch Version
+
+Here, we are going to update `Elasticsearch` from `xpack-9.1.3` to `xpack-9.1.4`.
+
+#### Create ElasticsearchOpsRequest
+
+In order to update the version, we have to create an `ElasticsearchOpsRequest` CR with the desired version that is supported by `KubeDB`. Below is the YAML of the `ElasticsearchOpsRequest` CR that we are going to create,
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: ElasticsearchOpsRequest
+metadata:
+  name: es-demo-update
+  namespace: demo
+spec:
+  type: UpdateVersion
+  databaseRef:
+    name: es-demo
+  updateVersion:
+    targetVersion: xpack-9.1.4
+```
+
+Here,
+
+- `spec.databaseRef.name` specifies that we are performing this operation on the `es-demo` Elasticsearch.
+- `spec.type` specifies that we are going to perform `UpdateVersion` on our database.
+- `spec.updateVersion.targetVersion` specifies the expected version of the database, `xpack-9.1.4`.
+
+> **Note:** If you want to update a topology Elasticsearch cluster, just refer to the topology `Elasticsearch` object name in `spec.databaseRef.name`. To create a topology cluster, you can refer to the [Elasticsearch Topology](/docs/guides/elasticsearch/clustering/topology-cluster/index.md) guide.
+
+Let's create the `ElasticsearchOpsRequest` CR we have shown above,
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/update-version/update-version.yaml
+elasticsearchopsrequest.ops.kubedb.com/es-demo-update created
+```
+
+#### Verify Elasticsearch version updated successfully
+
+If everything goes well, the `KubeDB` Ops-manager operator will update the image of the `Elasticsearch` object and the related `PetSets` and `Pods`.
+
+Let's wait for the `ElasticsearchOpsRequest` to be `Successful`. Run the following command to watch the `ElasticsearchOpsRequest` CR,
+
+```bash
+$ kubectl get Elasticsearchopsrequest -n demo
+NAME             TYPE            STATUS       AGE
+es-demo-update   UpdateVersion   Successful   2m6s
+```
+
+We can see from the above output that the `ElasticsearchOpsRequest` has succeeded. If we describe the `ElasticsearchOpsRequest`, we will get an overview of the steps that were followed to update the database version.
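+
+If you only need the final phase rather than the full condition history, a jsonpath query against the ops request status works as a quick check before looking at the full describe output below (a minimal sketch using standard kubectl; it should print `Successful` once the request completes):
+
+```bash
+$ kubectl get elasticsearchopsrequest -n demo es-demo-update -o jsonpath='{.status.phase}{"\n"}'
+```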
+ +```bash +$ kubectl describe Elasticsearchopsrequest -n demo es-demo-update +Name: es-demo-update +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ElasticsearchOpsRequest +Metadata: + Creation Timestamp: 2025-11-06T05:19:15Z + Generation: 1 + Resource Version: 609353 + UID: 722d8557-a6c6-4412-87d4-61faee8a3be2 +Spec: + Apply: IfReady + Database Ref: + Name: es-demo + Type: UpdateVersion + Update Version: + Target Version: xpack-9.1.4 +Status: + Conditions: + Last Transition Time: 2025-11-06T05:19:15Z + Message: Elasticsearch ops request is updating database version + Observed Generation: 1 + Reason: UpdateVersion + Status: True + Type: UpdateVersion + Last Transition Time: 2025-11-06T05:19:18Z + Message: Successfully updated PetSets + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2025-11-06T05:19:23Z + Message: pod exists; ConditionStatus:True; PodName:es-demo-0 + Observed Generation: 1 + Status: True + Type: PodExists--es-demo-0 + Last Transition Time: 2025-11-06T05:19:23Z + Message: create es client; ConditionStatus:True; PodName:es-demo-0 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-demo-0 + Last Transition Time: 2025-11-06T05:19:23Z + Message: disable shard allocation; ConditionStatus:True; PodName:es-demo-0 + Observed Generation: 1 + Status: True + Type: DisableShardAllocation--es-demo-0 + Last Transition Time: 2025-11-06T05:19:23Z + Message: evict pod; ConditionStatus:True; PodName:es-demo-0 + Observed Generation: 1 + Status: True + Type: EvictPod--es-demo-0 + Last Transition Time: 2025-11-06T05:21:03Z + Message: create es client; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreateEsClient + Last Transition Time: 2025-11-06T05:19:58Z + Message: re enable shard allocation; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: ReEnableShardAllocation + Last Transition Time: 2025-11-06T05:20:03Z + Message: pod exists; ConditionStatus:True; PodName:es-demo-1 + Observed Generation: 1 + Status: True + Type: PodExists--es-demo-1 + Last Transition Time: 2025-11-06T05:20:03Z + Message: create es client; ConditionStatus:True; PodName:es-demo-1 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-demo-1 + Last Transition Time: 2025-11-06T05:20:03Z + Message: disable shard allocation; ConditionStatus:True; PodName:es-demo-1 + Observed Generation: 1 + Status: True + Type: DisableShardAllocation--es-demo-1 + Last Transition Time: 2025-11-06T05:20:03Z + Message: evict pod; ConditionStatus:True; PodName:es-demo-1 + Observed Generation: 1 + Status: True + Type: EvictPod--es-demo-1 + Last Transition Time: 2025-11-06T05:20:33Z + Message: pod exists; ConditionStatus:True; PodName:es-demo-2 + Observed Generation: 1 + Status: True + Type: PodExists--es-demo-2 + Last Transition Time: 2025-11-06T05:20:33Z + Message: create es client; ConditionStatus:True; PodName:es-demo-2 + Observed Generation: 1 + Status: True + Type: CreateEsClient--es-demo-2 + Last Transition Time: 2025-11-06T05:20:33Z + Message: disable shard allocation; ConditionStatus:True; PodName:es-demo-2 + Observed Generation: 1 + Status: True + Type: DisableShardAllocation--es-demo-2 + Last Transition Time: 2025-11-06T05:20:33Z + Message: evict pod; ConditionStatus:True; PodName:es-demo-2 + Observed Generation: 1 + Status: True + Type: EvictPod--es-demo-2 + Last Transition Time: 2025-11-06T05:21:08Z + Message: Successfully updated all nodes + Observed Generation: 1 + 
Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2025-11-06T05:21:08Z + Message: Successfully completed the modification process. + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 29m KubeDB Ops-manager Operator Pausing Elasticsearch demo/es-demo + Warning pod exists; ConditionStatus:True; PodName:es-demo-0 29m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-demo-0 + Warning create es client; ConditionStatus:True; PodName:es-demo-0 29m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-demo-0 + Warning disable shard allocation; ConditionStatus:True; PodName:es-demo-0 29m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-demo-0 + Warning evict pod; ConditionStatus:True; PodName:es-demo-0 29m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-demo-0 + Warning create es client; ConditionStatus:False 29m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 29m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 29m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-demo-1 29m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-demo-1 + Warning create es client; ConditionStatus:True; PodName:es-demo-1 29m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-demo-1 + Warning disable shard allocation; ConditionStatus:True; PodName:es-demo-1 29m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-demo-1 + Warning evict pod; ConditionStatus:True; PodName:es-demo-1 29m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-demo-1 + Warning create es client; ConditionStatus:False 29m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 28m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 28m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Warning pod exists; ConditionStatus:True; PodName:es-demo-2 28m KubeDB Ops-manager Operator pod exists; ConditionStatus:True; PodName:es-demo-2 + Warning create es client; ConditionStatus:True; PodName:es-demo-2 28m KubeDB Ops-manager Operator create es client; ConditionStatus:True; PodName:es-demo-2 + Warning disable shard allocation; ConditionStatus:True; PodName:es-demo-2 28m KubeDB Ops-manager Operator disable shard allocation; ConditionStatus:True; PodName:es-demo-2 + Warning evict pod; ConditionStatus:True; PodName:es-demo-2 28m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:es-demo-2 + Warning create es client; ConditionStatus:False 28m KubeDB Ops-manager Operator create es client; ConditionStatus:False + Warning create es client; ConditionStatus:True 28m KubeDB Ops-manager Operator create es client; ConditionStatus:True + Warning re enable shard allocation; ConditionStatus:True 28m KubeDB Ops-manager Operator re enable shard allocation; ConditionStatus:True + Normal RestartPods 28m KubeDB Ops-manager Operator Successfully updated all nodes + Normal 
ResumeDatabase 28m KubeDB Ops-manager Operator Resuming Elasticsearch
+  Normal   ResumeDatabase   28m   KubeDB Ops-manager Operator   Resuming Elasticsearch demo/es-demo
+  Normal   ResumeDatabase   28m   KubeDB Ops-manager Operator   Successfully resumed Elasticsearch demo/es-demo
+  Normal   Successful       28m   KubeDB Ops-manager Operator   Successfully Updated Database
+```
+
+Now, we are going to verify whether the `Elasticsearch` and the related `PetSets` and their `Pods` have the new version image. Let's check,
+
+```bash
+$ kubectl get es -n demo es-demo -o=jsonpath='{.spec.version}{"\n"}'
+xpack-9.1.4
+
+$ kubectl get petset -n demo es-demo -o=jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'
+ghcr.io/appscode-images/elastic:9.1.4@sha256:e0b89e3ace47308fa5fa842823bc622add3733e47c1067cd1e6afed2cfd317ca
+
+$ kubectl get pods -n demo es-demo-0 -o=jsonpath='{.spec.containers[0].image}{"\n"}'
+ghcr.io/appscode-images/elastic:9.1.4
+```
+
+You can see from the above that our `Elasticsearch` has been updated with the new version. So, the updateVersion process is successfully completed.
+
+> **NOTE:** If you want to update OpenSearch, you can follow the same steps using the `ElasticsearchOpsRequest` CRD. You can visit the [OpenSearch](/docs/guides/elasticsearch/quickstart/overview/opensearch) guide for more details.
+
+## Cleaning Up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete Elasticsearchopsrequest -n demo es-demo-update
+kubectl delete es -n demo es-demo
+kubectl delete ns demo
+```
+
+## Next Steps
+
+- Detail concepts of [Elasticsearch object](/docs/guides/elasticsearch/concepts/elasticsearch/index.md).
+- Detail concepts of [ElasticsearchOpsRequest object](/docs/guides/elasticsearch/concepts/elasticsearch-ops-request/index.md).
+- Detailed concept of [Elasticsearch Version](/docs/guides/elasticsearch/concepts/catalog/index.md).
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/elasticsearch/update-version/overview.md b/docs/guides/elasticsearch/update-version/overview.md
new file mode 100644
index 0000000000..2ccccf84c1
--- /dev/null
+++ b/docs/guides/elasticsearch/update-version/overview.md
@@ -0,0 +1,59 @@
+---
+title: Updating Elasticsearch Overview
+menu:
+  docs_{{ .version }}:
+    identifier: guides-Elasticsearch-updating-overview
+    name: Overview
+    parent: es-updateversion-elasticsearch
+    weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Updating Elasticsearch version
+
+This guide will give you an overview of how the KubeDB Ops-manager operator updates the version of the `Elasticsearch` database.
+
+## Before You Begin
+
+- You should be familiar with the following `KubeDB` concepts:
+  - [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md)
+  - [ElasticsearchOpsRequest](/docs/guides/elasticsearch/concepts/elasticsearch-ops-request/index.md)
+
+## How the Update Process Works
+
+The following diagram shows how the KubeDB Ops-manager operator updates the version of `Elasticsearch`. Open the image in a new tab to see the enlarged version.
+
+[//]: # (
) + +[//]: # ( Elasticsearch update Flow) + +[//]: # (
Fig: updating Process of Elasticsearch
) + +[//]: # (
)
+
+The updating process consists of the following steps:
+
+1. At first, a user creates an `Elasticsearch` CR.
+
+2. The `KubeDB-Provisioner` operator watches for the `Elasticsearch` CR.
+
+3. When it finds one, it creates a `PetSet` and related necessary stuff like secrets, services, etc.
+
+4. Then, in order to update the version of the `Elasticsearch` database, the user creates an `ElasticsearchOpsRequest` CR with the desired version.
+
+5. The `KubeDB-ops-manager` operator watches for `ElasticsearchOpsRequest`.
+
+6. When it finds one, it pauses the `Elasticsearch` object so that the `KubeDB-Provisioner` operator doesn't perform any operation on the `Elasticsearch` during the updating process.
+
+7. By looking at the target version from the `ElasticsearchOpsRequest` CR, in case of a major update the `KubeDB-ops-manager` operator performs some pre-update steps, since the old binary and library files are needed to update from the current to the target Elasticsearch version.
+
+8. Then, by looking at the target version from the `ElasticsearchOpsRequest` CR, the `KubeDB-ops-manager` operator updates the images of the `PetSet` to the new version.
+
+9. After the `PetSet` and its `Pod` images have been updated successfully, the `KubeDB-ops-manager` operator updates the image of the `Elasticsearch` object to reflect the updated cluster state.
+
+10. After the `Elasticsearch` object has been updated successfully, the `KubeDB` Ops-manager operator resumes the `Elasticsearch` object so that the `KubeDB-Provisioner` operator can resume its usual operations.
+
+In the next doc, we are going to show a step-by-step guide on updating an Elasticsearch database using the update operation.
\ No newline at end of file
diff --git a/docs/guides/elasticsearch/volume-expantion/_index.md b/docs/guides/elasticsearch/volume-expantion/_index.md
new file mode 100644
index 0000000000..c9a842b9e1
--- /dev/null
+++ b/docs/guides/elasticsearch/volume-expantion/_index.md
@@ -0,0 +1,10 @@
+---
+title: Elasticsearch Volume Expansion
+menu:
+  docs_{{ .version }}:
+    identifier: es-voulume-expansion-elasticsearch
+    name: Volume Expansion
+    parent: es-elasticsearch-guides
+    weight: 110
+menu_name: docs_{{ .version }}
+---
\ No newline at end of file
diff --git a/docs/guides/elasticsearch/volume-expantion/combined.md b/docs/guides/elasticsearch/volume-expantion/combined.md
new file mode 100644
index 0000000000..0a09fe8a51
--- /dev/null
+++ b/docs/guides/elasticsearch/volume-expantion/combined.md
@@ -0,0 +1,365 @@
+---
+title: Elasticsearch Combined Volume Expansion
+menu:
+  docs_{{ .version }}:
+    identifier: es-volume-expansion-combined
+    name: Combined
+    parent: es-voulume-expansion-elasticsearch
+    weight: 20
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Elasticsearch Combined Volume Expansion
+
+This guide will show you how to use the `KubeDB` Ops-manager operator to expand the volume of an Elasticsearch Combined Cluster.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster.
+
+- You must have a `StorageClass` that supports volume expansion.
+
+- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- You should be familiar with the following `KubeDB` concepts:
+  - [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md)
+  - [Combined](/docs/guides/elasticsearch/clustering/combined-cluster/index.md)
+  - [ElasticsearchOpsRequest](/docs/guides/elasticsearch/concepts/elasticsearch-ops-request/index.md)
+  - [Volume Expansion Overview](/docs/guides/elasticsearch/volume-expansion/overview.md)
+
+To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial.
+
+```bash
+$ kubectl create ns demo
+namespace/demo created
+```
+
+> Note: The yaml files used in this tutorial are stored in the [docs/examples/elasticsearch](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/elasticsearch) folder of the GitHub repository [kubedb/docs](https://github.com/kubedb/docs).
+
+## Expand Volume of Combined Elasticsearch Cluster
+
+Here, we are going to deploy an `Elasticsearch` combined cluster using a version supported by the `KubeDB` operator. Then we are going to apply an `ElasticsearchOpsRequest` to expand its volume.
+
+### Prepare Elasticsearch Combined Cluster
+
+At first, verify that your cluster has a storage class that supports volume expansion. Let's check,
+
+```bash
+$ kubectl get storageclass
+NAME                 PROVISIONER            RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
+standard (default)   kubernetes.io/gce-pd   Delete          Immediate           true                   2m49s
+```
+
+We can see from the output that the `standard` storage class has the `ALLOWVOLUMEEXPANSION` field set to true. So, this storage class supports volume expansion. We can use it.
+
+Now, we are going to deploy an `Elasticsearch` combined cluster with version `xpack-8.11.1`.
+
+### Deploy Elasticsearch
+
+In this section, we are going to deploy an Elasticsearch combined cluster with a 1Gi volume. Then, in the next section we will expand its volume to 4Gi using the `ElasticsearchOpsRequest` CRD. Below is the YAML of the `Elasticsearch` CR that we are going to create,
+
+```yaml
+apiVersion: kubedb.com/v1
+kind: Elasticsearch
+metadata:
+  name: es-combined
+  namespace: demo
+spec:
+  version: xpack-8.11.1
+  enableSSL: true
+  replicas: 1
+  storageType: Durable
+  storage:
+    storageClassName: "standard"
+    accessModes:
+      - ReadWriteOnce
+    resources:
+      requests:
+        storage: 1Gi
+  deletionPolicy: WipeOut
+```
+
+Let's create the `Elasticsearch` CR we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/clustering/multi-nodes-es.yaml
+elasticsearch.kubedb.com/es-combined created
+```
+
+Now, wait until `es-combined` has status `Ready`, i.e.,
+
+```bash
+$ kubectl get es -n demo -w
+NAME          VERSION        STATUS   AGE
+es-combined   xpack-8.11.1   Ready    75s
+```
+
+Let's check the volume size from the petset, and from the persistent volume,
+
+```bash
+$ kubectl get petset -n demo es-combined -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage'
+"1Gi"
+$ kubectl get pv -n demo
+NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                     STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
+pvc-edeeff75-9823-4aeb-9189-37adad567ec7   1Gi        RWO            Delete           Bound    demo/data-es-combined-0   longhorn                                       2m21s
+```
+
+You can see the petset has 1Gi storage, and the capacity of the persistent volume is also 1Gi.
+
+We are now ready to apply the `ElasticsearchOpsRequest` CR to expand the volume of this database.
+
+### Volume Expansion
+
+Here, we are going to expand the volume of the Elasticsearch combined cluster.
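+
+Before applying the ops request, it can be handy to record the current PVC request so you can compare it after the expansion. This is an optional check; the PVC name `data-es-combined-0` matches the claim shown in the `kubectl get pv` output above:
+
+```bash
+$ kubectl get pvc -n demo data-es-combined-0 -o jsonpath='{.spec.resources.requests.storage}{"\n"}'
+1Gi
+```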
+
+#### Create ElasticsearchOpsRequest
+
+In order to expand the volume of the database, we have to create an `ElasticsearchOpsRequest` CR with our desired volume size. Below is the YAML of the `ElasticsearchOpsRequest` CR that we are going to create,
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: ElasticsearchOpsRequest
+metadata:
+  name: es-volume-expansion-combined
+  namespace: demo
+spec:
+  type: VolumeExpansion
+  databaseRef:
+    name: es-combined
+  volumeExpansion:
+    mode: "Online"
+    node: 4Gi
+```
+
+Here,
+
+- `spec.databaseRef.name` specifies that we are performing the volume expansion operation on `es-combined`.
+- `spec.type` specifies that we are performing `VolumeExpansion` on our database.
+- `spec.volumeExpansion.node` specifies the desired volume size.
+
+Let's create the `ElasticsearchOpsRequest` CR we have shown above,
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/volume-expansion/elasticsearch-volume-expansion-combined.yaml
+elasticsearchopsrequest.ops.kubedb.com/es-volume-expansion-combined created
+```
+
+#### Verify Elasticsearch Combined volume expanded successfully
+
+If everything goes well, the `KubeDB` Ops-manager operator will update the volume size of the `Elasticsearch` object and the related `PetSets` and `Persistent Volumes`.
+
+Let's wait for the `ElasticsearchOpsRequest` to be `Successful`. Run the following command to watch the `ElasticsearchOpsRequest` CR,
+
+```bash
+$ kubectl get Elasticsearchopsrequest -n demo
+NAME                           TYPE              STATUS       AGE
+es-volume-expansion-combined   VolumeExpansion   Successful   2m4s
+```
+
+We can see from the above output that the `ElasticsearchOpsRequest` has succeeded. If we describe the `ElasticsearchOpsRequest`, we will get an overview of the steps that were followed to expand the volume of the database.
+
+```bash
+$ kubectl describe Elasticsearchopsrequest -n demo es-volume-expansion-combined
+Name:         es-volume-expansion-combined
+Namespace:    demo
+Labels:       <none>
+Annotations:  <none>
+API Version:  ops.kubedb.com/v1alpha1
+Kind:         ElasticsearchOpsRequest
+Metadata:
+  Creation Timestamp:  2025-11-20T12:19:05Z
+  Generation:          1
+  Resource Version:    127891
+  UID:                 4199c88c-d3c4-44d0-8084-efdaa49b9c03
+Spec:
+  Apply:  IfReady
+  Database Ref:
+    Name:  es-combined
+  Type:    VolumeExpansion
+  Volume Expansion:
+    Mode:  Offline
+    Node:  4Gi
+Status:
+  Conditions:
+    Last Transition Time:  2025-11-20T12:19:05Z
+    Message:               Elasticsearch ops request is expanding volume of the Elasticsearch nodes.
+ Observed Generation: 1 + Reason: VolumeExpansion + Status: True + Type: VolumeExpansion + Last Transition Time: 2025-11-20T12:19:13Z + Message: get pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPetSet + Last Transition Time: 2025-11-20T12:19:13Z + Message: delete pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: deletePetSet + Last Transition Time: 2025-11-20T12:19:23Z + Message: successfully deleted the PetSets with orphan propagation policy + Observed Generation: 1 + Reason: OrphanPetSetPods + Status: True + Type: OrphanPetSetPods + Last Transition Time: 2025-11-20T12:19:28Z + Message: get pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPod + Last Transition Time: 2025-11-20T12:19:28Z + Message: patch opsrequest; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchOpsrequest + Last Transition Time: 2025-11-20T12:20:23Z + Message: create db client; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreateDbClient + Last Transition Time: 2025-11-20T12:19:28Z + Message: db operation; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DbOperation + Last Transition Time: 2025-11-20T12:19:28Z + Message: delete pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePod + Last Transition Time: 2025-11-20T12:19:33Z + Message: get pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPvc + Last Transition Time: 2025-11-20T12:19:33Z + Message: patch pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPvc + Last Transition Time: 2025-11-20T12:19:58Z + Message: compare storage; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CompareStorage + Last Transition Time: 2025-11-20T12:19:58Z + Message: create pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreatePod + Last Transition Time: 2025-11-20T12:20:28Z + Message: successfully updated combined node PVC sizes + Observed Generation: 1 + Reason: VolumeExpansionCombinedNode + Status: True + Type: VolumeExpansionCombinedNode + Last Transition Time: 2025-11-20T12:20:37Z + Message: successfully reconciled the Elasticsearch resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2025-11-20T12:20:42Z + Message: PetSet is recreated + Observed Generation: 1 + Reason: ReadyPetSets + Status: True + Type: ReadyPetSets + Last Transition Time: 2025-11-20T12:20:48Z + Message: successfully updated Elasticsearch CR + Observed Generation: 1 + Reason: UpdateDatabase + Status: True + Type: UpdateDatabase + Last Transition Time: 2025-11-20T12:20:48Z + Message: Successfully completed the modification process. 
+ Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 114s KubeDB Ops-manager Operator Pausing Elasticsearch demo/es-combined + Warning get pet set; ConditionStatus:True 106s KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Warning delete pet set; ConditionStatus:True 106s KubeDB Ops-manager Operator delete pet set; ConditionStatus:True + Warning get pet set; ConditionStatus:True 101s KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Normal OrphanPetSetPods 96s KubeDB Ops-manager Operator successfully deleted the PetSets with orphan propagation policy + Warning get pod; ConditionStatus:True 91s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 91s KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning create db client; ConditionStatus:True 91s KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning db operation; ConditionStatus:True 91s KubeDB Ops-manager Operator db operation; ConditionStatus:True + Warning delete pod; ConditionStatus:True 91s KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 86s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 86s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning patch pvc; ConditionStatus:True 86s KubeDB Ops-manager Operator patch pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 86s KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 81s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 81s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 76s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 76s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 71s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 71s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 66s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 66s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 61s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 61s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 61s KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 61s KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 61s KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning get pod; ConditionStatus:True 56s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:False 56s KubeDB Ops-manager Operator create db client; ConditionStatus:False + Warning get pod; ConditionStatus:True 51s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 46s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get 
pod; ConditionStatus:True 41s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 36s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:True 36s KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning db operation; ConditionStatus:True 36s KubeDB Ops-manager Operator db operation; ConditionStatus:True + Normal VolumeExpansionCombinedNode 31s KubeDB Ops-manager Operator successfully updated combined node PVC sizes + Normal UpdatePetSets 22s KubeDB Ops-manager Operator successfully reconciled the Elasticsearch resources + Warning get pet set; ConditionStatus:True 17s KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Normal ReadyPetSets 17s KubeDB Ops-manager Operator PetSet is recreated + Normal UpdateDatabase 11s KubeDB Ops-manager Operator successfully updated Elasticsearch CR + Normal ResumeDatabase 11s KubeDB Ops-manager Operator Resuming Elasticsearch demo/es-combined + Normal ResumeDatabase 11s KubeDB Ops-manager Operator Successfully resumed Elasticsearch demo/es-combined + Normal Successful 11s KubeDB Ops-manager Operator Successfully Updated Database + +``` + +Now, we are going to verify from the `Petset`, and the `Persistent Volumes` whether the volume of the database has expanded to meet the desired state, Let's check, + +```bash +$ kubectl get petset -n demo es-combined -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"4Gi" + +$ kubectl get pv -n demo +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE +pvc-edeeff75-9823-4aeb-9189-37adad567ec7 4Gi RWO Delete Bound demo/data-es-combined-0 longhorn 13m +``` + +The above output verifies that we have successfully expanded the volume of the Elasticsearch. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete Elasticsearchopsrequest -n demo es-volume-expansion-combined +kubectl delete es -n demo es-combined +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Elasticsearch object](/docs/guides/elasticsearch/concepts/elasticsearch.md). +- Different Elasticsearch topology clustering modes [here](/docs/guides/elasticsearch/clustering/topology-cluster/index.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/elasticsearch/volume-expantion/overview.md b/docs/guides/elasticsearch/volume-expantion/overview.md new file mode 100644 index 0000000000..089bd91cc9 --- /dev/null +++ b/docs/guides/elasticsearch/volume-expantion/overview.md @@ -0,0 +1,56 @@ +--- +title: Elasticsearch Volume Expansion Overview +menu: + docs_{{ .version }}: + identifier: es-volume-expansion-overview + name: Overview + parent: es-voulume-expansion-elasticsearch + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Elasticsearch Volume Expansion + +This guide will give an overview on how KubeDB Ops-manager operator expand the volume of various component of `Elasticsearch` like:. (Combined and Topology). 
+
+## Before You Begin
+
+- You should be familiar with the following `KubeDB` concepts:
+  - [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md)
+  - [ElasticsearchOpsRequest](/docs/guides/elasticsearch/concepts/elasticsearch-ops-request/index.md)
+
+## How Volume Expansion Process Works
+
+The following diagram shows how the KubeDB Ops-manager operator expands the volumes of the `Elasticsearch` database components. Open the image in a new tab to see the enlarged version.
+
+<figure align="center">
+  <figcaption align="center">Fig: Volume Expansion process of Elasticsearch</figcaption>
+</figure>
+
+The Volume Expansion process consists of the following steps:
+
+1. At first, a user creates an `Elasticsearch` Custom Resource (CR).
+
+2. The `KubeDB` Provisioner operator watches the `Elasticsearch` CR.
+
+3. When the operator finds an `Elasticsearch` CR, it creates the required number of `PetSets` and related necessary stuff like secrets, services, etc.
+
+4. Each PetSet creates a Persistent Volume according to the Volume Claim Template provided in the petset configuration. This Persistent Volume will be expanded by the `KubeDB` Ops-manager operator.
+
+5. Then, in order to expand the volume of the various components of the `Elasticsearch` (i.e. the combined nodes, or the master, data, and ingest nodes of a topology cluster), the user creates an `ElasticsearchOpsRequest` CR with the desired information.
+
+6. The `KubeDB` Ops-manager operator watches the `ElasticsearchOpsRequest` CR.
+
+7. When it finds an `ElasticsearchOpsRequest` CR, it halts the `Elasticsearch` object which is referred from the `ElasticsearchOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `Elasticsearch` object during the volume expansion process.
+
+8. Then the `KubeDB` Ops-manager operator will expand the persistent volume to reach the expected size defined in the `ElasticsearchOpsRequest` CR.
+
+9. After the successful Volume Expansion of the related PetSet Pods, the `KubeDB` Ops-manager operator updates the new volume size in the `Elasticsearch` object to reflect the updated state.
+
+10. After the successful Volume Expansion of the `Elasticsearch` components, the `KubeDB` Ops-manager operator resumes the `Elasticsearch` object so that the `KubeDB` Provisioner operator resumes its usual operations.
+
+In the next docs, we are going to show a step-by-step guide on Volume Expansion of various Elasticsearch database components using the `ElasticsearchOpsRequest` CRD.
diff --git a/docs/guides/elasticsearch/volume-expantion/topology.md b/docs/guides/elasticsearch/volume-expantion/topology.md
new file mode 100644
index 0000000000..45cf5ab153
--- /dev/null
+++ b/docs/guides/elasticsearch/volume-expantion/topology.md
@@ -0,0 +1,765 @@
+---
+title: Elasticsearch Topology Volume Expansion
+menu:
+  docs_{{ .version }}:
+    identifier: es-volume-expansion-topology
+    name: Topology
+    parent: es-voulume-expansion-elasticsearch
+    weight: 30
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Elasticsearch Topology Volume Expansion
+
+This guide will show you how to use the `KubeDB` Ops-manager operator to expand the volume of an Elasticsearch Topology Cluster.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster.
+
+- You must have a `StorageClass` that supports volume expansion.
+
+- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- You should be familiar with the following `KubeDB` concepts:
+  - [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md)
+  - [Topology](/docs/guides/elasticsearch/clustering/topology-cluster/_index.md)
+  - [ElasticsearchOpsRequest](/docs/guides/elasticsearch/concepts/elasticsearch-ops-request/index.md)
+  - [Volume Expansion Overview](/docs/guides/elasticsearch/volume-expansion/overview.md)
+
+To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial.
+
+```bash
+$ kubectl create ns demo
+namespace/demo created
+```
+
+> Note: The yaml files used in this tutorial are stored in the [docs/examples/elasticsearch](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/elasticsearch) folder of the GitHub repository [kubedb/docs](https://github.com/kubedb/docs).
+
+## Expand Volume of Topology Elasticsearch Cluster
+
+Here, we are going to deploy an `Elasticsearch` topology cluster using a version supported by the `KubeDB` operator. Then we are going to apply an `ElasticsearchOpsRequest` to expand its volume.
+
+### Prepare Elasticsearch Topology Cluster
+
+At first, verify that your cluster has a storage class that supports volume expansion. Let's check,
+
+```bash
+$ kubectl get storageclass
+NAME                 PROVISIONER            RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
+standard (default)   kubernetes.io/gce-pd   Delete          Immediate           true                   2m49s
+```
+
+We can see from the output that the `standard` storage class has the `ALLOWVOLUMEEXPANSION` field set to true. So, this storage class supports volume expansion. We can use it.
+
+Now, we are going to deploy an `Elasticsearch` topology cluster with version `xpack-8.11.1`.
+
+### Deploy Elasticsearch
+
+In this section, we are going to deploy an Elasticsearch topology cluster with master, data, and ingest nodes, each using a 1Gi volume. Then, in the next section we will expand the volumes to 5Gi for the master and data nodes and 4Gi for the ingest nodes using the `ElasticsearchOpsRequest` CRD. Below is the YAML of the `Elasticsearch` CR that we are going to create,
+
+```yaml
+apiVersion: kubedb.com/v1
+kind: Elasticsearch
+metadata:
+  name: es-cluster
+  namespace: demo
+spec:
+  enableSSL: true
+  version: xpack-8.11.1
+  storageType: Durable
+  topology:
+    master:
+      replicas: 3
+      storage:
+        storageClassName: "standard"
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+    data:
+      replicas: 3
+      storage:
+        storageClassName: "standard"
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+    ingest:
+      replicas: 3
+      storage:
+        storageClassName: "standard"
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 1Gi
+```
+
+Let's create the `Elasticsearch` CR we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/clustering/topology-es.yaml
+elasticsearch.kubedb.com/es-cluster created
+```
+
+Now, wait until `es-cluster` has status `Ready`.
i.e, + +```bash +$ kubectl get es -n demo +NAME VERSION STATUS AGE +es-cluster xpack-8.11.1 Ready 22h + +``` + +Let's check volume size from petset, and from the persistent volume, + +```bash +$ kubectl get petset -n demo es-cluster-data -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"1Gi" +$ kubectl get petset -n demo es-cluster-master -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"1Gi" +$ kubectl get petset -n demo es-cluster-ingest -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"1Gi" +$ kubectl get pv -n demo +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE +pvc-11b48c6e-d996-45a7-8ba2-f8d71a655912 1Gi RWO Delete Bound demo/data-es-cluster-ingest-2 standard 22h +pvc-1904104c-bbf2-4754-838a-8a647b2bd23e 1Gi RWO Delete Bound demo/data-es-cluster-data-2 standard 22h +pvc-19aa694a-29c0-43d9-a495-c84c77df2dd8 1Gi RWO Delete Bound demo/data-es-cluster-master-0 standard 22h +pvc-33702b18-7e98-41b7-9b19-73762cb4f86a 1Gi RWO Delete Bound demo/data-es-cluster-master-1 standard 22h +pvc-8604968f-f433-4931-82bc-8d240d6f52d8 1Gi RWO Delete Bound demo/data-es-cluster-data-0 standard 22h +pvc-ae5ccc43-d078-4816-a553-8a3cd1f674be 1Gi RWO Delete Bound demo/data-es-cluster-ingest-0 standard 22h +pvc-b4225042-c69f-41df-99b2-1b3191057a85 1Gi RWO Delete Bound demo/data-es-cluster-data-1 standard 22h +pvc-bd4b7d5a-8494-4ee2-a25c-697a6f23cb79 1Gi RWO Delete Bound demo/data-es-cluster-ingest-1 standard 22h +pvc-c9057b3b-4412-467f-8ae5-f6414e0059c3 1Gi RWO Delete Bound demo/data-es-cluster-master-2 standard 22h +``` + +You can see the petsets have 1Gi storage, and the capacity of all the persistent volumes are also 1Gi. + +We are now ready to apply the `ElasticsearchOpsRequest` CR to expand the volume of this database. + +### Volume Expansion + +Here, we are going to expand the volume of the Elasticsearch topology cluster. + +#### Create ElasticsearchOpsRequest + +In order to expand the volume of the database, we have to create a `ElasticsearchOpsRequest` CR with our desired volume size. Below is the YAML of the `ElasticsearchOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ElasticsearchOpsRequest +metadata: + name: volume-expansion-topology + namespace: demo +spec: + type: VolumeExpansion + databaseRef: + name: es-cluster + volumeExpansion: + mode: "Online" + master: 5Gi + data: 5Gi + ingest: 4Gi +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing volume expansion operation on `es-cluster`. +- `spec.type` specifies that we are performing `VolumeExpansion` on our database. +- `spec.volumeExpansion.data` specifies the desired volume size for data node. +- `spec.volumeExpansion.master` specifies the desired volume size for master node. +- `spec.volumeExpansion.ingest` specifies the desired volume size for ingest node. + +> If you want to expand the volume of only one node, you can specify the desired volume size for that node only. 
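+
+For example, a request that expands only the data nodes could look like the following sketch; the name `es-volume-expansion-data-only` is illustrative and not part of this tutorial, which continues with the three-node request shown above:
+
+```bash
+$ kubectl apply -f - <<EOF
+apiVersion: ops.kubedb.com/v1alpha1
+kind: ElasticsearchOpsRequest
+metadata:
+  name: es-volume-expansion-data-only
+  namespace: demo
+spec:
+  type: VolumeExpansion
+  databaseRef:
+    name: es-cluster
+  volumeExpansion:
+    mode: "Online"
+    data: 5Gi
+EOF
+```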
+ +Let's create the `ElasticsearchOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/volume-expansion/elasticsearch-volume-expansion-topology.yaml +Elasticsearchopsrequest.ops.kubedb.com/volume-expansion-topology created +``` + +#### Verify Elasticsearch Topology volume expanded successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the volume size of `Elasticsearch` object and related `PetSets` and `Persistent Volumes`. + +Let's wait for `ElasticsearchOpsRequest` to be `Successful`. Run the following command to watch `ElasticsearchOpsRequest` CR, + +```bash +$ kubectl get Elasticsearchopsrequest -n demo +NAME TYPE STATUS AGE +volume-expansion-topology VolumeExpansion Successful 44m + +``` + +We can see from the above output that the `ElasticsearchOpsRequest` has succeeded. If we describe the `ElasticsearchOpsRequest` we will get an overview of the steps that were followed to expand the volume of Elasticsearch. + +```bash +$ kubectl describe Elasticsearchopsrequest -n demo volume-expansion-topology +Name: volume-expansion-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ElasticsearchOpsRequest +Metadata: + Creation Timestamp: 2025-11-20T10:07:17Z + Generation: 1 + Resource Version: 115931 + UID: 38107c4f-4249-4597-b8b4-06a445891872 +Spec: + Apply: IfReady + Database Ref: + Name: es-cluster + Type: VolumeExpansion + Volume Expansion: + Data: 5Gi + Ingest: 4Gi + Master: 5Gi + Mode: online +Status: + Conditions: + Last Transition Time: 2025-11-20T10:07:17Z + Message: Elasticsearch ops request is expanding volume of the Elasticsearch nodes. + Observed Generation: 1 + Reason: VolumeExpansion + Status: True + Type: VolumeExpansion + Last Transition Time: 2025-11-20T10:07:25Z + Message: get pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPetSet + Last Transition Time: 2025-11-20T10:07:25Z + Message: delete pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: deletePetSet + Last Transition Time: 2025-11-20T10:07:55Z + Message: successfully deleted the PetSets with orphan propagation policy + Observed Generation: 1 + Reason: OrphanPetSetPods + Status: True + Type: OrphanPetSetPods + Last Transition Time: 2025-11-20T10:08:00Z + Message: get pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPod + Last Transition Time: 2025-11-20T10:08:00Z + Message: patch opsrequest; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchOpsrequest + Last Transition Time: 2025-11-20T10:20:20Z + Message: create db client; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreateDbClient + Last Transition Time: 2025-11-20T10:08:00Z + Message: delete pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DeletePod + Last Transition Time: 2025-11-20T10:08:05Z + Message: get pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPvc + Last Transition Time: 2025-11-20T10:19:55Z + Message: compare storage; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CompareStorage + Last Transition Time: 2025-11-20T10:11:05Z + Message: create pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreatePod + Last Transition Time: 2025-11-20T10:11:40Z + Message: patch pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPvc + Last 
Transition Time: 2025-11-20T10:13:55Z + Message: successfully updated ingest node PVC sizes + Observed Generation: 1 + Reason: VolumeExpansionIngestNode + Status: True + Type: VolumeExpansionIngestNode + Last Transition Time: 2025-11-20T10:14:00Z + Message: db operation; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: DbOperation + Last Transition Time: 2025-11-20T10:17:15Z + Message: successfully updated data node PVC sizes + Observed Generation: 1 + Reason: VolumeExpansionDataNode + Status: True + Type: VolumeExpansionDataNode + Last Transition Time: 2025-11-20T10:20:25Z + Message: successfully updated master node PVC sizes + Observed Generation: 1 + Reason: VolumeExpansionMasterNode + Status: True + Type: VolumeExpansionMasterNode + Last Transition Time: 2025-11-20T10:21:02Z + Message: successfully reconciled the Elasticsearch resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2025-11-20T10:21:07Z + Message: PetSet is recreated + Observed Generation: 1 + Reason: ReadyPetSets + Status: True + Type: ReadyPetSets + Last Transition Time: 2025-11-20T10:21:12Z + Message: successfully updated Elasticsearch CR + Observed Generation: 1 + Reason: UpdateDatabase + Status: True + Type: UpdateDatabase + Last Transition Time: 2025-11-20T10:21:12Z + Message: Successfully completed the modification process. + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 45m KubeDB Ops-manager Operator Pausing Elasticsearch demo/es-cluster + Warning get pet set; ConditionStatus:True 45m KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Warning delete pet set; ConditionStatus:True 45m KubeDB Ops-manager Operator delete pet set; ConditionStatus:True + Warning get pet set; ConditionStatus:True 44m KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Warning get pet set; ConditionStatus:True 44m KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Warning delete pet set; ConditionStatus:True 44m KubeDB Ops-manager Operator delete pet set; ConditionStatus:True + Warning get pet set; ConditionStatus:True 44m KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Warning get pet set; ConditionStatus:True 44m KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Warning delete pet set; ConditionStatus:True 44m KubeDB Ops-manager Operator delete pet set; ConditionStatus:True + Warning get pet set; ConditionStatus:True 44m KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Normal OrphanPetSetPods 44m KubeDB Ops-manager Operator successfully deleted the PetSets with orphan propagation policy + Warning get pod; ConditionStatus:True 44m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 44m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning create db client; ConditionStatus:True 44m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning delete pod; ConditionStatus:True 44m KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 44m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 44m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 44m KubeDB Ops-manager 
Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 44m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 44m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 44m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 44m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 44m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 44m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 44m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 44m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 43m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 43m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 43m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 43m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 43m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 43m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 43m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 43m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 43m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 43m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 43m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 43m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 43m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 43m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 43m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 43m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 43m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 43m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 43m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 43m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 43m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 43m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 43m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 43m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 42m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 42m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; 
ConditionStatus:True 42m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 42m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 42m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 42m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 42m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 42m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 42m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 42m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 42m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 42m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 42m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 42m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 42m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 42m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 42m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 42m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 42m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 42m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 42m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 42m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 42m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 42m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 41m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 41m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 41m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 41m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 41m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 41m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 41m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 41m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 41m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 41m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 41m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 41m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 41m KubeDB Ops-manager Operator get pod; ConditionStatus:True + 
Warning get pvc; ConditionStatus:True 41m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 41m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 41m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 41m KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 41m KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 41m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning get pod; ConditionStatus:True 41m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:False 41m KubeDB Ops-manager Operator create db client; ConditionStatus:False + Warning get pod; ConditionStatus:True 41m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 41m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 41m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 40m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:True 40m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning get pod; ConditionStatus:True 40m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 40m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning create db client; ConditionStatus:True 40m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning delete pod; ConditionStatus:True 40m KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 40m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 40m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning patch pvc; ConditionStatus:True 40m KubeDB Ops-manager Operator patch pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 40m KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 40m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 40m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 40m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 40m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 40m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 40m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 40m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 40m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 40m KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 40m KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 40m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning get pod; ConditionStatus:True 40m KubeDB Ops-manager Operator get pod; ConditionStatus:True + 
Warning create db client; ConditionStatus:False 40m KubeDB Ops-manager Operator create db client; ConditionStatus:False + Warning get pod; ConditionStatus:True 40m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 40m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 40m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 40m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 39m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 39m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:True 39m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning get pod; ConditionStatus:True 39m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 39m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning create db client; ConditionStatus:True 39m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning delete pod; ConditionStatus:True 39m KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 39m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 39m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning patch pvc; ConditionStatus:True 39m KubeDB Ops-manager Operator patch pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 39m KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 39m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 39m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 39m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 39m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 39m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 39m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 39m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 39m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 39m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 39m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 39m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 39m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 39m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 39m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 39m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 39m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 39m KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 39m KubeDB Ops-manager 
Operator create pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 39m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning get pod; ConditionStatus:True 38m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:False 38m KubeDB Ops-manager Operator create db client; ConditionStatus:False + Warning get pod; ConditionStatus:True 38m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 38m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 38m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 38m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:True 38m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Normal VolumeExpansionIngestNode 38m KubeDB Ops-manager Operator successfully updated ingest node PVC sizes + Warning get pod; ConditionStatus:True 38m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 38m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning create db client; ConditionStatus:True 38m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning db operation; ConditionStatus:True 38m KubeDB Ops-manager Operator db operation; ConditionStatus:True + Warning delete pod; ConditionStatus:True 38m KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 38m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 38m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning patch pvc; ConditionStatus:True 38m KubeDB Ops-manager Operator patch pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 38m KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 38m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 38m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 38m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 38m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 38m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 38m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 38m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 38m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 38m KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 38m KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 38m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning get pod; ConditionStatus:True 37m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:False 37m KubeDB Ops-manager Operator create db client; ConditionStatus:False + Warning get pod; ConditionStatus:True 37m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 37m KubeDB 
Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 37m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 37m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:True 37m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning db operation; ConditionStatus:True 37m KubeDB Ops-manager Operator db operation; ConditionStatus:True + Warning get pod; ConditionStatus:True 37m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 37m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning create db client; ConditionStatus:True 37m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning db operation; ConditionStatus:True 37m KubeDB Ops-manager Operator db operation; ConditionStatus:True + Warning delete pod; ConditionStatus:True 37m KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 37m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 37m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning patch pvc; ConditionStatus:True 37m KubeDB Ops-manager Operator patch pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 37m KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 37m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 37m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 37m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 37m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 37m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 37m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 37m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 37m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 37m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 37m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 36m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 36m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 36m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 36m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 36m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 36m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 36m KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 36m KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 36m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning get pod; ConditionStatus:True 36m KubeDB Ops-manager Operator get pod; 
ConditionStatus:True + Warning create db client; ConditionStatus:False 36m KubeDB Ops-manager Operator create db client; ConditionStatus:False + Warning get pod; ConditionStatus:True 36m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 36m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 36m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 36m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:True 36m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning db operation; ConditionStatus:True 36m KubeDB Ops-manager Operator db operation; ConditionStatus:True + Warning get pod; ConditionStatus:True 36m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 36m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning create db client; ConditionStatus:True 36m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning db operation; ConditionStatus:True 36m KubeDB Ops-manager Operator db operation; ConditionStatus:True + Warning delete pod; ConditionStatus:True 36m KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 36m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 36m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning patch pvc; ConditionStatus:True 36m KubeDB Ops-manager Operator patch pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 36m KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 36m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 36m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 36m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 36m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 35m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 35m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 35m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 35m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 35m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 35m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 35m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 35m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 35m KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 35m KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 35m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning get pod; ConditionStatus:True 35m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:False 35m KubeDB Ops-manager Operator create db client; 
ConditionStatus:False + Warning get pod; ConditionStatus:True 35m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 35m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 35m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 35m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:True 35m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning db operation; ConditionStatus:True 35m KubeDB Ops-manager Operator db operation; ConditionStatus:True + Normal VolumeExpansionDataNode 35m KubeDB Ops-manager Operator successfully updated data node PVC sizes + Warning get pod; ConditionStatus:True 35m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 35m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning create db client; ConditionStatus:True 35m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning delete pod; ConditionStatus:True 35m KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 35m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 35m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning patch pvc; ConditionStatus:True 35m KubeDB Ops-manager Operator patch pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 35m KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 34m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 34m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 34m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 34m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 34m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 34m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 34m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 34m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 34m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 34m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 34m KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 34m KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 34m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning get pod; ConditionStatus:True 34m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:False 34m KubeDB Ops-manager Operator create db client; ConditionStatus:False + Warning get pod; ConditionStatus:True 34m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 34m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 34m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning 
get pod; ConditionStatus:True 34m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:True 34m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning get pod; ConditionStatus:True 34m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 34m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning create db client; ConditionStatus:True 34m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning delete pod; ConditionStatus:True 34m KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 34m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 34m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning patch pvc; ConditionStatus:True 34m KubeDB Ops-manager Operator patch pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 34m KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 33m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 33m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 33m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 33m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 33m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 33m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 33m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 33m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 33m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 33m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 33m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 33m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 33m KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 33m KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 33m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning get pod; ConditionStatus:True 33m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:False 33m KubeDB Ops-manager Operator create db client; ConditionStatus:False + Warning get pod; ConditionStatus:True 33m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 33m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 33m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 33m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:True 33m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning get pod; ConditionStatus:True 33m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning patch opsrequest; 
ConditionStatus:True 33m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning create db client; ConditionStatus:True 33m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Warning delete pod; ConditionStatus:True 33m KubeDB Ops-manager Operator delete pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 32m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 32m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning patch pvc; ConditionStatus:True 32m KubeDB Ops-manager Operator patch pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:False 32m KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 32m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 32m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 32m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 32m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 32m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 32m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 32m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 32m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 32m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 32m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 32m KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create pod; ConditionStatus:True 32m KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning patch opsrequest; ConditionStatus:True 32m KubeDB Ops-manager Operator patch opsrequest; ConditionStatus:True + Warning get pod; ConditionStatus:True 32m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:False 32m KubeDB Ops-manager Operator create db client; ConditionStatus:False + Warning get pod; ConditionStatus:True 32m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 32m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 32m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 32m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning create db client; ConditionStatus:True 32m KubeDB Ops-manager Operator create db client; ConditionStatus:True + Normal VolumeExpansionMasterNode 32m KubeDB Ops-manager Operator successfully updated master node PVC sizes + Normal UpdatePetSets 31m KubeDB Ops-manager Operator successfully reconciled the Elasticsearch resources + Warning get pet set; ConditionStatus:True 31m KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Warning get pet set; ConditionStatus:True 31m KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Warning get pet set; ConditionStatus:True 31m KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Normal ReadyPetSets 31m KubeDB Ops-manager Operator PetSet is recreated + Normal UpdateDatabase 31m KubeDB Ops-manager Operator 
successfully updated Elasticsearch CR
+  Normal   ResumeDatabase             31m   KubeDB Ops-manager Operator  Resuming Elasticsearch demo/es-cluster
+  Normal   ResumeDatabase             31m   KubeDB Ops-manager Operator  Successfully resumed Elasticsearch demo/es-cluster
+  Normal   Successful                 31m   KubeDB Ops-manager Operator  Successfully Updated Database
+  Normal   UpdatePetSets              31m   KubeDB Ops-manager Operator  successfully reconciled the Elasticsearch resources
+
+```
+
+Now, we are going to verify from the `PetSet` and the `PersistentVolumes` whether the volumes of the database have expanded to meet the desired state. Let's check,
+
+```bash
+$ kubectl get petset -n demo es-cluster-data -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage'
+"5Gi"
+$ kubectl get petset -n demo es-cluster-master -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage'
+"5Gi"
+$ kubectl get petset -n demo es-cluster-ingest -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage'
+"4Gi"
+
+$ kubectl get pv -n demo
+NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                           STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
+pvc-37f7398d-0251-4d3c-a439-d289b8cec6d2   5Gi        RWO            Delete           Bound    demo/data-es-cluster-master-2   standard                                        111m
+pvc-3a5d2b3e-dd39-4468-a8da-5274992a6502   5Gi        RWO            Delete           Bound    demo/data-es-cluster-master-0   standard                                        111m
+pvc-3cf21868-4b51-427b-b7ef-d0d26c753c8b   5Gi        RWO            Delete           Bound    demo/data-es-cluster-master-1   standard                                        111m
+pvc-56e6ed8f-a729-4532-bdec-92b8101f7813   5Gi        RWO            Delete           Bound    demo/data-es-cluster-data-2     standard                                        111m
+pvc-783d51f7-3bf2-4121-8f18-357d14d003ad   4Gi        RWO            Delete           Bound    demo/data-es-cluster-ingest-0   standard                                        111m
+pvc-81d6c1d3-0aa6-4190-9ee0-dd4a8d62b6b3   4Gi        RWO            Delete           Bound    demo/data-es-cluster-ingest-2   standard                                        111m
+pvc-942c6dce-4701-4e1a-b6f9-bf7d4ab56a11   5Gi        RWO            Delete           Bound    demo/data-es-cluster-data-1     standard                                        111m
+pvc-b706647d-c9ba-4296-94aa-2f6ef2230b6e   4Gi        RWO            Delete           Bound    demo/data-es-cluster-ingest-1   standard                                        111m
+pvc-c274f913-5452-47e1-ab42-ba584bdae297   5Gi        RWO            Delete           Bound    demo/data-es-cluster-data-0     standard                                        111m
+```
+
+The above output verifies that we have successfully expanded the volumes of the Elasticsearch cluster.
+
+**Only Data Node Expansion:**
+To expand the volume of only the data nodes, create an `ElasticsearchOpsRequest` manifest like the one below,
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: ElasticsearchOpsRequest
+metadata:
+  name: volume-expansion-data-nodes
+  namespace: demo
+spec:
+  type: VolumeExpansion
+  databaseRef:
+    name: es-cluster
+  volumeExpansion:
+    mode: "Online"
+    data: 5Gi
+```
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/volume-expantion/volume-expansion-topo-data.yaml
+elasticsearchopsrequest.ops.kubedb.com/volume-expansion-data-nodes created
+```
+
+You can follow the progress of this request with the monitoring commands sketched at the end of this page.
+
+## Cleaning Up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete elasticsearchopsrequest -n demo volume-expansion-topology volume-expansion-data-nodes
+kubectl delete es -n demo es-cluster
+kubectl delete ns demo
+```
+
+## Next Steps
+
+- Detailed concepts of the [Elasticsearch object](/docs/guides/elasticsearch/concepts/elasticsearch/index.md).
+- Learn about the different Elasticsearch topology clustering modes [here](/docs/guides/elasticsearch/clustering/topology-cluster/simple-dedicated-cluster/index.md).
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
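+
+**Monitoring the OpsRequest:**
+If you want to follow either of the volume expansion requests above while it is being processed, the minimal sketch below is usually enough. It relies only on standard `kubectl` options; the ops request name `volume-expansion-data-nodes` and the `demo` namespace are the ones used in this guide, so substitute your own names if they differ.
+
+```bash
+# Watch the ops request until it reports Successful.
+$ kubectl get elasticsearchopsrequest -n demo volume-expansion-data-nodes --watch
+
+# After it succeeds, confirm the expanded capacity of every PVC in the namespace.
+$ kubectl get pvc -n demo -o custom-columns='NAME:.metadata.name,CAPACITY:.status.capacity.storage'
+```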
diff --git a/docs/images/elasticsearch/es-vertical-scaling.png b/docs/images/elasticsearch/es-vertical-scaling.png new file mode 100644 index 0000000000..79ea77332f Binary files /dev/null and b/docs/images/elasticsearch/es-vertical-scaling.png differ diff --git a/docs/images/elasticsearch/es-volume-expansion.png b/docs/images/elasticsearch/es-volume-expansion.png new file mode 100644 index 0000000000..ef791ca6d5 Binary files /dev/null and b/docs/images/elasticsearch/es-volume-expansion.png differ diff --git a/docs/images/elasticsearch/horizontal_scaling.jpg b/docs/images/elasticsearch/horizontal_scaling.jpg new file mode 100644 index 0000000000..b921b02d89 Binary files /dev/null and b/docs/images/elasticsearch/horizontal_scaling.jpg differ