From f65b55f90a4f7af6610ec696da3f886c947e1b61 Mon Sep 17 00:00:00 2001 From: obaydullahmhs Date: Sat, 17 Jan 2026 15:46:11 +0600 Subject: [PATCH 01/27] Update Kafka .spec.configuration API changes Signed-off-by: obaydullahmhs --- .../kafka/configuration/kafka-combined.yaml | 4 +- .../kafka/configuration/kafka-topology.yaml | 4 +- .../kafka/connectcluster/kcc-distributed.yaml | 4 +- .../mongodb-source-connector.yaml | 4 +- .../kafka/migration/mirror-checkpoint.yaml | 4 +- .../kafka/migration/mirror-heatbeat.yaml | 4 +- .../kafka/migration/mirror-source.yaml | 4 +- .../kafka/reconfigure/kafka-combined.yaml | 4 +- .../kafka/reconfigure/kafka-topology.yaml | 4 +- docs/guides/kafka/concepts/connectcluster.md | 37 +++++++++++++++++++ docs/guides/kafka/concepts/connector.md | 34 ++++++++++++++++- docs/guides/kafka/concepts/kafka.md | 24 +++++++++++- .../kafka/configuration/kafka-combined.md | 4 +- .../kafka/configuration/kafka-topology.md | 4 +- .../kafka/connectcluster/connectcluster.md | 14 +++---- .../guides/kafka/connectcluster/quickstart.md | 6 +-- docs/guides/kafka/migration/migration.md | 12 +++--- .../kafka/reconfigure/kafka-combined.md | 10 ++--- .../kafka/reconfigure/kafka-topology.md | 10 ++--- .../kafka/update-version/update-version.md | 4 +- 20 files changed, 142 insertions(+), 53 deletions(-) diff --git a/docs/examples/kafka/configuration/kafka-combined.yaml b/docs/examples/kafka/configuration/kafka-combined.yaml index 45449397e8..8d1da101ed 100644 --- a/docs/examples/kafka/configuration/kafka-combined.yaml +++ b/docs/examples/kafka/configuration/kafka-combined.yaml @@ -6,8 +6,8 @@ metadata: spec: replicas: 2 version: 3.9.0 - configSecret: - name: configsecret-combined + configuration: + secretName: configsecret-combined storage: accessModes: - ReadWriteOnce diff --git a/docs/examples/kafka/configuration/kafka-topology.yaml b/docs/examples/kafka/configuration/kafka-topology.yaml index 87f53b5594..841af3917d 100644 --- a/docs/examples/kafka/configuration/kafka-topology.yaml +++ b/docs/examples/kafka/configuration/kafka-topology.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: 3.9.0 - configSecret: - name: configsecret-topology + configuration: + secretName: configsecret-topology topology: broker: replicas: 2 diff --git a/docs/examples/kafka/connectcluster/kcc-distributed.yaml b/docs/examples/kafka/connectcluster/kcc-distributed.yaml index 943b237c77..63e3718711 100644 --- a/docs/examples/kafka/connectcluster/kcc-distributed.yaml +++ b/docs/examples/kafka/connectcluster/kcc-distributed.yaml @@ -11,8 +11,8 @@ spec: apiGroup: cert-manager.io kind: Issuer name: connectcluster-ca-issuer - configSecret: - name: connectcluster-custom-config + configuration: + secretName: connectcluster-custom-config replicas: 3 connectorPlugins: - postgres-3.0.5.final diff --git a/docs/examples/kafka/connectcluster/mongodb-source-connector.yaml b/docs/examples/kafka/connectcluster/mongodb-source-connector.yaml index 0a537ac57c..0f32a0c452 100644 --- a/docs/examples/kafka/connectcluster/mongodb-source-connector.yaml +++ b/docs/examples/kafka/connectcluster/mongodb-source-connector.yaml @@ -4,8 +4,8 @@ metadata: name: mongodb-source-connector namespace: demo spec: - configSecret: - name: mongodb-source-config + configuration: + secretName: mongodb-source-config connectClusterRef: name: connectcluster-quickstart namespace: demo diff --git a/docs/examples/kafka/migration/mirror-checkpoint.yaml b/docs/examples/kafka/migration/mirror-checkpoint.yaml index 23ccb47b28..12ed1f4601 100644 --- 
a/docs/examples/kafka/migration/mirror-checkpoint.yaml +++ b/docs/examples/kafka/migration/mirror-checkpoint.yaml @@ -34,6 +34,6 @@ spec: connectClusterRef: name: mirror-connect namespace: demo - configSecret: - name: mirror-checkpoint-config + configuration: + secretName: mirror-checkpoint-config deletionPolicy: WipeOut diff --git a/docs/examples/kafka/migration/mirror-heatbeat.yaml b/docs/examples/kafka/migration/mirror-heatbeat.yaml index 6005ad3a01..1905e91bcd 100644 --- a/docs/examples/kafka/migration/mirror-heatbeat.yaml +++ b/docs/examples/kafka/migration/mirror-heatbeat.yaml @@ -29,6 +29,6 @@ spec: connectClusterRef: name: mirror-connect namespace: demo - configSecret: - name: mirror-heartbeat-config + configuration: + secretName: mirror-heartbeat-config deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/kafka/migration/mirror-source.yaml b/docs/examples/kafka/migration/mirror-source.yaml index 9456fe686e..75e9efd091 100644 --- a/docs/examples/kafka/migration/mirror-source.yaml +++ b/docs/examples/kafka/migration/mirror-source.yaml @@ -36,6 +36,6 @@ spec: connectClusterRef: name: mirror-connect namespace: demo - configSecret: - name: mirror-source-config + configuration: + secretName: mirror-source-config deletionPolicy: WipeOut diff --git a/docs/examples/kafka/reconfigure/kafka-combined.yaml b/docs/examples/kafka/reconfigure/kafka-combined.yaml index 367ec6451e..601fe61797 100644 --- a/docs/examples/kafka/reconfigure/kafka-combined.yaml +++ b/docs/examples/kafka/reconfigure/kafka-combined.yaml @@ -6,8 +6,8 @@ metadata: spec: replicas: 2 version: 3.9.0 - configSecret: - name: kf-combined-custom-config + configuration: + secretName: kf-combined-custom-config storage: accessModes: - ReadWriteOnce diff --git a/docs/examples/kafka/reconfigure/kafka-topology.yaml b/docs/examples/kafka/reconfigure/kafka-topology.yaml index 4161592c9d..308e71671f 100644 --- a/docs/examples/kafka/reconfigure/kafka-topology.yaml +++ b/docs/examples/kafka/reconfigure/kafka-topology.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: 3.9.0 - configSecret: - name: kf-topology-custom-config + configuration: + secretName: kf-topology-custom-config topology: broker: replicas: 2 diff --git a/docs/guides/kafka/concepts/connectcluster.md b/docs/guides/kafka/concepts/connectcluster.md index 4e05bce6e4..333dbadbc3 100644 --- a/docs/guides/kafka/concepts/connectcluster.md +++ b/docs/guides/kafka/concepts/connectcluster.md @@ -54,6 +54,18 @@ spec: secretName: connectcluster-client-cert configSecret: name: custom-connectcluster-config + configuration: + secretName: custom-connectcluster-config + inline: + config.properties: | + key.converter=org.apache.kafka.connect.json.JsonConverter + value.converter=org.apache.kafka.connect.json.JsonConverter + key.converter.schemas.enable=true + value.converter.schemas.enable=true + offset.storage.topic=connect-cluster-offsets + config.storage.topic=connect-cluster-configs + status.storage.topic=connect-cluster-status + offset.flush.interval.ms=10000 replicas: 3 connectorPlugins: - gcs-0.13.0 @@ -134,6 +146,31 @@ configSecret: name: ``` +> **Note**: Use `.spec.configuration.secretName` to specify the name of the secret instead of `.spec.configSecret.name`. The field `.spec.configSecret` is deprecated and will be removed in future releases. If you still use `.spec.configSecret`, KubeDB will copy `.spec.configSecret.name` to `.spec.configuration.secretName` internally. 
+ +### spec.configuration +`spec.configuration` is an optional field that specifies custom configuration for Kafka Connect Cluster. It has the following fields: +- `configuration.secretName` is a optional field that specifies the name of the secret that holds custom configuration files for Kafka Connect Cluster. +- `configuration.inline` is an optional field that allows you to provide custom configuration directly in the ConnectCluster object. It has the following possible keys: + - `config.properties` - is used to provide custom configuration for Kafka Connect Cluster. + +```yaml +spec: + configuration: + secretName: +``` +or +```yaml +spec: + configuration: + inline: + config.properties: | + key.converter=org.apache.kafka.connect.json.JsonConverter + value.converter=org.apache.kafka.connect.json.JsonConverter + ..... + .... +``` + ### spec.authSecret `spec.authSecret` is an optional field that points to a Secret used to hold credentials for `ConnectCluster` username and password. If not set, KubeDB operator creates a new Secret `{connectcluster-object-name}-connect-cred` for storing the username and password for each ConnectCluster object. diff --git a/docs/guides/kafka/concepts/connector.md b/docs/guides/kafka/concepts/connector.md index 85f1927d77..c8e6b3128f 100644 --- a/docs/guides/kafka/concepts/connector.md +++ b/docs/guides/kafka/concepts/connector.md @@ -31,6 +31,14 @@ metadata: spec: configSecret: name: mongodb-source-config + configuration: + secretName: mongodb-source-config + inline: + config.properties: | + connector.class=com.mongodb.* + tasks.max=1 + topic.prefix=mongodb- + connection.uri=mongodb://mongo-user: connectClusterRef: name: connectcluster-quickstart namespace: demo @@ -42,8 +50,30 @@ spec: `spec.configSecret` is a required field that specifies the name of the secret containing the configuration for the Connector. The secret should contain a key `config.properties` which contains the configuration for the Connector. ```yaml spec: - configSecret: - name: + configuration: + secretName: +``` + +> **Note**: Use `.spec.configuration.secretName` to specify the name of the secret instead of `.spec.configSecret.name`. The field `.spec.configSecret` is deprecated and will be removed in future releases. If you still use `.spec.configSecret`, KubeDB will copy `.spec.configSecret.name` to `.spec.configuration.secretName` internally. + +### spec.configuration + +`spec.configuration` is a required field that specifies the configuration for the Connector. It can either be specified inline or as a reference to a secret. 
+```yaml
+spec:
+  configuration:
+    secretName: 
+```
+or
+```yaml
+spec:
+  configuration:
+    inline:
+      config.properties: |
+        connector.class=com.mongodb.*
+        tasks.max=1
+        topic.prefix=mongodb-
+        connection.uri=mongodb://mongo-user:mongo-password@mongo-host:27017
 ```
 
 ### spec.connectClusterRef
diff --git a/docs/guides/kafka/concepts/kafka.md b/docs/guides/kafka/concepts/kafka.md
index 4158455336..b5d29d96bf 100644
--- a/docs/guides/kafka/concepts/kafka.md
+++ b/docs/guides/kafka/concepts/kafka.md
@@ -35,6 +35,18 @@ spec:
     name: kafka-admin-cred
   configSecret:
     name: kafka-custom-config
+  configuration:
+    secretName: kafka-custom-config
+    inline:
+      broker.properties: |
+        log.retention.hours=168
+        log.segment.bytes=1073741824
+      controller.properties: |
+        log.retention.hours=168
+        log.segment.bytes=1073741824
+      server.properties: |
+        log.retention.hours=168
+        log.segment.bytes=1073741824
   enableSSL: true
   healthChecker:
     failureThreshold: 3
@@ -180,7 +192,17 @@ Secrets provided by users are not managed by KubeDB, and therefore, won't be mod
 
 ### spec.configSecret
 
-`spec.configSecret` is an optional field that points to a Secret used to hold custom Kafka configuration. If not set, KubeDB operator will use default configuration for Kafka.
+`spec.configSecret` is an optional field that points to a Secret used to hold custom Kafka configuration. If not set, KubeDB operator will use default configuration for Kafka. This field is deprecated and is no longer used directly; use `.spec.configuration` to provide custom configuration instead. If you still provide this field, KubeDB operator will update `spec.configuration.secretName` with the provided secret name.
+
+> **Note**: Use `.spec.configuration.secretName` to specify the name of the secret instead of `.spec.configSecret.name`. The field `.spec.configSecret` is deprecated and will be removed in future releases. If you still use `.spec.configSecret`, KubeDB will copy `.spec.configSecret.name` to `.spec.configuration.secretName` internally.
+
+### spec.configuration
+`spec.configuration` is an optional field that specifies custom configuration for the Kafka cluster. It has the following fields (see the example below):
+- `configuration.secretName` is an optional field that specifies the name of the secret that holds custom configuration files for the Kafka cluster.
+- `configuration.inline` is an optional field that allows you to provide custom configuration directly in the Kafka object. It has the following possible keys:
+  - `broker.properties` - is used to provide custom configuration for Kafka brokers.
+  - `controller.properties` - is used to provide custom configuration for Kafka controllers.
+  - `server.properties` - is used to provide custom configuration for both Kafka brokers and controllers.
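+
+For example, you can either reference a secret (the `kafka-custom-config` secret shown in the sample object above is reused here purely for illustration):
+
+```yaml
+spec:
+  configuration:
+    secretName: kafka-custom-config
+```
+or provide the configuration inline:
+```yaml
+spec:
+  configuration:
+    inline:
+      broker.properties: |
+        log.retention.hours=168
+        log.segment.bytes=1073741824
+```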
### spec.topology diff --git a/docs/guides/kafka/configuration/kafka-combined.md b/docs/guides/kafka/configuration/kafka-combined.md index 5622394106..710ac4e885 100644 --- a/docs/guides/kafka/configuration/kafka-combined.md +++ b/docs/guides/kafka/configuration/kafka-combined.md @@ -89,8 +89,8 @@ metadata: spec: replicas: 2 version: 3.9.0 - configSecret: - name: configsecret-combined + configuration: + secretName: configsecret-combined storage: accessModes: - ReadWriteOnce diff --git a/docs/guides/kafka/configuration/kafka-topology.md b/docs/guides/kafka/configuration/kafka-topology.md index 779f8adccd..1ac224b33e 100644 --- a/docs/guides/kafka/configuration/kafka-topology.md +++ b/docs/guides/kafka/configuration/kafka-topology.md @@ -98,8 +98,8 @@ metadata: namespace: demo spec: version: 3.9.0 - configSecret: - name: configsecret-topology + configuration: + secretName: configsecret-topology topology: broker: replicas: 2 diff --git a/docs/guides/kafka/connectcluster/connectcluster.md b/docs/guides/kafka/connectcluster/connectcluster.md index 217b04961a..376b9cf301 100644 --- a/docs/guides/kafka/connectcluster/connectcluster.md +++ b/docs/guides/kafka/connectcluster/connectcluster.md @@ -99,8 +99,8 @@ spec: apiGroup: cert-manager.io kind: Issuer name: connectcluster-ca-issuer - configSecret: - name: connectcluster-custom-config + configuration: + secretName: connectcluster-custom-config replicas: 3 connectorPlugins: - postgres-3.0.5.final @@ -115,7 +115,7 @@ Here, - `spec.enableSSL` - specifies whether the ConnectCluster should be TLS secured or not. - `spec.tls.issuerRef` - specifies the name of the Issuer CR. Here, the ConnectCluster will use the `connectcluster-ca-issuer` Issuer to enable SSL/TLS. - `spec.replicas` - specifies the number of ConnectCluster workers. -- `spec.configSecret` - specifies the name of the secret that contains the custom configuration for the ConnectCluster. Here, the ConnectCluster will use the `connectcluster-custom-config` secret for custom configuration. +- `spec.configuration.secretName` - specifies the name of the secret that contains the custom configuration for the ConnectCluster. Here, the ConnectCluster will use the `connectcluster-custom-config` secret for custom configuration. - `spec.connectorPlugins` - is the name of the KafkaConnectorVersion CR. Here, mongodb, mysql, postgres, and jdbc connector-plugins will be loaded to the ConnectCluster worker nodes. - `spec.kafkaRef` specifies the Kafka instance that the ConnectCluster will connect to. Here, the ConnectCluster will connect to the Kafka instance named `kafka-prod` in the `demo` namespace. - `spec.deletionPolicy` specifies what KubeDB should do when a user try to delete ConnectCluster CR. Deletion policy `WipeOut` will delete the worker pods, secret when the ConnectCluster CR is deleted. 
@@ -267,8 +267,8 @@ metadata: name: postgres-source-connector namespace: demo spec: - configSecret: - name: postgres-source-connector-config + configuration: + secretName: postgres-source-connector-config connectClusterRef: name: connectcluster-distributed namespace: demo @@ -417,8 +417,8 @@ metadata: name: mysql-sink-connector namespace: demo spec: - configSecret: - name: mysql-sink-connector-config + configuration: + secretName: mysql-sink-connector-config connectClusterRef: name: connectcluster-distributed namespace: demo diff --git a/docs/guides/kafka/connectcluster/quickstart.md b/docs/guides/kafka/connectcluster/quickstart.md index 3c5fa37e70..5a87f83679 100644 --- a/docs/guides/kafka/connectcluster/quickstart.md +++ b/docs/guides/kafka/connectcluster/quickstart.md @@ -422,8 +422,8 @@ metadata: name: mongodb-source-connector namespace: demo spec: - configSecret: - name: mongodb-source-config + configuration: + secretName: mongodb-source-config connectClusterRef: name: connectcluster-quickstart namespace: demo @@ -432,7 +432,7 @@ spec: Here, -- `spec.configSecret` - is the name of the secret containing the connector configuration. +- `spec.configuration.secretName` - is the name of the secret containing the connector configuration. - `spec.connectClusterRef` - is the name of the ConnectCluster instance that the connector will run on. This is an appbinding reference of the ConnectCluster instance. - `spec.deletionPolicy` - specifies what KubeDB should do when a user try to delete Connector CR. Deletion policy `WipeOut` will delete the connector from the ConnectCluster when the Connector CR is deleted. If you want to keep the connector after deleting the Connector CR, you can set the deletion policy to `Delete`. diff --git a/docs/guides/kafka/migration/migration.md b/docs/guides/kafka/migration/migration.md index 2181dd5e59..12b5382e97 100644 --- a/docs/guides/kafka/migration/migration.md +++ b/docs/guides/kafka/migration/migration.md @@ -442,8 +442,8 @@ spec: connectClusterRef: name: mirror-connect namespace: demo - configSecret: - name: mirror-source-config + configuration: + secretName: mirror-source-config deletionPolicy: WipeOut ``` @@ -506,8 +506,8 @@ spec: connectClusterRef: name: mirror-connect namespace: demo - configSecret: - name: mirror-checkpoint-config + configuration: + secretName: mirror-checkpoint-config deletionPolicy: WipeOut ``` @@ -563,8 +563,8 @@ spec: connectClusterRef: name: mirror-connect namespace: demo - configSecret: - name: mirror-heartbeat-config + configuration: + secretName: mirror-heartbeat-config deletionPolicy: WipeOut ``` diff --git a/docs/guides/kafka/reconfigure/kafka-combined.md b/docs/guides/kafka/reconfigure/kafka-combined.md index ae6977946a..a7be8deffc 100644 --- a/docs/guides/kafka/reconfigure/kafka-combined.md +++ b/docs/guides/kafka/reconfigure/kafka-combined.md @@ -72,7 +72,7 @@ $ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" > secret/kf-combined-custom-config created ``` -In this section, we are going to create a Kafka object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `Kafka` CR that we are going to create, +In this section, we are going to create a Kafka object specifying `spec.configuration` field to apply this custom configuration. 
Below is the YAML of the `Kafka` CR that we are going to create, ```yaml apiVersion: kubedb.com/v1 @@ -83,8 +83,8 @@ metadata: spec: replicas: 2 version: 3.9.0 - configSecret: - name: kf-combined-custom-config + configuration: + secretName: kf-combined-custom-config storage: accessModes: - ReadWriteOnce @@ -181,7 +181,7 @@ Here, - `spec.databaseRef.name` specifies that we are reconfiguring `kafka-dev` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configSecret.name` specifies the name of the new secret. +- `spec.configuration.configSecret.name` specifies the name of the new secret. Let's create the `KafkaOpsRequest` CR we have shown above, @@ -192,7 +192,7 @@ kafkaopsrequest.ops.kubedb.com/kfops-reconfigure-combined created #### Verify the new configuration is working -If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `Kafka` object. +If everything goes well, `KubeDB` Ops-manager operator will update the `.spec.configuration` of `Kafka` object. Let's wait for `KafkaOpsRequest` to be `Successful`. Run the following command to watch `KafkaOpsRequest` CR, diff --git a/docs/guides/kafka/reconfigure/kafka-topology.md b/docs/guides/kafka/reconfigure/kafka-topology.md index 164c3a272b..660c6d055f 100644 --- a/docs/guides/kafka/reconfigure/kafka-topology.md +++ b/docs/guides/kafka/reconfigure/kafka-topology.md @@ -83,7 +83,7 @@ secret/kf-topology-custom-config created > **Note:** -In this section, we are going to create a Kafka object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `Kafka` CR that we are going to create, +In this section, we are going to create a Kafka object specifying `spec.configuration.secretName` field to apply this custom configuration. Below is the YAML of the `Kafka` CR that we are going to create, ```yaml apiVersion: kubedb.com/v1 @@ -93,8 +93,8 @@ metadata: namespace: demo spec: version: 3.9.0 - configSecret: - name: kf-topology-custom-config + configuration: + secretName: kf-topology-custom-config topology: broker: replicas: 2 @@ -213,7 +213,7 @@ Here, - `spec.databaseRef.name` specifies that we are reconfiguring `kafka-prod` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configSecret.name` specifies the name of the new secret. +- `spec.configuration.configSecret.name` specifies the name of the new secret. Let's create the `KafkaOpsRequest` CR we have shown above, @@ -224,7 +224,7 @@ kafkaopsrequest.ops.kubedb.com/kfops-reconfigure-topology created #### Verify the new configuration is working -If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `Kafka` object. +If everything goes well, `KubeDB` Ops-manager operator will update the `configuration` of `Kafka` object. Let's wait for `KafkaOpsRequest` to be `Successful`. 
Run the following command to watch `KafkaOpsRequest` CR, diff --git a/docs/guides/kafka/update-version/update-version.md b/docs/guides/kafka/update-version/update-version.md index 3e0e069d8d..59afd1ccfa 100644 --- a/docs/guides/kafka/update-version/update-version.md +++ b/docs/guides/kafka/update-version/update-version.md @@ -52,8 +52,8 @@ metadata: namespace: demo spec: version: 3.9.0 - configSecret: - name: configsecret-topology + configuration: + secretName: configsecret-topology topology: broker: replicas: 2 From c1fe460ab75ccad977ca7a22826c5e89244e9c59 Mon Sep 17 00:00:00 2001 From: Bonusree Date: Mon, 19 Jan 2026 18:32:52 +0600 Subject: [PATCH 02/27] confsecret to configuration Signed-off-by: Bonusree --- .../configuration/cassandra-config-file.yaml | 4 +- ...andra-reconfigure-update-topology-ops.yaml | 3 +- .../reconfigure/cassandra-topology.yaml | 4 +- .../examples/cassandra/restart/cassandra.yaml | 4 +- .../ch-custom-config-cluster.yaml | 4 +- .../ch-custom-config-standalone.yaml | 4 +- .../ch-reconfigure-ops-with-secret.yaml | 3 +- .../reconfigure/clickhouse-cluster.yaml | 4 +- .../reconfigure/clickhouse-standalone.yaml | 4 +- .../druid/quickstart/druid-quickstart.yaml | 4 +- .../es-custom-with-topology.yaml | 4 +- .../custom-config/es-custom.yaml | 4 +- docs/examples/elasticsearch/es-overview.yaml | 4 +- .../elasticsearch/kibana/es-kibana-demo.yaml | 4 +- .../elasticsearch/x-pack/es-mon-demo.yaml | 4 +- .../configuration/hazelcast-config.yaml | 4 +- .../ignite/custom-config/custom-ignite.yaml | 4 +- .../kafka-reconfigure-update-combined.yaml | 3 +- .../kafka-reconfigure-update-topology.yaml | 3 +- .../custom-config/custom-memcached.yaml | 4 +- .../config-secret-reconfigure.yaml | 3 +- .../reconfigure/memcached-config.yaml | 4 +- .../mongodb/configuration/demo-1.yaml | 4 +- .../reconfigure/mg-replicaset-config.yaml | 4 +- .../mongodb/reconfigure/mg-shard-config.yaml | 9 +-- .../reconfigure/mg-standalone-config.yaml | 4 +- .../mops-reconfigure-replicaset.yaml | 3 +- .../reconfigure/mops-reconfigure-shard.yaml | 9 +-- .../mops-reconfigure-standalone.yaml | 3 +- .../configuration/mssql-custom-config.yaml | 4 +- .../reconfigure/ms-standalone.yaml | 4 +- .../reconfigure/msops-reconfigure-ag.yaml | 3 +- .../msops-reconfigure-standalone.yaml | 3 +- .../reconfigure/mssqlserver-ag-cluster.yaml | 4 +- .../mysql/configuration/mysql-custom.yaml | 4 +- .../reconfigure/pbops-reconfigure.yaml | 3 +- .../configuration/pgpool-config-file.yaml | 4 +- .../pgpool/quickstart/quick-postgres.yaml | 4 +- .../reconfiguration/pp-custom-config.yaml | 4 +- .../reconfiguration/ppops-reconfigure.yaml | 3 +- .../custom-config/pg-custom-config.yaml | 4 +- docs/examples/postgres/pg-overview.yaml | 5 +- .../postgres/reconfigure/ha-postgres.yaml | 5 +- .../reconfigure/reconfigure-using-secret.yaml | 5 +- docs/examples/proxysql/custom-proxysql.yaml | 4 +- .../cluster/rabbit-custom-config.yaml | 4 +- .../rabbit-reconfigure-with-secret.yaml | 3 +- .../redis/custom-config/redis-custom.yaml | 4 +- .../redis/custom-config/valkey-custom.yaml | 4 +- .../redis/reconfigure/rdops-reconfigure.yaml | 3 +- .../reconfigure/sample-redis-config.yaml | 4 +- .../solr/configuration/solr-combined.yaml | 4 +- .../sl-reconfigure-custom-config.yaml | 3 +- .../solr/reconfigure/solr-combined.yaml | 4 +- .../sample-zk-configuration.yaml | 4 +- .../zkops-reconfiguration.yaml | 3 +- .../auto-backup/examples/sample-druid.yaml | 4 +- docs/guides/druid/clustering/guide/index.md | 8 +-- .../clustering/guide/yamls/druid-cluster.yaml | 4 +- 
docs/guides/druid/concepts/druid.md | 4 +- docs/guides/druid/concepts/druidopsrequest.md | 55 +--------------- .../druid/configuration/config-file/index.md | 9 ++- .../configuration/podtemplating/index.md | 20 +++--- docs/guides/druid/failover/guide.md | 5 +- docs/guides/druid/monitoring/overview.md | 4 +- .../monitoring/using-prometheus-operator.md | 4 +- .../yamls/druid-monitoring-builtin.yaml | 4 +- .../yamls/druid-with-monitoring.yaml | 5 +- docs/guides/druid/quickstart/guide/index.md | 6 +- docs/guides/druid/reconfigure-tls/guide.md | 29 ++++----- .../reconfigure-tls/yamls/druid-cluster.yaml | 5 +- docs/guides/druid/reconfigure/guide.md | 4 +- .../reconfigure/yamls/druid-cluster.yaml | 6 +- .../yamls/reconfigure-druid-ops.yaml | 3 +- docs/guides/druid/rotate-auth/guide.md | 4 +- docs/guides/druid/tls/guide.md | 4 +- .../druid/tls/yamls/druid-cluster-tls.yaml | 4 +- docs/guides/druid/update-version/guide.md | 4 +- .../update-version/yamls/druid-cluster.yaml | 4 +- docs/guides/druid/volume-expansion/guide.md | 64 +------------------ .../volume-expansion/yamls/druid-cluster.yaml | 5 +- .../concepts/elasticsearch/index.md | 4 +- .../configuration/using-config-file.md | 4 +- .../mssqlserver/concepts/mssqlserver.md | 6 +- .../configuration/using-config-file.md | 4 +- .../mssqlserver/reconfigure/ag_cluster.md | 11 ++-- .../mssqlserver/reconfigure/standalone.md | 8 +-- .../yamls/group-replication.yaml | 4 +- .../yamls/inndob-cluster.yaml | 4 +- .../reconfigure-steps/yamls/semi-sync.yaml | 4 +- .../reconfigure-steps/yamls/stand-alone.yaml | 4 +- docs/guides/pgbouncer/concepts/opsrequest.md | 4 +- .../reconfigure/reconfigure-pgbouncer.md | 8 +-- .../configuration/using-config-file.md | 4 +- docs/guides/postgres/gitops/gitops.md | 20 +++--- docs/guides/postgres/reconfigure/cluster.md | 7 +- docs/guides/solr/configuration/config-file.md | 4 +- docs/guides/solr/reconfigure/solr.md | 13 ++-- 98 files changed, 232 insertions(+), 371 deletions(-) diff --git a/docs/examples/cassandra/configuration/cassandra-config-file.yaml b/docs/examples/cassandra/configuration/cassandra-config-file.yaml index 82ddf21ca9..e58840a04b 100644 --- a/docs/examples/cassandra/configuration/cassandra-config-file.yaml +++ b/docs/examples/cassandra/configuration/cassandra-config-file.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: 5.0.3 - configSecret: - name: cas-configuration + configuration: + secretName: cas-configuration topology: rack: - name: r0 diff --git a/docs/examples/cassandra/reconfigure/cassandra-reconfigure-update-topology-ops.yaml b/docs/examples/cassandra/reconfigure/cassandra-reconfigure-update-topology-ops.yaml index e3e35adf7c..1cd58a7782 100644 --- a/docs/examples/cassandra/reconfigure/cassandra-reconfigure-update-topology-ops.yaml +++ b/docs/examples/cassandra/reconfigure/cassandra-reconfigure-update-topology-ops.yaml @@ -8,7 +8,6 @@ spec: databaseRef: name: cassandra-prod configuration: - configSecret: - name: new-cas-topology-custom-config + secretName: new-cas-topology-custom-config timeout: 5m apply: IfReady \ No newline at end of file diff --git a/docs/examples/cassandra/reconfigure/cassandra-topology.yaml b/docs/examples/cassandra/reconfigure/cassandra-topology.yaml index 8cd7c7d459..94cf4fec45 100644 --- a/docs/examples/cassandra/reconfigure/cassandra-topology.yaml +++ b/docs/examples/cassandra/reconfigure/cassandra-topology.yaml @@ -5,8 +5,6 @@ metadata: namespace: demo spec: version: 5.0.3 - configSecret: - name: cas-topology-custom-config topology: rack: - name: r0 @@ -29,4 +27,6 
@@ spec: requests: storage: 1Gi storageType: Durable + configuration: + secretName: cas-topology-custom-config deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/cassandra/restart/cassandra.yaml b/docs/examples/cassandra/restart/cassandra.yaml index 9581706fe7..56dc65e9b1 100644 --- a/docs/examples/cassandra/restart/cassandra.yaml +++ b/docs/examples/cassandra/restart/cassandra.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: 5.0.3 - configSecret: - name: cas-configuration + configuration: + secretName: cas-configuration topology: rack: - name: r0 diff --git a/docs/examples/clickhouse/custom-config/ch-custom-config-cluster.yaml b/docs/examples/clickhouse/custom-config/ch-custom-config-cluster.yaml index 626b375ed3..40e24745a1 100644 --- a/docs/examples/clickhouse/custom-config/ch-custom-config-cluster.yaml +++ b/docs/examples/clickhouse/custom-config/ch-custom-config-cluster.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: 24.4.1 - configSecret: - name: ch-configuration + configuration: + secretName: ch-configuration clusterTopology: clickHouseKeeper: externallyManaged: false diff --git a/docs/examples/clickhouse/custom-config/ch-custom-config-standalone.yaml b/docs/examples/clickhouse/custom-config/ch-custom-config-standalone.yaml index e1c257925a..7354943749 100644 --- a/docs/examples/clickhouse/custom-config/ch-custom-config-standalone.yaml +++ b/docs/examples/clickhouse/custom-config/ch-custom-config-standalone.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: 24.4.1 - configSecret: - name: clickhouse-configuration + configuration: + secretName: clickhouse-configuration replicas: 1 storage: accessModes: diff --git a/docs/examples/clickhouse/reconfigure/ch-reconfigure-ops-with-secret.yaml b/docs/examples/clickhouse/reconfigure/ch-reconfigure-ops-with-secret.yaml index 0ab17669c7..8f9768550b 100644 --- a/docs/examples/clickhouse/reconfigure/ch-reconfigure-ops-with-secret.yaml +++ b/docs/examples/clickhouse/reconfigure/ch-reconfigure-ops-with-secret.yaml @@ -8,7 +8,6 @@ spec: databaseRef: name: clickhouse-prod configuration: - configSecret: - name: new-ch-custom-config + secretName: new-ch-custom-config timeout: 10m apply: IfReady \ No newline at end of file diff --git a/docs/examples/clickhouse/reconfigure/clickhouse-cluster.yaml b/docs/examples/clickhouse/reconfigure/clickhouse-cluster.yaml index 711436d593..18450504f1 100644 --- a/docs/examples/clickhouse/reconfigure/clickhouse-cluster.yaml +++ b/docs/examples/clickhouse/reconfigure/clickhouse-cluster.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: 24.4.1 - configSecret: - name: ch-custom-config + configuration: + secretName: ch-custom-config clusterTopology: clickHouseKeeper: externallyManaged: false diff --git a/docs/examples/clickhouse/reconfigure/clickhouse-standalone.yaml b/docs/examples/clickhouse/reconfigure/clickhouse-standalone.yaml index 502ece700e..e9e807baa8 100644 --- a/docs/examples/clickhouse/reconfigure/clickhouse-standalone.yaml +++ b/docs/examples/clickhouse/reconfigure/clickhouse-standalone.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: 24.4.1 - configSecret: - name: ch-custom-config + configuration: + secretName: ch-custom-config replicas: 1 storage: accessModes: diff --git a/docs/examples/druid/quickstart/druid-quickstart.yaml b/docs/examples/druid/quickstart/druid-quickstart.yaml index 9794578610..97149de2db 100644 --- a/docs/examples/druid/quickstart/druid-quickstart.yaml +++ 
b/docs/examples/druid/quickstart/druid-quickstart.yaml @@ -7,8 +7,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/examples/elasticsearch/custom-config/es-custom-with-topology.yaml b/docs/examples/elasticsearch/custom-config/es-custom-with-topology.yaml index 7e1141af87..c04a87ffa3 100644 --- a/docs/examples/elasticsearch/custom-config/es-custom-with-topology.yaml +++ b/docs/examples/elasticsearch/custom-config/es-custom-with-topology.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: xpack-8.11.1 - configSecret: - name: es-custom-config + configuration: + secretName: es-custom-config topology: master: suffix: master diff --git a/docs/examples/elasticsearch/custom-config/es-custom.yaml b/docs/examples/elasticsearch/custom-config/es-custom.yaml index 6c779f897f..25d4d4ad61 100644 --- a/docs/examples/elasticsearch/custom-config/es-custom.yaml +++ b/docs/examples/elasticsearch/custom-config/es-custom.yaml @@ -6,8 +6,8 @@ metadata: spec: version: xpack-8.11.1 replicas: 2 - configSecret: - name: es-custom-config + configuration: + secretName: es-custom-config storage: storageClassName: "standard" accessModes: diff --git a/docs/examples/elasticsearch/es-overview.yaml b/docs/examples/elasticsearch/es-overview.yaml index ce20a3c478..91c38fbb4c 100644 --- a/docs/examples/elasticsearch/es-overview.yaml +++ b/docs/examples/elasticsearch/es-overview.yaml @@ -29,8 +29,8 @@ spec: labels: app: kubedb interval: 10s - configSecret: - name: es-custom-config + configuration: + secretName: es-custom-config podTemplate: metadata: annotations: diff --git a/docs/examples/elasticsearch/kibana/es-kibana-demo.yaml b/docs/examples/elasticsearch/kibana/es-kibana-demo.yaml index c045204503..5eacb0c688 100644 --- a/docs/examples/elasticsearch/kibana/es-kibana-demo.yaml +++ b/docs/examples/elasticsearch/kibana/es-kibana-demo.yaml @@ -9,8 +9,8 @@ spec: authSecret: kind: Secret name: es-auth - configSecret: - name: es-custom-config + configuration: + secretName: es-custom-config storage: storageClassName: "standard" accessModes: diff --git a/docs/examples/elasticsearch/x-pack/es-mon-demo.yaml b/docs/examples/elasticsearch/x-pack/es-mon-demo.yaml index 38670ee6b3..c9d21039ba 100644 --- a/docs/examples/elasticsearch/x-pack/es-mon-demo.yaml +++ b/docs/examples/elasticsearch/x-pack/es-mon-demo.yaml @@ -9,8 +9,8 @@ spec: authSecret: kind: Secret name: es-auth - configSecret: - name: es-custom-config + configuration: + secretName: es-custom-config storage: storageClassName: "standard" accessModes: diff --git a/docs/examples/hazelcast/configuration/hazelcast-config.yaml b/docs/examples/hazelcast/configuration/hazelcast-config.yaml index 337135f2ff..6f8a79573c 100644 --- a/docs/examples/hazelcast/configuration/hazelcast-config.yaml +++ b/docs/examples/hazelcast/configuration/hazelcast-config.yaml @@ -8,8 +8,8 @@ spec: version: 5.5.2 licenseSecret: name: hz-license-key - configSecret: - name: hz + configuration: + secretName: hz storage: accessModes: - ReadWriteOnce diff --git a/docs/examples/ignite/custom-config/custom-ignite.yaml b/docs/examples/ignite/custom-config/custom-ignite.yaml index eb19f18c0a..14eaca91d4 100644 --- a/docs/examples/ignite/custom-config/custom-ignite.yaml +++ b/docs/examples/ignite/custom-config/custom-ignite.yaml @@ -6,8 +6,8 @@ metadata: spec: replicas: 3 version: 2.17.0 - configSecret: - name: ignite-configuration + configuration: + secretName: 
ignite-configuration storage: storageClassName: "standard" accessModes: diff --git a/docs/examples/kafka/reconfigure/kafka-reconfigure-update-combined.yaml b/docs/examples/kafka/reconfigure/kafka-reconfigure-update-combined.yaml index 9382a2b025..1ccfebc47a 100644 --- a/docs/examples/kafka/reconfigure/kafka-reconfigure-update-combined.yaml +++ b/docs/examples/kafka/reconfigure/kafka-reconfigure-update-combined.yaml @@ -8,7 +8,6 @@ spec: databaseRef: name: kafka-dev configuration: - configSecret: - name: new-kf-combined-custom-config + secretName: new-kf-combined-custom-config timeout: 5m apply: IfReady \ No newline at end of file diff --git a/docs/examples/kafka/reconfigure/kafka-reconfigure-update-topology.yaml b/docs/examples/kafka/reconfigure/kafka-reconfigure-update-topology.yaml index f4b9f5cc0d..daf2ff05ea 100644 --- a/docs/examples/kafka/reconfigure/kafka-reconfigure-update-topology.yaml +++ b/docs/examples/kafka/reconfigure/kafka-reconfigure-update-topology.yaml @@ -8,7 +8,6 @@ spec: databaseRef: name: kafka-prod configuration: - configSecret: - name: new-kf-topology-custom-config + secretName: new-kf-topology-custom-config timeout: 5m apply: IfReady \ No newline at end of file diff --git a/docs/examples/memcached/custom-config/custom-memcached.yaml b/docs/examples/memcached/custom-config/custom-memcached.yaml index 17c561c7ae..d57e3699ab 100644 --- a/docs/examples/memcached/custom-config/custom-memcached.yaml +++ b/docs/examples/memcached/custom-config/custom-memcached.yaml @@ -6,8 +6,8 @@ metadata: spec: replicas: 1 version: "1.6.22" - configSecret: - name: mc-configuration + configuration: + secretName: mc-configuration podTemplate: spec: containers: diff --git a/docs/examples/memcached/reconfigure/config-secret-reconfigure.yaml b/docs/examples/memcached/reconfigure/config-secret-reconfigure.yaml index cb72ce761c..fc0605e7ae 100644 --- a/docs/examples/memcached/reconfigure/config-secret-reconfigure.yaml +++ b/docs/examples/memcached/reconfigure/config-secret-reconfigure.yaml @@ -8,5 +8,4 @@ spec: databaseRef: name: memcd-quickstart configuration: - configSecret: - name: new-configuration + secretName: new-configuration diff --git a/docs/examples/memcached/reconfigure/memcached-config.yaml b/docs/examples/memcached/reconfigure/memcached-config.yaml index df88d5b121..31a8b8a0a1 100644 --- a/docs/examples/memcached/reconfigure/memcached-config.yaml +++ b/docs/examples/memcached/reconfigure/memcached-config.yaml @@ -6,6 +6,6 @@ metadata: spec: replicas: 1 version: "1.6.22" - configSecret: - name: mc-configuration + configuration: + secretName: mc-configuration deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/mongodb/configuration/demo-1.yaml b/docs/examples/mongodb/configuration/demo-1.yaml index c19959b922..f349369d81 100644 --- a/docs/examples/mongodb/configuration/demo-1.yaml +++ b/docs/examples/mongodb/configuration/demo-1.yaml @@ -13,5 +13,5 @@ spec: resources: requests: storage: 1Gi - configSecret: - name: mg-configuration + configuration: + secretName: mg-configuration diff --git a/docs/examples/mongodb/reconfigure/mg-replicaset-config.yaml b/docs/examples/mongodb/reconfigure/mg-replicaset-config.yaml index 6ab51dd5f8..1a40ae0dfb 100644 --- a/docs/examples/mongodb/reconfigure/mg-replicaset-config.yaml +++ b/docs/examples/mongodb/reconfigure/mg-replicaset-config.yaml @@ -16,5 +16,5 @@ spec: resources: requests: storage: 1Gi - configSecret: - name: mg-custom-config + configuration: + secretName: mg-custom-config diff --git 
a/docs/examples/mongodb/reconfigure/mg-shard-config.yaml b/docs/examples/mongodb/reconfigure/mg-shard-config.yaml index 34786ca117..54c705a4b1 100644 --- a/docs/examples/mongodb/reconfigure/mg-shard-config.yaml +++ b/docs/examples/mongodb/reconfigure/mg-shard-config.yaml @@ -8,8 +8,7 @@ spec: shardTopology: configServer: replicas: 3 - configSecret: - name: mg-custom-config + secretName: mg-custom-config storage: resources: requests: @@ -17,13 +16,11 @@ spec: storageClassName: standard mongos: replicas: 2 - configSecret: - name: mg-custom-config + secretName: mg-custom-config shard: replicas: 3 shards: 2 - configSecret: - name: mg-custom-config + secretName: mg-custom-config storage: resources: requests: diff --git a/docs/examples/mongodb/reconfigure/mg-standalone-config.yaml b/docs/examples/mongodb/reconfigure/mg-standalone-config.yaml index 89058b30dd..625c23b965 100644 --- a/docs/examples/mongodb/reconfigure/mg-standalone-config.yaml +++ b/docs/examples/mongodb/reconfigure/mg-standalone-config.yaml @@ -13,5 +13,5 @@ spec: resources: requests: storage: 1Gi - configSecret: - name: mg-custom-config + configuration: + secretName: mg-custom-config diff --git a/docs/examples/mongodb/reconfigure/mops-reconfigure-replicaset.yaml b/docs/examples/mongodb/reconfigure/mops-reconfigure-replicaset.yaml index 0392d2feb5..1b5f540cd7 100644 --- a/docs/examples/mongodb/reconfigure/mops-reconfigure-replicaset.yaml +++ b/docs/examples/mongodb/reconfigure/mops-reconfigure-replicaset.yaml @@ -9,5 +9,4 @@ spec: name: mg-replicaset configuration: replicaSet: - configSecret: - name: new-custom-config + secretName: new-custom-config diff --git a/docs/examples/mongodb/reconfigure/mops-reconfigure-shard.yaml b/docs/examples/mongodb/reconfigure/mops-reconfigure-shard.yaml index 285d686820..4f887543e0 100644 --- a/docs/examples/mongodb/reconfigure/mops-reconfigure-shard.yaml +++ b/docs/examples/mongodb/reconfigure/mops-reconfigure-shard.yaml @@ -9,11 +9,8 @@ spec: name: mg-sharding configuration: shard: - configSecret: - name: new-custom-config + secretName: new-custom-config configServer: - configSecret: - name: new-custom-config + secretName: new-custom-config mongos: - configSecret: - name: new-custom-config + secretName: new-custom-config diff --git a/docs/examples/mongodb/reconfigure/mops-reconfigure-standalone.yaml b/docs/examples/mongodb/reconfigure/mops-reconfigure-standalone.yaml index 3e0435f07c..b815976859 100644 --- a/docs/examples/mongodb/reconfigure/mops-reconfigure-standalone.yaml +++ b/docs/examples/mongodb/reconfigure/mops-reconfigure-standalone.yaml @@ -9,5 +9,4 @@ spec: name: mg-standalone configuration: standalone: - configSecret: - name: new-custom-config + secretName: new-custom-config diff --git a/docs/examples/mssqlserver/configuration/mssql-custom-config.yaml b/docs/examples/mssqlserver/configuration/mssql-custom-config.yaml index 2cf006c072..d6e31c48c8 100644 --- a/docs/examples/mssqlserver/configuration/mssql-custom-config.yaml +++ b/docs/examples/mssqlserver/configuration/mssql-custom-config.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: "2022-cu12" - configSecret: - name: ms-custom-config + configuration: + secretName: ms-custom-config replicas: 1 tls: issuerRef: diff --git a/docs/examples/mssqlserver/reconfigure/ms-standalone.yaml b/docs/examples/mssqlserver/reconfigure/ms-standalone.yaml index a6760b97d0..6beb636b65 100644 --- a/docs/examples/mssqlserver/reconfigure/ms-standalone.yaml +++ b/docs/examples/mssqlserver/reconfigure/ms-standalone.yaml @@ -5,8 +5,8 @@ metadata: 
namespace: demo spec: version: "2022-cu12" - configSecret: - name: ms-custom-config + configuration: + secretName: ms-custom-config replicas: 1 tls: issuerRef: diff --git a/docs/examples/mssqlserver/reconfigure/msops-reconfigure-ag.yaml b/docs/examples/mssqlserver/reconfigure/msops-reconfigure-ag.yaml index 86d2b8283d..acc94731fd 100644 --- a/docs/examples/mssqlserver/reconfigure/msops-reconfigure-ag.yaml +++ b/docs/examples/mssqlserver/reconfigure/msops-reconfigure-ag.yaml @@ -8,7 +8,6 @@ spec: databaseRef: name: mssqlserver-ag-cluster configuration: - configSecret: - name: new-custom-config + secretName: new-custom-config timeout: 5m apply: IfReady \ No newline at end of file diff --git a/docs/examples/mssqlserver/reconfigure/msops-reconfigure-standalone.yaml b/docs/examples/mssqlserver/reconfigure/msops-reconfigure-standalone.yaml index 5c0f52531a..562e78e941 100644 --- a/docs/examples/mssqlserver/reconfigure/msops-reconfigure-standalone.yaml +++ b/docs/examples/mssqlserver/reconfigure/msops-reconfigure-standalone.yaml @@ -8,7 +8,6 @@ spec: databaseRef: name: ms-standalone configuration: - configSecret: - name: new-custom-config + secretName: new-custom-config timeout: 5m apply: IfReady \ No newline at end of file diff --git a/docs/examples/mssqlserver/reconfigure/mssqlserver-ag-cluster.yaml b/docs/examples/mssqlserver/reconfigure/mssqlserver-ag-cluster.yaml index 8407bb6fcf..264b27f9d1 100644 --- a/docs/examples/mssqlserver/reconfigure/mssqlserver-ag-cluster.yaml +++ b/docs/examples/mssqlserver/reconfigure/mssqlserver-ag-cluster.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: "2022-cu12" - configSecret: - name: ms-custom-config + configuration: + secretName: ms-custom-config replicas: 3 topology: mode: AvailabilityGroup diff --git a/docs/examples/mysql/configuration/mysql-custom.yaml b/docs/examples/mysql/configuration/mysql-custom.yaml index 1625662199..4995361a5f 100644 --- a/docs/examples/mysql/configuration/mysql-custom.yaml +++ b/docs/examples/mysql/configuration/mysql-custom.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: "9.1.0" - configSecret: - name: my-custom-config + configuration: + secretName: my-custom-config storage: storageClassName: "standard" accessModes: diff --git a/docs/examples/pgbouncer/reconfigure/pbops-reconfigure.yaml b/docs/examples/pgbouncer/reconfigure/pbops-reconfigure.yaml index 705f385a0f..a267c805ed 100644 --- a/docs/examples/pgbouncer/reconfigure/pbops-reconfigure.yaml +++ b/docs/examples/pgbouncer/reconfigure/pbops-reconfigure.yaml @@ -9,7 +9,6 @@ spec: name: pb-custom configuration: pgbouncer: - configSecret: - name: new-custom-config + secretName: new-custom-config timeout: 5m apply: IfReady \ No newline at end of file diff --git a/docs/examples/pgpool/configuration/pgpool-config-file.yaml b/docs/examples/pgpool/configuration/pgpool-config-file.yaml index 946b09b890..284af8cde1 100644 --- a/docs/examples/pgpool/configuration/pgpool-config-file.yaml +++ b/docs/examples/pgpool/configuration/pgpool-config-file.yaml @@ -6,8 +6,8 @@ metadata: spec: version: "4.4.5" replicas: 1 - configSecret: - name: pp-configuration + configuration: + secretName: pp-configuration postgresRef: name: ha-postgres namespace: demo diff --git a/docs/examples/pgpool/quickstart/quick-postgres.yaml b/docs/examples/pgpool/quickstart/quick-postgres.yaml index 221e3485c6..bcf6ba82d2 100644 --- a/docs/examples/pgpool/quickstart/quick-postgres.yaml +++ b/docs/examples/pgpool/quickstart/quick-postgres.yaml @@ -6,8 +6,8 @@ metadata: spec: replicas: 3 
   version: "13.13"
-  configSecret:
-    name: pg-configuration
+  configuration:
+    secretName: pg-configuration
   storageType: Durable
   storage:
     storageClassName: "standard"
diff --git a/docs/examples/pgpool/reconfiguration/pp-custom-config.yaml b/docs/examples/pgpool/reconfiguration/pp-custom-config.yaml
index 683eed426c..72a0edc4c2 100644
--- a/docs/examples/pgpool/reconfiguration/pp-custom-config.yaml
+++ b/docs/examples/pgpool/reconfiguration/pp-custom-config.yaml
@@ -6,8 +6,8 @@ metadata:
 spec:
   version: "4.5.0"
   replicas: 1
-  configSecret:
-    name: pp-custom-config
+  configuration:
+    secretName: pp-custom-config
   postgresRef:
     name: ha-postgres
     namespace: demo
diff --git a/docs/examples/pgpool/reconfiguration/ppops-reconfigure.yaml b/docs/examples/pgpool/reconfiguration/ppops-reconfigure.yaml
index 521ab406f8..49f1e14162 100644
--- a/docs/examples/pgpool/reconfiguration/ppops-reconfigure.yaml
+++ b/docs/examples/pgpool/reconfiguration/ppops-reconfigure.yaml
@@ -8,7 +8,6 @@ spec:
   databaseRef:
     name: pp-custom
   configuration:
-    configSecret:
-      name: new-custom-config
+    secretName: new-custom-config
   timeout: 5m
   apply: IfReady
\ No newline at end of file
diff --git a/docs/examples/postgres/custom-config/pg-custom-config.yaml b/docs/examples/postgres/custom-config/pg-custom-config.yaml
index 5c32cfb80f..636a68c53f 100644
--- a/docs/examples/postgres/custom-config/pg-custom-config.yaml
+++ b/docs/examples/postgres/custom-config/pg-custom-config.yaml
@@ -5,8 +5,8 @@ metadata:
   namespace: demo
 spec:
   version: "13.13"
-  configSecret:
-    name: pg-custom-config
+  configuration:
+    secretName: pg-custom-config
   storage:
     storageClassName: "standard"
     accessModes:
diff --git a/docs/examples/postgres/pg-overview.yaml b/docs/examples/postgres/pg-overview.yaml
index 2acedf4b97..ccc7d9238a 100644
--- a/docs/examples/postgres/pg-overview.yaml
+++ b/docs/examples/postgres/pg-overview.yaml
@@ -29,9 +29,9 @@ spec:
       serviceMonitor:
         labels:
           app: kubedb
        interval: 10s
-  configSecret:
-    name: pg-custom-config
+  configuration:
+    secretName: pg-custom-config
   podTemplate:
     metadata:
       annotations:
diff --git a/docs/examples/postgres/reconfigure/ha-postgres.yaml b/docs/examples/postgres/reconfigure/ha-postgres.yaml
index 3fc78acc80..d2793ab35e 100644
--- a/docs/examples/postgres/reconfigure/ha-postgres.yaml
+++ b/docs/examples/postgres/reconfigure/ha-postgres.yaml
@@ -5,9 +5,9 @@ metadata:
   namespace: demo
 spec:
   version: "16.1"
   replicas: 3
-  configSecret:
-    name: pg-configuration
+  configuration:
+    secretName: pg-configuration
   storageType: Durable
   storage:
     storageClassName: "standard"
diff --git a/docs/examples/postgres/reconfigure/reconfigure-using-secret.yaml b/docs/examples/postgres/reconfigure/reconfigure-using-secret.yaml
index 3567434bb8..93ad9d6296 100644
--- a/docs/examples/postgres/reconfigure/reconfigure-using-secret.yaml
+++ b/docs/examples/postgres/reconfigure/reconfigure-using-secret.yaml
@@ -7,6 +7,5 @@ spec:
   type: Reconfigure
   databaseRef:
     name: ha-postgres
-  configuration:
-    configSecret:
-      name: new-pg-configuration
+  configuration:
+    secretName: new-pg-configuration
diff --git a/docs/examples/proxysql/custom-proxysql.yaml b/docs/examples/proxysql/custom-proxysql.yaml
index 8baa877e83..d881865b36 100644
--- a/docs/examples/proxysql/custom-proxysql.yaml
+++ b/docs/examples/proxysql/custom-proxysql.yaml
@@ -8,5 +8,5 @@ spec:
   replicas: 1
   backend:
     name: my-group
-  configSecret:
-    name: my-custom-config
+  configuration:
+    secretName: my-custom-config
diff --git 
a/docs/examples/rabbitmq/cluster/rabbit-custom-config.yaml b/docs/examples/rabbitmq/cluster/rabbit-custom-config.yaml index be2241531d..ee459ad216 100644 --- a/docs/examples/rabbitmq/cluster/rabbit-custom-config.yaml +++ b/docs/examples/rabbitmq/cluster/rabbit-custom-config.yaml @@ -13,5 +13,5 @@ spec: resources: requests: storage: 1Gi - configSecret: - name: rabbit-custom-config \ No newline at end of file + configuration: + secretName: rabbit-custom-config diff --git a/docs/examples/rabbitmq/opsrequests/rabbit-reconfigure-with-secret.yaml b/docs/examples/rabbitmq/opsrequests/rabbit-reconfigure-with-secret.yaml index ee47631df1..7d32007ec9 100644 --- a/docs/examples/rabbitmq/opsrequests/rabbit-reconfigure-with-secret.yaml +++ b/docs/examples/rabbitmq/opsrequests/rabbit-reconfigure-with-secret.yaml @@ -8,7 +8,6 @@ spec: databaseRef: name: rm-cluster configuration: - configSecret: - name: new-custom-config + secretName: new-custom-config timeout: 5m apply: IfReady \ No newline at end of file diff --git a/docs/examples/redis/custom-config/redis-custom.yaml b/docs/examples/redis/custom-config/redis-custom.yaml index 9fa206dcbb..1625b56fd8 100644 --- a/docs/examples/redis/custom-config/redis-custom.yaml +++ b/docs/examples/redis/custom-config/redis-custom.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: 6.2.14 - configSecret: - name: rd-configuration + configuration: + secretName: rd-configuration storage: storageClassName: "standard" accessModes: diff --git a/docs/examples/redis/custom-config/valkey-custom.yaml b/docs/examples/redis/custom-config/valkey-custom.yaml index 3cd261191d..224dcbff5f 100644 --- a/docs/examples/redis/custom-config/valkey-custom.yaml +++ b/docs/examples/redis/custom-config/valkey-custom.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: valkey-8.1.1 - configSecret: - name: rd-configuration + configuration: + secretName: rd-configuration storage: storageClassName: "standard" accessModes: diff --git a/docs/examples/redis/reconfigure/rdops-reconfigure.yaml b/docs/examples/redis/reconfigure/rdops-reconfigure.yaml index 6dbc557238..c5aa29bc38 100644 --- a/docs/examples/redis/reconfigure/rdops-reconfigure.yaml +++ b/docs/examples/redis/reconfigure/rdops-reconfigure.yaml @@ -8,5 +8,4 @@ spec: databaseRef: name: sample-redis configuration: - configSecret: - name: new-custom-config + secretName: new-custom-config diff --git a/docs/examples/redis/reconfigure/sample-redis-config.yaml b/docs/examples/redis/reconfigure/sample-redis-config.yaml index bb12ddfa3a..d63e7f84e3 100644 --- a/docs/examples/redis/reconfigure/sample-redis-config.yaml +++ b/docs/examples/redis/reconfigure/sample-redis-config.yaml @@ -13,5 +13,5 @@ spec: resources: requests: storage: 1Gi - configSecret: - name: rd-custom-config + configuration: + secretName: rd-custom-config diff --git a/docs/examples/solr/configuration/solr-combined.yaml b/docs/examples/solr/configuration/solr-combined.yaml index f73db241f6..03c46f37fc 100644 --- a/docs/examples/solr/configuration/solr-combined.yaml +++ b/docs/examples/solr/configuration/solr-combined.yaml @@ -4,8 +4,8 @@ metadata: name: solr namespace: demo spec: - configSecret: - name: sl-combined-custom-config + configuration: + secretName: sl-combined-custom-config version: 9.6.1 replicas: 2 zookeeperRef: diff --git a/docs/examples/solr/reconfigure/sl-reconfigure-custom-config.yaml b/docs/examples/solr/reconfigure/sl-reconfigure-custom-config.yaml index 4118884c98..01227c3921 100644 --- 
a/docs/examples/solr/reconfigure/sl-reconfigure-custom-config.yaml +++ b/docs/examples/solr/reconfigure/sl-reconfigure-custom-config.yaml @@ -6,8 +6,7 @@ metadata: spec: apply: IfReady configuration: - configSecret: - name: sl-combined-custom-config + secretName: sl-combined-custom-config databaseRef: name: solr type: Reconfigure \ No newline at end of file diff --git a/docs/examples/solr/reconfigure/solr-combined.yaml b/docs/examples/solr/reconfigure/solr-combined.yaml index f73db241f6..03c46f37fc 100644 --- a/docs/examples/solr/reconfigure/solr-combined.yaml +++ b/docs/examples/solr/reconfigure/solr-combined.yaml @@ -4,8 +4,8 @@ metadata: name: solr namespace: demo spec: - configSecret: - name: sl-combined-custom-config + configuration: + secretName: sl-combined-custom-config version: 9.6.1 replicas: 2 zookeeperRef: diff --git a/docs/examples/zookeeper/reconfiguration/sample-zk-configuration.yaml b/docs/examples/zookeeper/reconfiguration/sample-zk-configuration.yaml index 0f929f28ff..723ebc4d41 100644 --- a/docs/examples/zookeeper/reconfiguration/sample-zk-configuration.yaml +++ b/docs/examples/zookeeper/reconfiguration/sample-zk-configuration.yaml @@ -7,8 +7,8 @@ spec: version: "3.8.3" adminServerPort: 8080 replicas: 3 - configSecret: - name: zk-configuration + configuration: + secretName: zk-configuration storage: resources: requests: diff --git a/docs/examples/zookeeper/reconfiguration/zkops-reconfiguration.yaml b/docs/examples/zookeeper/reconfiguration/zkops-reconfiguration.yaml index 844046ce08..daa3151e7b 100644 --- a/docs/examples/zookeeper/reconfiguration/zkops-reconfiguration.yaml +++ b/docs/examples/zookeeper/reconfiguration/zkops-reconfiguration.yaml @@ -8,5 +8,4 @@ spec: databaseRef: name: zk-quickstart configuration: - configSecret: - name: zk-new-configuration \ No newline at end of file + secretName: zk-new-configuration diff --git a/docs/guides/druid/backup/auto-backup/examples/sample-druid.yaml b/docs/guides/druid/backup/auto-backup/examples/sample-druid.yaml index 90090e9907..3cb45ef516 100644 --- a/docs/guides/druid/backup/auto-backup/examples/sample-druid.yaml +++ b/docs/guides/druid/backup/auto-backup/examples/sample-druid.yaml @@ -10,8 +10,8 @@ spec: version: 30.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: configsecret-combined topology: routers: replicas: 1 diff --git a/docs/guides/druid/clustering/guide/index.md b/docs/guides/druid/clustering/guide/index.md index de26f199df..ae80807d36 100644 --- a/docs/guides/druid/clustering/guide/index.md +++ b/docs/guides/druid/clustering/guide/index.md @@ -98,8 +98,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 @@ -148,7 +148,7 @@ Metadata: .: f:deepStorage: .: - f:configSecret: + f:configuration: f:type: f:deletionPolicy: f:healthChecker: @@ -520,7 +520,7 @@ Metadata: .: f:deepStorage: .: - f:configSecret: + f:configuration: f:type: f:deletionPolicy: f:healthChecker: diff --git a/docs/guides/druid/clustering/guide/yamls/druid-cluster.yaml b/docs/guides/druid/clustering/guide/yamls/druid-cluster.yaml index 7a89d0dc91..1b37cfe269 100644 --- a/docs/guides/druid/clustering/guide/yamls/druid-cluster.yaml +++ b/docs/guides/druid/clustering/guide/yamls/druid-cluster.yaml @@ -7,8 +7,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 
1 diff --git a/docs/guides/druid/concepts/druid.md b/docs/guides/druid/concepts/druid.md index d8728b5e5a..9ededa0456 100644 --- a/docs/guides/druid/concepts/druid.md +++ b/docs/guides/druid/concepts/druid.md @@ -31,8 +31,8 @@ metadata: spec: deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config metadataStorage: type: PostgreSQL name: pg-demo diff --git a/docs/guides/druid/concepts/druidopsrequest.md b/docs/guides/druid/concepts/druidopsrequest.md index 6a846814f2..455b06c124 100644 --- a/docs/guides/druid/concepts/druidopsrequest.md +++ b/docs/guides/druid/concepts/druidopsrequest.md @@ -159,8 +159,7 @@ spec: databaseRef: name: druid-prod configuration: - configSecret: - name: new-configsecret + secretName: new-configsecret status: conditions: - lastTransitionTime: "2024-07-25T18:22:38Z" @@ -420,54 +419,4 @@ Use IfReady, if you want to process the opsRequest only when the database is Rea | Denied | KubeDB has denied the operation requested in the DruidOpsRequest | | Skipped | KubeDB has skipped the operation requested in the DruidOpsRequest | -Important: Ops-manager Operator can skip an opsRequest, only if its execution has not been started yet & there is a newer opsRequest applied in the cluster. `spec.type` has to be same as the skipped one, in this case. - -### status.observedGeneration - -`status.observedGeneration` shows the most recent generation observed by the `DruidOpsRequest` controller. - -### status.conditions - -`status.conditions` is an array that specifies the conditions of different steps of `DruidOpsRequest` processing. Each condition entry has the following fields: - -- `types` specifies the type of the condition. DruidOpsRequest has the following types of conditions: - -| Type | Meaning | -|-------------------------------|---------------------------------------------------------------------------| -| `Progressing` | Specifies that the operation is now in the progressing state | -| `Successful` | Specifies such a state that the operation on the database was successful. | -| `HaltDatabase` | Specifies such a state that the database is halted by the operator | -| `ResumeDatabase` | Specifies such a state that the database is resumed by the operator | -| `Failed` | Specifies such a state that the operation on the database failed. 
| -| `StartingBalancer` | Specifies such a state that the balancer has successfully started | -| `StoppingBalancer` | Specifies such a state that the balancer has successfully stopped | -| `UpdateShardImage` | Specifies such a state that the Shard Images has been updated | -| `UpdateReplicaSetImage` | Specifies such a state that the Replicaset Image has been updated | -| `UpdateConfigServerImage` | Specifies such a state that the ConfigServer Image has been updated | -| `UpdateMongosImage` | Specifies such a state that the Mongos Image has been updated | -| `UpdatePetSetResources` | Specifies such a state that the Petset resources has been updated | -| `UpdateShardResources` | Specifies such a state that the Shard resources has been updated | -| `UpdateReplicaSetResources` | Specifies such a state that the Replicaset resources has been updated | -| `UpdateConfigServerResources` | Specifies such a state that the ConfigServer resources has been updated | -| `UpdateMongosResources` | Specifies such a state that the Mongos resources has been updated | -| `ScaleDownReplicaSet` | Specifies such a state that the scale down operation of replicaset | -| `ScaleUpReplicaSet` | Specifies such a state that the scale up operation of replicaset | -| `ScaleUpShardReplicas` | Specifies such a state that the scale up operation of shard replicas | -| `ScaleDownShardReplicas` | Specifies such a state that the scale down operation of shard replicas | -| `ScaleDownConfigServer` | Specifies such a state that the scale down operation of config server | -| `ScaleUpConfigServer` | Specifies such a state that the scale up operation of config server | -| `ScaleMongos` | Specifies such a state that the scale down operation of replicaset | -| `VolumeExpansion` | Specifies such a state that the volume expansion operaton of the database | -| `ReconfigureReplicaset` | Specifies such a state that the reconfiguration of replicaset nodes | -| `ReconfigureMongos` | Specifies such a state that the reconfiguration of mongos nodes | -| `ReconfigureShard` | Specifies such a state that the reconfiguration of shard nodes | -| `ReconfigureConfigServer` | Specifies such a state that the reconfiguration of config server nodes | - -- The `status` field is a string, with possible values `True`, `False`, and `Unknown`. - - `status` will be `True` if the current transition succeeded. - - `status` will be `False` if the current transition failed. - - `status` will be `Unknown` if the current transition was denied. -- The `message` field is a human-readable message indicating details about the condition. -- The `reason` field is a unique, one-word, CamelCase reason for the condition's last transition. -- The `lastTransitionTime` field provides a timestamp for when the operation last transitioned from one state to another. -- The `observedGeneration` shows the most recent condition transition generation observed by the controller. 
+Important: Ops-manager Operator can skip an opsRequest, only if its execution has not been started yet & there is a newer opsRequest applied in the cluster. `spec.type` has to be same as the skipped one, in this case. diff --git a/docs/guides/druid/configuration/config-file/index.md b/docs/guides/druid/configuration/config-file/index.md index c046161185..e65e6a3875 100644 --- a/docs/guides/druid/configuration/config-file/index.md +++ b/docs/guides/druid/configuration/config-file/index.md @@ -155,12 +155,12 @@ metadata: namespace: demo spec: version: 28.0.1 - configSecret: - name: config-secret + configuration: + secretName: config-secret deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 @@ -281,4 +281,3 @@ $ kubectl delete namespace demo [//]: # (- Monitor your Druid database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/druid/monitoring/using-prometheus-operator.md).) - Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). - diff --git a/docs/guides/druid/configuration/podtemplating/index.md b/docs/guides/druid/configuration/podtemplating/index.md index 6b4a64c0de..8315664fef 100644 --- a/docs/guides/druid/configuration/podtemplating/index.md +++ b/docs/guides/druid/configuration/podtemplating/index.md @@ -121,12 +121,12 @@ metadata: namespace: demo spec: version: 28.0.1 - configSecret: - name: config-secret + configuration: + secretName: config-secret deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: coordinators: replicas: 1 @@ -272,8 +272,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 @@ -367,8 +367,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 @@ -518,8 +518,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: podTemplate: diff --git a/docs/guides/druid/failover/guide.md b/docs/guides/druid/failover/guide.md index 030e1d676b..5207f7a6f7 100644 --- a/docs/guides/druid/failover/guide.md +++ b/docs/guides/druid/failover/guide.md @@ -155,8 +155,8 @@ spec: deletionPolicy: Delete deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: coordinators: replicas: 2 @@ -543,4 +543,3 @@ $ kubectl delete ns demo - Monitor your Druid cluster with [Prometheus integration](/docs/guides/druid/monitoring/using-builtin-prometheus.md). - Explore Druid [configuration options](/docs/guides/druid/configuration/_index.md). - Contribute to KubeDB: [contribution guidelines](/docs/CONTRIBUTING.md).
- diff --git a/docs/guides/druid/monitoring/overview.md b/docs/guides/druid/monitoring/overview.md index 752599f2e8..24513f0fa3 100644 --- a/docs/guides/druid/monitoring/overview.md +++ b/docs/guides/druid/monitoring/overview.md @@ -56,8 +56,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/monitoring/using-prometheus-operator.md b/docs/guides/druid/monitoring/using-prometheus-operator.md index 1b7ef85979..746de835d2 100644 --- a/docs/guides/druid/monitoring/using-prometheus-operator.md +++ b/docs/guides/druid/monitoring/using-prometheus-operator.md @@ -163,8 +163,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/monitoring/yamls/druid-monitoring-builtin.yaml b/docs/guides/druid/monitoring/yamls/druid-monitoring-builtin.yaml index 4962c3c536..8cbb26858d 100644 --- a/docs/guides/druid/monitoring/yamls/druid-monitoring-builtin.yaml +++ b/docs/guides/druid/monitoring/yamls/druid-monitoring-builtin.yaml @@ -7,8 +7,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/monitoring/yamls/druid-with-monitoring.yaml b/docs/guides/druid/monitoring/yamls/druid-with-monitoring.yaml index aa91054f8f..796afe8b44 100644 --- a/docs/guides/druid/monitoring/yamls/druid-with-monitoring.yaml +++ b/docs/guides/druid/monitoring/yamls/druid-with-monitoring.yaml @@ -7,8 +7,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 @@ -20,4 +20,3 @@ spec: release: prometheus interval: 10s deletionPolicy: WipeOut - diff --git a/docs/guides/druid/quickstart/guide/index.md b/docs/guides/druid/quickstart/guide/index.md index 48fb3a8d7a..83c5a1720a 100644 --- a/docs/guides/druid/quickstart/guide/index.md +++ b/docs/guides/druid/quickstart/guide/index.md @@ -168,8 +168,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 @@ -650,7 +650,7 @@ Now, the Druid cluster is accessible at `localhost:8888`. Let's check the [Servi $ curl "http://localhost:8888/status/health" true ``` -From the retrieved health information above, we can see that our Druid cluster’s status is `true`, indicating that the service can receive API calls and is healthy. In the same way it possible to check the health of other druid nodes by port-forwarding the appropriate services. +From the retrieved health information above, we can see that our Druid cluster’s status is `true`, indicating that the service can receive API calls and is healthy. In the same way it is possible to check the health of other druid nodes by port-forwarding the appropriate services. 
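As a rough sketch of that pattern (the service name and port below are assumptions for illustration, not values taken from this guide), probing another node type such as the coordinators could look like:

```bash
# Hypothetical example: replace the service name, namespace, and port
# with the ones your Druid cluster actually exposes.
kubectl port-forward -n demo svc/druid-cluster-coordinators 8081 &

# Same health endpoint pattern as above; "true" indicates a healthy node.
curl "http://localhost:8081/status/health"
```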
### Access the web console diff --git a/docs/guides/druid/reconfigure-tls/guide.md b/docs/guides/druid/reconfigure-tls/guide.md index 5cd0080298..e61b8aae02 100644 --- a/docs/guides/druid/reconfigure-tls/guide.md +++ b/docs/guides/druid/reconfigure-tls/guide.md @@ -51,8 +51,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 @@ -492,11 +492,10 @@ Events: Normal UpdatePetSets 85s KubeDB Ops-manager Operator successfully reconciled the Druid with tls configuration Warning get pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 79s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 Warning evict pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 79s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 - Warning check pod running; ConditionStatus:False; PodName:druid-cluster-historicals-0 74s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:druid-cluster-historicals-0 - Warning check pod running; ConditionStatus:True; PodName:druid-cluster-historicals-0 69s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-cluster-historicals-0 + Warning check pod running; ConditionStatus:True; PodName:druid-cluster-historicals-0 74s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-cluster-historicals-0 Warning get pod; ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 64s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 Warning evict pod; ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 64s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 - Warning check pod running; ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 59s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 + Warning check pod running: ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 59s KubeDB Ops-manager Operator check pod running: ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 Warning get pod; ConditionStatus:True; PodName:druid-cluster-brokers-0 54s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-cluster-brokers-0 Warning evict pod; ConditionStatus:True; PodName:druid-cluster-brokers-0 54s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-cluster-brokers-0 Warning check pod running; ConditionStatus:True; PodName:druid-cluster-brokers-0 49s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-cluster-brokers-0 @@ -877,10 +876,10 @@ Events: Normal CertificateSynced 93s KubeDB Ops-manager Operator Successfully synced all certificates Warning get certificate; ConditionStatus:True 88s KubeDB Ops-manager Operator get certificate; ConditionStatus:True Warning check ready condition; ConditionStatus:True 88s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True - Warning issuing condition; ConditionStatus:True 88s KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Warning issuing condition: ConditionStatus:True 88s KubeDB Ops-manager Operator issuing condition: ConditionStatus:True Warning get certificate; ConditionStatus:True 88s KubeDB Ops-manager Operator get certificate; ConditionStatus:True 
Warning check ready condition; ConditionStatus:True 88s KubeDB Ops-manager Operator check ready condition; ConditionStatus:True - Warning issuing condition; ConditionStatus:True 88s KubeDB Ops-manager Operator issuing condition; ConditionStatus:True + Warning issuing condition; ConditionStatus:True 88s KubeDB Ops-manager Operator issuing condition: ConditionStatus:True Normal CertificateSynced 87s KubeDB Ops-manager Operator Successfully synced all certificates Normal UpdatePetSets 82s KubeDB Ops-manager Operator successfully reconciled the Druid with tls configuration Warning get pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 77s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 @@ -1169,7 +1168,7 @@ Status: Status: True Type: CheckPodRunning--druid-cluster-routers-0 Last Transition Time: 2024-10-28T14:25:25Z - Message: get pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + Message: get pod: ConditionStatus:True; PodName:druid-cluster-coordinators-0 Observed Generation: 1 Status: True Type: GetPod--druid-cluster-coordinators-0 @@ -1232,7 +1231,7 @@ Events: Normal Successful 19s KubeDB Ops-manager Operator Successfully resumed Druid database: demo/druid-cluster for DruidOpsRequest: drops-update-issuer ``` -Now, Lets exec into a druid node and find out the ca subject to see if it matches the one we have provided. +Now, let's exec into a druid node and find out the ca subject to see if it matches the one we have provided. ```bash $ kubectl exec -it druid-cluster-broker-0 -- bash @@ -1474,10 +1473,10 @@ Events: Warning get pod; ConditionStatus:True; PodName:druid-cluster-brokers-0 98s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-cluster-brokers-0 Warning evict pod; ConditionStatus:True; PodName:druid-cluster-brokers-0 98s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-cluster-brokers-0 Warning check pod running; ConditionStatus:True; PodName:druid-cluster-brokers-0 93s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-cluster-brokers-0 - Warning get pod; ConditionStatus:True; PodName:druid-cluster-routers-0 88s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-cluster-routers-0 + Warning get pod: ConditionStatus:True; PodName:druid-cluster-routers-0 88s KubeDB Ops-manager Operator get pod: ConditionStatus:True; PodName:druid-cluster-routers-0 Warning evict pod; ConditionStatus:True; PodName:druid-cluster-routers-0 88s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-cluster-routers-0 Warning check pod running; ConditionStatus:True; PodName:druid-cluster-routers-0 83s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-cluster-routers-0 - Warning get pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 78s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + Warning get pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 78s KubeDB Ops-manager Operator get pod: ConditionStatus:True; PodName:druid-cluster-coordinators-0 Warning evict pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 78s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 Warning check pod running; ConditionStatus:True; PodName:druid-cluster-coordinators-0 73s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; 
PodName:druid-cluster-coordinators-0 Warning get pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 68s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 @@ -1486,13 +1485,13 @@ Events: Warning get pod; ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 58s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 Warning evict pod; ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 58s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 Warning check pod running; ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 53s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 - Warning get pod; ConditionStatus:True; PodName:druid-cluster-brokers-0 48s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-cluster-brokers-0 + Warning get pod; ConditionStatus:True; PodName:druid-cluster-brokers-0 48s KubeDB Ops-manager Operator get pod: ConditionStatus:True; PodName:druid-cluster-brokers-0 Warning evict pod; ConditionStatus:True; PodName:druid-cluster-brokers-0 48s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-cluster-brokers-0 Warning check pod running; ConditionStatus:True; PodName:druid-cluster-brokers-0 43s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-cluster-brokers-0 - Warning get pod; ConditionStatus:True; PodName:druid-cluster-routers-0 38s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-cluster-routers-0 + Warning get pod: ConditionStatus:True; PodName:druid-cluster-routers-0 38s KubeDB Ops-manager Operator get pod: ConditionStatus:True; PodName:druid-cluster-routers-0 Warning evict pod; ConditionStatus:True; PodName:druid-cluster-routers-0 38s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-cluster-routers-0 Warning check pod running; ConditionStatus:True; PodName:druid-cluster-routers-0 33s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-cluster-routers-0 - Warning get pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 28s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + Warning get pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 28s KubeDB Ops-manager Operator get pod: ConditionStatus:True; PodName:druid-cluster-coordinators-0 Warning evict pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 28s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 Warning check pod running; ConditionStatus:True; PodName:druid-cluster-coordinators-0 23s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-cluster-coordinators-0 Normal RestartNodes 18s KubeDB Ops-manager Operator Successfully restarted all nodes @@ -1500,7 +1499,7 @@ Events: Normal Successful 18s KubeDB Ops-manager Operator Successfully resumed Druid database: demo/druid-cluster for DruidOpsRequest: drops-remove ``` -Now, Let's exec into one of the broker node and find out that TLS is disabled or not. +Now, Lets exec into one of the broker node and find out that TLS is disabled or not. 
```bash $$ kubectl exec -it -n demo druid-cluster-broker-0 -- druid-configs.sh --bootstrap-server localhost:9092 --command-config /opt/druid/config/clientauth.properties --describe --entity-type brokers --all | grep 'ssl.keystore' diff --git a/docs/guides/druid/reconfigure-tls/yamls/druid-cluster.yaml b/docs/guides/druid/reconfigure-tls/yamls/druid-cluster.yaml index 6351c2ddda..1b37cfe269 100644 --- a/docs/guides/druid/reconfigure-tls/yamls/druid-cluster.yaml +++ b/docs/guides/druid/reconfigure-tls/yamls/druid-cluster.yaml @@ -7,10 +7,9 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 deletionPolicy: Delete - diff --git a/docs/guides/druid/reconfigure/guide.md b/docs/guides/druid/reconfigure/guide.md index cf85960a9c..88f5847a76 100644 --- a/docs/guides/druid/reconfigure/guide.md +++ b/docs/guides/druid/reconfigure/guide.md @@ -104,8 +104,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/reconfigure/yamls/druid-cluster.yaml b/docs/guides/druid/reconfigure/yamls/druid-cluster.yaml index f7a695b062..1b37cfe269 100644 --- a/docs/guides/druid/reconfigure/yamls/druid-cluster.yaml +++ b/docs/guides/druid/reconfigure/yamls/druid-cluster.yaml @@ -7,9 +7,9 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 - deletionPolicy: WipeOut + deletionPolicy: Delete diff --git a/docs/guides/druid/reconfigure/yamls/reconfigure-druid-ops.yaml b/docs/guides/druid/reconfigure/yamls/reconfigure-druid-ops.yaml index cc5f789a54..477bcfd12b 100644 --- a/docs/guides/druid/reconfigure/yamls/reconfigure-druid-ops.yaml +++ b/docs/guides/druid/reconfigure/yamls/reconfigure-druid-ops.yaml @@ -8,5 +8,4 @@ spec: databaseRef: name: druid-cluster configuration: - configSecret: - name: new-config \ No newline at end of file + secretName: new-config diff --git a/docs/guides/druid/rotate-auth/guide.md b/docs/guides/druid/rotate-auth/guide.md index 9955a5bbd0..5a4889ab48 100644 --- a/docs/guides/druid/rotate-auth/guide.md +++ b/docs/guides/druid/rotate-auth/guide.md @@ -100,8 +100,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/tls/guide.md b/docs/guides/druid/tls/guide.md index ead99ecbd5..f288518abf 100644 --- a/docs/guides/druid/tls/guide.md +++ b/docs/guides/druid/tls/guide.md @@ -157,8 +157,8 @@ spec: name: druid-ca-issuer deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/tls/yamls/druid-cluster-tls.yaml b/docs/guides/druid/tls/yamls/druid-cluster-tls.yaml index 902b5b36d4..ceacfde3ce 100644 --- a/docs/guides/druid/tls/yamls/druid-cluster-tls.yaml +++ b/docs/guides/druid/tls/yamls/druid-cluster-tls.yaml @@ -13,8 +13,8 @@ spec: name: druid-ca-issuer deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/update-version/guide.md b/docs/guides/druid/update-version/guide.md index 2ff20d7fee..338f203641 100644 --- 
a/docs/guides/druid/update-version/guide.md +++ b/docs/guides/druid/update-version/guide.md @@ -102,8 +102,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/update-version/yamls/druid-cluster.yaml b/docs/guides/druid/update-version/yamls/druid-cluster.yaml index 7a89d0dc91..1b37cfe269 100644 --- a/docs/guides/druid/update-version/yamls/druid-cluster.yaml +++ b/docs/guides/druid/update-version/yamls/druid-cluster.yaml @@ -7,8 +7,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/volume-expansion/guide.md b/docs/guides/druid/volume-expansion/guide.md index d9a110aa18..4b4f41509a 100644 --- a/docs/guides/druid/volume-expansion/guide.md +++ b/docs/guides/druid/volume-expansion/guide.md @@ -121,8 +121,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: historicals: replicas: 1 @@ -437,62 +437,4 @@ Events: Warning is pvc patched; ConditionStatus:True 8m59s KubeDB Ops-manager Operator is pvc patched; ConditionStatus:True Warning compare storage; ConditionStatus:False 8m59s KubeDB Ops-manager Operator compare storage; ConditionStatus:False Warning get pod; ConditionStatus:True 8m54s KubeDB Ops-manager Operator get pod; ConditionStatus:True - Warning get pvc; ConditionStatus:True 8m54s KubeDB Ops-manager Operator get pvc; ConditionStatus:True - Warning get pod; ConditionStatus:True 8m49s KubeDB Ops-manager Operator get pod; ConditionStatus:True - Warning get pvc; ConditionStatus:True 8m49s KubeDB Ops-manager Operator get pvc; ConditionStatus:True - Warning get pod; ConditionStatus:True 8m44s KubeDB Ops-manager Operator get pod; ConditionStatus:True - Warning get pvc; ConditionStatus:True 8m44s KubeDB Ops-manager Operator get pvc; ConditionStatus:True - Warning get pod; ConditionStatus:True 8m39s KubeDB Ops-manager Operator get pod; ConditionStatus:True - Warning get pvc; ConditionStatus:True 8m39s KubeDB Ops-manager Operator get pvc; ConditionStatus:True - Warning compare storage; ConditionStatus:True 8m39s KubeDB Ops-manager Operator compare storage; ConditionStatus:True - Warning create; ConditionStatus:True 8m39s KubeDB Ops-manager Operator create; ConditionStatus:True - Warning is ops req patched; ConditionStatus:True 8m39s KubeDB Ops-manager Operator is ops req patched; ConditionStatus:True - Warning get pod; ConditionStatus:True 8m34s KubeDB Ops-manager Operator get pod; ConditionStatus:True - Warning is druid running; ConditionStatus:False 8m31s KubeDB Ops-manager Operator is druid running; ConditionStatus:False - Warning get pod; ConditionStatus:True 8m29s KubeDB Ops-manager Operator get pod; ConditionStatus:True - Warning get pod; ConditionStatus:True 8m24s KubeDB Ops-manager Operator get pod; ConditionStatus:True - Warning get pod; ConditionStatus:True 8m19s KubeDB Ops-manager Operator get pod; ConditionStatus:True - Normal UpdateHistoricalsNodePVCs 8m14s KubeDB Ops-manager Operator successfully updated historicals node PVC sizes - Normal UpdatePetSets 7m59s KubeDB Ops-manager Operator successfully reconciled the Druid resources - Warning get pet set; ConditionStatus:True 7m54s KubeDB Ops-manager Operator get pet set; ConditionStatus:True - Warning get pet set; ConditionStatus:True 
7m54s KubeDB Ops-manager Operator get pet set; ConditionStatus:True - Normal ReadyPetSets 7m54s KubeDB Ops-manager Operator PetSet is recreated - Normal Starting 7m54s KubeDB Ops-manager Operator Resuming Druid database: demo/druid-cluster - Normal Successful 7m54s KubeDB Ops-manager Operator Successfully resumed Druid database: demo/druid-cluster for DruidOpsRequest: dr-volume-exp -``` - -Now, we are going to verify from the `Petset`, and the `Persistent Volumes` whether the volume of the database has expanded to meet the desired state, Let's check, - -```bash -$ kubectl get petset -n demo druid-cluster-historicals -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' -"3Gi" - -$ kubectl get petset -n demo druid-cluster-middleManagers -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' -"2Gi" - -$ kubectl get pv -n demo -NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE -pvc-0bf49077-1c7a-4943-bb17-1dffd1626dcd 2Gi RWO Delete Bound demo/druid-cluster-segment-cache-druid-cluster-historicals-0 longhorn 23m -pvc-59ed4914-53b3-4f18-a6aa-7699c2b738e2 2Gi RWO Delete Bound demo/druid-cluster-base-task-dir-druid-cluster-middlemanagers-0 longhorn 23m -``` - -The above output verifies that we have successfully expanded the volume of the Druid. - -## Cleaning Up - -To clean up the Kubernetes resources created by this tutorial, run: - -```bash -kubectl delete druidopsrequest -n demo dr-volume-exp -kubectl delete dr -n demo druid-cluster -kubectl delete ns demo -``` - -## Next Steps - -- Detail concepts of [Druid object](/docs/guides/druid/concepts/druid.md). -- Different Druid topology clustering modes [here](/docs/guides/druid/clustering/_index.md). -- Monitor your Druid database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/druid/monitoring/using-prometheus-operator.md). -- -[//]: # (- Monitor your Druid database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/druid/monitoring/using-builtin-prometheus.md).) -- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). 
+ Warning get pvc: diff --git a/docs/guides/druid/volume-expansion/yamls/druid-cluster.yaml b/docs/guides/druid/volume-expansion/yamls/druid-cluster.yaml index cb8e321237..59a09ce093 100644 --- a/docs/guides/druid/volume-expansion/yamls/druid-cluster.yaml +++ b/docs/guides/druid/volume-expansion/yamls/druid-cluster.yaml @@ -7,8 +7,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: historicals: replicas: 1 @@ -31,4 +31,3 @@ spec: routers: replicas: 1 deletionPolicy: Delete - diff --git a/docs/guides/elasticsearch/concepts/elasticsearch/index.md b/docs/guides/elasticsearch/concepts/elasticsearch/index.md index 4837294544..240f8f52dc 100644 --- a/docs/guides/elasticsearch/concepts/elasticsearch/index.md +++ b/docs/guides/elasticsearch/concepts/elasticsearch/index.md @@ -35,8 +35,8 @@ spec: kind: Secret name: es-admin-cred externallyManaged: false - configSecret: - name: es-custom-config + configuration: + secretName: es-custom-config enableSSL: true internalUsers: metrics_exporter: {} diff --git a/docs/guides/mongodb/configuration/using-config-file.md b/docs/guides/mongodb/configuration/using-config-file.md index 553474d845..487f9a8810 100644 --- a/docs/guides/mongodb/configuration/using-config-file.md +++ b/docs/guides/mongodb/configuration/using-config-file.md @@ -103,8 +103,8 @@ spec: resources: requests: storage: 1Gi - configSecret: - name: mg-configuration + configuration: + secretName: mg-configuration ``` ```bash diff --git a/docs/guides/mssqlserver/concepts/mssqlserver.md b/docs/guides/mssqlserver/concepts/mssqlserver.md index e97eb0d3f1..9efdfcde89 100644 --- a/docs/guides/mssqlserver/concepts/mssqlserver.md +++ b/docs/guides/mssqlserver/concepts/mssqlserver.md @@ -31,11 +31,11 @@ metadata: name: mssqlserver namespace: demo spec: + configuration: + secretName: mssql-custom-config authSecret: kind: Secret - name: mssqlserver-auth - configSecret: - name: mssqlserver-custom-config + name: mssql-admin-cred topology: availabilityGroup: databases: diff --git a/docs/guides/mssqlserver/configuration/using-config-file.md b/docs/guides/mssqlserver/configuration/using-config-file.md index e4d0158891..aae6e75a39 100644 --- a/docs/guides/mssqlserver/configuration/using-config-file.md +++ b/docs/guides/mssqlserver/configuration/using-config-file.md @@ -144,8 +144,8 @@ metadata: namespace: demo spec: version: "2022-cu12" - configSecret: - name: ms-custom-config + configuration: + secretName: ms-custom-config replicas: 1 tls: issuerRef: diff --git a/docs/guides/mssqlserver/reconfigure/ag_cluster.md b/docs/guides/mssqlserver/reconfigure/ag_cluster.md index 1ce0a35ffa..7ce8d9289d 100644 --- a/docs/guides/mssqlserver/reconfigure/ag_cluster.md +++ b/docs/guides/mssqlserver/reconfigure/ag_cluster.md @@ -107,8 +107,8 @@ metadata: namespace: demo spec: version: "2022-cu12" - configSecret: - name: ms-custom-config + configuration: + secretName: ms-custom-config replicas: 3 topology: mode: AvailabilityGroup @@ -223,8 +223,9 @@ spec: databaseRef: name: mssqlserver-ag-cluster configuration: - configSecret: - name: new-custom-config +- configSecret: +- name: new-custom-config ++ secretName: new-custom-config timeout: 5m apply: IfReady ``` @@ -233,7 +234,7 @@ Here, - `spec.databaseRef.name` specifies that we are reconfiguring `mssqlserver-ag-cluster` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. 
-- `spec.customConfig.replicaSet.configSecret.name` specifies the name of the new secret. +- `spec.configuration.secretName` specifies the name of the new secret. - Have a look [here](/docs/guides/mssqlserver/concepts/opsrequest.md#spectimeout) on the respective sections to understand the `timeout` & `apply` fields. Let's create the `MSSQLServerOpsRequest` CR we have shown above, diff --git a/docs/guides/mssqlserver/reconfigure/standalone.md b/docs/guides/mssqlserver/reconfigure/standalone.md index 3a9790a135..9fe1745e4b 100644 --- a/docs/guides/mssqlserver/reconfigure/standalone.md +++ b/docs/guides/mssqlserver/reconfigure/standalone.md @@ -218,8 +218,7 @@ spec: databaseRef: name: ms-standalone configuration: - configSecret: - name: new-custom-config + secretName: new-custom-config timeout: 5m apply: IfReady ``` @@ -228,7 +227,7 @@ Here, - `spec.databaseRef.name` specifies that we are reconfiguring `ms-standalone` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configuration.configSecret.name` specifies the name of the new secret. +- `spec.configuration.secretName` specifies the name of the new secret. - Have a look [here](/docs/guides/mssqlserver/concepts/opsrequest.md#spectimeout) on the respective sections to understand the `timeout` & `apply` fields. Let's create the `MSSQLServerOpsRequest` CR we have shown above, @@ -270,8 +269,7 @@ Metadata: Spec: Apply: IfReady Configuration: - Config Secret: - Name: new-custom-config + Secret Name: new-custom-config Database Ref: Name: ms-standalone Timeout: 5m diff --git a/docs/guides/mysql/reconfigure/reconfigure-steps/yamls/group-replication.yaml b/docs/guides/mysql/reconfigure/reconfigure-steps/yamls/group-replication.yaml index 0eea7af7e4..fcb7e74b11 100644 --- a/docs/guides/mysql/reconfigure/reconfigure-steps/yamls/group-replication.yaml +++ b/docs/guides/mysql/reconfigure/reconfigure-steps/yamls/group-replication.yaml @@ -8,8 +8,8 @@ spec: topology: mode: GroupReplication replicas: 3 - configSecret: - name: my-configuration + configuration: + secretName: my-configuration storageType: Durable storage: storageClassName: "standard" diff --git a/docs/guides/mysql/reconfigure/reconfigure-steps/yamls/inndob-cluster.yaml b/docs/guides/mysql/reconfigure/reconfigure-steps/yamls/inndob-cluster.yaml index d2b420ba92..bf7ed58ee6 100644 --- a/docs/guides/mysql/reconfigure/reconfigure-steps/yamls/inndob-cluster.yaml +++ b/docs/guides/mysql/reconfigure/reconfigure-steps/yamls/inndob-cluster.yaml @@ -11,8 +11,8 @@ spec: router: replicas: 1 replicas: 3 - configSecret: - name: my-configuration + configuration: + secretName: my-configuration storageType: Durable storage: storageClassName: "standard" diff --git a/docs/guides/mysql/reconfigure/reconfigure-steps/yamls/semi-sync.yaml b/docs/guides/mysql/reconfigure/reconfigure-steps/yamls/semi-sync.yaml index 2547a7e436..af603022dd 100644 --- a/docs/guides/mysql/reconfigure/reconfigure-steps/yamls/semi-sync.yaml +++ b/docs/guides/mysql/reconfigure/reconfigure-steps/yamls/semi-sync.yaml @@ -12,8 +12,8 @@ spec: sourceTimeout: 23h errantTransactionRecoveryPolicy: PseudoTransaction replicas: 3 - configSecret: - name: my-configuration + configuration: + secretName: my-configuration storageType: Durable storage: storageClassName: "standard" diff --git a/docs/guides/mysql/reconfigure/reconfigure-steps/yamls/stand-alone.yaml b/docs/guides/mysql/reconfigure/reconfigure-steps/yamls/stand-alone.yaml index c0c457e9bb..85b340c0fc 100644 --- 
a/docs/guides/mysql/reconfigure/reconfigure-steps/yamls/stand-alone.yaml +++ b/docs/guides/mysql/reconfigure/reconfigure-steps/yamls/stand-alone.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: "9.1.0" - configSecret: - name: my-configuration + configuration: + secretName: my-configuration storageType: Durable storage: storageClassName: "standard" diff --git a/docs/guides/pgbouncer/concepts/opsrequest.md b/docs/guides/pgbouncer/concepts/opsrequest.md index 50430e0b38..83bc5836b6 100644 --- a/docs/guides/pgbouncer/concepts/opsrequest.md +++ b/docs/guides/pgbouncer/concepts/opsrequest.md @@ -125,8 +125,8 @@ spec: name: pgbouncer-server configuration: pgbouncer: - configSecret: - name: new-custom-config + configuration: + secretName: new-custom-config ``` Here, we are going to describe the various sections of a `PgBouncerOpsRequest` crd. diff --git a/docs/guides/pgbouncer/reconfigure/reconfigure-pgbouncer.md b/docs/guides/pgbouncer/reconfigure/reconfigure-pgbouncer.md index 4e0e3c4f5d..9caa82aec8 100644 --- a/docs/guides/pgbouncer/reconfigure/reconfigure-pgbouncer.md +++ b/docs/guides/pgbouncer/reconfigure/reconfigure-pgbouncer.md @@ -173,8 +173,8 @@ spec: name: pb-custom configuration: pgbouncer: - configSecret: - name: new-custom-config + configuration: + secretName: new-custom-config timeout: 5m apply: IfReady ``` @@ -183,7 +183,7 @@ Here, - `spec.databaseRef.name` specifies that we are reconfiguring `pb-csutom` pgbouncer. - `spec.type` specifies that we are performing `Reconfigure` on our pgbouncer. -- `spec.configuration.pgbouncer.configSecret.name` specifies the name of the new secret. +- `spec.configuration.pgbouncer.configuration.secretName` specifies the name of the new secret. - Have a look [here](/docs/guides/pgbouncer/concepts/opsrequest.md#spectimeout) on the respective sections to understand the `timeout` & `apply` fields. Let's create the `PgBouncerOpsRequest` CR we have shown above, @@ -544,7 +544,7 @@ As we can see from the configuration of running pgbouncer, the value of `auth_ty ### Remove config -This will remove all the custom config previously provided. After this Ops-manager operator will merge the new given config with the default config and apply this. +This will remove all the custom config previously provided. After this Ops-manager will merge the new given config with the default config and apply this. - `spec.databaseRef.name` specifies that we are reconfiguring `pb-custom` pgbouncer. - `spec.type` specifies that we are performing `Reconfigure` on our pgbouncer. 
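For orientation only, a remove-config request of this kind might be shaped roughly as follows; the `removeCustomConfig` field name is an assumption here and should be verified against the PgBouncerOpsRequest API reference before use:

```yaml
# Hypothetical sketch of a remove-config PgBouncerOpsRequest;
# verify field names against the API reference before applying.
apiVersion: ops.kubedb.com/v1alpha1
kind: PgBouncerOpsRequest
metadata:
  name: pbops-remove-config
  namespace: demo
spec:
  type: Reconfigure
  databaseRef:
    name: pb-custom
  configuration:
    pgbouncer:
      removeCustomConfig: true
```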
diff --git a/docs/guides/postgres/configuration/using-config-file.md b/docs/guides/postgres/configuration/using-config-file.md index f4c5189dce..d1752d460d 100644 --- a/docs/guides/postgres/configuration/using-config-file.md +++ b/docs/guides/postgres/configuration/using-config-file.md @@ -94,8 +94,8 @@ metadata: namespace: demo spec: version: "13.13" - configSecret: - name: pg-configuration + configuration: + secretName: pg-configuration storage: storageClassName: "standard" accessModes: diff --git a/docs/guides/postgres/gitops/gitops.md b/docs/guides/postgres/gitops/gitops.md index dcf60c48ee..d8f28e5f64 100644 --- a/docs/guides/postgres/gitops/gitops.md +++ b/docs/guides/postgres/gitops/gitops.md @@ -355,8 +355,8 @@ metadata: name: ha-postgres namespace: demo spec: - configSecret: - name: pg-configuration + configuration: + secretName: pg-configuration replicas: 5 version: "16.6" storageType: Durable @@ -463,8 +463,8 @@ spec: authSecret: kind: Secret name: pg-rotate-auth - configSecret: - name: pg-configuration + configuration: + secretName: pg-configuration replicas: 5 version: "16.6" storageType: Durable @@ -581,8 +581,8 @@ spec: authSecret: kind: Secret name: pg-rotate-auth - configSecret: - name: pg-configuration + configuration: + secretName: pg-configuration replicas: 5 version: "16.6" storageType: Durable @@ -672,8 +672,8 @@ spec: authSecret: kind: Secret name: pg-rotate-auth - configSecret: - name: pg-configuration + configuration: + secretName: pg-configuration replicas: 5 version: "17.4" storageType: Durable @@ -762,8 +762,8 @@ spec: authSecret: kind: Secret name: pg-rotate-auth - configSecret: - name: pg-configuration + configuration: + secretName: pg-configuration replicas: 5 version: "17.4" storageType: Durable diff --git a/docs/guides/postgres/reconfigure/cluster.md b/docs/guides/postgres/reconfigure/cluster.md index ff4e3a63c8..7c91bcee88 100644 --- a/docs/guides/postgres/reconfigure/cluster.md +++ b/docs/guides/postgres/reconfigure/cluster.md @@ -69,8 +69,8 @@ metadata: spec: version: "16.1" replicas: 3 - configSecret: - name: pg-configuration + configuration: + secretName: pg-configuration storageType: Durable storage: storageClassName: "standard" @@ -158,8 +158,7 @@ spec: databaseRef: name: ha-postgres configuration: - configSecret: - name: new-pg-configuration + secretName: new-pg-configuration ``` Here, diff --git a/docs/guides/solr/configuration/config-file.md b/docs/guides/solr/configuration/config-file.md index 98f90de09a..ce97dc0cdc 100644 --- a/docs/guides/solr/configuration/config-file.md +++ b/docs/guides/solr/configuration/config-file.md @@ -88,8 +88,8 @@ metadata: name: solr namespace: demo spec: - configSecret: - name: sl-custom-config + configuration: + secretName: sl-custom-config version: 9.6.1 replicas: 2 zookeeperRef: diff --git a/docs/guides/solr/reconfigure/solr.md b/docs/guides/solr/reconfigure/solr.md index 12513a66ae..1a491e03a5 100644 --- a/docs/guides/solr/reconfigure/solr.md +++ b/docs/guides/solr/reconfigure/solr.md @@ -90,8 +90,10 @@ metadata: name: solr namespace: demo spec: - configSecret: - name: sl-custom-config +- configSecret: +- name: sl-custom-config ++ configuration: ++ secretName: sl-custom-config version: 9.6.1 replicas: 2 zookeeperRef: @@ -215,8 +217,9 @@ metadata: spec: apply: IfReady configuration: - configSecret: - name: new-sl-custom-config +- configSecret: +- name: new-sl-custom-config ++ secretName: new-sl-custom-config databaseRef: name: solr type: Reconfigure @@ -359,7 +362,7 @@ solr@solr-0:/opt/solr-9.6.1$ cat 
/var/solr/solr.xml /var/solr/data ${solr.sharedLib:},/opt/solr/contrib/gcs-repository/lib,/opt/solr/contrib/prometheus-exporter/lib,/opt/solr/contrib/s3-repository/lib,/opt/solr/dist ${solr.allowPaths:} - ${solr.max.booleanClauses:2030} + ${solr.max.booleanClauses:2024} ${connTimeout:60000} ${socketTimeout:600000} From f6708724748d751a47695caa64863ac6783cb6cf Mon Sep 17 00:00:00 2001 From: Bonusree Date: Tue, 20 Jan 2026 17:15:55 +0600 Subject: [PATCH 03/27] config Signed-off-by: Bonusree --- docs/guides/cassandra/concepts/cassandra.md | 8 +- .../cassandra/concepts/cassandra.md.bak | 387 ++++++++ .../cassandra/concepts/cassandraopsrequest.md | 4 +- .../concepts/cassandraopsrequest.md.bak | 428 ++++++++ .../configuration/using-config-file.md | 10 +- .../configuration/using-config-file.md.bak | 158 +++ .../reconfigure/cassandra-topology.md | 12 +- .../reconfigure/cassandra-topology.md.bak | 534 ++++++++++ docs/guides/cassandra/restart/restart.md | 4 +- docs/guides/cassandra/restart/restart.md.bak | 224 +++++ docs/guides/clickhouse/concepts/clickhouse.md | 8 +- .../clickhouse/concepts/clickhouse.md.bak | 374 +++++++ .../concepts/clickhouseopsrequest.md | 4 +- .../concepts/clickhouseopsrequest.md.bak | 445 +++++++++ .../configuration/using-config-file.md | 10 +- .../configuration/using-config-file.md.bak | 155 +++ .../clickhouse/reconfigure/reconfigure.md | 12 +- .../clickhouse/reconfigure/reconfigure.md.bak | 614 ++++++++++++ docs/guides/druid/autoscaler/compute/guide.md | 4 +- .../druid/autoscaler/compute/guide.md.bak | 864 +++++++++++++++++ .../compute/yamls/druid-cluster.yaml | 4 +- .../compute/yamls/druid-cluster.yaml.bak | 31 + docs/guides/druid/autoscaler/storage/guide.md | 4 +- .../druid/autoscaler/storage/guide.md.bak | 896 +++++++++++++++++ .../storage/yamls/druid-cluster.yaml | 4 +- .../storage/yamls/druid-cluster.yaml.bak | 40 + .../examples/sample-druid.yaml | 4 +- .../examples/sample-druid.yaml.bak | 15 + .../druid/backup/application-level/index.md | 4 +- .../backup/application-level/index.md.bak | 791 +++++++++++++++ .../auto-backup/examples/sample-druid-2.yaml | 4 +- .../examples/sample-druid-2.yaml.bak | 23 + docs/guides/druid/backup/auto-backup/index.md | 8 +- .../druid/backup/auto-backup/index.md.bak | 817 ++++++++++++++++ .../examples/sample-druid.yaml | 4 +- .../examples/sample-druid.yaml.bak | 21 + .../backup/cross-ns-dependencies/index.md | 4 +- .../backup/cross-ns-dependencies/index.md.bak | 891 +++++++++++++++++ .../examples/common/sample-druid.yaml | 4 +- .../examples/common/sample-druid.yaml.bak | 15 + .../logical/examples/restored-druid.yaml | 4 +- .../logical/examples/restored-druid.yaml.bak | 17 + .../backup/logical/examples/sample-druid.yaml | 4 +- .../logical/examples/sample-druid.yaml.bak | 15 + docs/guides/druid/backup/logical/index.md | 8 +- docs/guides/druid/backup/logical/index.md.bak | 750 ++++++++++++++ docs/guides/druid/concepts/druid.md | 8 +- docs/guides/druid/concepts/druid.md.bak | 535 ++++++++++ .../config-file/yamls/druid-with-config.yaml | 8 +- .../yamls/druid-with-config.yaml.bak | 17 + .../podtemplating/yamls/druid-cluster.yaml | 8 +- .../yamls/druid-cluster.yaml.bak | 43 + .../yamls/druid-node-selector.yaml | 4 +- .../yamls/druid-node-selector.yaml.bak | 20 + .../yamls/druid-with-tolerations.yaml | 4 +- .../yamls/druid-with-tolerations.yaml.bak | 58 ++ .../yamls/druid-without-tolerations.yaml | 4 +- .../yamls/druid-without-tolerations.yaml.bak | 15 + .../monitoring/using-builtin-prometheus.md | 4 +- .../using-builtin-prometheus.md.bak | 372 
+++++++ docs/guides/druid/reconfigure/guide.md | 6 +- docs/guides/druid/reconfigure/guide.md.bak | 704 ++++++++++++++ docs/guides/druid/restart/guide.md | 4 +- docs/guides/druid/restart/guide.md.bak | 283 ++++++ .../druid/restart/yamls/druid-cluster.yaml | 4 +- .../restart/yamls/druid-cluster.yaml.bak | 16 + .../druid/scaling/horizontal-scaling/guide.md | 4 +- .../scaling/horizontal-scaling/guide.md.bak | 603 ++++++++++++ .../yamls/druid-cluster.yaml | 4 +- .../yamls/druid-cluster.yaml.bak | 16 + .../druid/scaling/vertical-scaling/guide.md | 4 +- .../scaling/vertical-scaling/guide.md.bak | 454 +++++++++ .../vertical-scaling/yamls/druid-cluster.yaml | 4 +- .../yamls/druid-cluster.yaml.bak | 15 + .../concepts/elasticsearch-dashboard/index.md | 8 +- .../elasticsearch-dashboard/index.md.bak | 137 +++ .../concepts/elasticsearch/index.md | 8 +- .../concepts/elasticsearch/index.md.bak | 917 ++++++++++++++++++ .../configuration/combined-cluster/index.md | 4 +- .../combined-cluster/index.md.bak | 514 ++++++++++ .../combined-cluster/yamls/es-combined.yaml | 4 +- .../yamls/es-combined.yaml.bak | 20 + .../configuration/jvm-options/index.md | 4 +- .../configuration/jvm-options/index.md.bak | 139 +++ .../jvm-options/yamls/elasticsearch.yaml | 4 +- .../jvm-options/yamls/elasticsearch.yaml.bak | 43 + .../configuration/overview/index.md | 6 +- .../configuration/overview/index.md.bak | 112 +++ .../configuration/topology-cluster/index.md | 4 +- .../topology-cluster/index.md.bak | 540 +++++++++++ .../topology-cluster/yamls/es-topology.yaml | 4 +- .../yamls/es-topology.yaml.bak | 41 + .../plugins-backup/s3-repository/index.md | 4 +- .../plugins-backup/s3-repository/index.md.bak | 409 ++++++++ .../s3-repository/yamls/elasticsearch.yaml | 4 +- .../yamls/elasticsearch.yaml.bak | 24 + .../plugins/search-guard/x-pack-monitoring.md | 6 +- .../search-guard/x-pack-monitoring.md.bak | 505 ++++++++++ .../concepts/hazelcast-opsrequest.md | 4 +- .../concepts/hazelcast-opsrequest.md.bak | 308 ++++++ docs/guides/hazelcast/concepts/hazelcast.md | 4 +- .../hazelcast/concepts/hazelcast.md.bak | 389 ++++++++ .../configuration/hazelcast-config.md | 4 +- .../configuration/hazelcast-config.md.bak | 179 ++++ docs/guides/ignite/concepts/ignite.md | 8 +- docs/guides/ignite/concepts/ignite.md.bak | 246 +++++ docs/guides/ignite/concepts/opsrequest.md | 4 +- docs/guides/ignite/concepts/opsrequest.md.bak | 198 ++++ .../custom-configuration/using-config-file.md | 8 +- .../using-config-file.md.bak | 176 ++++ docs/guides/ignite/reconfigure/reconfigure.md | 12 +- .../ignite/reconfigure/reconfigure.md.bak | 300 ++++++ docs/guides/kafka/concepts/connectcluster.md | 14 +- .../kafka/concepts/connectcluster.md.bak | 390 ++++++++ docs/guides/kafka/concepts/connector.md | 10 +- docs/guides/kafka/concepts/connector.md.bak | 107 ++ docs/guides/kafka/concepts/kafka.md | 10 +- docs/guides/kafka/concepts/kafka.md.bak | 461 +++++++++ docs/guides/kafka/concepts/kafkaopsrequest.md | 8 +- .../kafka/concepts/kafkaopsrequest.md.bak | 622 ++++++++++++ .../kafka/reconfigure/kafka-combined.md | 6 +- .../kafka/reconfigure/kafka-combined.md.bak | 506 ++++++++++ .../kafka/reconfigure/kafka-topology.md | 6 +- .../kafka/reconfigure/kafka-topology.md.bak | 625 ++++++++++++ docs/guides/mariadb/concepts/mariadb/index.md | 4 +- .../mariadb/concepts/mariadb/index.md.bak | 396 ++++++++ .../using-config-file/examples/md-custom.yaml | 4 +- .../examples/md-custom.yaml.bak | 18 + .../configuration/using-config-file/index.md | 8 +- .../using-config-file/index.md.bak | 183 ++++ 
.../examples/reconfigure-using-secret.yaml | 4 +- .../reconfigure-using-secret.yaml.bak | 12 + .../examples/sample-mariadb-config.yaml | 4 +- .../examples/sample-mariadb-config.yaml.bak | 19 + .../mariadb/reconfigure/cluster/index.md | 12 +- .../mariadb/reconfigure/cluster/index.md.bak | 596 ++++++++++++ .../examples/reconfigure-using-secret.yaml | 4 +- .../reconfigure-using-secret.yaml.bak | 12 + .../examples/sample-mariadb-config.yaml | 4 +- .../examples/sample-mariadb-config.yaml.bak | 19 + .../mariadb/reconfigure/standalone/index.md | 12 +- .../reconfigure/standalone/index.md.bak | 587 +++++++++++ docs/guides/memcached/concepts/memcached.md | 8 +- .../memcached/concepts/memcached.md.bak | 275 ++++++ .../custom-configuration/using-config-file.md | 8 +- .../using-config-file.md.bak | 182 ++++ .../memcached/reconfigure/reconfigure.md | 12 +- .../memcached/reconfigure/reconfigure.md.bak | 472 +++++++++ docs/guides/mongodb/concepts/mongodb.md | 24 +- docs/guides/mongodb/concepts/mongodb.md.bak | 676 +++++++++++++ docs/guides/mongodb/concepts/opsrequest.md | 20 +- .../guides/mongodb/concepts/opsrequest.md.bak | 783 +++++++++++++++ .../configuration/using-config-file.md | 10 +- .../configuration/using-config-file.md.bak | 205 ++++ docs/guides/mongodb/monitoring/overview.md | 4 +- .../guides/mongodb/monitoring/overview.md.bak | 105 ++ docs/guides/mongodb/reconfigure/replicaset.md | 16 +- .../mongodb/reconfigure/replicaset.md.bak | 645 ++++++++++++ docs/guides/mongodb/reconfigure/sharding.md | 36 +- .../mongodb/reconfigure/sharding.md.bak | 571 +++++++++++ docs/guides/mongodb/reconfigure/standalone.md | 12 +- .../mongodb/reconfigure/standalone.md.bak | 590 +++++++++++ .../kmip-encryption/examples/mg.yaml | 4 +- .../kmip-encryption/examples/mg.yaml.bak | 28 + .../kmip-encryption/index.md | 4 +- .../kmip-encryption/index.md.bak | 297 ++++++ .../mssqlserver/concepts/mssqlserver.md | 4 +- .../mssqlserver/concepts/mssqlserver.md.bak | 594 ++++++++++++ .../configuration/using-config-file.md | 4 +- .../configuration/using-config-file.md.bak | 259 +++++ .../mssqlserver/reconfigure/ag_cluster.md | 2 +- .../mssqlserver/reconfigure/ag_cluster.md.bak | 570 +++++++++++ .../mssqlserver/reconfigure/standalone.md | 6 +- .../mssqlserver/reconfigure/standalone.md.bak | 533 ++++++++++ docs/guides/mysql/concepts/database/index.md | 8 +- .../mysql/concepts/database/index.md.bak | 446 +++++++++ .../mysql/configuration/config-file/index.md | 8 +- .../configuration/config-file/index.md.bak | 231 +++++ .../config-file/yamls/mysql-custom.yaml | 4 +- .../config-file/yamls/mysql-custom.yaml.bak | 16 + .../reconfigure/reconfigure-steps/index.md | 24 +- .../reconfigure-steps/index.md.bak | 591 +++++++++++ .../concepts/perconaxtradb/index.md | 4 +- .../concepts/perconaxtradb/index.md.bak | 371 +++++++ .../using-config-file/examples/px-custom.yaml | 4 +- .../examples/px-custom.yaml.bak | 18 + .../configuration/using-config-file/index.md | 8 +- .../using-config-file/index.md.bak | 193 ++++ .../examples/reconfigure-using-secret.yaml | 4 +- .../reconfigure-using-secret.yaml.bak | 12 + .../cluster/examples/sample-pxc-config.yaml | 4 +- .../examples/sample-pxc-config.yaml.bak | 20 + .../reconfigure/cluster/index.md | 12 +- .../reconfigure/cluster/index.md.bak | 636 ++++++++++++ .../reconfigure/reconfigure-pgbouncer.md | 2 +- .../reconfigure/reconfigure-pgbouncer.md.bak | 563 +++++++++++ docs/guides/pgpool/concepts/opsrequest.md | 4 +- docs/guides/pgpool/concepts/opsrequest.md.bak | 328 +++++++ docs/guides/pgpool/concepts/pgpool.md 
| 12 +- docs/guides/pgpool/concepts/pgpool.md.bak | 385 ++++++++ .../pgpool/configuration/using-config-file.md | 10 +- .../configuration/using-config-file.md.bak | 185 ++++ .../pgpool/configuration/using-init-config.md | 2 +- .../configuration/using-init-config.md.bak | 211 ++++ .../pgpool/reconfigure/reconfigure-pgpool.md | 12 +- .../reconfigure/reconfigure-pgpool.md.bak | 759 +++++++++++++++ .../postgres/concepts/postgres-gitops.md | 8 +- .../postgres/concepts/postgres-gitops.md.bak | 492 ++++++++++ docs/guides/postgres/concepts/postgres.md | 8 +- docs/guides/postgres/concepts/postgres.md.bak | 451 +++++++++ .../configuration/using-config-file.md | 4 +- .../configuration/using-config-file.md.bak | 211 ++++ docs/guides/postgres/reconfigure/cluster.md | 4 +- .../postgres/reconfigure/cluster.md.bak | 380 ++++++++ .../proxysql/concepts/proxysql/index.md | 8 +- .../proxysql/concepts/proxysql/index.md.bak | 412 ++++++++ docs/guides/rabbitmq/concepts/opsrequest.md | 4 +- .../rabbitmq/concepts/opsrequest.md.bak | 386 ++++++++ docs/guides/rabbitmq/concepts/rabbitmq.md | 8 +- docs/guides/rabbitmq/concepts/rabbitmq.md.bak | 335 +++++++ .../configuration/using-config-file.md | 10 +- .../configuration/using-config-file.md.bak | 174 ++++ docs/guides/rabbitmq/monitoring/overview.md | 4 +- .../rabbitmq/monitoring/overview.md.bak | 83 ++ .../rabbitmq/reconfigure/reconfigure.md | 12 +- .../rabbitmq/reconfigure/reconfigure.md.bak | 329 +++++++ docs/guides/redis/concepts/redis.md | 8 +- docs/guides/redis/concepts/redis.md.bak | 441 +++++++++ docs/guides/redis/configuration/redis.md | 8 +- docs/guides/redis/configuration/redis.md.bak | 167 ++++ docs/guides/redis/configuration/valkey.md | 8 +- docs/guides/redis/configuration/valkey.md.bak | 161 +++ docs/guides/redis/reconfigure/redis.md | 12 +- docs/guides/redis/reconfigure/redis.md.bak | 396 ++++++++ docs/guides/redis/reconfigure/valkey.md | 12 +- docs/guides/redis/reconfigure/valkey.md.bak | 396 ++++++++ .../guides/singlestore/concepts/opsrequest.md | 12 +- .../singlestore/concepts/opsrequest.md.bak | 475 +++++++++ .../singlestore/concepts/singlestore.md | 8 +- .../singlestore/concepts/singlestore.md.bak | 362 +++++++ .../configuration/config-file/index.md | 10 +- .../configuration/config-file/index.md.bak | 248 +++++ .../config-file/yamls/sdb-custom.yaml | 8 +- .../config-file/yamls/sdb-custom.yaml.bak | 58 ++ .../reconfigure/reconfigure-steps/index.md | 8 +- .../reconfigure-steps/index.md.bak | 477 +++++++++ .../reconfigure-steps/yamls/custom-sdb.yaml | 8 +- .../yamls/custom-sdb.yaml.bak | 57 ++ docs/guides/solr/concepts/solr.md | 4 +- docs/guides/solr/concepts/solr.md.bak | 468 +++++++++ docs/guides/solr/configuration/config-file.md | 2 +- .../solr/configuration/config-file.md.bak | 180 ++++ docs/guides/solr/reconfigure/solr.md | 4 +- docs/guides/solr/reconfigure/solr.md.bak | 617 ++++++++++++ docs/guides/zookeeper/concepts/opsrequest.md | 4 +- .../zookeeper/concepts/opsrequest.md.bak | 388 ++++++++ docs/guides/zookeeper/concepts/zookeeper.md | 8 +- .../zookeeper/concepts/zookeeper.md.bak | 394 ++++++++ .../zookeeper/reconfigure/reconfigure.md | 12 +- .../zookeeper/reconfigure/reconfigure.md.bak | 529 ++++++++++ 260 files changed, 42684 insertions(+), 461 deletions(-) create mode 100644 docs/guides/cassandra/concepts/cassandra.md.bak create mode 100644 docs/guides/cassandra/concepts/cassandraopsrequest.md.bak create mode 100644 docs/guides/cassandra/configuration/using-config-file.md.bak create mode 100644 
docs/guides/cassandra/reconfigure/cassandra-topology.md.bak create mode 100644 docs/guides/cassandra/restart/restart.md.bak create mode 100644 docs/guides/clickhouse/concepts/clickhouse.md.bak create mode 100644 docs/guides/clickhouse/concepts/clickhouseopsrequest.md.bak create mode 100644 docs/guides/clickhouse/configuration/using-config-file.md.bak create mode 100644 docs/guides/clickhouse/reconfigure/reconfigure.md.bak create mode 100644 docs/guides/druid/autoscaler/compute/guide.md.bak create mode 100644 docs/guides/druid/autoscaler/compute/yamls/druid-cluster.yaml.bak create mode 100644 docs/guides/druid/autoscaler/storage/guide.md.bak create mode 100644 docs/guides/druid/autoscaler/storage/yamls/druid-cluster.yaml.bak create mode 100644 docs/guides/druid/backup/application-level/examples/sample-druid.yaml.bak create mode 100644 docs/guides/druid/backup/application-level/index.md.bak create mode 100644 docs/guides/druid/backup/auto-backup/examples/sample-druid-2.yaml.bak create mode 100644 docs/guides/druid/backup/auto-backup/index.md.bak create mode 100644 docs/guides/druid/backup/cross-ns-dependencies/examples/sample-druid.yaml.bak create mode 100644 docs/guides/druid/backup/cross-ns-dependencies/index.md.bak create mode 100644 docs/guides/druid/backup/customization/examples/common/sample-druid.yaml.bak create mode 100644 docs/guides/druid/backup/logical/examples/restored-druid.yaml.bak create mode 100644 docs/guides/druid/backup/logical/examples/sample-druid.yaml.bak create mode 100644 docs/guides/druid/backup/logical/index.md.bak create mode 100644 docs/guides/druid/concepts/druid.md.bak create mode 100644 docs/guides/druid/configuration/config-file/yamls/druid-with-config.yaml.bak create mode 100644 docs/guides/druid/configuration/podtemplating/yamls/druid-cluster.yaml.bak create mode 100644 docs/guides/druid/configuration/podtemplating/yamls/druid-node-selector.yaml.bak create mode 100644 docs/guides/druid/configuration/podtemplating/yamls/druid-with-tolerations.yaml.bak create mode 100644 docs/guides/druid/configuration/podtemplating/yamls/druid-without-tolerations.yaml.bak create mode 100644 docs/guides/druid/monitoring/using-builtin-prometheus.md.bak create mode 100644 docs/guides/druid/reconfigure/guide.md.bak create mode 100644 docs/guides/druid/restart/guide.md.bak create mode 100644 docs/guides/druid/restart/yamls/druid-cluster.yaml.bak create mode 100644 docs/guides/druid/scaling/horizontal-scaling/guide.md.bak create mode 100644 docs/guides/druid/scaling/horizontal-scaling/yamls/druid-cluster.yaml.bak create mode 100644 docs/guides/druid/scaling/vertical-scaling/guide.md.bak create mode 100644 docs/guides/druid/scaling/vertical-scaling/yamls/druid-cluster.yaml.bak create mode 100644 docs/guides/elasticsearch/concepts/elasticsearch-dashboard/index.md.bak create mode 100644 docs/guides/elasticsearch/concepts/elasticsearch/index.md.bak create mode 100644 docs/guides/elasticsearch/configuration/combined-cluster/index.md.bak create mode 100644 docs/guides/elasticsearch/configuration/combined-cluster/yamls/es-combined.yaml.bak create mode 100644 docs/guides/elasticsearch/configuration/jvm-options/index.md.bak create mode 100644 docs/guides/elasticsearch/configuration/jvm-options/yamls/elasticsearch.yaml.bak create mode 100644 docs/guides/elasticsearch/configuration/overview/index.md.bak create mode 100644 docs/guides/elasticsearch/configuration/topology-cluster/index.md.bak create mode 100644 docs/guides/elasticsearch/configuration/topology-cluster/yamls/es-topology.yaml.bak 
create mode 100644 docs/guides/elasticsearch/plugins-backup/s3-repository/index.md.bak create mode 100644 docs/guides/elasticsearch/plugins-backup/s3-repository/yamls/elasticsearch.yaml.bak create mode 100644 docs/guides/elasticsearch/plugins/search-guard/x-pack-monitoring.md.bak create mode 100644 docs/guides/hazelcast/concepts/hazelcast-opsrequest.md.bak create mode 100644 docs/guides/hazelcast/concepts/hazelcast.md.bak create mode 100644 docs/guides/hazelcast/configuration/hazelcast-config.md.bak create mode 100644 docs/guides/ignite/concepts/ignite.md.bak create mode 100644 docs/guides/ignite/concepts/opsrequest.md.bak create mode 100644 docs/guides/ignite/custom-configuration/using-config-file.md.bak create mode 100644 docs/guides/ignite/reconfigure/reconfigure.md.bak create mode 100644 docs/guides/kafka/concepts/connectcluster.md.bak create mode 100644 docs/guides/kafka/concepts/connector.md.bak create mode 100644 docs/guides/kafka/concepts/kafka.md.bak create mode 100644 docs/guides/kafka/concepts/kafkaopsrequest.md.bak create mode 100644 docs/guides/kafka/reconfigure/kafka-combined.md.bak create mode 100644 docs/guides/kafka/reconfigure/kafka-topology.md.bak create mode 100644 docs/guides/mariadb/concepts/mariadb/index.md.bak create mode 100644 docs/guides/mariadb/configuration/using-config-file/examples/md-custom.yaml.bak create mode 100644 docs/guides/mariadb/configuration/using-config-file/index.md.bak create mode 100644 docs/guides/mariadb/reconfigure/cluster/examples/reconfigure-using-secret.yaml.bak create mode 100644 docs/guides/mariadb/reconfigure/cluster/examples/sample-mariadb-config.yaml.bak create mode 100644 docs/guides/mariadb/reconfigure/cluster/index.md.bak create mode 100644 docs/guides/mariadb/reconfigure/standalone/examples/reconfigure-using-secret.yaml.bak create mode 100644 docs/guides/mariadb/reconfigure/standalone/examples/sample-mariadb-config.yaml.bak create mode 100644 docs/guides/mariadb/reconfigure/standalone/index.md.bak create mode 100644 docs/guides/memcached/concepts/memcached.md.bak create mode 100644 docs/guides/memcached/custom-configuration/using-config-file.md.bak create mode 100644 docs/guides/memcached/reconfigure/reconfigure.md.bak create mode 100644 docs/guides/mongodb/concepts/mongodb.md.bak create mode 100644 docs/guides/mongodb/concepts/opsrequest.md.bak create mode 100644 docs/guides/mongodb/configuration/using-config-file.md.bak create mode 100644 docs/guides/mongodb/monitoring/overview.md.bak create mode 100644 docs/guides/mongodb/reconfigure/replicaset.md.bak create mode 100644 docs/guides/mongodb/reconfigure/sharding.md.bak create mode 100644 docs/guides/mongodb/reconfigure/standalone.md.bak create mode 100644 docs/guides/mongodb/vault-integration/kmip-encryption/examples/mg.yaml.bak create mode 100644 docs/guides/mongodb/vault-integration/kmip-encryption/index.md.bak create mode 100644 docs/guides/mssqlserver/concepts/mssqlserver.md.bak create mode 100644 docs/guides/mssqlserver/configuration/using-config-file.md.bak create mode 100644 docs/guides/mssqlserver/reconfigure/ag_cluster.md.bak create mode 100644 docs/guides/mssqlserver/reconfigure/standalone.md.bak create mode 100644 docs/guides/mysql/concepts/database/index.md.bak create mode 100644 docs/guides/mysql/configuration/config-file/index.md.bak create mode 100644 docs/guides/mysql/configuration/config-file/yamls/mysql-custom.yaml.bak create mode 100644 docs/guides/mysql/reconfigure/reconfigure-steps/index.md.bak create mode 100644 
docs/guides/percona-xtradb/concepts/perconaxtradb/index.md.bak create mode 100644 docs/guides/percona-xtradb/configuration/using-config-file/examples/px-custom.yaml.bak create mode 100644 docs/guides/percona-xtradb/configuration/using-config-file/index.md.bak create mode 100644 docs/guides/percona-xtradb/reconfigure/cluster/examples/reconfigure-using-secret.yaml.bak create mode 100644 docs/guides/percona-xtradb/reconfigure/cluster/examples/sample-pxc-config.yaml.bak create mode 100644 docs/guides/percona-xtradb/reconfigure/cluster/index.md.bak create mode 100644 docs/guides/pgbouncer/reconfigure/reconfigure-pgbouncer.md.bak create mode 100644 docs/guides/pgpool/concepts/opsrequest.md.bak create mode 100644 docs/guides/pgpool/concepts/pgpool.md.bak create mode 100644 docs/guides/pgpool/configuration/using-config-file.md.bak create mode 100644 docs/guides/pgpool/configuration/using-init-config.md.bak create mode 100644 docs/guides/pgpool/reconfigure/reconfigure-pgpool.md.bak create mode 100644 docs/guides/postgres/concepts/postgres-gitops.md.bak create mode 100644 docs/guides/postgres/concepts/postgres.md.bak create mode 100644 docs/guides/postgres/configuration/using-config-file.md.bak create mode 100644 docs/guides/postgres/reconfigure/cluster.md.bak create mode 100644 docs/guides/proxysql/concepts/proxysql/index.md.bak create mode 100644 docs/guides/rabbitmq/concepts/opsrequest.md.bak create mode 100644 docs/guides/rabbitmq/concepts/rabbitmq.md.bak create mode 100644 docs/guides/rabbitmq/configuration/using-config-file.md.bak create mode 100644 docs/guides/rabbitmq/monitoring/overview.md.bak create mode 100644 docs/guides/rabbitmq/reconfigure/reconfigure.md.bak create mode 100644 docs/guides/redis/concepts/redis.md.bak create mode 100644 docs/guides/redis/configuration/redis.md.bak create mode 100644 docs/guides/redis/configuration/valkey.md.bak create mode 100644 docs/guides/redis/reconfigure/redis.md.bak create mode 100644 docs/guides/redis/reconfigure/valkey.md.bak create mode 100644 docs/guides/singlestore/concepts/opsrequest.md.bak create mode 100644 docs/guides/singlestore/concepts/singlestore.md.bak create mode 100644 docs/guides/singlestore/configuration/config-file/index.md.bak create mode 100644 docs/guides/singlestore/configuration/config-file/yamls/sdb-custom.yaml.bak create mode 100644 docs/guides/singlestore/reconfigure/reconfigure-steps/index.md.bak create mode 100644 docs/guides/singlestore/reconfigure/reconfigure-steps/yamls/custom-sdb.yaml.bak create mode 100644 docs/guides/solr/concepts/solr.md.bak create mode 100644 docs/guides/solr/configuration/config-file.md.bak create mode 100644 docs/guides/solr/reconfigure/solr.md.bak create mode 100644 docs/guides/zookeeper/concepts/opsrequest.md.bak create mode 100644 docs/guides/zookeeper/concepts/zookeeper.md.bak create mode 100644 docs/guides/zookeeper/reconfigure/reconfigure.md.bak diff --git a/docs/guides/cassandra/concepts/cassandra.md b/docs/guides/cassandra/concepts/cassandra.md index 0c8d87a3f8..052aa228c2 100644 --- a/docs/guides/cassandra/concepts/cassandra.md +++ b/docs/guides/cassandra/concepts/cassandra.md @@ -32,8 +32,8 @@ spec: authSecret: kind: Secret name: cassandra-admin-cred - configSecret: - name: cassandra-custom-config + configuration: + secretName: cassandra-custom-config healthChecker: failureThreshold: 3 periodSeconds: 20 @@ -148,9 +148,9 @@ type: Opaque Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator (version 0.13.0 and 
higher). -### spec.configSecret +### spec.configuration -`spec.configSecret` is an optional field that points to a Secret used to hold custom Cassandra configuration. If not set, KubeDB operator will use default configuration for Cassandra. +`spec.configuration` is an optional field that points to a Secret used to hold custom Cassandra configuration. If not set, KubeDB operator will use default configuration for Cassandra. ### spec.topology diff --git a/docs/guides/cassandra/concepts/cassandra.md.bak b/docs/guides/cassandra/concepts/cassandra.md.bak new file mode 100644 index 0000000000..7c6cbe5635 --- /dev/null +++ b/docs/guides/cassandra/concepts/cassandra.md.bak @@ -0,0 +1,387 @@ +--- +title: Cassandra CRD +menu: + docs_{{ .version }}: + identifier: cas-cassandra-concepts + name: Cassandra + parent: cas-concepts-cassandra + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Cassandra + +## What is Cassandra + +`Cassandra` is a Kubernetes `Custom Resource Definitions` (CRD). It provides declarative configuration for [Cassandra](https://cassandra.apache.org/) in a Kubernetes native way. You only need to describe the desired database configuration in a `Cassandra` object, and the KubeDB operator will create Kubernetes objects in the desired state for you. + +## Cassandra Spec + +As with all other Kubernetes objects, a Cassandra needs `apiVersion`, `kind`, and `metadata` fields. It also needs a `.spec` section. Below is an example Cassandra object. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Cassandra +metadata: + name: cassandra + namespace: demo +spec: + authSecret: + kind: Secret + name: cassandra-admin-cred + configuration: + secretName: cassandra-custom-config + healthChecker: + failureThreshold: 3 + periodSeconds: 20 + timeoutSeconds: 10 + keystoreCredSecret: + kind: Secret + name: cassandra-keystore-cred + deletionPolicy: DoNotTerminate + tls: + certificates: + - alias: server + secretName: cassandra-server-cert + - alias: client + secretName: cassandra-client-cert + issuerRef: + apiGroup: cert-manager.io + kind: Issuer + name: cassandra-ca-issuer + topology: + rack: + - name: r0 + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + podTemplate: + spec: + containers: + - name: cassandra + resources: + limits: + memory: 4Gi + cpu: 2000m + requests: + memory: 1Gi + cpu: 500m + securityContext: + runAsUser: 999 + fsGroup: 999 + monitor: + agent: prometheus.io/operator + prometheus: + exporter: + port: 56790 + serviceMonitor: + labels: + release: prometheus + interval: 10s + version: 5.0.3 +``` + +### spec.version + +`spec.version` is a required field specifying the name of the [CassandraVersion](/docs/guides/cassandra/concepts/cassandraversion.md) crd where the docker images are specified. Currently, when you install KubeDB, it creates the following `Cassandra` resources, + +- `4.1.8` +- `5.0.3` + +### spec.replicas + +`spec.replicas` the number of members in Cassandra replicaset. + +If `spec.topology` is set, then `spec.replicas` needs to be empty. Instead use `spec.topology.rack[ind].replicas`. + +KubeDB uses `PodDisruptionBudget` to ensure that majority of these replicas are available during [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions) so that quorum is maintained. 
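To make the distinction concrete, here is a minimal sketch (not taken from the original guide; names are hypothetical and required fields such as storage are omitted) showing where the replica count lives in each mode:

```yaml
# Combined mode: the node count is set directly under spec.replicas.
apiVersion: kubedb.com/v1alpha2
kind: Cassandra
metadata:
  name: cassandra-combined   # hypothetical name
  namespace: demo
spec:
  version: 5.0.3
  replicas: 3
---
# Topology mode: spec.replicas stays empty; each rack declares its own replicas.
apiVersion: kubedb.com/v1alpha2
kind: Cassandra
metadata:
  name: cassandra-topology   # hypothetical name
  namespace: demo
spec:
  version: 5.0.3
  topology:
    rack:
      - name: r0
        replicas: 2
```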
+ +### spec.authSecret + +`spec.authSecret` is an optional field that points to a Secret used to hold credentials for `cassandra` admin user. If not set, KubeDB operator creates a new Secret `{cassandra-object-name}-auth` for storing the password for `admin` user for each Cassandra object. + +We can use this field in 3 mode. +1. Using an external secret. In this case, You need to create an auth secret first with required fields, then specify the secret name when creating the Cassandra object using `spec.authSecret.name` & set `spec.authSecret.externallyManaged` to true. +```yaml +authSecret: + name: + externallyManaged: true +``` + +2. Specifying the secret name only. In this case, You need to specify the secret name when creating the Cassandra object using `spec.authSecret.name`. `externallyManaged` is by default false. +```yaml +authSecret: + name: +``` + +3. Let KubeDB do everything for you. In this case, no work for you. + +AuthSecret contains a `user` key and a `password` key which contains the `username` and `password` respectively for Cassandra `admin` user. + +Example: + +```bash +$ kubectl create secret generic cassandra-auth -n demo \ +--from-literal=username=jhon-doe \ +--from-literal=password=6q8u_2jMOW-OOZXk +secret "cassandra-auth" created +``` + +```yaml +apiVersion: v1 +data: + password: NnE4dV8yak1PVy1PT1pYaw== + username: amhvbi1kb2U= +kind: Secret +metadata: + name: cassandra-auth + namespace: demo +type: Opaque +``` + +Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator (version 0.13.0 and higher). + +### spec.configSecret + +`spec.configSecret` is an optional field that points to a Secret used to hold custom Cassandra configuration. If not set, KubeDB operator will use default configuration for Cassandra. + +### spec.topology + +`spec.topology` represents the topology configuration for Cassandra cluster. + +When `spec.topology` is set, the following fields needs to be empty, otherwise validating webhook will throw error. + +- `spec.replicas` +- `spec.podTemplate` +- `spec.storage` + +#### spec.topology.rack + +`rack` represents a logical grouping of nodes of Cassandra cluster. `spec.topology.rack[]` is an array of RackSpec. It is a mandatory field when `spec.topology` is specified. Each RackSpec describes the configuration of a single rack — including its name, number of replicas, pod template, and storage options. + +Available configurable fields: + +- `name` (`: "rack-east"`) — is a `mandatory` field that specifies the unique name of the rack. Cassandra uses this name to assign and distribute replicas logically across racks. + +- `replicas` (`: "3"`) — is an `optional` field to specify the number of Cassandra nodes (pods) to deploy in this rack. This field must hold a value greater than `0`. + +- `podTemplate` (`: ""`) — is an `optional` field that allows you to customize pod-level configurations (like affinity, tolerations, nodeSelector, container resources) for pods within this rack. + +- `storage` (`: "resources.requests.storage: 10Gi"`) — is an optional field to define how persistent storage should be configured for the pods in this rack. It uses a standard PersistentVolumeClaimSpec format. + +- `storageType` (`: "Durable"`) — is an `optional` field to specify whether the pods in this rack should use `Durable` (persistent disk-backed) or `Ephemeral` (temporary) storage. Defaults to `Durable`. + +### spec.tls + +`spec.tls` specifies the TLS/SSL configurations. 
The KubeDB operator supports TLS management by using the [cert-manager](https://cert-manager.io/). + +```yaml +spec: + tls: + issuerRef: + apiGroup: "cert-manager.io" + kind: Issuer + name: cassandra-issuer + certificates: + - alias: server + privateKey: + encoding: PKCS8 + secretName: cassandra-client-cert + subject: + organizations: + - kubedb + - alias: http + privateKey: + encoding: PKCS8 + secretName: cassandra-server-cert + subject: + organizations: + - kubedb +``` + +The `spec.tls` contains the following fields: + +- `tls.issuerRef` - is an `optional` field that references to the `Issuer` or `ClusterIssuer` custom resource object of [cert-manager](https://cert-manager.io/docs/concepts/issuer/). It is used to generate the necessary certificate secrets for Cassandra. If the `issuerRef` is not specified, the operator creates a self-signed CA and also creates necessary certificate (valid: 365 days) secrets using that CA. + - `apiGroup` - is the group name of the resource that is being referenced. Currently, the only supported value is `cert-manager.io`. + - `kind` - is the type of resource that is being referenced. The supported values are `Issuer` and `ClusterIssuer`. + - `name` - is the name of the resource ( `Issuer` or `ClusterIssuer` ) that is being referenced. + +- `tls.certificates` - is an `optional` field that specifies a list of certificate configurations used to configure the certificates. It has the following fields: + - `alias` - represents the identifier of the certificate. It has the following possible value: + - `server` - is used for the server certificate configuration. + - `client` - is used for the client certificate configuration. + + - `secretName` - ( `string` | `"-alias-cert"` ) - specifies the k8s secret name that holds the certificates. + + - `subject` - specifies an `X.509` distinguished name (DN). It has the following configurable fields: + - `organizations` ( `[]string` | `nil` ) - is a list of organization names. + - `organizationalUnits` ( `[]string` | `nil` ) - is a list of organization unit names. + - `countries` ( `[]string` | `nil` ) - is a list of country names (ie. Country Codes). + - `localities` ( `[]string` | `nil` ) - is a list of locality names. + - `provinces` ( `[]string` | `nil` ) - is a list of province names. + - `streetAddresses` ( `[]string` | `nil` ) - is a list of street addresses. + - `postalCodes` ( `[]string` | `nil` ) - is a list of postal codes. + - `serialNumber` ( `string` | `""` ) is a serial number. + + For more details, visit [here](https://golang.org/pkg/crypto/x509/pkix/#Name). + + - `duration` ( `string` | `""` ) - is the period during which the certificate is valid. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300m"`, `"1.5h"` or `"20h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + - `renewBefore` ( `string` | `""` ) - is a specifiable time before expiration duration. + - `dnsNames` ( `[]string` | `nil` ) - is a list of subject alt names. + - `ipAddresses` ( `[]string` | `nil` ) - is a list of IP addresses. + - `uris` ( `[]string` | `nil` ) - is a list of URI Subject Alternative Names. + - `emailAddresses` ( `[]string` | `nil` ) - is a list of email Subject Alternative Names. + +### spec.monitor + +Cassandra managed by KubeDB can be monitored with Prometheus operator out-of-the-box. 
To learn more, +- [Monitor Apache Cassandra with Prometheus operator](/docs/guides/cassandra/monitoring/using-prometheus-operator.md) +- [Monitor Apache Cassandra with Built-in Prometheus](/docs/guides/cassandra/monitoring/using-builtin-prometheus.md) + +### spec.podTemplate + +KubeDB allows providing a template for database pod through `spec.podTemplate`. KubeDB operator will pass the information provided in `spec.podTemplate` to the PetSet created for Cassandra cluster. + +KubeDB accept following fields to set in `spec.podTemplate:` + +- metadata: + - annotations (pod's annotation) + - labels (pod's labels) +- controller: + - annotations (petset's annotation) + - labels (petset's labels) +- spec: + - containers + - volumes + - podPlacementPolicy + - initContainers + - containers + - imagePullSecrets + - nodeSelector + - serviceAccountName + - schedulerName + - tolerations + - priorityClassName + - priority + - securityContext + +You can check out the full list [here](https://github.com/kmodules/offshoot-api/blob/master/api/v2/types.go#L26C1-L279C1). +Uses of some field of `spec.podTemplate` is described below, + +#### spec.podTemplate.spec.tolerations + +The `spec.podTemplate.spec.tolerations` is an optional field. This can be used to specify the pod's tolerations. + +#### spec.podTemplate.spec.volumes + +The `spec.podTemplate.spec.volumes` is an optional field. This can be used to provide the list of volumes that can be mounted by containers belonging to the pod. + +#### spec.podTemplate.spec.podPlacementPolicy + +`spec.podTemplate.spec.podPlacementPolicy` is an optional field. This can be used to provide the reference of the `podPlacementPolicy`. `name` of the podPlacementPolicy is referred under this attribute. This will be used by our Petset controller to place the db pods throughout the region, zone & nodes according to the policy. It utilizes kubernetes affinity & podTopologySpreadContraints feature to do so. +```yaml +spec: + podPlacementPolicy: + name: default +``` + +#### spec.podTemplate.spec.nodeSelector + +`spec.podTemplate.spec.nodeSelector` is an optional field that specifies a map of key-value pairs. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). To learn more, see [here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) . + +### spec.serviceTemplates + +You can also provide template for the services created by KubeDB operator for Cassandra cluster through `spec.serviceTemplates`. This will allow you to set the type and other properties of the services. + +KubeDB allows following fields to set in `spec.serviceTemplates`: +- `alias` represents the identifier of the service. It has the following possible value: + - `stats` for is used for the `exporter` service identification. + +There are two options for providing serviceTemplates: + - To provide `serviceTemplates` for a specific service, the `serviceTemplates.ports.port` should be equal to the port of that service and `serviceTemplate` will be used for that particular service only. + - However, to provide a common `serviceTemplates`, `serviceTemplates.ports.port` should be empty. 
+ +- metadata: + - labels + - annotations +- spec: + - type + - ports + - clusterIP + - externalIPs + - loadBalancerIP + - loadBalancerSourceRanges + - externalTrafficPolicy + - healthCheckNodePort + - sessionAffinityConfig + +See [here](https://github.com/kmodules/offshoot-api/blob/kubernetes-1.21.1/api/v1/types.go#L237) to understand these fields in detail. + + +#### spec.podTemplate.spec.containers + +The `spec.podTemplate.spec.containers` can be used to provide the list containers and their configurations for to the database pod. some of the fields are described below, + +##### spec.podTemplate.spec.containers[].name +The `spec.podTemplate.spec.containers[].name` field used to specify the name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + +##### spec.podTemplate.spec.containers[].args +`spec.podTemplate.spec.containers[].args` is an optional field. This can be used to provide additional arguments to database installation. + +##### spec.podTemplate.spec.containers[].env + +`spec.podTemplate.spec.containers[].env` is an optional field that specifies the environment variables to pass to the Cassandra containers. + +##### spec.podTemplate.spec.containers[].resources + +`spec.podTemplate.spec.containers[].resources` is an optional field. This can be used to request compute resources required by containers of the database pods. To learn more, visit [here](http://kubernetes.io/docs/user-guide/compute-resources/). + +### spec.deletionPolicy + +`deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `Cassandra` crd or which resources KubeDB should keep or delete when you delete `Cassandra` crd. KubeDB provides following four deletion policies: + +- DoNotTerminate +- WipeOut +- Halt +- Delete + +When `deletionPolicy` is `DoNotTerminate`, KubeDB takes advantage of `ValidationWebhook` feature in Kubernetes 1.9.0 or later clusters to implement `DoNotTerminate` feature. If admission webhook is enabled, `DoNotTerminate` prevents users from deleting the database as long as the `spec.deletionPolicy` is set to `DoNotTerminate`. + +Following table show what KubeDB does when you delete Cassandra crd for different termination policies, + +| Behavior | DoNotTerminate | Halt | Delete | WipeOut | +| ----------------------------------- | :------------: | :------: | :------: | :------: | +| 1. Block Delete operation | ✓ | ✗ | ✗ | ✗ | +| 2. Delete PetSet | ✗ | ✓ | ✓ | ✓ | +| 3. Delete Services | ✗ | ✓ | ✓ | ✓ | +| 4. Delete PVCs | ✗ | ✗ | ✓ | ✓ | +| 5. Delete Secrets | ✗ | ✗ | ✗ | ✓ | +| 6. Delete Snapshots | ✗ | ✗ | ✗ | ✓ | + +If you don't specify `spec.deletionPolicy` KubeDB uses `Delete` termination policy by default. + + +## spec.healthChecker +It defines the attributes for the health checker. +- `spec.healthChecker.periodSeconds` specifies how often to perform the health check. +- `spec.healthChecker.timeoutSeconds` specifies the number of seconds after which the probe times out. +- `spec.healthChecker.failureThreshold` specifies minimum consecutive failures for the healthChecker to be considered failed. +- `spec.healthChecker.disableWriteCheck` specifies whether to disable the writeCheck or not. + +Know details about KubeDB Health checking from this [blog post](https://appscode.com/blog/post/kubedb-health-checker/). + +## Next Steps + +- Learn how to use KubeDB to run Apache Cassandra cluster [here](/docs/guides/cassandra/README.md). 
+- Monitor your Cassandra cluster with KubeDB using [`out-of-the-box` Prometheus operator](/docs/guides/cassandra/monitoring/using-prometheus-operator.md). +- Detail concepts of [CassandraVersion object](/docs/guides/cassandra/concepts/cassandraversion.md). + +[//]: # (- Learn to use KubeDB managed Cassandra objects using [CLIs](/docs/guides/cassandra/cli/cli.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/cassandra/concepts/cassandraopsrequest.md b/docs/guides/cassandra/concepts/cassandraopsrequest.md index e95297ab0b..0b0c03a77b 100644 --- a/docs/guides/cassandra/concepts/cassandraopsrequest.md +++ b/docs/guides/cassandra/concepts/cassandraopsrequest.md @@ -149,8 +149,8 @@ spec: databaseRef: name: cassandra-prod configuration: - configSecret: - name: new-configsecret + configuration: + secretName: new-configsecret status: conditions: - lastTransitionTime: "2025-07-25T18:22:38Z" diff --git a/docs/guides/cassandra/concepts/cassandraopsrequest.md.bak b/docs/guides/cassandra/concepts/cassandraopsrequest.md.bak new file mode 100644 index 0000000000..e95297ab0b --- /dev/null +++ b/docs/guides/cassandra/concepts/cassandraopsrequest.md.bak @@ -0,0 +1,428 @@ +--- +title: CassandraOpsRequests CRD +menu: + docs_{{ .version }}: + identifier: cas-opsrequest-concepts + name: CassandraOpsRequest + parent: cas-concepts-cassandra + weight: 15 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + + +> New to KubeDB? Please start [here](/docs/README.md). + +# CassandraOpsRequest + +## What is CassandraOpsRequest + +`CassandraOpsRequest` is a Kubernetes `Custom Resource Definitions` (CRD). It provides a declarative configuration for [Cassandra](https://cassandra.apache.org/) administrative operations like database version updating, horizontal scaling, vertical scaling etc. in a Kubernetes native way. + +## CassandraOpsRequest CRD Specifications + +Like any official Kubernetes resource, a `CassandraOpsRequest` has `TypeMeta`, `ObjectMeta`, `Spec` and `Status` sections. 
+ +Here, some sample `CassandraOpsRequest` CRs for different administrative operations is given below: + +Sample `CassandraOpsRequest` for updating database: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: CassandraOpsRequest +metadata: + name: update-version + namespace: demo +spec: + type: UpdateVersion + databaseRef: + name: cassandra-prod + updateVersion: + targetVersion: 5.0.3 +status: + conditions: + - lastTransitionTime: "2025-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +Sample `CassandraOpsRequest` Objects for Horizontal Scaling of different component of the database: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: CassandraOpsRequest +metadata: + name: casops-hscale-down + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: cassandra-prod + horizontalScaling: + node: 4 +status: + conditions: + - lastTransitionTime: "2025-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +Sample `CassandraOpsRequest` Objects for Vertical Scaling of different component of the database: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: CassandraOpsRequest +metadata: + name: casops-vscale + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: cassandra-prod + verticalScaling: + node: + resources: + requests: + memory: "2Gi" + cpu: "1" + limits: + memory: "4Gi" + cpu: "3" +status: + conditions: + - lastTransitionTime: "2025-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +Sample `CassandraOpsRequest` Objects for Reconfiguring different cassandra mode: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: CassandraOpsRequest +metadata: + name: casops-reconfiugre + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: cassandra-prod + configuration: + applyConfig: + cassandra.yaml: | + authenticator: PasswordAuthenticator +status: + conditions: + - lastTransitionTime: "2025-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: CassandraOpsRequest +metadata: + name: casops-reconfiugre + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: cassandra-prod + configuration: + configSecret: + name: new-configsecret +status: + conditions: + - lastTransitionTime: "2025-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +Sample `CassandraOpsRequest` Objects for Volume Expansion of different database components: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: CassandraOpsRequest +metadata: + name: casops-volume-exp + namespace: demo +spec: + type: VolumeExpansion + databaseRef: + name: cassandra-prod + volumeExpansion: + mode: "Online" + node: 2Gi +status: + conditions: + - lastTransitionTime: "2025-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 
+ reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +Sample `CassandraOpsRequest` Objects for Reconfiguring TLS of the database: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: CassandraOpsRequest +metadata: + name: casops-add-tls + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: cassandra-prod + tls: + issuerRef: + name: cas-issuer + kind: Issuer + apiGroup: "cert-manager.io" + certificates: + - alias: client + emailAddresses: + - abc@appscode.com +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: CassandraOpsRequest +metadata: + name: casops-rotate + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: cassandra-dev + tls: + rotateCertificates: true +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: CassandraOpsRequest +metadata: + name: casops-change-issuer + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: cassandra-prod + tls: + issuerRef: + name: cas-new-issuer + kind: Issuer + apiGroup: "cert-manager.io" +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: CassandraOpsRequest +metadata: + name: casops-remove + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: cassandra-prod + tls: + remove: true +``` + +Here, we are going to describe the various sections of a `CassandraOpsRequest` crd. + +A `CassandraOpsRequest` object has the following fields in the `spec` section. + +### spec.databaseRef + +`spec.databaseRef` is a required field that point to the [Cassandra](/docs/guides/cassandra/concepts/cassandra.md) object for which the administrative operations will be performed. This field consists of the following sub-field: + +- **spec.databaseRef.name :** specifies the name of the [Cassandra](/docs/guides/cassandra/concepts/cassandra.md) object. + +### spec.type + +`spec.type` specifies the kind of operation that will be applied to the database. Currently, the following types of operations are allowed in `CassandraOpsRequest`. + +- `UpdateVersion` +- `HorizontalScaling` +- `VerticalScaling` +- `VolumeExpansion` +- `Reconfigure` +- `ReconfigureTLS` +- `Restart` + +> You can perform only one type of operation on a single `CassandraOpsRequest` CR. For example, if you want to update your database and scale up its replica then you have to create two separate `CassandraOpsRequest`. At first, you have to create a `CassandraOpsRequest` for updating. Once it is completed, then you can create another `CassandraOpsRequest` for scaling. + +### spec.updateVersion + +If you want to update you Cassandra version, you have to specify the `spec.updateVersion` section that specifies the desired version information. This field consists of the following sub-field: + +- `spec.updateVersion.targetVersion` refers to a [CassandraVersion](/docs/guides/cassandra/concepts/cassandraversion.md) CR that contains the Cassandra version information where you want to update. + +> You can only update between Cassandra versions. KubeDB does not support downgrade for Cassandra. + +### spec.horizontalScaling.node + +If you want to scale-up or scale-down your Cassandra cluster or different components of it, you have to specify `spec.horizontalScaling.node` section. + +### spec.verticalScaling.node + +`spec.verticalScaling.node` is a required field specifying the information of `Cassandra` resources like `cpu`, `memory` etc that will be scaled. 
+It has the following structure: + +```yaml +requests: + memory: "200Mi" + cpu: "0.1" +limits: + memory: "300Mi" + cpu: "0.2" +``` + +Here, when you specify the resource request, the scheduler uses this information to decide which node to place the container of the Pod on, and when you specify a resource limit for the container, the `kubelet` enforces those limits so that the running container is not allowed to use more of that resource than the limit you set. You can find more details [here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + +### spec.volumeExpansion + +> To use the volume expansion feature the storage class must support volume expansion + +If you want to expand the volume of your Cassandra cluster or different components of it, you have to specify the `spec.volumeExpansion` section. This field consists of the following sub-fields: + +- `spec.volumeExpansion.mode` specifies the volume expansion mode. Supported values are `Online` & `Offline`. The default is `Online`. +- `spec.volumeExpansion.node` indicates the desired size for the persistent volume for a Cassandra cluster. + + +All of them refer to [Quantity](https://v1-22.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#quantity-resource-core) types of Kubernetes. + +Example usage of this field is given below: + +```yaml +spec: + volumeExpansion: + node: "2Gi" +``` + +This will expand the volume size of all the combined nodes to 2 GB. + +### spec.configuration + +If you want to reconfigure your running Cassandra cluster or different components of it with a new custom configuration, you have to specify the `spec.configuration` section. This field consists of the following sub-fields: + +- `spec.configuration.configSecret` points to a secret in the same namespace of a Cassandra resource, which contains the new custom configurations. If any configSecret was set before in the database, this secret will replace it. + +- `applyConfig` is a map where the key is the name of the configuration file (for example, `cassandra.yaml`) and the value is the corresponding configuration content. + +```yaml + applyConfig: + cassandra.yaml: | + authenticator: PasswordAuthenticator +``` + +- `removeCustomConfig` is a boolean field. Set this field to `true` if you want to remove all the custom configuration from the deployed cassandra cluster. + +### spec.tls + +If you want to reconfigure the TLS configuration of your Cassandra i.e. add TLS, remove TLS, update the issuer/cluster issuer or certificates, and rotate the certificates, you have to specify the `spec.tls` section. This field consists of the following sub-fields: + +- `spec.tls.issuerRef` specifies the issuer name, kind and api group. +- `spec.tls.certificates` specifies the certificates. You can learn more about this field from [here](/docs/guides/cassandra/concepts/cassandra.md#spectls). +- `spec.tls.rotateCertificates` specifies that we want to rotate the certificates of this cassandra. +- `spec.tls.remove` specifies that we want to remove tls from this cassandra. + +### spec.timeout +As the ops request steps are internally retried multiple times, this `timeout` field lets users specify a timeout for those steps of the ops request (in seconds). +If a step doesn't finish within the specified timeout, the ops request will result in failure. + +### spec.apply +This field controls the execution of the opsRequest depending on the database state. It has two supported values: `Always` & `IfReady`.
+Use IfReady, if you want to process the opsRequest only when the database is Ready. And use Always, if you want to process the execution of opsReq irrespective of the Database state. + +### CassandraOpsRequest `Status` + +`.status` describes the current state and progress of a `CassandraOpsRequest` operation. It has the following fields: + +### status.phase + +`status.phase` indicates the overall phase of the operation for this `CassandraOpsRequest`. It can have the following three values: + +| Phase | Meaning | +|-------------|----------------------------------------------------------------------------------| +| Successful | KubeDB has successfully performed the operation requested in the CassandraOpsRequest | +| Progressing | KubeDB has started the execution of the applied CassandraOpsRequest | +| Failed | KubeDB has failed the operation requested in the CassandraOpsRequest | +| Denied | KubeDB has denied the operation requested in the CassandraOpsRequest | +| Skipped | KubeDB has skipped the operation requested in the CassandraOpsRequest | + +Important: Ops-manager Operator can skip an opsRequest, only if its execution has not been started yet & there is a newer opsRequest applied in the cluster. `spec.type` has to be same as the skipped one, in this case. + +### status.observedGeneration + +`status.observedGeneration` shows the most recent generation observed by the `CassandraOpsRequest` controller. + +### status.conditions + +`status.conditions` is an array that specifies the conditions of different steps of `CassandraOpsRequest` processing. Each condition entry has the following fields: + +- `types` specifies the type of the condition. CassandraOpsRequest has the following types of conditions: + +| Type | Meaning | +|-------------------------------|---------------------------------------------------------------------------| +| `Progressing` | Specifies that the operation is now in the progressing state | +| `Successful` | Specifies such a state that the operation on the database was successful. | +| `HaltDatabase` | Specifies such a state that the database is halted by the operator | +| `ResumeDatabase` | Specifies such a state that the database is resumed by the operator | +| `Failed` | Specifies such a state that the operation on the database failed. 
| +| `StartingBalancer` | Specifies such a state that the balancer has successfully started | +| `StoppingBalancer` | Specifies such a state that the balancer has successfully stopped | +| `UpdateReplicaSetImage` | Specifies such a state that the Replicaset Image has been updated | +| `UpdateConfigServerImage` | Specifies such a state that the ConfigServer Image has been updated | +| `UpdatePetSetResources` | Specifies such a state that the Petset resources has been updated | +| `UpdateShardResources` | Specifies such a state that the Shard resources has been updated | +| `UpdateReplicaSetResources` | Specifies such a state that the Replicaset resources has been updated | +| `UpdateConfigServerResources` | Specifies such a state that the ConfigServer resources has been updated | +| `ScaleDownReplicaSet` | Specifies such a state that the scale down operation of replicaset | +| `ScaleUpReplicaSet` | Specifies such a state that the scale up operation of replicaset | +| `ScaleUpShardReplicas` | Specifies such a state that the scale up operation of shard replicas | +| `ScaleDownShardReplicas` | Specifies such a state that the scale down operation of shard replicas | +| `ScaleDownConfigServer` | Specifies such a state that the scale down operation of config server | +| `ScaleUpConfigServer` | Specifies such a state that the scale up operation of config server | +| `VolumeExpansion` | Specifies such a state that the volume expansion operaton of the database | +| `ReconfigureReplicaset` | Specifies such a state that the reconfiguration of replicaset nodes | +| `ReconfigureConfigServer` | Specifies such a state that the reconfiguration of config server nodes | + +- The `status` field is a string, with possible values `True`, `False`, and `Unknown`. + - `status` will be `True` if the current transition succeeded. + - `status` will be `False` if the current transition failed. + - `status` will be `Unknown` if the current transition was denied. +- The `message` field is a human-readable message indicating details about the condition. +- The `reason` field is a unique, one-word, CamelCase reason for the condition's last transition. +- The `lastTransitionTime` field provides a timestamp for when the operation last transitioned from one state to another. +- The `observedGeneration` shows the most recent condition transition generation observed by the controller. diff --git a/docs/guides/cassandra/configuration/using-config-file.md b/docs/guides/cassandra/configuration/using-config-file.md index 09ed2a0caf..8659339a2e 100644 --- a/docs/guides/cassandra/configuration/using-config-file.md +++ b/docs/guides/cassandra/configuration/using-config-file.md @@ -33,11 +33,11 @@ KubeDB supports providing custom configuration for Cassandra. This tutorial will ## Overview -Cassandra allows configuring via configuration file. The default configuration file for Cassandra deployed by `KubeDB` can be found in `/etc/cassandra/cassandra.yaml`. When `spec.configSecret` is set to cassandra, KubeDB operator will get the secret and after that it will validate the values of the secret and then will keep the validated customizable configurations from the user and merge it with the remaining default config. After all that this secret will be mounted to cassandra for use it as the configuration file. +Cassandra allows configuring via configuration file. The default configuration file for Cassandra deployed by `KubeDB` can be found in `/etc/cassandra/cassandra.yaml`. 
When `spec.configuration` is set to cassandra, KubeDB operator will get the secret and after that it will validate the values of the secret and then will keep the validated customizable configurations from the user and merge it with the remaining default config. After all that this secret will be mounted to cassandra for use it as the configuration file. > To learn available configuration option of Cassandra see [Configuration Options](https://cassandra.apache.org/doc/4.0/cassandra/getting_started/configuring.html). -At first, you have to create a secret with your configuration file contents as the value of this key `cassandra.yaml`. Then, you have to specify the name of this secret in `spec.configSecret.name` section while creating cassandra CRO. +At first, you have to create a secret with your configuration file contents as the value of this key `cassandra.yaml`. Then, you have to specify the name of this secret in `spec.configuration.secretName` section while creating cassandra CRO. ## Custom Configuration @@ -77,7 +77,7 @@ read_request_timeout: 6000ms write_request_timeout: 2500ms ``` -Now, create cassandra crd specifying `spec.configSecret` field. +Now, create cassandra crd specifying `spec.configuration` field. ```yaml apiVersion: kubedb.com/v1alpha2 @@ -87,8 +87,8 @@ metadata: namespace: demo spec: version: 5.0.3 - configSecret: - name: cas-configuration + configuration: + secretName: cas-configuration topology: rack: - name: r0 diff --git a/docs/guides/cassandra/configuration/using-config-file.md.bak b/docs/guides/cassandra/configuration/using-config-file.md.bak new file mode 100644 index 0000000000..6f7e342119 --- /dev/null +++ b/docs/guides/cassandra/configuration/using-config-file.md.bak @@ -0,0 +1,158 @@ +--- +title: Configuring cassandra Using Config File +menu: + docs_{{ .version }}: + identifier: cas-configuration-using-config-file + name: Configure Using Config File + parent: cas-configuration + weight: 15 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Using Custom Configuration File + +KubeDB supports providing custom configuration for Cassandra. This tutorial will show you how to use KubeDB to run a Cassandra with custom configuration. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. Run the following command to prepare your cluster for this tutorial: + + ```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: The yaml files used in this tutorial are stored in [docs/examples/cassandra](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/cassandra) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +Cassandra allows configuring via configuration file. The default configuration file for Cassandra deployed by `KubeDB` can be found in `/etc/cassandra/cassandra.yaml`. 
When `spec.configSecret` is set to cassandra, KubeDB operator will get the secret and after that it will validate the values of the secret and then will keep the validated customizable configurations from the user and merge it with the remaining default config. After all that this secret will be mounted to cassandra for use it as the configuration file. + +> To learn available configuration option of Cassandra see [Configuration Options](https://cassandra.apache.org/doc/4.0/cassandra/getting_started/configuring.html). + +At first, you have to create a secret with your configuration file contents as the value of this key `cassandra.yaml`. Then, you have to specify the name of this secret in `spec.configuration.secretName` section while creating cassandra CRO. + +## Custom Configuration + +At first, create `cassandra.yaml` file containing required configuration settings. + +```bash +$ cat cassandra.yaml +read_request_timeout: 6000ms +write_request_timeout: 2500ms +``` + +Now, create the secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo cas-configuration --from-file=./cassandra.yaml +secret/cas-configuration created +``` + +Verify the secret has the configuration file. + +```bash +$ kubectl get secret -n demo cas-configuration -o yaml +apiVersion: v1 +data: + cassandra.yaml: cmVhZF9yZXF1ZXN0X3RpbWVvdXQ6IDYwMDBtcwp3cml0ZV9yZXF1ZXN0X3RpbWVvdXQ6IDI1MDBtcwo= +kind: Secret +metadata: + creationTimestamp: "2025-07-15T08:53:26Z" + name: cas-configuration + namespace: demo + resourceVersion: "105786" + uid: 135c819c-fba6-4800-9ae0-fac35312fab2 +type: Opaque + +$ echo cmVhZF9yZXF1ZXN0X3RpbWVvdXQ6IDYwMDBtcwp3cml0ZV9yZXF1ZXN0X3RpbWVvdXQ6IDI1MDBtcwo= | base64 -d +read_request_timeout: 6000ms +write_request_timeout: 2500ms +``` + +Now, create cassandra crd specifying `spec.configSecret` field. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Cassandra +metadata: + name: cas-custom-config + namespace: demo +spec: + version: 5.0.3 + configuration: + secretName: cas-configuration + topology: + rack: + - name: r0 + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageType: Durable + deletionPolicy: WipeOut + +``` + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/cassandra/configuration/cassandra-config-file.yaml +cassandra.kubedb.com/cas-custom-config created +``` + +Now, wait a few minutes. KubeDB operator will create necessary petset, services, secret etc. If everything goes well, we will see that a pod with the name `cas-custom-config-0` has been created. + +Check that the petset's pod is running + +```bash +$ kubectl get pod -n demo cas-custom-config-rack-r0-0 +NAME READY STATUS RESTARTS AGE +cas-custom-config-rack-r0-0 1/1 Running 0 36s +``` + +Now, we will check if the cassandra has started with the custom configuration we have provided. 
+ +Now, you can exec into the cassandra pod and find if the custom configuration is there, + +```bash +$ kubectl exec -it -n demo cas-custom-config-rack-r0-0 -- bash +Defaulted container "cassandra" out of: cassandra, cassandra-init (init), medusa-init (init) +[cassandra@cas-custom-config-rack-r0-0 /]$ cat /etc/cassandra/cassandra.yaml | grep request_timeout +read_request_timeout: 6000ms +range_request_timeout: 10000ms +write_request_timeout: 2500ms +counter_write_request_timeout: 5000ms +truncate_request_timeout: 60000ms +request_timeout: 10000ms +[cassandra@cas-custom-config-rack-r0-0 /]$ exit +exit +``` + +As we can see from the configuration of running cassandra, the value of `read_request_timeout` and `write_request_timeout` has been set to our desired value successfully. + +## Cleaning up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete -n demo cas/cas-custom-config +kubectl delete -n demo secret cas-configuration +kubectl delete ns demo +``` + +## Next Steps + +- Monitor your cassandra database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/cassandra/monitoring/using-prometheus-operator.md). +- Monitor your Pgpool database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/cassandra/monitoring/using-builtin-prometheus.md). +- Detail concepts of [Cassandra object](/docs/guides/cassandra/concepts/cassandra.md). +- Detail concepts of [CassandraVersion object](/docs/guides/cassandra/concepts/cassandraversion.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/cassandra/reconfigure/cassandra-topology.md b/docs/guides/cassandra/reconfigure/cassandra-topology.md index 585e911b77..1b1553fba1 100644 --- a/docs/guides/cassandra/reconfigure/cassandra-topology.md +++ b/docs/guides/cassandra/reconfigure/cassandra-topology.md @@ -75,7 +75,7 @@ secret/cas-topology-custom-config created ``` -In this section, we are going to create a Cassandra object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `Cassandra` CR that we are going to create, +In this section, we are going to create a Cassandra object specifying `spec.configuration` field to apply this custom configuration. Below is the YAML of the `Cassandra` CR that we are going to create, ```yaml apiVersion: kubedb.com/v1alpha2 @@ -85,8 +85,8 @@ metadata: namespace: demo spec: version: 5.0.3 - configSecret: - name: cas-topology-custom-config + configuration: + secretName: cas-topology-custom-config topology: rack: - name: r0 @@ -195,8 +195,8 @@ spec: databaseRef: name: cassandra-prod configuration: - configSecret: - name: new-cas-topology-custom-config + configuration: + secretName: new-cas-topology-custom-config timeout: 5m apply: IfReady ``` @@ -205,7 +205,7 @@ Here, - `spec.databaseRef.name` specifies that we are reconfiguring `cassandra-prod` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configSecret.name` specifies the name of the new secret. +- `spec.configuration.secretName` specifies the name of the new secret. 
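Before applying it, you can optionally double-check that the referenced secret exists in the `demo` namespace (a quick sanity check; this step is not part of the original walkthrough):

```bash
$ kubectl get secret -n demo new-cas-topology-custom-config
# The secret should be listed and contain a single data key: cassandra.yaml
```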
Let's create the `CassandraOpsRequest` CR we have shown above, diff --git a/docs/guides/cassandra/reconfigure/cassandra-topology.md.bak b/docs/guides/cassandra/reconfigure/cassandra-topology.md.bak new file mode 100644 index 0000000000..f4b7d90b6e --- /dev/null +++ b/docs/guides/cassandra/reconfigure/cassandra-topology.md.bak @@ -0,0 +1,534 @@ +--- +title: Reconfigure Cassandra Topology +menu: + docs_{{ .version }}: + identifier: cas-reconfigure-topology + name: Topology + parent: cas-reconfigure + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure Cassandra Topology Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a Cassandra Topology cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Cassandra](/docs/guides/cassandra/concepts/cassandra.md) + - [CassandraOpsRequest](/docs/guides/cassandra/concepts/cassandraopsrequest.md) + - [Reconfigure Overview](/docs/guides/cassandra/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/cassandra](/docs/examples/cassandra) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `Cassandra` Topology cluster using a supported version by `KubeDB` operator. Then we are going to apply `CassandraOpsRequest` to reconfigure its configuration. + +### Prepare Cassandra Topology Cluster + +Now, we are going to deploy a `Cassandra` topology cluster with version `5.0.3`. + +### Deploy Cassandra + +At first, we will create a secret with the `cassandra.yaml` file containing required configuration settings. + +**cassandra.yaml:** + +```properties +read_request_timeout: 6000ms +write_request_timeout: 2500ms +``` + +Here, `read_request_timeout` is set to `6000ms`, whereas the default value is `5000ms` and `write_request_timeout` is set to `2500ms`, whereas the default value is 2000ms. + +Let's create a k8s secret containing the above configuration where the file name will be the key and the file-content as the value: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: cas-topology-custom-config + namespace: demo +stringData: + cassandra.yaml: |- + read_request_timeout: 6000ms + write_request_timeout: 2500ms +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/cassandra/reconfigure/cassandra-topology-custom-config-secret.yaml +secret/cas-topology-custom-config created +``` + + +In this section, we are going to create a Cassandra object specifying `spec.configSecret` field to apply this custom configuration. 
Below is the YAML of the `Cassandra` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Cassandra +metadata: + name: cassandra-prod + namespace: demo +spec: + version: 5.0.3 + configuration: + secretName: cas-topology-custom-config + topology: + rack: + - name: r0 + replicas: 2 + podTemplate: + spec: + containers: + - name: cassandra + resources: + limits: + memory: 2Gi + cpu: 2 + requests: + memory: 1Gi + cpu: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageType: Durable + deletionPolicy: WipeOut +``` + +Let's create the `Cassandra` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/cassandra/reconfigure/cassandra-topology.yaml +cassandra.kubedb.com/cassandra-prod created +``` + +Now, wait until `cassandra-prod` has status `Ready`. i.e, + +```bash +$ kubectl get cas -n demo -w +NAME TYPE VERSION STATUS AGE +cassandra-prod kubedb.com/v1alpha2 5.0.3 Provisioning 48s +cassandra-prod kubedb.com/v1alpha2 5.0.3 Provisioning 81s +. +. +cassandra-prod kubedb.com/v1alpha2 5.0.3 Ready 105s +``` + +Now, we will check if the cassandra has started with the custom configuration we have provided. + +Exec into the Cassandra pod and execute the following commands to see the configurations: +```bash +$ kubectl exec -it -n demo cassandra-prod-rack-r0-0 -- bash +Defaulted container "cassandra" out of: cassandra, cassandra-init (init), medusa-init (init) +[cassandra@cassandra-prod-rack-r0-0 /]$ cat /etc/cassandra/cassandra.yaml | grep request_timeout +read_request_timeout: 6000ms +range_request_timeout: 10000ms +write_request_timeout: 2500ms +counter_write_request_timeout: 5000ms +truncate_request_timeout: 60000ms +request_timeout: 10000ms +``` +Here, we can see that our given configuration is applied to the Cassandra cluster . `read_request_timeout` is set to `6000ms` from the default value `5000ms`. + +### Reconfigure using new config secret + +Now we will reconfigure this cluster to set `read_request_timeout` to `6500ms`. + +Now, update our `cassandra.yaml` file with the new configuration. + +**cassandra.yaml:** + +```properties +read_request_timeout=6500ms +``` + +Then, we will create a new secret with this configuration file. + +At first, create `cassandra.yaml` file containing required configuration settings. + +```bash +$ cat cassandra.yaml +read_request_timeout: 6500ms +``` + +Now, create the secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo new-cas-topology-custom-config --from-file=./cassandra.yaml +secret/new-cas-topology-custom-config created +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/cassandra/reconfigure/new-cassandra-topology-custom-config-secret.yaml +secret/new-cas-topology-custom-config created +``` + +#### Create CassandraOpsRequest + +Now, we will use this secret to replace the previous secret using a `CassandraOpsRequest` CR. The `CassandraOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: CassandraOpsRequest +metadata: + name: casops-reconfigure-topology + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: cassandra-prod + configuration: + configuration: + secretName: new-cas-topology-custom-config + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `cassandra-prod` database. 
+- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.secretName` specifies the name of the new secret. + +Let's create the `CassandraOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/cassandra/reconfigure/cassandra-reconfigure-update-topology-ops.yaml +cassandraopsrequest.ops.kubedb.com/casops-reconfigure-topology created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `Cassandra` object. + +Let's wait for `CassandraOpsRequest` to be `Successful`. Run the following command to watch `CassandraOpsRequest` CR, + +```bash +$ kubectl get cassandraopsrequests -n demo +NAME TYPE STATUS AGE +casops-reconfigure-topology Reconfigure Successful 2m53s +``` + +We can see from the above output that the `CassandraOpsRequest` has succeeded. If we describe the `CassandraOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe cassandraopsrequest -n demo casops-reconfigure-topology +Name: casops-reconfigure-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: CassandraOpsRequest +Metadata: + Creation Timestamp: 2025-07-22T09:14:45Z + Generation: 1 + Resource Version: 141080 + UID: 35eba2d1-6a7f-4288-8529-11c086c85cb9 +Spec: + Apply: IfReady + Configuration: + Config Secret: + Name: new-cas-topology-custom-config + Database Ref: + Name: cassandra-prod + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2025-07-22T09:14:45Z + Message: Cassandra ops-request has started to reconfigure Cassandra nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2025-07-22T09:14:53Z + Message: successfully reconciled the Cassandra with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2025-07-22T09:17:38Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2025-07-22T09:14:58Z + Message: get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 + Observed Generation: 1 + Status: True + Type: GetPod--cassandra-prod-rack-r0-0 + Last Transition Time: 2025-07-22T09:14:58Z + Message: evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 + Observed Generation: 1 + Status: True + Type: EvictPod--cassandra-prod-rack-r0-0 + Last Transition Time: 2025-07-22T09:15:03Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2025-07-22T09:15:38Z + Message: get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 + Observed Generation: 1 + Status: True + Type: GetPod--cassandra-prod-rack-r0-1 + Last Transition Time: 2025-07-22T09:15:38Z + Message: evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 + Observed Generation: 1 + Status: True + Type: EvictPod--cassandra-prod-rack-r0-1 + Last Transition Time: 2025-07-22T09:17:38Z + Message: Successfully completed reconfigure Cassandra + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 3m18s KubeDB Ops-manager Operator 
Start processing for CassandraOpsRequest: demo/casops-reconfigure-topology + Normal Starting 3m18s KubeDB Ops-manager Operator Pausing Cassandra databse: demo/cassandra-prod + Normal Successful 3m18s KubeDB Ops-manager Operator Successfully paused Cassandra database: demo/cassandra-prod for CassandraOpsRequest: casops-reconfigure-topology + Normal UpdatePetSets 3m10s KubeDB Ops-manager Operator successfully reconciled the Cassandra with new configure + Warning get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 3m5s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 + Warning evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 3m5s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 + Warning running pod; ConditionStatus:False 3m KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 2m25s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 + Warning evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 2m25s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 + Warning get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 105s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 + Warning evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 105s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 + Warning get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 65s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 + Warning evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 65s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 + Normal RestartNodes 25s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 25s KubeDB Ops-manager Operator Resuming Cassandra database: demo/cassandra-prod + Normal Successful 25s KubeDB Ops-manager Operator Successfully resumed Cassandra database: demo/cassandra-prod for CassandraOpsRequest: casops-reconfigure-topology +``` + +Now let's exec one of the instance to check the new configuration we have provided. + +```bash +$ kubectl exec -it -n demo cassandra-prod-rack-r0-0 -- bash +Defaulted container "cassandra" out of: cassandra, cassandra-init (init), medusa-init (init) +[cassandra@cassandra-prod-rack-r0-0 /]$ cat /etc/cassandra/cassandra.yaml | grep request_timeout +read_request_timeout: 6500ms +range_request_timeout: 10000ms +write_request_timeout: 2500ms +counter_write_request_timeout: 5000ms +truncate_request_timeout: 60000ms +request_timeout: 10000ms +``` + +As we can see from the configuration of ready cassandra, the value of `read_request_timeout` has been changed from `6000ms` to `6500ms`. So the reconfiguration of the cluster is successful. + + +### Reconfigure using apply config + +Now we will reconfigure this cluster again to set `read_request_timeout` to `5500ms`. This time we won't use a new secret. We will use the `applyConfig` field of the `CassandraOpsRequest`. This will merge the new config in the existing secret. + +#### Create CassandraOpsRequest + +Now, we will use the new configuration in the `applyConfig` field in the `CassandraOpsRequest` CR. 
The `CassandraOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: CassandraOpsRequest +metadata: + name: casops-reconfigure-apply-topology + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: cassandra-prod + configuration: + applyConfig: + cassandra.yaml: |- + read_request_timeout: 5500ms + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `cassandra-prod` cluster. +- `spec.type` specifies that we are performing `Reconfigure` on cassandra. +- `spec.configuration.applyConfig` specifies the new configuration that will be merged in the existing secret. + +Let's create the `CassandraOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/cassandra/reconfigure/cassandra-reconfigure-apply-topology.yaml +cassandraopsrequest.ops.kubedb.com/casops-reconfigure-apply-topology created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. + +Let's wait for `CassandraOpsRequest` to be `Successful`. Run the following command to watch `CassandraOpsRequest` CR, + +```bash +$ kubectl get cassandraopsrequests -n demo casops-reconfigure-apply-topology +NAME TYPE STATUS AGE +casops-reconfigure-apply-topology Reconfigure Successful 55s +``` + +We can see from the above output that the `CassandraOpsRequest` has succeeded. If we describe the `CassandraOpsRequest` we will get an overview of the steps that were followed to reconfigure the cluster. + + + +```bash +$ kubectl describe cassandraopsrequest -n demo casops-reconfigure-apply-topology +Name: casops-reconfigure-apply-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: CassandraOpsRequest +Metadata: + Creation Timestamp: 2025-07-22T09:38:59Z + Generation: 1 + Resource Version: 144161 + UID: 9b7144c6-4b6a-4095-b87e-5a0630e29dae +Spec: + Apply: IfReady + Configuration: + Apply Config: + cassandra.yaml: read_request_timeout: 5500ms + Database Ref: + Name: cassandra-prod + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2025-07-22T09:40:24Z + Message: Cassandra ops-request has started to reconfigure Cassandra nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2025-07-22T09:40:27Z + Message: Successfully prepared user provided custom config secret + Observed Generation: 1 + Reason: PrepareCustomConfig + Status: True + Type: PrepareCustomConfig + Last Transition Time: 2025-07-22T09:40:32Z + Message: successfully reconciled the Cassandra with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2025-07-22T09:43:17Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2025-07-22T09:40:37Z + Message: get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 + Observed Generation: 1 + Status: True + Type: GetPod--cassandra-prod-rack-r0-0 + Last Transition Time: 2025-07-22T09:40:37Z + Message: evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 + Observed Generation: 1 + Status: True + Type: EvictPod--cassandra-prod-rack-r0-0 + Last Transition Time: 2025-07-22T09:40:42Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + 
Status: False + Type: RunningPod + Last Transition Time: 2025-07-22T09:41:17Z + Message: get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 + Observed Generation: 1 + Status: True + Type: GetPod--cassandra-prod-rack-r0-1 + Last Transition Time: 2025-07-22T09:41:17Z + Message: evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 + Observed Generation: 1 + Status: True + Type: EvictPod--cassandra-prod-rack-r0-1 + Last Transition Time: 2025-07-22T09:43:18Z + Message: Successfully completed reconfigure Cassandra + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 3m31s KubeDB Ops-manager Operator Start processing for CassandraOpsRequest: demo/casops-reconfigure-apply-topology + Normal Starting 3m31s KubeDB Ops-manager Operator Pausing Cassandra databse: demo/cassandra-prod + Normal Successful 3m31s KubeDB Ops-manager Operator Successfully paused Cassandra database: demo/cassandra-prod for CassandraOpsRequest: casops-reconfigure-apply-topology + Normal UpdatePetSets 3m23s KubeDB Ops-manager Operator successfully reconciled the Cassandra with new configure + Warning get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 3m18s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 + Warning evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 3m18s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 + Warning running pod; ConditionStatus:False 3m13s KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 2m38s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 + Warning evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 2m38s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 + Warning get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 118s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 + Warning evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 118s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-0 + Warning get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 78s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 + Warning evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 78s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:cassandra-prod-rack-r0-1 + Normal RestartNodes 37s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 37s KubeDB Ops-manager Operator Resuming Cassandra database: demo/cassandra-prod + Normal Successful 37s KubeDB Ops-manager Operator Successfully resumed Cassandra database: demo/cassandra-prod for CassandraOpsRequest: casops-reconfigure-apply-topology +``` + +Now let's exec into one of the instance to check the new configuration we have provided. 
+ +```bash +$ kubectl exec -it -n demo cassandra-prod-rack-r0-0 -- bash +Defaulted container "cassandra" out of: cassandra, cassandra-init (init), medusa-init (init) +[cassandra@cassandra-prod-rack-r0-0 /]$ cat /etc/cassandra/cassandra.yaml | grep request_timeout +read_request_timeout: 5500ms +range_request_timeout: 10000ms +write_request_timeout: 2500ms +counter_write_request_timeout: 5000ms +truncate_request_timeout: 60000ms +request_timeout: 10000ms +``` + +As we can see from the configuration of the ready Cassandra cluster, the value of `read_request_timeout` has been changed from `6500ms` to `5500ms`. So the reconfiguration of the database using the `applyConfig` field is successful. + + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete cas -n demo cassandra-prod +kubectl delete cassandraopsrequest -n demo casops-reconfigure-apply-topology casops-reconfigure-topology +kubectl delete secret -n demo cas-topology-custom-config new-cas-topology-custom-config +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Cassandra object](/docs/guides/cassandra/concepts/cassandra.md). +- Monitor your Cassandra database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/cassandra/monitoring/using-prometheus-operator.md). + +[//]: # (- Monitor your Cassandra database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/cassandra/monitoring/using-builtin-prometheus.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/cassandra/restart/restart.md b/docs/guides/cassandra/restart/restart.md index 5a870c3f73..331b00d286 100644 --- a/docs/guides/cassandra/restart/restart.md +++ b/docs/guides/cassandra/restart/restart.md @@ -43,8 +43,8 @@ metadata: namespace: demo spec: version: 5.0.3 - configSecret: - name: cas-configuration + configuration: + secretName: cas-configuration topology: rack: - name: r0 diff --git a/docs/guides/cassandra/restart/restart.md.bak b/docs/guides/cassandra/restart/restart.md.bak new file mode 100644 index 0000000000..5a870c3f73 --- /dev/null +++ b/docs/guides/cassandra/restart/restart.md.bak @@ -0,0 +1,224 @@ +--- +title: Restart Cassandra +menu: + docs_{{ .version }}: + identifier: cas-restart-details + name: Restart Cassandra + parent: cas-restart + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Restart Cassandra + +KubeDB supports restarting the Cassandra database via a CassandraOpsRequest. Restarting is useful if some pods get stuck in a phase or are not working correctly. This tutorial will show you how to use that. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial.
+ +```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/cassandra](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/cassandra) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Deploy Cassandra + +In this section, we are going to deploy a Cassandra database using KubeDB. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Cassandra +metadata: + name: cassandra-prod + namespace: demo +spec: + version: 5.0.3 + configSecret: + name: cas-configuration + topology: + rack: + - name: r0 + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageType: Durable + deletionPolicy: WipeOut +``` + +Let's create the `Cassandra` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/cassandra/restart/cassandra.yaml +cassandra.kubedb.com/cassandra-prod created +``` + +## Apply Restart opsRequest + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: CassandraOpsRequest +metadata: + name: restart + namespace: demo +spec: + type: Restart + databaseRef: + name: cassandra-prod + timeout: 3m + apply: Always +``` + +- `spec.type` specifies the Type of the ops Request +- `spec.databaseRef` holds the name of the Cassandra CR. It should be available in the same namespace as the opsRequest +- The meaning of `spec.timeout` & `spec.apply` fields will be found [here](/docs/guides/cassandra/concepts/cassandraopsrequest.md#spectimeout) + +> Note: The method of restarting the combined node is exactly same as above. All you need, is to specify the corresponding Cassandra name in `spec.databaseRef.name` section. + +Let's create the `CassandraOpsRequest` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/cassandra/restart/ops.yaml +cassandraopsrequest.ops.kubedb.com/restart created +``` + +Now the Ops-manager operator will first restart the controller pods, then broker of the referenced cassandra. 
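If you want to follow the rolling restart as it happens, one option is to watch the pods alongside the ops request; the label selector below is an assumption based on KubeDB's usual instance labels, not something mandated by this guide:

```bash
# Watch the cassandra-prod pods being evicted and recreated one at a time.
$ kubectl get pods -n demo -l app.kubernetes.io/instance=cassandra-prod -w
```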
+ +```shell +$ kubectl get casops -n demo +NAME TYPE STATUS AGE +restart Restart Successful 119s + +$ kubectl get casops -n demo restart -oyaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: CassandraOpsRequest +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"ops.kubedb.com/v1alpha1","kind":"CassandraOpsRequest","metadata":{"annotations":{},"name":"restart","namespace":"demo"},"spec":{"apply":"Always","databaseRef":{"name":"cassandra-prod"},"timeout":"3m","type":"Restart"}} + creationTimestamp: "2025-07-26T10:12:10Z" + generation: 1 + name: restart + namespace: demo + resourceVersion: "24434" + uid: 956a374e-1d6f-4f68-828f-cfed4410b175 +spec: + apply: Always + databaseRef: + name: cassandra-prod + timeout: 3m + type: Restart +status: + conditions: + - lastTransitionTime: "2025-07-26T10:12:10Z" + message: Cassandra ops-request has started to restart cassandra nodes + observedGeneration: 1 + reason: Restart + status: "True" + type: Restart + - lastTransitionTime: "2025-07-26T10:12:18Z" + message: get pod; ConditionStatus:True; PodName:cassandra-prod-controller-0 + observedGeneration: 1 + status: "True" + type: GetPod--cassandra-prod-controller-0 + - lastTransitionTime: "2025-07-26T10:12:18Z" + message: evict pod; ConditionStatus:True; PodName:cassandra-prod-controller-0 + observedGeneration: 1 + status: "True" + type: EvictPod--cassandra-prod-controller-0 + - lastTransitionTime: "2025-07-26T10:12:23Z" + message: check pod running; ConditionStatus:True; PodName:cassandra-prod-controller-0 + observedGeneration: 1 + status: "True" + type: CheckPodRunning--cassandra-prod-controller-0 + - lastTransitionTime: "2025-07-26T10:12:28Z" + message: get pod; ConditionStatus:True; PodName:cassandra-prod-controller-1 + observedGeneration: 1 + status: "True" + type: GetPod--cassandra-prod-controller-1 + - lastTransitionTime: "2025-07-26T10:12:28Z" + message: evict pod; ConditionStatus:True; PodName:cassandra-prod-controller-1 + observedGeneration: 1 + status: "True" + type: EvictPod--cassandra-prod-controller-1 + - lastTransitionTime: "2025-07-26T10:12:38Z" + message: check pod running; ConditionStatus:True; PodName:cassandra-prod-controller-1 + observedGeneration: 1 + status: "True" + type: CheckPodRunning--cassandra-prod-controller-1 + - lastTransitionTime: "2025-07-26T10:12:43Z" + message: get pod; ConditionStatus:True; PodName:cassandra-prod-broker-0 + observedGeneration: 1 + status: "True" + type: GetPod--cassandra-prod-broker-0 + - lastTransitionTime: "2025-07-26T10:12:43Z" + message: evict pod; ConditionStatus:True; PodName:cassandra-prod-broker-0 + observedGeneration: 1 + status: "True" + type: EvictPod--cassandra-prod-broker-0 + - lastTransitionTime: "2025-07-26T10:13:18Z" + message: check pod running; ConditionStatus:True; PodName:cassandra-prod-broker-0 + observedGeneration: 1 + status: "True" + type: CheckPodRunning--cassandra-prod-broker-0 + - lastTransitionTime: "2025-07-26T10:13:23Z" + message: get pod; ConditionStatus:True; PodName:cassandra-prod-broker-1 + observedGeneration: 1 + status: "True" + type: GetPod--cassandra-prod-broker-1 + - lastTransitionTime: "2025-07-26T10:13:23Z" + message: evict pod; ConditionStatus:True; PodName:cassandra-prod-broker-1 + observedGeneration: 1 + status: "True" + type: EvictPod--cassandra-prod-broker-1 + - lastTransitionTime: "2025-07-26T10:13:28Z" + message: check pod running; ConditionStatus:True; PodName:cassandra-prod-broker-1 + observedGeneration: 1 + status: "True" + type: 
CheckPodRunning--cassandra-prod-broker-1 + - lastTransitionTime: "2025-07-26T10:13:33Z" + message: Successfully Restarted Cassandra nodes + observedGeneration: 1 + reason: RestartNodes + status: "True" + type: RestartNodes + - lastTransitionTime: "2025-07-26T10:13:33Z" + message: Controller has successfully restart the Cassandra replicas + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete cassandraopsrequest -n demo restart +kubectl delete cassandra -n demo cassandra-prod +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Cassandra object](/docs/guides/cassandra/concepts/cassandra.md). +- Monitor your Cassandra database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/cassandra/monitoring/using-prometheus-operator.md). + +[//]: # (- Monitor your Cassandra database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/cassandra/monitoring/using-builtin-prometheus.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/clickhouse/concepts/clickhouse.md b/docs/guides/clickhouse/concepts/clickhouse.md index ad0012b264..65c6ec7925 100644 --- a/docs/guides/clickhouse/concepts/clickhouse.md +++ b/docs/guides/clickhouse/concepts/clickhouse.md @@ -33,8 +33,8 @@ spec: authSecret: kind: Secret name: clickhouse-auth - configSecret: - name: ch-configuration + configuration: + secretName: ch-configuration clusterTopology: clickHouseKeeper: externallyManaged: false @@ -152,9 +152,9 @@ type: Opaque Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator (version 0.13.0 and higher). -### spec.configSecret +### spec.configuration -`spec.configSecret` is an optional field that points to a Secret used to hold custom ClickHouse configuration. If not set, KubeDB operator will use default configuration for ClickHouse. +`spec.configuration` is an optional field that points to a Secret used to hold custom ClickHouse configuration. If not set, KubeDB operator will use default configuration for ClickHouse. ### spec.clusterTopology diff --git a/docs/guides/clickhouse/concepts/clickhouse.md.bak b/docs/guides/clickhouse/concepts/clickhouse.md.bak new file mode 100644 index 0000000000..b73d80b7c6 --- /dev/null +++ b/docs/guides/clickhouse/concepts/clickhouse.md.bak @@ -0,0 +1,374 @@ +--- +title: ClickHouse CRD +menu: + docs_{{ .version }}: + identifier: ch-clickhouse-concepts + name: ClickHouse + parent: ch-concepts-clickhouse + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# ClickHouse + +## What is ClickHouse + +`ClickHouse` is a Kubernetes `Custom Resource Definitions` (CRD). It provides declarative configuration for [ClickHouse](https://clickhouse.com/) in a Kubernetes native way. You only need to describe the desired database configuration in a `ClickHouse` object, and the KubeDB operator will create Kubernetes objects in the desired state for you. + +## ClickHouse Spec + +As with all other Kubernetes objects, a ClickHouse needs `apiVersion`, `kind`, and `metadata` fields. It also needs a `.spec` section. Below is an example ClickHouse object. 
+ +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: ClickHouse +metadata: + name: ch + namespace: demo +spec: + version: 24.4.1 + authSecret: + kind: Secret + name: clickhouse-auth + configuration: + secretName: ch-configuration + clusterTopology: + clickHouseKeeper: + externallyManaged: false + spec: + replicas: 3 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + cluster: + name: appscode-cluster + shards: 2 + replicas: 2 + podTemplate: + spec: + containers: + - name: clickhouse + resources: + limits: + memory: 4Gi + requests: + cpu: 500m + memory: 512Mi + initContainers: + - name: clickhouse-init + resources: + limits: + memory: 1Gi + requests: + cpu: 500m + memory: 512Mi + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + sslVerificationMode: relaxed + tls: + issuerRef: + apiGroup: "cert-manager.io" + kind: Issuer + name: ch-issuer + certificates: + - alias: server + subject: + organizations: + - kubedb:server + dnsNames: + - localhost + ipAddresses: + - "127.0.0.1" + deletionPolicy: WipeOut +``` + +### spec.version + +`spec.version` is a required field specifying the name of the [ClickHouseVersion](/docs/guides/clickhouse/concepts/clickhouseversion.md) crd where the docker images are specified. Currently, when you install KubeDB, it creates the following `ClickHouse` resources, + +- `24.4.1` +- `25.7.1` + +### spec.replicas + +`spec.replicas` the number of members in ClickHouse replicaset of Standalone mode. In Standalone Mode replica should be 1. + +If `spec.clusterTopology` is set, then `spec.replicas` needs to be empty. Instead use `spec.clusterTopology.cluster.replicas`. + +KubeDB uses `PodDisruptionBudget` to ensure that majority of these replicas are available during [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions) so that quorum is maintained. + +### spec.authSecret + +`spec.authSecret` is an optional field that points to a Secret used to hold credentials for `clickhouse` admin user. If not set, KubeDB operator creates a new Secret `{clickhouse-object-name}-auth` for storing the password for `admin` user for each ClickHouse object. + +We can use this field in 3 mode. +1. Using an external secret. In this case, You need to create an auth secret first with required fields, then specify the secret name when creating the ClickHouse object using `spec.authSecret.name` & set `spec.authSecret.externallyManaged` to true. +```yaml +authSecret: + name: + externallyManaged: true +``` + +2. Specifying the secret name only. In this case, You need to specify the secret name when creating the ClickHouse object using `spec.authSecret.name`. `externallyManaged` is by default false. +```yaml +authSecret: + name: +``` + +3. Let KubeDB do everything for you. In this case, no work for you. + +AuthSecret contains a `user` key and a `password` key which contains the `username` and `password` respectively for ClickHouse `admin` user. 
+ +Example: + +```bash +$ kubectl create secret generic clickhouse-auth -n demo \ +--from-literal=username=jhon-doe \ +--from-literal=password=6q8u_2jMOW-OOZXk +secret "clickhouse-auth" created +``` + +```yaml +apiVersion: v1 +data: + password: NnE4dV8yak1PVy1PT1pYaw== + username: amhvbi1kb2U= +kind: Secret +metadata: + name: clickhouse-auth + namespace: demo +type: Opaque +``` + +Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator (version 0.13.0 and higher). + +### spec.configSecret + +`spec.configSecret` is an optional field that points to a Secret used to hold custom ClickHouse configuration. If not set, KubeDB operator will use default configuration for ClickHouse. + +### spec.clusterTopology + +`spec.clusterTopology` represents the topology configuration for ClickHouse cluster. + +When `spec.clusterTopology` is set, the following fields needs to be empty, otherwise validating webhook will throw error. + +- `spec.replicas` +- `spec.podTemplate` +- `spec.storage` + +#### spec.clusterTopology.cluster + +`cluster` is an optional field that provides a way to configure clickhouse cluster. +Available configurable fields: + +- `name` (`: "appscode-cluster"`) — is a `mandatory` field that specifies the unique name of the cluster. All cluster name should be unique. +- `shards` (`: "2""`) - is an `optional` field to specify the number of clickhouse shards in the cluster. Shard represents a horizontal partition of data, meaning it holds a subset of the entire dataset This field must hold a value greater than `0`. +- `replicas` (`: "3"`) — is an `optional` field to specify the number of ClickHouse nodes (pods) to deploy in this shard. This field must hold a value greater than `0`. A replica is a complete copy of the data within a specific shard. Multiple replicas of a shard are maintained on different servers to provide redundancy. +- `podTemplate` (`: ""`) — is an `optional` field that allows you to customize pod-level configurations (like affinity, tolerations, nodeSelector, container resources) for pods within this shard. +- `storage` (`: "resources.requests.storage: 10Gi"`) — is an optional field to define how persistent storage should be configured for the pods in this shard. It uses a standard PersistentVolumeClaimSpec format. +- `storageType` (`: "Durable"`) — is an `optional` field to specify whether the pods in this rack should use `Durable` (persistent disk-backed) or `Ephemeral` (temporary) storage. Defaults to `Durable`. + +### spec.tls + +`spec.tls` specifies the TLS/SSL configurations. The KubeDB operator supports TLS management by using the [cert-manager](https://cert-manager.io/). + +```yaml +spec: + tls: + issuerRef: + apiGroup: "cert-manager.io" + kind: Issuer + name: ch-issuer + certificates: + - alias: server + subject: + organizations: + - kubedb:server + dnsNames: + - localhost + ipAddresses: + - "127.0.0.1" +``` + +The `spec.tls` contains the following fields: + +- `tls.issuerRef` - is an `optional` field that references to the `Issuer` or `ClusterIssuer` custom resource object of [cert-manager](https://cert-manager.io/docs/concepts/issuer/). It is used to generate the necessary certificate secrets for ClickHouse. If the `issuerRef` is not specified, the operator creates a self-signed CA and also creates necessary certificate (valid: 365 days) secrets using that CA. + - `apiGroup` - is the group name of the resource that is being referenced. 
Currently, the only supported value is `cert-manager.io`. + - `kind` - is the type of resource that is being referenced. The supported values are `Issuer` and `ClusterIssuer`. + - `name` - is the name of the resource ( `Issuer` or `ClusterIssuer` ) that is being referenced. + +- `tls.certificates` - is an `optional` field that specifies a list of certificate configurations used to configure the certificates. It has the following fields: + - `alias` - represents the identifier of the certificate. It has the following possible value: + - `server` - is used for the server certificate configuration. + - `client` - is used for the client certificate configuration. + + - `secretName` - ( `string` | `"-alias-cert"` ) - specifies the k8s secret name that holds the certificates. + + - `subject` - specifies an `X.509` distinguished name (DN). It has the following configurable fields: + - `organizations` ( `[]string` | `nil` ) - is a list of organization names. + - `organizationalUnits` ( `[]string` | `nil` ) - is a list of organization unit names. + - `countries` ( `[]string` | `nil` ) - is a list of country names (ie. Country Codes). + - `localities` ( `[]string` | `nil` ) - is a list of locality names. + - `provinces` ( `[]string` | `nil` ) - is a list of province names. + - `streetAddresses` ( `[]string` | `nil` ) - is a list of street addresses. + - `postalCodes` ( `[]string` | `nil` ) - is a list of postal codes. + - `serialNumber` ( `string` | `""` ) is a serial number. + + For more details, visit [here](https://golang.org/pkg/crypto/x509/pkix/#Name). + + - `duration` ( `string` | `""` ) - is the period during which the certificate is valid. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300m"`, `"1.5h"` or `"20h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + - `renewBefore` ( `string` | `""` ) - is a specifiable time before expiration duration. + - `dnsNames` ( `[]string` | `nil` ) - is a list of subject alt names. + - `ipAddresses` ( `[]string` | `nil` ) - is a list of IP addresses. + - `uris` ( `[]string` | `nil` ) - is a list of URI Subject Alternative Names. + - `emailAddresses` ( `[]string` | `nil` ) - is a list of email Subject Alternative Names. + +### spec.podTemplate + +KubeDB allows providing a template for database pod through `spec.podTemplate`. KubeDB operator will pass the information provided in `spec.podTemplate` to the PetSet created for ClickHouse cluster. + +KubeDB accept following fields to set in `spec.podTemplate:` + +- metadata: + - annotations (pod's annotation) + - labels (pod's labels) +- controller: + - annotations (petset's annotation) + - labels (petset's labels) +- spec: + - containers + - volumes + - podPlacementPolicy + - initContainers + - containers + - imagePullSecrets + - nodeSelector + - serviceAccountName + - schedulerName + - tolerations + - priorityClassName + - priority + - securityContext + +You can check out the full list [here](https://github.com/kmodules/offshoot-api/blob/master/api/v2/types.go#L26C1-L279C1). +Uses of some field of `spec.podTemplate` is described below, + +#### spec.podTemplate.spec.tolerations + +The `spec.podTemplate.spec.tolerations` is an optional field. This can be used to specify the pod's tolerations. + +#### spec.podTemplate.spec.volumes + +The `spec.podTemplate.spec.volumes` is an optional field. This can be used to provide the list of volumes that can be mounted by containers belonging to the pod. 
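For illustration, a pod template that combines a few of the fields above might look like the following sketch (the toleration key and volume name are placeholders, not values required by KubeDB):

```yaml
spec:
  podTemplate:
    spec:
      tolerations:
        - key: "dedicated"          # hypothetical taint used to reserve nodes
          operator: "Equal"
          value: "clickhouse"
          effect: "NoSchedule"
      volumes:
        - name: extra-scripts       # hypothetical extra volume mounted by a container
          emptyDir: {}
      containers:
        - name: clickhouse
          resources:
            requests:
              cpu: 500m
              memory: 512Mi
```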
+ +#### spec.podTemplate.spec.podPlacementPolicy + +`spec.podTemplate.spec.podPlacementPolicy` is an optional field. This can be used to provide the reference of the `podPlacementPolicy`. `name` of the podPlacementPolicy is referred under this attribute. This will be used by our Petset controller to place the db pods throughout the region, zone & nodes according to the policy. It utilizes kubernetes affinity & podTopologySpreadContraints feature to do so. +```yaml +spec: + podPlacementPolicy: + name: default +``` + +#### spec.podTemplate.spec.nodeSelector + +`spec.podTemplate.spec.nodeSelector` is an optional field that specifies a map of key-value pairs. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). To learn more, see [here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) . + +### spec.serviceTemplates + +You can also provide template for the services created by KubeDB operator for ClickHouse cluster through `spec.serviceTemplates`. This will allow you to set the type and other properties of the services. + +KubeDB allows following fields to set in `spec.serviceTemplates`: +- `alias` represents the identifier of the service. It has the following possible value: + - `stats` for is used for the `exporter` service identification. + +There are two options for providing serviceTemplates: +- To provide `serviceTemplates` for a specific service, the `serviceTemplates.ports.port` should be equal to the port of that service and `serviceTemplate` will be used for that particular service only. +- However, to provide a common `serviceTemplates`, `serviceTemplates.ports.port` should be empty. + +- metadata: + - labels + - annotations +- spec: + - type + - ports + - clusterIP + - externalIPs + - loadBalancerIP + - loadBalancerSourceRanges + - externalTrafficPolicy + - healthCheckNodePort + - sessionAffinityConfig + +See [here](https://github.com/kmodules/offshoot-api/blob/kubernetes-1.21.1/api/v1/types.go#L237) to understand these fields in detail. + + +#### spec.podTemplate.spec.containers + +The `spec.podTemplate.spec.containers` can be used to provide the list containers and their configurations for to the database pod. some of the fields are described below, + +##### spec.podTemplate.spec.containers[].name +The `spec.podTemplate.spec.containers[].name` field used to specify the name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + +##### spec.podTemplate.spec.containers[].args +`spec.podTemplate.spec.containers[].args` is an optional field. This can be used to provide additional arguments to database installation. + +##### spec.podTemplate.spec.containers[].env + +`spec.podTemplate.spec.containers[].env` is an optional field that specifies the environment variables to pass to the ClickHouse containers. + +##### spec.podTemplate.spec.containers[].resources + +`spec.podTemplate.spec.containers[].resources` is an optional field. This can be used to request compute resources required by containers of the database pods. To learn more, visit [here](http://kubernetes.io/docs/user-guide/compute-resources/). + +### spec.deletionPolicy + +`deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `ClickHouse` crd or which resources KubeDB should keep or delete when you delete `ClickHouse` crd. 
KubeDB provides following four deletion policies: + +- DoNotTerminate +- WipeOut +- Halt +- Delete + +When `deletionPolicy` is `DoNotTerminate`, KubeDB takes advantage of `ValidationWebhook` feature in Kubernetes 1.9.0 or later clusters to implement `DoNotTerminate` feature. If admission webhook is enabled, `DoNotTerminate` prevents users from deleting the database as long as the `spec.deletionPolicy` is set to `DoNotTerminate`. + +Following table show what KubeDB does when you delete ClickHouse crd for different termination policies, + +| Behavior | DoNotTerminate | Halt | Delete | WipeOut | +| ----------------------------------- | :------------: | :------: | :------: | :------: | +| 1. Block Delete operation | ✓ | ✗ | ✗ | ✗ | +| 2. Delete PetSet | ✗ | ✓ | ✓ | ✓ | +| 3. Delete Services | ✗ | ✓ | ✓ | ✓ | +| 4. Delete PVCs | ✗ | ✗ | ✓ | ✓ | +| 5. Delete Secrets | ✗ | ✗ | ✗ | ✓ | +| 6. Delete Snapshots | ✗ | ✗ | ✗ | ✓ | + +If you don't specify `spec.deletionPolicy` KubeDB uses `Delete` termination policy by default. + + +## spec.healthChecker +It defines the attributes for the health checker. +- `spec.healthChecker.periodSeconds` specifies how often to perform the health check. +- `spec.healthChecker.timeoutSeconds` specifies the number of seconds after which the probe times out. +- `spec.healthChecker.failureThreshold` specifies minimum consecutive failures for the healthChecker to be considered failed. +- `spec.healthChecker.disableWriteCheck` specifies whether to disable the writeCheck or not. + +Know details about KubeDB Health checking from this [blog post](https://appscode.com/blog/post/kubedb-health-checker/). + +## Next Steps + +- Learn how to use KubeDB to run Apache ClickHouse cluster [here](/docs/guides/clickhouse/README.md). +- Detail concepts of [ClickHouseVersion object](/docs/guides/clickhouse/concepts/clickhouseversion.md). + +[//]: # (- Learn to use KubeDB managed ClickHouse objects using [CLIs](/docs/guides/clickhouse/cli/cli.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/clickhouse/concepts/clickhouseopsrequest.md b/docs/guides/clickhouse/concepts/clickhouseopsrequest.md index 44f720a732..72929e96a8 100644 --- a/docs/guides/clickhouse/concepts/clickhouseopsrequest.md +++ b/docs/guides/clickhouse/concepts/clickhouseopsrequest.md @@ -154,8 +154,8 @@ metadata: spec: apply: IfReady configuration: - configSecret: - name: ch-custom-config + configuration: + secretName: ch-custom-config databaseRef: name: clickhouse-prod type: Reconfigure diff --git a/docs/guides/clickhouse/concepts/clickhouseopsrequest.md.bak b/docs/guides/clickhouse/concepts/clickhouseopsrequest.md.bak new file mode 100644 index 0000000000..44f720a732 --- /dev/null +++ b/docs/guides/clickhouse/concepts/clickhouseopsrequest.md.bak @@ -0,0 +1,445 @@ +--- +title: ClickHouseOpsRequests CRD +menu: + docs_{{ .version }}: + identifier: ch-opsrequest-concepts + name: ClickHouseOpsRequest + parent: ch-concepts-clickhouse + weight: 15 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + + +> New to KubeDB? Please start [here](/docs/README.md). + +# ClickHouseOpsRequest + +## What is ClickHouseOpsRequest + +`ClickHouseOpsRequest` is a Kubernetes `Custom Resource Definitions` (CRD). It provides a declarative configuration for [ClickHouse](https://clickhouse.com/) administrative operations like database version updating, horizontal scaling, vertical scaling etc. in a Kubernetes native way. 
+ +## ClickHouseOpsRequest CRD Specifications + +Like any official Kubernetes resource, a `ClickHouseOpsRequest` has `TypeMeta`, `ObjectMeta`, `Spec` and `Status` sections. + +Here, some sample `ClickHouseOpsRequest` CRs for different administrative operations is given below: + +Sample `ClickHouseOpsRequest` for updating database: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ClickHouseOpsRequest +metadata: + name: update-version + namespace: demo +spec: + apply: IfReady + databaseRef: + name: clickhouse-prod + type: UpdateVersion + updateVersion: + targetVersion: 25.7.1 +status: + conditions: + - lastTransitionTime: "2025-08-21T07:54:21Z" + message: Successfully completed update clickhouse version + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +Sample `ClickHouseOpsRequest` for Horizontal Scaling of Database Cluster: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ClickHouseOpsRequest +metadata: + name: chops-scale-horizontal-up + namespace: demo +spec: + apply: IfReady + databaseRef: + name: clickhouse-prod + horizontalScaling: + replicas: 3 + type: HorizontalScaling +status: + conditions: + - lastTransitionTime: "2025-08-21T08:04:41Z" + message: Successfully completed horizontally scale ClickHouse cluster + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +Sample `ClickHouseOpsRequest` for Vertical Scaling of Database: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ClickHouseOpsRequest +metadata: + name: ch-scale-vertical-cluster + namespace: demo +spec: + apply: IfReady + databaseRef: + name: clickhouse-prod + type: VerticalScaling + verticalScaling: + node: + resources: + limits: + cpu: "2" + memory: 2Gi + requests: + cpu: "2" + memory: 2Gi +status: + conditions: + - lastTransitionTime: "2025-08-21T08:15:43Z" + message: Successfully completed the vertical scaling for ClickHouse + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +Sample `ClickHouseOpsRequest` Objects for Reconfiguring ClickHouse database with config: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ClickHouseOpsRequest +metadata: + name: chops-reconfiugre + namespace: demo +spec: + apply: IfReady + configuration: + applyConfig: + config.yaml: | + profiles: + default: + max_query_size: 180000 + databaseRef: + name: clickhouse-prod + type: Reconfigure +status: + conditions: + - lastTransitionTime: "2025-08-21T08:27:41Z" + message: Successfully completed reconfigure ClickHouse + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` +Sample `ClickHouseOpsRequest` Objects for Reconfiguring ClickHouse database with secret: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ClickHouseOpsRequest +metadata: + name: chops-reconfiugre + namespace: demo +spec: + apply: IfReady + configuration: + configSecret: + name: ch-custom-config + databaseRef: + name: clickhouse-prod + type: Reconfigure +status: + conditions: + - lastTransitionTime: "2025-08-21T10:00:04Z" + message: Successfully completed reconfigure ClickHouse + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +Sample `ClickHouseOpsRequest` Objects for Volume Expansion of ClickHouse: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: 
ClickHouseOpsRequest +metadata: + name: ch-offline-volume-expansion + namespace: demo +spec: + apply: IfReady + databaseRef: + name: clickhouse-prod + type: VolumeExpansion + volumeExpansion: + mode: Offline + node: 2Gi +status: + conditions: + - lastTransitionTime: "2025-08-21T10:36:53Z" + message: Successfully completed volumeExpansion for ClickHouse + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful + +``` + +Sample `ClickHouseOpsRequest` Objects for Reconfiguring TLS of the database: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ClickHouseOpsRequest +metadata: + name: chops-add-tls + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: clickhouse-prod + tls: + sslVerificationMode: "strict" + issuerRef: + apiGroup: cert-manager.io + kind: Issuer + name: ch-issuer + certificates: + - alias: server + subject: + organizations: + - kubedb:server + dnsNames: + - localhost + ipAddresses: + - "127.0.0.1" +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ClickHouseOpsRequest +metadata: + name: chops-rotate + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: clickhouse-prod + tls: + rotateCertificates: true +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ClickHouseOpsRequest +metadata: + name: chops-update-issuer + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: clickhouse-prod + tls: + issuerRef: + name: ch-new-issuer + kind: Issuer + apiGroup: "cert-manager.io" +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ClickHouseOpsRequest +metadata: + name: chops-remove-tls + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: clickhouse-prod + tls: + remove: true +``` + +Here, we are going to describe the various sections of a `ClickHouseOpsRequest` crd. + +A `ClickHouseOpsRequest` object has the following fields in the `spec` section. + +### spec.databaseRef + +`spec.databaseRef` is a required field that point to the [ClickHouse](/docs/guides/clickhouse/concepts/clickhouse.md) object for which the administrative operations will be performed. This field consists of the following sub-field: + +- **spec.databaseRef.name :** specifies the name of the [ClickHouse](/docs/guides/clickhouse/concepts/clickhouse.md) object. + +### spec.type + +`spec.type` specifies the kind of operation that will be applied to the database. Currently, the following types of operations are allowed in `ClickHouseOpsRequest`. + +- `UpdateVersion` +- `HorizontalScaling` +- `VerticalScaling` +- `VolumeExpansion` +- `Reconfigure` +- `ReconfigureTLS` +- `Restart` + +> You can perform only one type of operation on a single `ClickHouseOpsRequest` CR. For example, if you want to update your database and scale up its replica then you have to create two separate `ClickHouseOpsRequest`. At first, you have to create a `ClickHouseOpsRequest` for updating. Once it is completed, then you can create another `ClickHouseOpsRequest` for scaling. + +### spec.updateVersion + +If you want to update you ClickHouse version, you have to specify the `spec.updateVersion` section that specifies the desired version information. This field consists of the following sub-field: + +- `spec.updateVersion.targetVersion` refers to a [ClickHouseVersion](/docs/guides/clickhouse/concepts/clickhouseversion.md) CR that contains the ClickHouse version information where you want to update. + +> You can only update between ClickHouse versions. 
KubeDB does not support downgrade for ClickHouse. + +### spec.horizontalScaling.node + +If you want to scale-up or scale-down your ClickHouse cluster or different components of it, you have to specify `spec.horizontalScaling.node` section. + +### spec.verticalScaling.node + +`spec.verticalScaling.node` is a required field specifying the information of `ClickHouse` resources like `cpu`, `memory` etc that will be scaled. +this has the below structure: + +```yaml +requests: + memory: "200Mi" + cpu: "0.1" +limits: + memory: "300Mi" + cpu: "0.2" +``` + +Here, when you specify the resource request, the scheduler uses this information to decide which node to place the container of the Pod on and when you specify a resource limit for the container, the `kubelet` enforces those limits so that the running container is not allowed to use more of that resource than the limit you set. You can found more details from [here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + +### spec.volumeExpansion + +> To use the volume expansion feature the storage class must support volume expansion + +If you want to expand the volume of your ClickHouse cluster or different components of it, you have to specify `spec.volumeExpansion` section. This field consists of the following sub-field: + +- `spec.mode` specifies the volume expansion mode. Supported values are `Online` & `Offline`. The default is `Online`. +- `spec.volumeExpansion.node` indicates the desired size for the persistent volume for a ClickHouse cluster. + +Example usage of this field is given below: + +```yaml +spec: + volumeExpansion: + node: "2Gi" +``` + +This will expand the volume size of all the combined nodes to 2 GB. + +### spec.configuration + +If you want to reconfigure your Running ClickHouse cluster or different components of it with new custom configuration, you have to specify `spec.configuration` section. This field consists of the following sub-field: + +- `spec.configuration.configSecret` points to a secret in the same namespace of a ClickHouse resource, which contains the new custom configurations. If there are any configSecret set before in the database, this secret will replace it. + +- `applyConfig` is a map where the key represents the target config file (e.g., config.yaml) and the value contains the corresponding configuration content. + +```yaml + applyConfig: + config.yaml: | + profiles: + default: + max_query_size: 180000 +``` + +- `removeCustomConfig` is a boolean field. Specify this field to true if you want to remove all the custom configuration from the deployed clickhouse cluster. + +### spec.tls + +If you want to reconfigure the TLS configuration of your ClickHouse i.e. add TLS, remove TLS, update issuer/cluster issuer or Certificates and rotate the certificates, you have to specify `spec.tls` section. This field consists of the following sub-field: + +- `spec.tls.issuerRef` specifies the issuer name, kind and api group. +- `spec.tls.certificates` specifies the certificates. You can learn more about this field from [here](/docs/guides/clickhouse/concepts/clickhouse.md#spectls). +- `spec.tls.rotateCertificates` specifies that we want to rotate the certificate of this clickhouse. +- `spec.tls.remove` specifies that we want to remove tls from this clickhouse. + +### spec.timeout +As we internally retry the ops request steps multiple times, This `timeout` field helps the users to specify the timeout for those steps of the ops request (in second). 
+If a step doesn't finish within the specified timeout, the ops request will result in failure. + +### spec.apply +This field controls the execution of obsRequest depending on the database state. It has two supported values: `Always` & `IfReady`. +Use IfReady, if you want to process the opsRequest only when the database is Ready. And use Always, if you want to process the execution of opsReq irrespective of the Database state. + +### ClickHouseOpsRequest `Status` + +`.status` describes the current state and progress of a `ClickHouseOpsRequest` operation. It has the following fields: + +### status.phase + +`status.phase` indicates the overall phase of the operation for this `ClickHouseOpsRequest`. It can have the following three values: + +| Phase | Meaning | +|-------------|----------------------------------------------------------------------------------| +| Successful | KubeDB has successfully performed the operation requested in the ClickHouseOpsRequest | +| Progressing | KubeDB has started the execution of the applied ClickHouseOpsRequest | +| Failed | KubeDB has failed the operation requested in the ClickHouseOpsRequest | +| Denied | KubeDB has denied the operation requested in the ClickHouseOpsRequest | +| Skipped | KubeDB has skipped the operation requested in the ClickHouseOpsRequest | + +Important: Ops-manager Operator can skip an opsRequest, only if its execution has not been started yet & there is a newer opsRequest applied in the cluster. `spec.type` has to be same as the skipped one, in this case. + +### status.observedGeneration + +`status.observedGeneration` shows the most recent generation observed by the `ClickHouseOpsRequest` controller. + +### status.conditions + +`status.conditions` is an array that specifies the conditions of different steps of `ClickHouseOpsRequest` processing. Each condition entry has the following fields: + +- `types` specifies the type of the condition. ClickHouseOpsRequest has the following types of conditions: + +| Type | Meaning | +|-------------------------------|---------------------------------------------------------------------------| +| `Progressing` | Specifies that the operation is now in the progressing state | +| `Successful` | Specifies such a state that the operation on the database was successful. | +| `HaltDatabase` | Specifies such a state that the database is halted by the operator | +| `ResumeDatabase` | Specifies such a state that the database is resumed by the operator | +| `Failed` | Specifies such a state that the operation on the database failed. 
| +| `StartingBalancer` | Specifies such a state that the balancer has successfully started | +| `StoppingBalancer` | Specifies such a state that the balancer has successfully stopped | +| `UpdateShardImage` | Specifies such a state that the Shard Images has been updated | +| `UpdateReplicaSetImage` | Specifies such a state that the Replicaset Image has been updated | +| `UpdateConfigServerImage` | Specifies such a state that the ConfigServer Image has been updated | +| `UpdatePetSetResources` | Specifies such a state that the Petset resources has been updated | +| `UpdateShardResources` | Specifies such a state that the Shard resources has been updated | +| `UpdateReplicaSetResources` | Specifies such a state that the Replicaset resources has been updated | +| `UpdateConfigServerResources` | Specifies such a state that the ConfigServer resources has been updated | +| `ScaleDownReplicaSet` | Specifies such a state that the scale down operation of replicaset | +| `ScaleUpReplicaSet` | Specifies such a state that the scale up operation of replicaset | +| `ScaleUpShardReplicas` | Specifies such a state that the scale up operation of shard replicas | +| `ScaleDownShardReplicas` | Specifies such a state that the scale down operation of shard replicas | +| `ScaleDownConfigServer` | Specifies such a state that the scale down operation of config server | +| `ScaleUpConfigServer` | Specifies such a state that the scale up operation of config server | +| `VolumeExpansion` | Specifies such a state that the volume expansion operaton of the database | +| `ReconfigureReplicaset` | Specifies such a state that the reconfiguration of replicaset nodes | +| `ReconfigureShard` | Specifies such a state that the reconfiguration of shard nodes | +| `ReconfigureConfigServer` | Specifies such a state that the reconfiguration of config server nodes | + +- The `status` field is a string, with possible values `True`, `False`, and `Unknown`. + - `status` will be `True` if the current transition succeeded. + - `status` will be `False` if the current transition failed. + - `status` will be `Unknown` if the current transition was denied. +- The `message` field is a human-readable message indicating details about the condition. +- The `reason` field is a unique, one-word, CamelCase reason for the condition's last transition. +- The `lastTransitionTime` field provides a timestamp for when the operation last transitioned from one state to another. +- The `observedGeneration` shows the most recent condition transition generation observed by the controller. diff --git a/docs/guides/clickhouse/configuration/using-config-file.md b/docs/guides/clickhouse/configuration/using-config-file.md index 539720f0c6..34ff72a13b 100644 --- a/docs/guides/clickhouse/configuration/using-config-file.md +++ b/docs/guides/clickhouse/configuration/using-config-file.md @@ -33,11 +33,11 @@ KubeDB supports providing custom configuration for ClickHouse. This tutorial wil ## Overview -ClickHouse allows configuring via configuration file. The default configuration file for ClickHouse deployed by `KubeDB` can be found in `/etc/clickhouse-server/config.xml`. When `spec.configSecret` is set to clickhouse, KubeDB operator will get the secret and after that it will validate the values of the secret and then will keep the validated customizable configurations from the user and merge it with the remaining default config. After all that this secret will be mounted to clickhouse for use it as the configuration file. 
+ClickHouse allows configuring via configuration file. The default configuration file for ClickHouse deployed by `KubeDB` can be found in `/etc/clickhouse-server/config.xml`. When `spec.configuration` is set to clickhouse, KubeDB operator will get the secret and after that it will validate the values of the secret and then will keep the validated customizable configurations from the user and merge it with the remaining default config. After all that this secret will be mounted to clickhouse for use it as the configuration file. > To learn available configuration option of ClickHouse see [Configuration Options](https://clickhouse.com/docs/operations/configuration-files). -At first, you have to create a secret with your configuration file contents as the value of this key `clickhouse.yaml`. Then, you have to specify the name of this secret in `spec.configSecret.name` section while creating clickhouse CRO. +At first, you have to create a secret with your configuration file contents as the value of this key `clickhouse.yaml`. Then, you have to specify the name of this secret in `spec.configuration.secretName` section while creating clickhouse CRO. ## Custom Configuration @@ -79,7 +79,7 @@ profiles: max_query_size: 200000 ``` -Now, create clickhouse crd specifying `spec.configSecret` field. +Now, create clickhouse crd specifying `spec.configuration` field. ```yaml apiVersion: kubedb.com/v1alpha2 @@ -89,8 +89,8 @@ metadata: namespace: demo spec: version: 24.4.1 - configSecret: - name: clickhouse-configuration + configuration: + secretName: clickhouse-configuration replicas: 1 storage: accessModes: diff --git a/docs/guides/clickhouse/configuration/using-config-file.md.bak b/docs/guides/clickhouse/configuration/using-config-file.md.bak new file mode 100644 index 0000000000..3e0d83fe4c --- /dev/null +++ b/docs/guides/clickhouse/configuration/using-config-file.md.bak @@ -0,0 +1,155 @@ +--- +title: Configuring clickhouse Using Config File +menu: + docs_{{ .version }}: + identifier: ch-configuration-using-config-file + name: Configure Using Config File + parent: ch-configuration + weight: 15 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Using Custom Configuration File + +KubeDB supports providing custom configuration for ClickHouse. This tutorial will show you how to use KubeDB to run a ClickHouse with custom configuration. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. Run the following command to prepare your cluster for this tutorial: + + ```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: The yaml files used in this tutorial are stored in [docs/examples/clickhouse](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/clickhouse) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +ClickHouse allows configuring via configuration file. The default configuration file for ClickHouse deployed by `KubeDB` can be found in `/etc/clickhouse-server/config.xml`. 
When `spec.configSecret` is set to clickhouse, KubeDB operator will get the secret and after that it will validate the values of the secret and then will keep the validated customizable configurations from the user and merge it with the remaining default config. After all that this secret will be mounted to clickhouse for use it as the configuration file. + +> To learn available configuration option of ClickHouse see [Configuration Options](https://clickhouse.com/docs/operations/configuration-files). + +At first, you have to create a secret with your configuration file contents as the value of this key `clickhouse.yaml`. Then, you have to specify the name of this secret in `spec.configuration.secretName` section while creating clickhouse CRO. + +## Custom Configuration + +At first, create `clickhouse.yaml` file containing required configuration settings. + +```bash +$ cat clickhouse-config.yaml +profiles: + default: + max_query_size: 200000 +``` + +Now, create the secret with this configuration file. + +```bash +➤ kubectl create secret generic -n demo clickhouse-configuration --from-file=./clickhouse-config.yaml +secret/clickhouse-configuration created +``` + +Verify the secret has the configuration file. + +```bash +➤ kubectl get secret -n demo clickhouse-configuration -oyaml +apiVersion: v1 +data: + clickhouse.yaml: cHJvZmlsZXM6CiAgZGVmYXVsdDoKICAgIG1heF9xdWVyeV9zaXplOiAxNTAwMDA= +kind: Secret +metadata: + creationTimestamp: "2025-08-20T12:05:24Z" + name: clickhouse-configuration + namespace: demo + resourceVersion: "199185" + uid: a3439cc2-af41-441a-ad07-56572c86b9c2 +type: Opaque + +➤ echo cHJvZmlsZXM6CiAgZGVmYXVsdDoKICAgIG1heF9xdWVyeV9zaXplOiAxNTAwMDA= | base64 -d +profiles: + default: + max_query_size: 200000 +``` + +Now, create clickhouse crd specifying `spec.configSecret` field. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: ClickHouse +metadata: + name: ch-standalone + namespace: demo +spec: + version: 24.4.1 + configuration: + secretName: clickhouse-configuration + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + deletionPolicy: WipeOut +``` + +```bash +➤ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/clickhouse/configuration/ch-custom-config-standalone.yaml +clickhouse.kubedb.com/ch-standalone created +``` + +Now, wait a few minutes. KubeDB operator will create necessary petset, services, secret etc. If everything goes well, we will see that a pod with the name `ch-standalone-0` has been created. + +Check that the petset's pod is running + +```bash +➤ kubectl get pod -n demo +NAME READY STATUS RESTARTS AGE +ch-standalone-0 1/1 Running 0 21m + +``` + +Now, we will check if the clickhouse has started with the custom configuration we have provided. 
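+
+Besides inspecting the mounted file (shown next), you can also ask ClickHouse directly for the effective value. This is a minimal sketch: the `admin` user and the `<password>` placeholder are assumptions — use the credentials stored in the database's auth secret.
+
+```bash
+# Query the effective setting via clickhouse-client from inside the pod.
+# Substitute the real username/password from the database's auth secret.
+$ kubectl exec -n demo ch-standalone-0 -c clickhouse -- clickhouse-client \
+    --user=admin --password='<password>' \
+    --query "SELECT name, value FROM system.settings WHERE name = 'max_query_size'"
+```
+
+If the custom profile is in effect for that user, this should report `200000`.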
+ +Now, you can exec into the clickhouse pod and find if the custom configuration is there, + +```bash +➤ kubectl exec -it -n demo ch-standalone-0 -- bash +Defaulted container "clickhouse" out of: clickhouse, clickhouse-init (init) +clickhouse@ch-standalone-0:/$ cd /etc/clickhouse-server/conf.d +clickhouse@ch-standalone-0:/etc/clickhouse-server/conf.d$ ls +clickhouse-config.yaml server-config.yaml +clickhouse@ch-standalone-0:/etc/clickhouse-server/conf.d$ cat clickhouse-config.yaml +profiles: + default: + max_query_size: 200000 +clickhouse@ch-standalone-0:/etc/clickhouse-server/conf.d$ exit +exit + +``` + +As we can see from the configuration of running clickhouse, the value of `max_query_size` has been set to our desired value successfully. + +## Cleaning up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete ch -n demo ch-standalone +kubectl delete secret -n demo clickhouse-configuration +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [ClickHouse object](/docs/guides/clickhouse/concepts/clickhouse.md). +- Detail concepts of [ClickHouseVersion object](/docs/guides/clickhouse/concepts/clickhouseversion.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/clickhouse/reconfigure/reconfigure.md b/docs/guides/clickhouse/reconfigure/reconfigure.md index 59616e0222..25be5c85a0 100644 --- a/docs/guides/clickhouse/reconfigure/reconfigure.md +++ b/docs/guides/clickhouse/reconfigure/reconfigure.md @@ -78,7 +78,7 @@ secret/ch-custom-config created ``` -In this section, we are going to create a ClickHouse object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `ClickHouse` CR that we are going to create, +In this section, we are going to create a ClickHouse object specifying `spec.configuration` field to apply this custom configuration. Below is the YAML of the `ClickHouse` CR that we are going to create, ```yaml apiVersion: kubedb.com/v1alpha2 @@ -88,8 +88,8 @@ metadata: namespace: demo spec: version: 24.4.1 - configSecret: - name: ch-custom-config + configuration: + secretName: ch-custom-config clusterTopology: clickHouseKeeper: externallyManaged: false @@ -222,8 +222,8 @@ spec: databaseRef: name: clickhouse-prod configuration: - configSecret: - name: new-ch-custom-config + configuration: + secretName: new-ch-custom-config timeout: 10m apply: IfReady ``` @@ -232,7 +232,7 @@ Here, - `spec.databaseRef.name` specifies that we are reconfiguring `clickhouse-prod` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configSecret.name` specifies the name of the new secret. +- `spec.configuration.secretName` specifies the name of the new secret. Let's create the `ClickHouseOpsRequest` CR we have shown above, diff --git a/docs/guides/clickhouse/reconfigure/reconfigure.md.bak b/docs/guides/clickhouse/reconfigure/reconfigure.md.bak new file mode 100644 index 0000000000..f59e094900 --- /dev/null +++ b/docs/guides/clickhouse/reconfigure/reconfigure.md.bak @@ -0,0 +1,614 @@ +--- +title: Reconfigure ClickHouse Cluster +menu: + docs_{{ .version }}: + identifier: ch-reconfigure-cluster + name: Reconfigure Configurations + parent: ch-reconfigure + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). 
+ +# Reconfigure ClickHouse Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a ClickHouse cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [ClickHouse](/docs/guides/clickhouse/concepts/clickhouse.md) + - [ClickHouseOpsRequest](/docs/guides/clickhouse/concepts/clickhouseopsrequest.md) + - [Reconfigure Overview](/docs/guides/clickhouse/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/clickhouse](/docs/examples/clickhouse) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `ClickHouse` cluster using a supported version by `KubeDB` operator. Then we are going to apply `ClickHouseOpsRequest` to reconfigure its configuration. + +### Prepare ClickHouse Cluster + +Now, we are going to deploy a `ClickHouse` topology cluster with version `24.4.1`. + +### Deploy ClickHouse + +At first, we will create a secret with the `ch-config.yaml` file containing required configuration settings. + +**ch-config.yaml:** + +```properties +profiles: + default: + max_query_size: 200000 +``` + +Here, `max_query_size` is set to `200000`, whereas the default value is `262144` + +Let's create a k8s secret containing the above configuration where the file name will be the key and the file-content as the value: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: ch-custom-config + namespace: demo +type: Opaque +stringData: + ch-config.yaml: | + profiles: + default: + max_query_size: 200000 +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/clickhouse/reconfigure/ch-config-secret.yaml +secret/ch-custom-config created +``` + + +In this section, we are going to create a ClickHouse object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `ClickHouse` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: ClickHouse +metadata: + name: clickhouse-prod + namespace: demo +spec: + version: 24.4.1 + configuration: + secretName: ch-custom-config + clusterTopology: + clickHouseKeeper: + externallyManaged: false + spec: + replicas: 3 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + cluster: + name: appscode-cluster + shards: 2 + replicas: 2 + podTemplate: + spec: + containers: + - name: clickhouse + resources: + limits: + memory: 4Gi + requests: + cpu: 500m + memory: 2Gi + initContainers: + - name: clickhouse-init + resources: + limits: + memory: 1Gi + requests: + cpu: 500m + memory: 1Gi + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + deletionPolicy: WipeOut +``` + +Let's create the `ClickHouse` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/clickhouse/reconfigure/clickhouse-cluster.yaml +clickhouse.kubedb.com/clickhouse-prod created +``` + +Now, wait until `clickhouse-prod` has status `Ready`. 
i.e, + +```bash +➤ kubectl get ch -n demo clickhouse-prod -w +NAME TYPE VERSION STATUS AGE +clickhouse-prod kubedb.com/v1alpha2 24.4.1 Provisioning 101s +clickhouse-prod kubedb.com/v1alpha2 24.4.1 Provisioning 109s +. +. +clickhouse-prod kubedb.com/v1alpha2 24.4.1 Ready 2m17s +``` + +Now, we will check if the clickhouse has started with the custom configuration we have provided. + +Exec into the ClickHouse pod and execute the following commands to see the configurations: +```bash +➤ kubectl exec -it -n demo clickhouse-prod-appscode-cluster-shard-0-0 -- bash +Defaulted container "clickhouse" out of: clickhouse, clickhouse-init (init) +clickhouse@clickhouse-prod-appscode-cluster-shard-0-0:/$ cat /etc/clickhouse-server/conf.d/ch-config.yaml +profiles: + default: + max_query_size: 200000 +``` +Here, we can see that our given configuration is applied to the ClickHouse cluster . `profiles.default.max_query_size` is set to `200000` from the default value `262144`. + +### Reconfigure using new config secret + +Now we will reconfigure this cluster to set `max_query_size` to `150000`. + +Now, update our `ch-config.yaml` file with the new configuration. + +```properties +profiles: + default: + max_query_size: 150000 +``` + +Then, we will create a new secret with this configuration file. + +At first, create `clickhouse.yaml` file containing required configuration settings. + +```bash +$ cat clickhouse.yaml +read_request_timeout: 6500ms +``` + +Then, we will create a new secret with this configuration file. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: new-ch-custom-config + namespace: demo +type: Opaque +stringData: + ch-config.yaml: | + profiles: + default: + max_query_size: 150000 + +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/clickhouse/reconfigure/new-ch-config-secret.yaml +secret/new-ch-custom-config created +``` + +#### Create ClickHouseOpsRequest + +Now, we will use this secret to replace the previous secret using a `ClickHouseOpsRequest` CR. The `ClickHouseOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: ClickHouseOpsRequest +metadata: + name: chops-cluster-reconfigure-with-secret + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: clickhouse-prod + configuration: + configuration: + secretName: new-ch-custom-config + timeout: 10m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `clickhouse-prod` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.secretName` specifies the name of the new secret. + +Let's create the `ClickHouseOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/clickhouse/reconfigure/ch-reconfigure-ops-with-secret.yaml +clickhouseopsrequest.ops.kubedb.com/chops-cluster-reconfigure-with-secret created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `ClickHouse` object. + +Let's wait for `ClickHouseOpsRequest` to be `Successful`. Run the following command to watch `ClickHouseOpsRequest` CR, + +```bash +➤ kubectl get clickhouseopsrequests -n demo +NAME TYPE STATUS AGE +chops-cluster-reconfigure-with-secret Reconfigure Successful 48m +``` + +We can see from the above output that the `ClickHouseOpsRequest` has succeeded. 
If we describe the `ClickHouseOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +➤ kubectl describe chops -n demo chops-cluster-reconfigure-with-secret +Name: chops-cluster-reconfigure-with-secret +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ClickHouseOpsRequest +Metadata: + Creation Timestamp: 2025-08-22T10:39:47Z + Generation: 1 + Resource Version: 458839 + UID: 54b3c1b1-976b-4129-9b08-dcb11426b990 +Spec: + Apply: IfReady + Configuration: + Config Secret: + Name: new-ch-custom-config + Database Ref: + Name: clickhouse-prod + Timeout: 10m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2025-08-22T10:39:47Z + Message: ClickHouse ops-request has started to reconfigure ClickHouse nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2025-08-22T10:39:55Z + Message: successfully reconciled the ClickHouse with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2025-08-22T10:39:55Z + Message: reconcile; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: Reconcile + Last Transition Time: 2025-08-22T10:42:20Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2025-08-22T10:40:00Z + Message: get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-0 + Observed Generation: 1 + Status: True + Type: GetPod--clickhouse-prod-appscode-cluster-shard-0-0 + Last Transition Time: 2025-08-22T10:40:00Z + Message: evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-0 + Observed Generation: 1 + Status: True + Type: EvictPod--clickhouse-prod-appscode-cluster-shard-0-0 + Last Transition Time: 2025-08-22T10:40:05Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2025-08-22T10:40:40Z + Message: get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-1 + Observed Generation: 1 + Status: True + Type: GetPod--clickhouse-prod-appscode-cluster-shard-0-1 + Last Transition Time: 2025-08-22T10:40:40Z + Message: evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-1 + Observed Generation: 1 + Status: True + Type: EvictPod--clickhouse-prod-appscode-cluster-shard-0-1 + Last Transition Time: 2025-08-22T10:41:20Z + Message: get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-0 + Observed Generation: 1 + Status: True + Type: GetPod--clickhouse-prod-appscode-cluster-shard-1-0 + Last Transition Time: 2025-08-22T10:41:20Z + Message: evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-0 + Observed Generation: 1 + Status: True + Type: EvictPod--clickhouse-prod-appscode-cluster-shard-1-0 + Last Transition Time: 2025-08-22T10:42:00Z + Message: get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-1 + Observed Generation: 1 + Status: True + Type: GetPod--clickhouse-prod-appscode-cluster-shard-1-1 + Last Transition Time: 2025-08-22T10:42:00Z + Message: evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-1 + Observed Generation: 1 + Status: True + Type: EvictPod--clickhouse-prod-appscode-cluster-shard-1-1 + Last Transition Time: 2025-08-22T10:42:20Z + Message: Successfully completed 
reconfigure ClickHouse + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 49m KubeDB Ops-manager Operator Start processing for ClickHouseOpsRequest: demo/chops-cluster-reconfigure-with-secret + Normal Starting 49m KubeDB Ops-manager Operator Pausing ClickHouse databse: demo/clickhouse-prod + Normal Successful 49m KubeDB Ops-manager Operator Successfully paused ClickHouse database: demo/clickhouse-prod for ClickHouseOpsRequest: chops-cluster-reconfigure-with-secret + Warning reconcile; ConditionStatus:True 49m KubeDB Ops-manager Operator reconcile; ConditionStatus:True + Warning reconcile; ConditionStatus:True 49m KubeDB Ops-manager Operator reconcile; ConditionStatus:True + Warning reconcile; ConditionStatus:True 49m KubeDB Ops-manager Operator reconcile; ConditionStatus:True + Normal UpdatePetSets 49m KubeDB Ops-manager Operator successfully reconciled the ClickHouse with new configure + Warning get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-0 48m KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-0 + Warning evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-0 48m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-0 + Warning running pod; ConditionStatus:False 48m KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-1 48m KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-1 + Warning evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-1 48m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-1 + Warning get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-0 47m KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-0 + Warning evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-0 47m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-0 + Warning get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-1 46m KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-1 + Warning evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-1 46m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-1 + Normal RestartNodes 46m KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 46m KubeDB Ops-manager Operator Resuming ClickHouse database: demo/clickhouse-prod + Normal Successful 46m KubeDB Ops-manager Operator Successfully resumed ClickHouse database: demo/clickhouse-prod for ClickHouseOpsRequest: chops-cluster-reconfigure-with-secret +``` + +Now let's exec one of the instance to check the new configuration we have provided. 
+
+```bash
+➤ kubectl exec -it -n demo clickhouse-prod-appscode-cluster-shard-0-0 -- bash
+Defaulted container "clickhouse" out of: clickhouse, clickhouse-init (init)
+clickhouse@clickhouse-prod-appscode-cluster-shard-0-0:/$ cat /etc/clickhouse-server/conf.d/ch-config.yaml
+profiles:
+  default:
+    max_query_size: "150000"
+```
+
+As we can see from the configuration of the running clickhouse, the value of `max_query_size` has been changed from `200000` to `150000`. So the reconfiguration of the cluster is successful.
+
+
+### Reconfigure using apply config
+
+Now we will reconfigure this cluster again to set `max_query_size` to `180000`. This time we won't use a new secret. We will use the `applyConfig` field of the `ClickHouseOpsRequest`. This will merge the new config into the existing secret.
+
+#### Create ClickHouseOpsRequest
+
+Now, we will use the new configuration in the `applyConfig` field in the `ClickHouseOpsRequest` CR. The `ClickHouseOpsRequest` yaml is given below,
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: ClickHouseOpsRequest
+metadata:
+  name: chops-cluster-reconfigure-with-config
+  namespace: demo
+spec:
+  type: Reconfigure
+  databaseRef:
+    name: clickhouse-prod
+  configuration:
+    applyConfig:
+      ch-config.yaml: |
+        profiles:
+          default:
+            max_query_size: 180000
+  timeout: 10m
+  apply: IfReady
+```
+
+Here,
+
+- `spec.databaseRef.name` specifies that we are reconfiguring the `clickhouse-prod` cluster.
+- `spec.type` specifies that we are performing `Reconfigure` on clickhouse.
+- `spec.configuration.applyConfig` specifies the new configuration that will be merged in the existing secret.
+
+Let's create the `ClickHouseOpsRequest` CR we have shown above,
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/clickhouse/reconfigure/ch-reconfigure-ops-with-apply-config.yaml
+clickhouseopsrequest.ops.kubedb.com/chops-cluster-reconfiugre-with-config created
+```
+
+#### Verify the new configuration is working
+
+If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration.
+
+Let's wait for `ClickHouseOpsRequest` to be `Successful`. Run the following command to watch `ClickHouseOpsRequest` CR,
+
+```bash
+➤ kubectl get chops -n demo chops-cluster-reconfiugre-with-config
+NAME                                    TYPE          STATUS       AGE
+chops-cluster-reconfiugre-with-config   Reconfigure   Successful   12m
+```
+
+We can see from the above output that the `ClickHouseOpsRequest` has succeeded. If we describe the `ClickHouseOpsRequest` we will get an overview of the steps that were followed to reconfigure the cluster. 
+ + + +```bash +➤ kubectl describe chops -n demo chops-cluster-reconfiugre-with-config +Name: chops-cluster-reconfiugre-with-config +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: ClickHouseOpsRequest +Metadata: + Creation Timestamp: 2025-08-22T11:34:23Z + Generation: 1 + Resource Version: 466167 + UID: 2270c10b-490c-43db-9cc9-92171b9513bb +Spec: + Apply: IfReady + Configuration: + Apply Config: + config.yaml: profiles: + default: + max_query_size: 180000 + + Database Ref: + Name: clickhouse-prod + Timeout: 10m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2025-08-22T11:34:23Z + Message: ClickHouse ops-request has started to reconfigure ClickHouse nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2025-08-22T11:34:26Z + Message: Successfully prepared user provided custom config secret + Observed Generation: 1 + Reason: PrepareCustomConfig + Status: True + Type: PrepareCustomConfig + Last Transition Time: 2025-08-22T11:34:31Z + Message: successfully reconciled the ClickHouse with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2025-08-22T11:34:31Z + Message: reconcile; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: Reconcile + Last Transition Time: 2025-08-22T11:36:56Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2025-08-22T11:34:36Z + Message: get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-0 + Observed Generation: 1 + Status: True + Type: GetPod--clickhouse-prod-appscode-cluster-shard-0-0 + Last Transition Time: 2025-08-22T11:34:36Z + Message: evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-0 + Observed Generation: 1 + Status: True + Type: EvictPod--clickhouse-prod-appscode-cluster-shard-0-0 + Last Transition Time: 2025-08-22T11:34:41Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2025-08-22T11:34:56Z + Message: get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-1 + Observed Generation: 1 + Status: True + Type: GetPod--clickhouse-prod-appscode-cluster-shard-0-1 + Last Transition Time: 2025-08-22T11:34:56Z + Message: evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-1 + Observed Generation: 1 + Status: True + Type: EvictPod--clickhouse-prod-appscode-cluster-shard-0-1 + Last Transition Time: 2025-08-22T11:35:36Z + Message: get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-0 + Observed Generation: 1 + Status: True + Type: GetPod--clickhouse-prod-appscode-cluster-shard-1-0 + Last Transition Time: 2025-08-22T11:35:36Z + Message: evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-0 + Observed Generation: 1 + Status: True + Type: EvictPod--clickhouse-prod-appscode-cluster-shard-1-0 + Last Transition Time: 2025-08-22T11:36:21Z + Message: get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-1 + Observed Generation: 1 + Status: True + Type: GetPod--clickhouse-prod-appscode-cluster-shard-1-1 + Last Transition Time: 2025-08-22T11:36:21Z + Message: evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-1 + Observed Generation: 1 + Status: True + Type: 
EvictPod--clickhouse-prod-appscode-cluster-shard-1-1 + Last Transition Time: 2025-08-22T11:36:56Z + Message: Successfully completed reconfigure ClickHouse + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 13m KubeDB Ops-manager Operator Start processing for ClickHouseOpsRequest: demo/chops-cluster-reconfiugre-with-config + Normal Starting 13m KubeDB Ops-manager Operator Pausing ClickHouse databse: demo/clickhouse-prod + Normal Successful 13m KubeDB Ops-manager Operator Successfully paused ClickHouse database: demo/clickhouse-prod for ClickHouseOpsRequest: chops-cluster-reconfiugre-with-config + Warning reconcile; ConditionStatus:True 13m KubeDB Ops-manager Operator reconcile; ConditionStatus:True + Warning reconcile; ConditionStatus:True 13m KubeDB Ops-manager Operator reconcile; ConditionStatus:True + Warning reconcile; ConditionStatus:True 13m KubeDB Ops-manager Operator reconcile; ConditionStatus:True + Normal UpdatePetSets 13m KubeDB Ops-manager Operator successfully reconciled the ClickHouse with new configure + Warning get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-0 13m KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-0 + Warning evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-0 13m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-0 + Warning running pod; ConditionStatus:False 13m KubeDB Ops-manager Operator running pod; ConditionStatus:False + Warning get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-1 12m KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-1 + Warning evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-1 12m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-0-1 + Warning get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-0 12m KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-0 + Warning evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-0 12m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-0 + Warning get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-1 11m KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-1 + Warning evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-1 11m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:clickhouse-prod-appscode-cluster-shard-1-1 + Normal RestartNodes 10m KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 10m KubeDB Ops-manager Operator Resuming ClickHouse database: demo/clickhouse-prod + Normal Successful 10m KubeDB Ops-manager Operator Successfully resumed ClickHouse database: demo/clickhouse-prod for ClickHouseOpsRequest: chops-cluster-reconfiugre-with-config +``` + +Now let's exec into one of the instance to check the new configuration we have provided. 
+ +```bash +➤ kubectl exec -it -n demo clickhouse-prod-appscode-cluster-shard-0-0 -- bash +Defaulted container "clickhouse" out of: clickhouse, clickhouse-init (init) +clickhouse@clickhouse-prod-appscode-cluster-shard-0-0:/$ cat /etc/clickhouse-server/conf.d/ch-config.yaml +profiles: + default: + max_query_size: 180000 +``` + +As we can see from the configuration of ready clickhouse, the value of `max_query_size` has been changed from `150000` to `180000`. So the reconfiguration of the database using the `applyConfig` field is successful. + + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete ch -n demo clickhouse-prod +kubectl delete clickhouseopsrequest -n demo chops-cluster-reconfigure-with-config chops-cluster-reconfigure-with-secret +kubectl delete secret -n demo ch-custom-config new-ch-custom-config +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [ClickHouse object](/docs/guides/clickhouse/concepts/clickhouse.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/druid/autoscaler/compute/guide.md b/docs/guides/druid/autoscaler/compute/guide.md index b0810f67b2..10e5e5994d 100644 --- a/docs/guides/druid/autoscaler/compute/guide.md +++ b/docs/guides/druid/autoscaler/compute/guide.md @@ -107,8 +107,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/autoscaler/compute/guide.md.bak b/docs/guides/druid/autoscaler/compute/guide.md.bak new file mode 100644 index 0000000000..b0810f67b2 --- /dev/null +++ b/docs/guides/druid/autoscaler/compute/guide.md.bak @@ -0,0 +1,864 @@ +--- +title: Druid Topology Autoscaling +menu: + docs_{{ .version }}: + identifier: guides-druid-autoscaler-compute-guide + name: Druid Compute Autoscaling + parent: guides-druid-autoscaler-compute + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Autoscaling the Compute Resource of a Druid Topology Cluster + +This guide will show you how to use `KubeDB` to autoscale compute resources i.e. cpu and memory of a Druid topology cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner, Ops-manager and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md). + +- Install `Metrics Server` from [here](https://github.com/kubernetes-sigs/metrics-server#installation) + +- You should be familiar with the following `KubeDB` concepts: + - [Druid](/docs/guides/druid/concepts/druid.md) + - [DruidAutoscaler](/docs/guides/druid/concepts/druidautoscaler.md) + - [DruidOpsRequest](/docs/guides/druid/concepts/druidopsrequest.md) + - [Compute Resource Autoscaling Overview](/docs/guides/druid/autoscaler/compute/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/druid](/docs/examples/druid) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. 
+ +## Autoscaling of Topology Cluster + +Here, we are going to deploy a `Druid` Topology Cluster using a supported version by `KubeDB` operator. Then we are going to apply `DruidAutoscaler` to set up autoscaling. + +### Create External Dependency (Deep Storage) + +Before proceeding further, we need to prepare deep storage, which is one of the external dependency of Druid and used for storing the segments. It is a storage mechanism that Apache Druid does not provide. **Amazon S3**, **Google Cloud Storage**, or **Azure Blob Storage**, **S3-compatible storage** (like **Minio**), or **HDFS** are generally convenient options for deep storage. + +In this tutorial, we will run a `minio-server` as deep storage in our local `kind` cluster using `minio-operator` and create a bucket named `druid` in it, which the deployed druid database will use. + +```bash +$ helm repo add minio https://operator.min.io/ +$ helm repo update minio +$ helm upgrade --install --namespace "minio-operator" --create-namespace "minio-operator" minio/operator --set operator.replicaCount=1 + +$ helm upgrade --install --namespace "demo" --create-namespace druid-minio minio/tenant \ +--set tenant.pools[0].servers=1 \ +--set tenant.pools[0].volumesPerServer=1 \ +--set tenant.pools[0].size=1Gi \ +--set tenant.certificate.requestAutoCert=false \ +--set tenant.buckets[0].name="druid" \ +--set tenant.pools[0].name="default" + +``` + +Now we need to create a `Secret` named `deep-storage-config`. It contains the necessary connection information using which the druid database will connect to the deep storage. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: deep-storage-config + namespace: demo +stringData: + druid.storage.type: "s3" + druid.storage.bucket: "druid" + druid.storage.baseKey: "druid/segments" + druid.s3.accessKey: "minio" + druid.s3.secretKey: "minio123" + druid.s3.protocol: "http" + druid.s3.enablePathStyleAccess: "true" + druid.s3.endpoint.signingRegion: "us-east-1" + druid.s3.endpoint.url: "http://myminio-hl.demo.svc.cluster.local:9000/" +``` + +Let’s create the `deep-storage-config` Secret shown above: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/autoscaler/compute/yamls/deep-storage-config.yaml +secret/deep-storage-config created +``` + +Now, we are going to deploy a `Druid` combined cluster with version `28.0.1`. + +### Deploy Druid Cluster + +In this section, we are going to deploy a Druid Topology cluster with version `28.0.1`. Then, in the next section we will set up autoscaling for this database using `DruidAutoscaler` CRD. Below is the YAML of the `Druid` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-cluster + namespace: demo +spec: + version: 28.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: WipeOut +``` + +Let's create the `Druid` CRO we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/autoscaler/compute/yamls/druid-cluster.yaml +druid.kubedb.com/druid-cluster created +``` + +Now, wait until `druid-cluster` has status `Ready`. i.e, + +```bash +$ kubectl get kf -n demo -w +NAME TYPE VERSION STATUS AGE +druid-cluster kubedb.com/v1alpha2 28.0.1 Provisioning 0s +druid-cluster kubedb.com/v1alpha2 28.0.1 Provisioning 24s +. +. 
+druid-cluster kubedb.com/v1alpha2 28.0.1 Ready 118s +``` + +## Druid Topology Autoscaler + +Let's check the Druid resources for coordinators and historicals, + +```bash +$ kubectl get druid -n demo druid-cluster -o json | jq '.spec.topology.coordinators.podTemplate.spec.containers[].resources' +{ + "limits": { + "memory": "1Gi" + }, + "requests": { + "cpu": "500m", + "memory": "1Gi" + } +} + +$ kubectl get druid -n demo druid-cluster -o json | jq '.spec.topology.historicals.podTemplate.spec.containers[].resources' +{ + "limits": { + "memory": "1Gi" + }, + "requests": { + "cpu": "500m", + "memory": "1Gi" + } +} +``` + +Let's check the coordinators and historicals Pod containers resources, + +```bash +$ kubectl get pod -n demo druid-cluster-coordinators-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "1Gi" + }, + "requests": { + "cpu": "500m", + "memory": "1Gi" + } +} + +$ kubectl get pod -n demo druid-cluster-historicals-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "1Gi" + }, + "requests": { + "cpu": "500m", + "memory": "1Gi" + } +} +``` + +You can see from the above outputs that the resources for coordinators and historicals are same as the one we have assigned while deploying the druid. + +We are now ready to apply the `DruidAutoscaler` CRO to set up autoscaling for these coordinators and historicals nodes. + +### Compute Resource Autoscaling + +Here, we are going to set up compute resource autoscaling using a DruidAutoscaler Object. + +#### Create DruidAutoscaler Object + +In order to set up compute resource autoscaling for this topology cluster, we have to create a `DruidAutoscaler` CRO with our desired configuration. Below is the YAML of the `DruidAutoscaler` object that we are going to create, + +```yaml +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: DruidAutoscaler +metadata: + name: druid-autoscaler + namespace: demo +spec: + databaseRef: + name: druid-quickstart + compute: + coordinators: + trigger: "On" + podLifeTimeThreshold: 1m + minAllowed: + cpu: 600m + memory: 2Gi + maxAllowed: + cpu: 1000m + memory: 5Gi + resourceDiffPercentage: 20 + controlledResources: ["cpu", "memory"] + historicals: + trigger: "On" + podLifeTimeThreshold: 1m + minAllowed: + cpu: 600m + memory: 2Gi + maxAllowed: + cpu: 1000m + memory: 5Gi + resourceDiffPercentage: 20 + controlledResources: [ "cpu", "memory"] +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing compute resource scaling operation on `druid-cluster` cluster. +- `spec.compute.coordinators.trigger` specifies that compute autoscaling is enabled for this node. +- `spec.compute.coordinators.podLifeTimeThreshold` specifies the minimum lifetime for at least one of the pod to initiate a vertical scaling. +- `spec.compute.coordinators.resourceDiffPercentage` specifies the minimum resource difference in percentage. The default is 10%. If the difference between current & recommended resource is less than ResourceDiffPercentage, Autoscaler Operator will ignore the updating. +- `spec.compute.coordinators.minAllowed` specifies the minimum allowed resources for the cluster. +- `spec.compute.coordinators.maxAllowed` specifies the maximum allowed resources for the cluster. +- `spec.compute.coordinators.controlledResources` specifies the resources that are controlled by the autoscaler. +- `spec.compute.coordinators.containerControlledValues` specifies which resource values should be controlled. The default is "RequestsAndLimits". 
+- `spec.compute.historicals` can be configured the same way shown above. +- `spec.opsRequestOptions` contains the options to pass to the created OpsRequest. It has 2 fields. + - `timeout` specifies the timeout for the OpsRequest. + - `apply` specifies when the OpsRequest should be applied. The default is "IfReady". + +> **Note:** You can also configure autoscaling configurations for all other nodes as well. You can apply autoscaler for each node in separate YAML or combinedly in one a YAML as shown above. + +Let's create the `DruidAutoscaler` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/autoscaler/compute/yamls/druid-autoscaler.yaml +druidautoscaler.autoscaling.kubedb.com/druid-autoscaler created +``` + +#### Verify Autoscaling is set up successfully + +Let's check that the `druidautoscaler` resource is created successfully, + +```bash +$ kubectl describe druidautoscaler druid-autoscaler -n demo + kubectl describe druidautoscaler druid-autoscaler -n demo +Name: druid-autoscaler +Namespace: demo +Labels: +Annotations: +API Version: autoscaling.kubedb.com/v1alpha1 +Kind: DruidAutoscaler +Metadata: + Creation Timestamp: 2024-10-24T10:04:22Z + Generation: 1 + Managed Fields: + API Version: autoscaling.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:compute: + .: + f:coordinators: + .: + f:controlledResources: + f:maxAllowed: + .: + f:cpu: + f:memory: + f:minAllowed: + .: + f:cpu: + f:memory: + f:podLifeTimeThreshold: + f:resourceDiffPercentage: + f:trigger: + f:historicals: + .: + f:controlledResources: + f:maxAllowed: + .: + f:cpu: + f:memory: + f:minAllowed: + .: + f:cpu: + f:memory: + f:podLifeTimeThreshold: + f:resourceDiffPercentage: + f:trigger: + f:databaseRef: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2024-10-24T10:04:22Z + API Version: autoscaling.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:ownerReferences: + .: + k:{"uid":"c2a5c29d-3589-49d8-bc18-585b9c05bf8d"}: + Manager: kubedb-autoscaler + Operation: Update + Time: 2024-10-24T10:04:22Z + API Version: autoscaling.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:status: + .: + f:checkpoints: + f:conditions: + f:vpas: + Manager: kubedb-autoscaler + Operation: Update + Subresource: status + Time: 2024-10-24T10:16:20Z + Owner References: + API Version: kubedb.com/v1alpha2 + Block Owner Deletion: true + Controller: true + Kind: Druid + Name: druid-cluster + UID: c2a5c29d-3589-49d8-bc18-585b9c05bf8d + Resource Version: 274969 + UID: 069fbdd7-87ad-4fd7-acc7-9753fa188312 +Spec: + Compute: + Coordinators: + Controlled Resources: + cpu + memory + Max Allowed: + Cpu: 1000m + Memory: 5Gi + Min Allowed: + Cpu: 600m + Memory: 2Gi + Pod Life Time Threshold: 1m + Resource Diff Percentage: 20 + Trigger: On + Historicals: + Controlled Resources: + cpu + memory + Max Allowed: + Cpu: 1000m + Memory: 5Gi + Min Allowed: + Cpu: 600m + Memory: 2Gi + Pod Life Time Threshold: 1m + Resource Diff Percentage: 20 + Trigger: On + Database Ref: + Name: druid-cluster +Status: + Checkpoints: + Cpu Histogram: + Bucket Weights: + Index: 0 + Weight: 10000 + Index: 5 + Weight: 490 + Reference Timestamp: 2024-10-24T10:05:00Z + Total Weight: 2.871430450948392 + First Sample Start: 2024-10-24T10:05:07Z + Last Sample Start: 2024-10-24T10:16:03Z + Last Update Time: 2024-10-24T10:16:20Z + Memory Histogram: 
+ Bucket Weights: + Index: 25 + Weight: 3648 + Index: 29 + Weight: 10000 + Reference Timestamp: 2024-10-24T10:10:00Z + Total Weight: 3.3099198846728424 + Ref: + Container Name: druid + Vpa Object Name: druid-cluster-historicals + Total Samples Count: 12 + Version: v3 + Cpu Histogram: + Bucket Weights: + Index: 0 + Weight: 3040 + Index: 1 + Weight: 10000 + Index: 2 + Weight: 3278 + Index: 14 + Weight: 1299 + Reference Timestamp: 2024-10-24T10:10:00Z + Total Weight: 1.0092715955023177 + First Sample Start: 2024-10-24T10:04:53Z + Last Sample Start: 2024-10-24T10:14:03Z + Last Update Time: 2024-10-24T10:14:20Z + Memory Histogram: + Bucket Weights: + Index: 24 + Weight: 10000 + Index: 27 + Weight: 8706 + Reference Timestamp: 2024-10-24T10:10:00Z + Total Weight: 3.204567438391289 + Ref: + Container Name: druid + Vpa Object Name: druid-cluster-coordinators + Total Samples Count: 10 + Version: v3 + Conditions: + Last Transition Time: 2024-10-24T10:07:19Z + Message: Successfully created druidOpsRequest demo/drops-druid-cluster-coordinators-g02xtu + Observed Generation: 1 + Reason: CreateOpsRequest + Status: True + Type: CreateOpsRequest + Vpas: + Conditions: + Last Transition Time: 2024-10-24T10:05:19Z + Status: True + Type: RecommendationProvided + Recommendation: + Container Recommendations: + Container Name: druid + Lower Bound: + Cpu: 600m + Memory: 2Gi + Target: + Cpu: 600m + Memory: 2Gi + Uncapped Target: + Cpu: 100m + Memory: 764046746 + Upper Bound: + Cpu: 1 + Memory: 5Gi + Vpa Name: druid-cluster-historicals + Conditions: + Last Transition Time: 2024-10-24T10:06:19Z + Status: True + Type: RecommendationProvided + Recommendation: + Container Recommendations: + Container Name: druid + Lower Bound: + Cpu: 600m + Memory: 2Gi + Target: + Cpu: 600m + Memory: 2Gi + Uncapped Target: + Cpu: 100m + Memory: 671629701 + Upper Bound: + Cpu: 1 + Memory: 5Gi + Vpa Name: druid-cluster-coordinators +Events: +``` +So, the `druidautoscaler` resource is created successfully. + +you can see in the `Status.VPAs.Recommendation` section, that recommendation has been generated for our database. Our autoscaler operator continuously watches the recommendation generated and creates an `druidopsrequest` based on the recommendations, if the database pods resources are needed to scaled up or down. + +Let's watch the `druidopsrequest` in the demo namespace to see if any `druidopsrequest` object is created. After some time you'll see that a `druidopsrequest` will be created based on the recommendation. + +```bash +$ watch kubectl get druidopsrequest -n demo +Every 2.0s: kubectl get druidopsrequest -n demo +NAME TYPE STATUS AGE +drops-druid-cluster-coordinators-g02xtu VerticalScaling Progressing 8m +drops-druid-cluster-historicals-g3oqje VerticalScaling Progressing 8m + +``` +Progressing +Let's wait for the ops request to become successful. + +```bash +$ kubectl get druidopsrequest -n demo +NAME TYPE STATUS AGE +drops-druid-cluster-coordinators-g02xtu VerticalScaling Successful 12m +drops-druid-cluster-historicals-g3oqje VerticalScaling Successful 13m +``` + +We can see from the above output that the `DruidOpsRequest` has succeeded. If we describe the `DruidOpsRequest` we will get an overview of the steps that were followed to scale the cluster. 
+ +```bash +$ kubectl describe druidopsrequests -n demo drops-druid-cluster-coordinators-f6qbth +Name: drops-druid-cluster-coordinators-g02xtu +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=druid-cluster + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=druids.kubedb.com +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: DruidOpsRequest +Metadata: + Creation Timestamp: 2024-10-24T10:07:19Z + Generation: 1 + Managed Fields: + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:labels: + .: + f:app.kubernetes.io/component: + f:app.kubernetes.io/instance: + f:app.kubernetes.io/managed-by: + f:app.kubernetes.io/name: + f:ownerReferences: + .: + k:{"uid":"069fbdd7-87ad-4fd7-acc7-9753fa188312"}: + f:spec: + .: + f:apply: + f:databaseRef: + f:type: + f:verticalScaling: + .: + f:coordinators: + .: + f:resources: + .: + f:limits: + .: + f:memory: + f:requests: + .: + f:cpu: + f:memory: + Manager: kubedb-autoscaler + Operation: Update + Time: 2024-10-24T10:07:19Z + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:status: + .: + f:conditions: + f:observedGeneration: + f:phase: + Manager: kubedb-ops-manager + Operation: Update + Subresource: status + Time: 2024-10-24T10:07:43Z + Owner References: + API Version: autoscaling.kubedb.com/v1alpha1 + Block Owner Deletion: true + Controller: true + Kind: DruidAutoscaler + Name: druid-autoscaler + UID: 069fbdd7-87ad-4fd7-acc7-9753fa188312 + Resource Version: 273990 + UID: d14d964b-f4ae-4570-a296-38e91c802473 +Spec: + Apply: IfReady + Database Ref: + Name: druid-cluster + Type: VerticalScaling + Vertical Scaling: + Coordinators: + Resources: + Limits: + Memory: 2Gi + Requests: + Cpu: 600m + Memory: 2Gi +Status: + Conditions: + Last Transition Time: 2024-10-24T10:07:19Z + Message: Druid ops-request has started to vertically scale the Druid nodes + Observed Generation: 1 + Reason: VerticalScaling + Status: True + Type: VerticalScaling + Last Transition Time: 2024-10-24T10:07:28Z + Message: Successfully updated PetSets Resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-10-24T10:07:43Z + Message: Successfully Restarted Pods With Resources + Observed Generation: 1 + Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2024-10-24T10:07:33Z + Message: get pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + Observed Generation: 1 + Status: True + Type: GetPod--druid-cluster-coordinators-0 + Last Transition Time: 2024-10-24T10:07:33Z + Message: evict pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + Observed Generation: 1 + Status: True + Type: EvictPod--druid-cluster-coordinators-0 + Last Transition Time: 2024-10-24T10:07:38Z + Message: check pod running; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--druid-cluster-coordinators-0 + Last Transition Time: 2024-10-24T10:07:43Z + Message: Successfully completed the vertical scaling for RabbitMQ + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 12m KubeDB Ops-manager Operator Start processing for DruidOpsRequest: demo/drops-druid-cluster-coordinators-g02xtu + Normal Starting 12m KubeDB Ops-manager Operator Pausing Druid 
databse: demo/druid-cluster + Normal Successful 12m KubeDB Ops-manager Operator Successfully paused Druid database: demo/druid-cluster for DruidOpsRequest: drops-druid-cluster-coordinators-g02xtu + Normal UpdatePetSets 12m KubeDB Ops-manager Operator Successfully updated PetSets Resources + Warning get pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 12m KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + Warning evict pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 12m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + Warning check pod running; ConditionStatus:True; PodName:druid-cluster-coordinators-0 12m KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + Normal RestartPods 12m KubeDB Ops-manager Operator Successfully Restarted Pods With Resources + Normal Starting 12m KubeDB Ops-manager Operator Resuming Druid database: demo/druid-cluster + Normal Successful 12m KubeDB Ops-manager Operator Successfully resumed Druid database: demo/druid-cluster for DruidOpsRequest: drops-druid-cluster-coordinators-g02xtu +``` + +Let's describe the other `DruidOpsRequest` created for scaling of historicals. + +```bash +$ kubectl describe druidopsrequests -n demo drops-druid-cluster-historicals-g3oqje +Name: drops-druid-cluster-historicals-g3oqje +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=druid-cluster + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=druids.kubedb.com +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: DruidOpsRequest +Metadata: + Creation Timestamp: 2024-10-24T10:06:19Z + Generation: 1 + Managed Fields: + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:labels: + .: + f:app.kubernetes.io/component: + f:app.kubernetes.io/instance: + f:app.kubernetes.io/managed-by: + f:app.kubernetes.io/name: + f:ownerReferences: + .: + k:{"uid":"069fbdd7-87ad-4fd7-acc7-9753fa188312"}: + f:spec: + .: + f:apply: + f:databaseRef: + f:type: + f:verticalScaling: + .: + f:historicals: + .: + f:resources: + .: + f:limits: + .: + f:memory: + f:requests: + .: + f:cpu: + f:memory: + Manager: kubedb-autoscaler + Operation: Update + Time: 2024-10-24T10:06:19Z + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:status: + .: + f:conditions: + f:observedGeneration: + f:phase: + Manager: kubedb-ops-manager + Operation: Update + Subresource: status + Time: 2024-10-24T10:06:37Z + Owner References: + API Version: autoscaling.kubedb.com/v1alpha1 + Block Owner Deletion: true + Controller: true + Kind: DruidAutoscaler + Name: druid-autoscaler + UID: 069fbdd7-87ad-4fd7-acc7-9753fa188312 + Resource Version: 273770 + UID: fc13624c-42d4-4b03-9448-80f451b1a888 +Spec: + Apply: IfReady + Database Ref: + Name: druid-cluster + Type: VerticalScaling + Vertical Scaling: + Historicals: + Resources: + Limits: + Memory: 2Gi + Requests: + Cpu: 600m + Memory: 2Gi +Status: + Conditions: + Last Transition Time: 2024-10-24T10:06:19Z + Message: Druid ops-request has started to vertically scale the Druid nodes + Observed Generation: 1 + Reason: VerticalScaling + Status: True + Type: VerticalScaling + Last Transition Time: 2024-10-24T10:06:22Z + Message: Successfully updated PetSets Resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-10-24T10:06:37Z + 
Message: Successfully Restarted Pods With Resources + Observed Generation: 1 + Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2024-10-24T10:06:27Z + Message: get pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 + Observed Generation: 1 + Status: True + Type: GetPod--druid-cluster-historicals-0 + Last Transition Time: 2024-10-24T10:06:27Z + Message: evict pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 + Observed Generation: 1 + Status: True + Type: EvictPod--druid-cluster-historicals-0 + Last Transition Time: 2024-10-24T10:06:32Z + Message: check pod running; ConditionStatus:True; PodName:druid-cluster-historicals-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--druid-cluster-historicals-0 + Last Transition Time: 2024-10-24T10:06:37Z + Message: Successfully completed the vertical scaling for RabbitMQ + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 16m KubeDB Ops-manager Operator Start processing for DruidOpsRequest: demo/drops-druid-cluster-historicals-g3oqje + Normal Starting 16m KubeDB Ops-manager Operator Pausing Druid databse: demo/druid-cluster + Normal Successful 16m KubeDB Ops-manager Operator Successfully paused Druid database: demo/druid-cluster for DruidOpsRequest: drops-druid-cluster-historicals-g3oqje + Normal UpdatePetSets 16m KubeDB Ops-manager Operator Successfully updated PetSets Resources + Warning get pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 16m KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 + Warning evict pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 16m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 + Warning check pod running; ConditionStatus:True; PodName:druid-cluster-historicals-0 16m KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-cluster-historicals-0 + Normal RestartPods 16m KubeDB Ops-manager Operator Successfully Restarted Pods With Resources + Normal Starting 16m KubeDB Ops-manager Operator Resuming Druid database: demo/druid-cluster + Normal Successful 16m KubeDB Ops-manager Operator Successfully resumed Druid database: demo/druid-cluster for DruidOpsRequest: drops-druid-cluster-historicals-g3oqje + +``` + +Now, we are going to verify from the Pod, and the Druid yaml whether the resources of the coordinators and historicals node has updated to meet up the desired state, Let's check, + +```bash +$ kubectl get pod -n demo druid-cluster-coordinators-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "1536Mi" + }, + "requests": { + "cpu": "600m", + "memory": "1536Mi" + } +} + +$ kubectl get pod -n demo druid-cluster-historicals-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "2Gi" + }, + "requests": { + "cpu": "600m", + "memory": "2Gi" + } +} + +$ kubectl get druid -n demo druid-cluster -o json | jq '.spec.topology.coordinators.podTemplate.spec.containers[].resources' +{ + "limits": { + "memory": "1536Mi" + }, + "requests": { + "cpu": "600m", + "memory": "1536Mi" + } +} + +$ kubectl get druid -n demo druid-cluster -o json | jq '.spec.topology.historicals.podTemplate.spec.containers[].resources' +{ + "limits": { + "memory": "2Gi" + }, + "requests": { + "cpu": "600m", + "memory": "2Gi" + } +} +``` + +The 
above output verifies that we have successfully auto scaled the resources of the Druid topology cluster for coordinators and historicals. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete druidopsrequest -n demo drops-druid-cluster-coordinators-g02xtu drops-druid-cluster-historicals-g3oqje +kubectl delete druidautoscaler -n demo druid-autoscaler +kubectl delete dr -n demo druid-cluster +kubectl delete ns demo +``` +## Next Steps + +- Detail concepts of [Druid object](/docs/guides/druid/concepts/druid.md). +- Different Druid topology clustering modes [here](/docs/guides/druid/clustering/_index.md). +- Monitor your Druid database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/druid/monitoring/using-prometheus-operator.md). + +[//]: # (- Monitor your Druid database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/druid/monitoring/using-builtin-prometheus.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/druid/autoscaler/compute/yamls/druid-cluster.yaml b/docs/guides/druid/autoscaler/compute/yamls/druid-cluster.yaml index ffac2b300b..f8e3ebd5ad 100644 --- a/docs/guides/druid/autoscaler/compute/yamls/druid-cluster.yaml +++ b/docs/guides/druid/autoscaler/compute/yamls/druid-cluster.yaml @@ -7,8 +7,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: historicals: replicas: 1 diff --git a/docs/guides/druid/autoscaler/compute/yamls/druid-cluster.yaml.bak b/docs/guides/druid/autoscaler/compute/yamls/druid-cluster.yaml.bak new file mode 100644 index 0000000000..ffac2b300b --- /dev/null +++ b/docs/guides/druid/autoscaler/compute/yamls/druid-cluster.yaml.bak @@ -0,0 +1,31 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-cluster + namespace: demo +spec: + version: 28.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + historicals: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + middleManagers: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + routers: + replicas: 1 + deletionPolicy: Delete diff --git a/docs/guides/druid/autoscaler/storage/guide.md b/docs/guides/druid/autoscaler/storage/guide.md index 02b3571625..460cd01f15 100644 --- a/docs/guides/druid/autoscaler/storage/guide.md +++ b/docs/guides/druid/autoscaler/storage/guide.md @@ -121,8 +121,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: historicals: replicas: 1 diff --git a/docs/guides/druid/autoscaler/storage/guide.md.bak b/docs/guides/druid/autoscaler/storage/guide.md.bak new file mode 100644 index 0000000000..02b3571625 --- /dev/null +++ b/docs/guides/druid/autoscaler/storage/guide.md.bak @@ -0,0 +1,896 @@ +--- +title: Druid Topology Autoscaling +menu: + docs_{{ .version }}: + identifier: guides-druid-autoscaler-storage-guide + name: Druid Storage Autoscaling + parent: guides-druid-autoscaler-storage + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Storage Autoscaling of a Druid Topology Cluster + +This guide will show you how to use `KubeDB` to autoscale the storage of a Druid Topology cluster. 
+ +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner, Ops-manager and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md). + +- Install `Metrics Server` from [here](https://github.com/kubernetes-sigs/metrics-server#installation) + +- Install Prometheus from [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) + +- You must have a `StorageClass` that supports volume expansion. + +- You should be familiar with the following `KubeDB` concepts: + - [Druid](/docs/guides/druid/concepts/druid.md) + - [DruidAutoscaler](/docs/guides/druid/concepts/druidautoscaler.md) + - [DruidOpsRequest](/docs/guides/druid/concepts/druidopsrequest.md) + - [Storage Autoscaling Overview](/docs/guides/druid/autoscaler/storage/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/druid](/docs/examples/druid) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Storage Autoscaling of Topology Cluster + +At first verify that your cluster has a storage class, that supports volume expansion. Let's check, + +```bash +$ kubectl get storageclass +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +local-path (default) rancher.io/local-path Delete WaitForFirstConsumer false 28h +longhorn (default) driver.longhorn.io Delete Immediate true 28h +longhorn-static driver.longhorn.io Delete Immediate true 28h +``` + +We can see from the output the `longhorn` storage class has `ALLOWVOLUMEEXPANSION` field as true. So, this storage class supports volume expansion. We can use it. + +Now, we are going to deploy a `Druid` topology using a supported version by `KubeDB` operator. Then we are going to apply `DruidAutoscaler` to set up autoscaling. + +### Create External Dependency (Deep Storage) + +Before proceeding further, we need to prepare deep storage, which is one of the external dependency of Druid and used for storing the segments. It is a storage mechanism that Apache Druid does not provide. **Amazon S3**, **Google Cloud Storage**, or **Azure Blob Storage**, **S3-compatible storage** (like **Minio**), or **HDFS** are generally convenient options for deep storage. + +In this tutorial, we will run a `minio-server` as deep storage in our local `kind` cluster using `minio-operator` and create a bucket named `druid` in it, which the deployed druid database will use. + +```bash +$ helm repo add minio https://operator.min.io/ +$ helm repo update minio +$ helm upgrade --install --namespace "minio-operator" --create-namespace "minio-operator" minio/operator --set operator.replicaCount=1 + +$ helm upgrade --install --namespace "demo" --create-namespace druid-minio minio/tenant \ +--set tenant.pools[0].servers=1 \ +--set tenant.pools[0].volumesPerServer=1 \ +--set tenant.pools[0].size=1Gi \ +--set tenant.certificate.requestAutoCert=false \ +--set tenant.buckets[0].name="druid" \ +--set tenant.pools[0].name="default" + +``` + +Now we need to create a `Secret` named `deep-storage-config`. It contains the necessary connection information using which the druid database will connect to the deep storage. 
+ +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: deep-storage-config + namespace: demo +stringData: + druid.storage.type: "s3" + druid.storage.bucket: "druid" + druid.storage.baseKey: "druid/segments" + druid.s3.accessKey: "minio" + druid.s3.secretKey: "minio123" + druid.s3.protocol: "http" + druid.s3.enablePathStyleAccess: "true" + druid.s3.endpoint.signingRegion: "us-east-1" + druid.s3.endpoint.url: "http://myminio-hl.demo.svc.cluster.local:9000/" +``` + +Let’s create the `deep-storage-config` Secret shown above: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/autoscaler/storage/yamls/deep-storage-config.yaml +secret/deep-storage-config created +``` + +### Deploy Druid Cluster + +In this section, we are going to deploy a Druid topology cluster with monitoring enabled and with version `28.0.1`. Then, in the next section we will set up autoscaling for this cluster using `DruidAutoscaler` CRD. Below is the YAML of the `Druid` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-cluster + namespace: demo +spec: + version: 28.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + historicals: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageType: Durable + middleManagers: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageType: Durable + routers: + replicas: 1 + deletionPolicy: Delete + monitor: + agent: prometheus.io/operator + prometheus: + serviceMonitor: + labels: + release: prometheus + interval: 10s +``` + +Let's create the `Druid` CRO we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/autoscaler/storage/yamls/druid-cluster.yaml +druid.kubedb.com/druid-cluster created +``` + +Now, wait until `druid-cluster` has status `Ready`. i.e, + +```bash +$ kubectl get dr -n demo -w +NAME TYPE VERSION STATUS AGE +druid-cluster kubedb.com/v1alpha2 28.0.1 Provisioning 0s +druid-cluster kubedb.com/v1alpha2 28.0.1 Provisioning 24s +. +. +druid-cluster kubedb.com/v1alpha2 28.0.1 Ready 2m20s +``` + +Let's check volume size from petset, and from the persistent volume, + +```bash +$ kubectl get petset -n demo druid-cluster-historicals -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"1Gi" +$ kubectl get petset -n demo druid-cluster-middleManagers -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"1Gi" +$ kubectl get pv -n demo +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE +pvc-2c0ef2aa-0438-4d75-9cb2-c12a176bae6a 1Gi RWO Delete Bound demo/druid-cluster-base-task-dir-druid-cluster-middlemanagers-0 longhorn 95s +pvc-5f4cea5f-e0c8-4339-b67c-9cb8b02ba49d 1Gi RWO Delete Bound demo/druid-cluster-segment-cache-druid-cluster-historicals-0 longhorn 96s +``` + +You can see the petset for both historicals and middleManagers has 1GB storage, and the capacity of all the persistent volume is also 1GB. + +We are now ready to apply the `DruidAutoscaler` CRO to set up storage autoscaling for this cluster(historicals and middleManagers). + +### Storage Autoscaling + +Here, we are going to set up storage autoscaling using a DruidAutoscaler Object. 
+ +#### Create DruidAutoscaler Object + +In order to set up vertical autoscaling for this topology cluster, we have to create a `DruidAutoscaler` CRO with our desired configuration. Below is the YAML of the `DruidAutoscaler` object that we are going to create, + +```yaml +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: DruidAutoscaler +metadata: + name: druid-storage-autoscaler + namespace: demo +spec: + databaseRef: + name: druid-cluster + storage: + historicals: + expansionMode: "Offline" + trigger: "On" + usageThreshold: 60 + scalingThreshold: 100 + middleManagers: + expansionMode: "Offline" + trigger: "On" + usageThreshold: 60 + scalingThreshold: 100 +``` + +Here, + +- `spec.clusterRef.name` specifies that we are performing vertical scaling operation on `druid-cluster` cluster. +- `spec.storage.historicals.trigger/spec.storage.middleManagers.trigger` specifies that storage autoscaling is enabled for historicals and middleManagers of topology cluster. +- `spec.storage.historicals.usageThreshold/spec.storage.middleManagers.usageThreshold` specifies storage usage threshold, if storage usage exceeds `60%` then storage autoscaling will be triggered. +- `spec.storage.historicals.scalingThreshold/spec.storage.historicals.scalingThreshold` specifies the scaling threshold. Storage will be scaled to `100%` of the current amount. +- It has another field `spec.storage.historicals.expansionMode/spec.storage.middleManagers.expansionMode` to set the opsRequest volumeExpansionMode, which support two values: `Online` & `Offline`. Default value is `Online`. + +Let's create the `DruidAutoscaler` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/autoscaler/storage/yamls/druid-storage-autoscaler.yaml +druidautoscaler.autoscaling.kubedb.com/druid-storage-autoscaler created +``` + +#### Storage Autoscaling is set up successfully + +Let's check that the `druidautoscaler` resource is created successfully, + +```bash +$ kubectl get druidautoscaler -n demo +NAME AGE +druid-storage-autoscaler 34s + +$ kubectl describe druidautoscaler -n demo druid-storage-autoscaler +Name: druid-storage-autoscaler +Namespace: demo +Labels: +Annotations: +API Version: autoscaling.kubedb.com/v1alpha1 +Kind: DruidAutoscaler +Metadata: + Creation Timestamp: 2024-10-25T09:52:37Z + Generation: 1 + Managed Fields: + API Version: autoscaling.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:databaseRef: + f:storage: + .: + f:historicals: + .: + f:expansionMode: + f:scalingThreshold: + f:trigger: + f:usageThreshold: + f:middleManagers: + .: + f:expansionMode: + f:scalingThreshold: + f:trigger: + f:usageThreshold: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2024-10-25T09:52:37Z + API Version: autoscaling.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:ownerReferences: + .: + k:{"uid":"712730e8-41ef-4700-b184-825b30ecbc8c"}: + Manager: kubedb-autoscaler + Operation: Update + Time: 2024-10-25T09:52:37Z + Owner References: + API Version: kubedb.com/v1alpha2 + Block Owner Deletion: true + Controller: true + Kind: Druid + Name: druid-cluster + UID: 712730e8-41ef-4700-b184-825b30ecbc8c + Resource Version: 226662 + UID: 57cbd906-a9b7-4649-bfe0-304840bb60c1 +Spec: + Database Ref: + Name: druid-cluster + Ops Request Options: + Apply: IfReady + Storage: + Historicals: + Expansion Mode: Offline + 
Scaling Rules: + Applies Upto: + Threshold: 100pc + Scaling Threshold: 100 + Trigger: On + Usage Threshold: 60 + Middle Managers: + Expansion Mode: Offline + Scaling Rules: + Applies Upto: + Threshold: 100pc + Scaling Threshold: 100 + Trigger: On + Usage Threshold: 60 +Events: +``` +So, the `druidautoscaler` resource is created successfully. + +Now, for this demo, we are going to manually fill up the persistent volume to exceed the `usageThreshold` using `dd` command to see if storage autoscaling is working or not. + +We are autoscaling volume for both historicals and middleManagers. So we need to fill up the persistent volume for both historicals and middleManagers. + +1. Lets exec into the historicals pod and fill the cluster volume using the following commands: + +```bash +$ kubectl exec -it -n demo druid-cluster-historicals-0 -- bash +bash-5.1$ df -h /druid/data/segments +Filesystem Size Used Available Use% Mounted on +/dev/longhorn/pvc-d4ef15ef-b1af-4a1f-ad25-ad9bc990a2fb 973.4M 92.0K 957.3M 0% /druid/data/segment + +bash-5.1$ dd if=/dev/zero of=/druid/data/segments/file.img bs=600M count=1 +1+0 records in +1+0 records out +629145600 bytes (600.0MB) copied, 46.709228 seconds, 12.8MB/s + +bash-5.1$ df -h /druid/data/segments +Filesystem Size Used Available Use% Mounted on +/dev/longhorn/pvc-d4ef15ef-b1af-4a1f-ad25-ad9bc990a2fb 973.4M 600.1M 357.3M 63% /druid/data/segments +``` + +2. Let's exec into the middleManagers pod and fill the cluster volume using the following commands: + +```bash +$ kubectl exec -it -n demo druid-cluster-middleManagers-0 -- bash +druid@druid-cluster-middleManagers-0:~$ df -h /var/druid/task +Filesystem Size Used Available Use% Mounted on +/dev/longhorn/pvc-2c0ef2aa-0438-4d75-9cb2-c12a176bae6a 973.4M 24.0K 957.4M 0% /var/druid/task +druid@druid-cluster-middleManagers-0:~$ dd if=/dev/zero of=/var/druid/task/file.img bs=600M count=1 +1+0 records in +1+0 records out +629145600 bytes (629 MB, 600 MiB) copied, 3.39618 s, 185 MB/s +druid@druid-cluster-middleManagers-0:~$ df -h /var/druid/task +Filesystem Size Used Available Use% Mounted on +/dev/longhorn/pvc-2c0ef2aa-0438-4d75-9cb2-c12a176bae6a 973.4M 600.0M 357.4M 63% /var/druid/task +``` + +So, from the above output we can see that the storage usage is 63% for both nodes, which exceeded the `usageThreshold` 60%. + +There will be two `DruidOpsRequest` created for both historicals and middleManagers to expand the volume of the cluster for both nodes. +Let's watch the `druidopsrequest` in the demo namespace to see if any `druidopsrequest` object is created. After some time you'll see that a `druidopsrequest` of type `VolumeExpansion` will be created based on the `scalingThreshold`. + +```bash +$ watch kubectl get druidopsrequest -n demo +NAME TYPE STATUS AGE +druidopsrequest.ops.kubedb.com/drops-druid-cluster-gq9huj VolumeExpansion Progressing 46s +druidopsrequest.ops.kubedb.com/drops-druid-cluster-kbw4fd VolumeExpansion Successful 4m46s +``` + +Once ops request has succeeded. Let's wait for the other one to become successful. + +```bash +$ kubectl get druidopsrequest -n demo +NAME TYPE STATUS AGE +druidopsrequest.ops.kubedb.com/drops-druid-cluster-gq9huj VolumeExpansion Successful 3m18s +druidopsrequest.ops.kubedb.com/drops-druid-cluster-kbw4fd VolumeExpansion Successful 7m18s +``` + +We can see from the above output that the both `DruidOpsRequest` has succeeded. If we describe the `DruidOpsRequest` one by one we will get an overview of the steps that were followed to expand the volume of the cluster. 
+ +```bash +$ kubectl describe druidopsrequest -n demo drops-druid-cluster-kbw4fd +Name: drops-druid-cluster-kbw4fd +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=druid-cluster + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=druids.kubedb.com +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: DruidOpsRequest +Metadata: + Creation Timestamp: 2024-10-25T09:57:14Z + Generation: 1 + Managed Fields: + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:labels: + .: + f:app.kubernetes.io/component: + f:app.kubernetes.io/instance: + f:app.kubernetes.io/managed-by: + f:app.kubernetes.io/name: + f:ownerReferences: + .: + k:{"uid":"57cbd906-a9b7-4649-bfe0-304840bb60c1"}: + f:spec: + .: + f:apply: + f:databaseRef: + f:type: + f:volumeExpansion: + .: + f:historicals: + f:mode: + Manager: kubedb-autoscaler + Operation: Update + Time: 2024-10-25T09:57:14Z + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:status: + .: + f:conditions: + f:observedGeneration: + f:phase: + Manager: kubedb-ops-manager + Operation: Update + Subresource: status + Time: 2024-10-25T10:00:20Z + Owner References: + API Version: autoscaling.kubedb.com/v1alpha1 + Block Owner Deletion: true + Controller: true + Kind: DruidAutoscaler + Name: druid-storage-autoscaler + UID: 57cbd906-a9b7-4649-bfe0-304840bb60c1 + Resource Version: 228016 + UID: 1fa750bb-2db3-4684-a7cf-1b3047bc07af +Spec: + Apply: IfReady + Database Ref: + Name: druid-cluster + Type: VolumeExpansion + Volume Expansion: + Historicals: 2041405440 + Mode: Offline +Status: + Conditions: + Last Transition Time: 2024-10-25T09:57:14Z + Message: Druid ops-request has started to expand volume of druid nodes. 
+ Observed Generation: 1 + Reason: VolumeExpansion + Status: True + Type: VolumeExpansion + Last Transition Time: 2024-10-25T09:57:22Z + Message: get pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPetSet + Last Transition Time: 2024-10-25T09:57:22Z + Message: is pet set deleted; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IsPetSetDeleted + Last Transition Time: 2024-10-25T09:57:32Z + Message: successfully deleted the petSets with orphan propagation policy + Observed Generation: 1 + Reason: OrphanPetSetPods + Status: True + Type: OrphanPetSetPods + Last Transition Time: 2024-10-25T09:57:37Z + Message: get pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPod + Last Transition Time: 2024-10-25T09:57:37Z + Message: is ops req patched; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IsOpsReqPatched + Last Transition Time: 2024-10-25T09:57:37Z + Message: create pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreatePod + Last Transition Time: 2024-10-25T09:57:42Z + Message: get pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPvc + Last Transition Time: 2024-10-25T09:57:42Z + Message: is pvc patched; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IsPvcPatched + Last Transition Time: 2024-10-25T09:59:27Z + Message: compare storage; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CompareStorage + Last Transition Time: 2024-10-25T09:59:27Z + Message: create; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: Create + Last Transition Time: 2024-10-25T09:59:35Z + Message: is druid running; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: IsDruidRunning + Last Transition Time: 2024-10-25T09:59:57Z + Message: successfully updated historicals node PVC sizes + Observed Generation: 1 + Reason: UpdateHistoricalsNodePVCs + Status: True + Type: UpdateHistoricalsNodePVCs + Last Transition Time: 2024-10-25T10:00:15Z + Message: successfully reconciled the Druid resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-10-25T10:00:20Z + Message: PetSet is recreated + Observed Generation: 1 + Reason: ReadyPetSets + Status: True + Type: ReadyPetSets + Last Transition Time: 2024-10-25T10:00:20Z + Message: Successfully completed volumeExpansion for Druid + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 8m29s KubeDB Ops-manager Operator Start processing for DruidOpsRequest: demo/drops-druid-cluster-kbw4fd + Normal Starting 8m29s KubeDB Ops-manager Operator Pausing Druid databse: demo/druid-cluster + Normal Successful 8m29s KubeDB Ops-manager Operator Successfully paused Druid database: demo/druid-cluster for DruidOpsRequest: drops-druid-cluster-kbw4fd + Warning get pet set; ConditionStatus:True 8m21s KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Warning is pet set deleted; ConditionStatus:True 8m21s KubeDB Ops-manager Operator is pet set deleted; ConditionStatus:True + Warning get pet set; ConditionStatus:True 8m16s KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Normal OrphanPetSetPods 8m11s KubeDB Ops-manager Operator successfully deleted the petSets with orphan propagation policy + Warning get pod; 
ConditionStatus:True 8m6s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning is ops req patched; ConditionStatus:True 8m6s KubeDB Ops-manager Operator is ops req patched; ConditionStatus:True + Warning create pod; ConditionStatus:True 8m6s KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 8m1s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 8m1s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning is pvc patched; ConditionStatus:True 8m1s KubeDB Ops-manager Operator is pvc patched; ConditionStatus:True + Warning compare storage; ConditionStatus:False 8m1s KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 7m56s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 7m56s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 7m51s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 7m51s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 7m46s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 7m46s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 7m41s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 7m41s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 7m36s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 7m36s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 7m31s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 7m31s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 7m26s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 7m26s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 7m21s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 7m21s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 7m16s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 7m16s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 7m11s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 7m11s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 7m6s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 7m6s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 7m1s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 7m1s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 6m56s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 6m56s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 6m51s KubeDB Ops-manager Operator get pod; 
ConditionStatus:True + Warning get pvc; ConditionStatus:True 6m51s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 6m46s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 6m46s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 6m41s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 6m41s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 6m36s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 6m36s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 6m31s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 6m31s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 6m26s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 6m26s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 6m21s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 6m21s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 6m16s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 6m16s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 6m16s KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create; ConditionStatus:True 6m16s KubeDB Ops-manager Operator create; ConditionStatus:True + Warning is ops req patched; ConditionStatus:True 6m16s KubeDB Ops-manager Operator is ops req patched; ConditionStatus:True + Warning get pod; ConditionStatus:True 6m11s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning is druid running; ConditionStatus:False 6m8s KubeDB Ops-manager Operator is druid running; ConditionStatus:False + Warning get pod; ConditionStatus:True 6m6s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 6m1s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 5m56s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 5m51s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Normal UpdateHistoricalsNodePVCs 5m46s KubeDB Ops-manager Operator successfully updated historicals node PVC sizes + Normal UpdatePetSets 5m28s KubeDB Ops-manager Operator successfully reconciled the Druid resources + Warning get pet set; ConditionStatus:True 5m23s KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Normal ReadyPetSets 5m23s KubeDB Ops-manager Operator PetSet is recreated + Normal Starting 5m23s KubeDB Ops-manager Operator Resuming Druid database: demo/druid-cluster + Normal Successful 5m23s KubeDB Ops-manager Operator Successfully resumed Druid database: demo/druid-cluster for DruidOpsRequest: drops-druid-cluster-kbw4fd + Normal UpdatePetSets 5m18s KubeDB Ops-manager Operator successfully reconciled the Druid resources + Normal UpdatePetSets 5m8s KubeDB Ops-manager Operator successfully reconciled the Druid resources + Normal UpdatePetSets 4m57s KubeDB Ops-manager Operator successfully reconciled the Druid resources +``` + +```bash +$ 
kubectl describe druidopsrequest -n demo drops-druid-cluster-gq9huj +Name: drops-druid-cluster-gq9huj +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=druid-cluster + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=druids.kubedb.com +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: DruidOpsRequest +Metadata: + Creation Timestamp: 2024-10-25T10:01:14Z + Generation: 1 + Managed Fields: + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:labels: + .: + f:app.kubernetes.io/component: + f:app.kubernetes.io/instance: + f:app.kubernetes.io/managed-by: + f:app.kubernetes.io/name: + f:ownerReferences: + .: + k:{"uid":"57cbd906-a9b7-4649-bfe0-304840bb60c1"}: + f:spec: + .: + f:apply: + f:databaseRef: + f:type: + f:volumeExpansion: + .: + f:middleManagers: + f:mode: + Manager: kubedb-autoscaler + Operation: Update + Time: 2024-10-25T10:01:14Z + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:status: + .: + f:conditions: + f:observedGeneration: + f:phase: + Manager: kubedb-ops-manager + Operation: Update + Subresource: status + Time: 2024-10-25T10:04:12Z + Owner References: + API Version: autoscaling.kubedb.com/v1alpha1 + Block Owner Deletion: true + Controller: true + Kind: DruidAutoscaler + Name: druid-storage-autoscaler + UID: 57cbd906-a9b7-4649-bfe0-304840bb60c1 + Resource Version: 228783 + UID: 3b97380c-e867-467f-b366-4b50c7cd7d6d +Spec: + Apply: IfReady + Database Ref: + Name: druid-cluster + Type: VolumeExpansion + Volume Expansion: + Middle Managers: 2041405440 + Mode: Offline +Status: + Conditions: + Last Transition Time: 2024-10-25T10:01:14Z + Message: Druid ops-request has started to expand volume of druid nodes. 
+ Observed Generation: 1 + Reason: VolumeExpansion + Status: True + Type: VolumeExpansion + Last Transition Time: 2024-10-25T10:01:22Z + Message: get pet set; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPetSet + Last Transition Time: 2024-10-25T10:01:22Z + Message: is pet set deleted; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IsPetSetDeleted + Last Transition Time: 2024-10-25T10:01:32Z + Message: successfully deleted the petSets with orphan propagation policy + Observed Generation: 1 + Reason: OrphanPetSetPods + Status: True + Type: OrphanPetSetPods + Last Transition Time: 2024-10-25T10:01:37Z + Message: get pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPod + Last Transition Time: 2024-10-25T10:01:37Z + Message: is ops req patched; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IsOpsReqPatched + Last Transition Time: 2024-10-25T10:01:37Z + Message: create pod; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CreatePod + Last Transition Time: 2024-10-25T10:01:42Z + Message: get pvc; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: GetPvc + Last Transition Time: 2024-10-25T10:01:42Z + Message: is pvc patched; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: IsPvcPatched + Last Transition Time: 2024-10-25T10:03:32Z + Message: compare storage; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: CompareStorage + Last Transition Time: 2024-10-25T10:03:32Z + Message: create; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: Create + Last Transition Time: 2024-10-25T10:03:40Z + Message: is druid running; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: IsDruidRunning + Last Transition Time: 2024-10-25T10:03:52Z + Message: successfully updated middleManagers node PVC sizes + Observed Generation: 1 + Reason: UpdateMiddleManagersNodePVCs + Status: True + Type: UpdateMiddleManagersNodePVCs + Last Transition Time: 2024-10-25T10:04:07Z + Message: successfully reconciled the Druid resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-10-25T10:04:12Z + Message: PetSet is recreated + Observed Generation: 1 + Reason: ReadyPetSets + Status: True + Type: ReadyPetSets + Last Transition Time: 2024-10-25T10:04:12Z + Message: Successfully completed volumeExpansion for Druid + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 5m33s KubeDB Ops-manager Operator Start processing for DruidOpsRequest: demo/drops-druid-cluster-gq9huj + Normal Starting 5m33s KubeDB Ops-manager Operator Pausing Druid databse: demo/druid-cluster + Normal Successful 5m33s KubeDB Ops-manager Operator Successfully paused Druid database: demo/druid-cluster for DruidOpsRequest: drops-druid-cluster-gq9huj + Warning get pet set; ConditionStatus:True 5m25s KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Warning is pet set deleted; ConditionStatus:True 5m25s KubeDB Ops-manager Operator is pet set deleted; ConditionStatus:True + Warning get pet set; ConditionStatus:True 5m20s KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Normal OrphanPetSetPods 5m15s KubeDB Ops-manager Operator successfully deleted the petSets with orphan propagation policy + Warning get pod; 
ConditionStatus:True 5m10s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning is ops req patched; ConditionStatus:True 5m10s KubeDB Ops-manager Operator is ops req patched; ConditionStatus:True + Warning create pod; ConditionStatus:True 5m10s KubeDB Ops-manager Operator create pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 5m5s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 5m5s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning is pvc patched; ConditionStatus:True 5m5s KubeDB Ops-manager Operator is pvc patched; ConditionStatus:True + Warning compare storage; ConditionStatus:False 5m5s KubeDB Ops-manager Operator compare storage; ConditionStatus:False + Warning get pod; ConditionStatus:True 5m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 5m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 4m55s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 4m55s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 4m50s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 4m50s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 4m45s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 4m45s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 4m40s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 4m40s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 4m35s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 4m35s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 4m30s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 4m30s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 4m25s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 4m25s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 4m20s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 4m20s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 4m15s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 4m15s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 4m10s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 4m10s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 4m5s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 4m5s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 4m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 4m KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 3m55s KubeDB Ops-manager Operator get pod; 
ConditionStatus:True + Warning get pvc; ConditionStatus:True 3m55s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 3m50s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 3m50s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 3m45s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 3m45s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 3m40s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 3m40s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 3m35s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 3m35s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 3m30s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 3m30s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 3m25s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 3m25s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 3m20s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 3m20s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning get pod; ConditionStatus:True 3m15s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pvc; ConditionStatus:True 3m15s KubeDB Ops-manager Operator get pvc; ConditionStatus:True + Warning compare storage; ConditionStatus:True 3m15s KubeDB Ops-manager Operator compare storage; ConditionStatus:True + Warning create; ConditionStatus:True 3m15s KubeDB Ops-manager Operator create; ConditionStatus:True + Warning is ops req patched; ConditionStatus:True 3m15s KubeDB Ops-manager Operator is ops req patched; ConditionStatus:True + Warning get pod; ConditionStatus:True 3m10s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning is druid running; ConditionStatus:False 3m7s KubeDB Ops-manager Operator is druid running; ConditionStatus:False + Warning get pod; ConditionStatus:True 3m5s KubeDB Ops-manager Operator get pod; ConditionStatus:True + Warning get pod; ConditionStatus:True 3m KubeDB Ops-manager Operator get pod; ConditionStatus:True + Normal UpdateMiddleManagersNodePVCs 2m55s KubeDB Ops-manager Operator successfully updated middleManagers node PVC sizes + Normal UpdatePetSets 2m40s KubeDB Ops-manager Operator successfully reconciled the Druid resources + Warning get pet set; ConditionStatus:True 2m35s KubeDB Ops-manager Operator get pet set; ConditionStatus:True + Normal ReadyPetSets 2m35s KubeDB Ops-manager Operator PetSet is recreated + Normal Starting 2m35s KubeDB Ops-manager Operator Resuming Druid database: demo/druid-cluster + Normal Successful 2m35s KubeDB Ops-manager Operator Successfully resumed Druid database: demo/druid-cluster for DruidOpsRequest: drops-druid-cluster-gq9huj +``` + +Now, we are going to verify from the `Petset`, and the `Persistent Volume` whether the volume of the topology cluster has expanded to meet the desired state, Let's check, + +```bash +$ kubectl get petset -n demo druid-cluster-historicals -o json | jq 
'.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"2041405440" +$ kubectl get petset -n demo druid-cluster-middleManagers -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"2041405440" +$ kubectl get pv -n demo +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE +pvc-2c0ef2aa-0438-4d75-9cb2-c12a176bae6a 1948Mi RWO Delete Bound demo/druid-cluster-base-task-dir-druid-cluster-middlemanagers-0 longhorn 19m +pvc-5f4cea5f-e0c8-4339-b67c-9cb8b02ba49d 1948Mi RWO Delete Bound demo/druid-cluster-segment-cache-druid-cluster-historicals-0 longhorn 19m +``` + +The above output verifies that we have successfully autoscaled the volume of the Druid topology cluster for both historicals and middleManagers. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete druidopsrequests -n demo drops-druid-cluster-gq9huj drops-druid-cluster-kbw4fd +kubectl delete druidutoscaler -n demo druid-storage-autoscaler +kubectl delete dr -n demo druid-cluster +``` + +## Next Steps + +- Detail concepts of [Druid object](/docs/guides/druid/concepts/druid.md). +- Different Druid topology clustering modes [here](/docs/guides/druid/clustering/_index.md). +- Monitor your Druid database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/druid/monitoring/using-prometheus-operator.md). + +[//]: # (- Monitor your Druid database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/druid/monitoring/using-builtin-prometheus.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/druid/autoscaler/storage/yamls/druid-cluster.yaml b/docs/guides/druid/autoscaler/storage/yamls/druid-cluster.yaml index 5415590a2b..9fe7a3f2d4 100644 --- a/docs/guides/druid/autoscaler/storage/yamls/druid-cluster.yaml +++ b/docs/guides/druid/autoscaler/storage/yamls/druid-cluster.yaml @@ -7,8 +7,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: historicals: replicas: 1 diff --git a/docs/guides/druid/autoscaler/storage/yamls/druid-cluster.yaml.bak b/docs/guides/druid/autoscaler/storage/yamls/druid-cluster.yaml.bak new file mode 100644 index 0000000000..5415590a2b --- /dev/null +++ b/docs/guides/druid/autoscaler/storage/yamls/druid-cluster.yaml.bak @@ -0,0 +1,40 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-cluster + namespace: demo +spec: + version: 28.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + historicals: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageType: Durable + middleManagers: + replicas: 1 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageType: Durable + routers: + replicas: 1 + deletionPolicy: Delete + monitor: + agent: prometheus.io/operator + prometheus: + serviceMonitor: + labels: + release: prometheus + interval: 10s diff --git a/docs/guides/druid/backup/application-level/examples/sample-druid.yaml b/docs/guides/druid/backup/application-level/examples/sample-druid.yaml index cca8bd8797..a107788960 100644 --- a/docs/guides/druid/backup/application-level/examples/sample-druid.yaml +++ b/docs/guides/druid/backup/application-level/examples/sample-druid.yaml @@ -7,8 +7,8 @@ spec: version: 30.0.1 deepStorage: type: s3 - configSecret: - name: 
deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/backup/application-level/examples/sample-druid.yaml.bak b/docs/guides/druid/backup/application-level/examples/sample-druid.yaml.bak new file mode 100644 index 0000000000..cca8bd8797 --- /dev/null +++ b/docs/guides/druid/backup/application-level/examples/sample-druid.yaml.bak @@ -0,0 +1,15 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: sample-druid + namespace: demo +spec: + version: 30.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/guides/druid/backup/application-level/index.md b/docs/guides/druid/backup/application-level/index.md index f99ac9df02..ba4ef6bf24 100644 --- a/docs/guides/druid/backup/application-level/index.md +++ b/docs/guides/druid/backup/application-level/index.md @@ -117,8 +117,8 @@ spec: version: 30.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/backup/application-level/index.md.bak b/docs/guides/druid/backup/application-level/index.md.bak new file mode 100644 index 0000000000..f99ac9df02 --- /dev/null +++ b/docs/guides/druid/backup/application-level/index.md.bak @@ -0,0 +1,791 @@ +--- +title: Application Level Backup & Restore Druid | KubeStash +description: Application Level Backup and Restore using KubeStash +menu: + docs_{{ .version }}: + identifier: guides-druid-backup-application-level + name: Application Level Backup + parent: guides-druid-backup + weight: 40 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +# Application Level Backup and Restore Druid database using KubeStash + +[KubeStash](https://kubestash.com) offers application-level backup and restore functionality for `Druid` databases. It captures both manifest and logical data backups of any `Druid` database in a single snapshot. During the restore process, KubeStash first applies the `Druid` manifest to the cluster and then restores the data into it. + +This guide will give you how you can take application-level backup and restore your `Druid` databases using `Kubestash`. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using `Minikube` or `Kind`. +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md) and make sure to include the flags `--set global.featureGates.Druid=true` to ensure **Druid CRD** and `--set global.featureGates.ZooKeeper=true` to ensure **ZooKeeper CRD** as Druid depends on ZooKeeper for external dependency with helm command. +- Install `KubeStash` in your cluster following the steps [here](https://kubestash.com/docs/latest/setup/install/kubestash). +- Install KubeStash `kubectl` plugin following the steps [here](https://kubestash.com/docs/latest/setup/install/kubectl-plugin/). +- If you are not familiar with how KubeStash backup and restore Druid databases, please check the following guide [here](/docs/guides/druid/backup/overview/index.md). 
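+For reference, the feature-gate flags mentioned in the list above are passed to the KubeDB installation roughly as shown in the sketch below. The chart reference and license flag here are assumptions for illustration only; follow the linked setup guide for the exact, current command.
+
+```bash
+# Hypothetical install command showing where the Druid/ZooKeeper feature gates go;
+# see the KubeDB setup guide for the authoritative chart reference and license handling.
+$ helm upgrade -i kubedb oci://ghcr.io/appscode-charts/kubedb \
+    --namespace kubedb --create-namespace \
+    --set-file global.license=/path/to/the/license.txt \
+    --set global.featureGates.Druid=true \
+    --set global.featureGates.ZooKeeper=true
+```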
+ +You should be familiar with the following `KubeStash` concepts: + +- [BackupStorage](https://kubestash.com/docs/latest/concepts/crds/backupstorage/) +- [BackupConfiguration](https://kubestash.com/docs/latest/concepts/crds/backupconfiguration/) +- [BackupSession](https://kubestash.com/docs/latest/concepts/crds/backupsession/) +- [RestoreSession](https://kubestash.com/docs/latest/concepts/crds/restoresession/) +- [Addon](https://kubestash.com/docs/latest/concepts/crds/addon/) +- [Function](https://kubestash.com/docs/latest/concepts/crds/function/) +- [Task](https://kubestash.com/docs/latest/concepts/crds/addon/#task-specification) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/guides/druid/backup/application-level/examples](https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/application-level/examples) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Backup Druid + +KubeStash supports backups for `Druid` instances for various Cluster setups. In this demonstration, we'll focus on a `Druid` database with 5 type of nodes (coordinators, historicals, brokers, middlemanagers and routers). The backup and restore process is similar for other Cluster setup as well. + +This section will demonstrate how to take application-level backup of a `Druid` database. Here, we are going to deploy a `Druid` database using KubeDB. Then, we are going to back up the database at the application level to a `GCS` bucket. Finally, we will restore the entire `Druid` database. + +## Deploy Sample Druid Database + +**Create External Dependency (Deep Storage):** + +One of the external dependency of Druid is deep storage where the segments are stored. It is a storage mechanism that Apache Druid does not provide. **Amazon S3**, **Google Cloud Storage**, or **Azure Blob Storage**, **S3-compatible storage** (like **Minio**), or **HDFS** are generally convenient options for deep storage. + +In this tutorial, we will run a `minio-server` as deep storage in our local `kind` cluster using `minio-operator` and create a bucket named `druid` in it, which the deployed druid database will use. + +```bash + +$ helm repo add minio https://operator.min.io/ +$ helm repo update minio +$ helm upgrade --install --namespace "minio-operator" --create-namespace "minio-operator" minio/operator --set operator.replicaCount=1 + +$ helm upgrade --install --namespace "demo" --create-namespace druid-minio minio/tenant \ +--set tenant.pools[0].servers=1 \ +--set tenant.pools[0].volumesPerServer=1 \ +--set tenant.pools[0].size=1Gi \ +--set tenant.certificate.requestAutoCert=false \ +--set tenant.buckets[0].name="druid" \ +--set tenant.pools[0].name="default" + +``` + +Now we need to create a `Secret` named `deep-storage-config`. It contains the necessary connection information using which the druid database will connect to the deep storage. 
+ +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: deep-storage-config + namespace: demo +stringData: + druid.storage.type: "s3" + druid.storage.bucket: "druid" + druid.storage.baseKey: "druid/segments" + druid.s3.accessKey: "minio" + druid.s3.secretKey: "minio123" + druid.s3.protocol: "http" + druid.s3.enablePathStyleAccess: "true" + druid.s3.endpoint.signingRegion: "us-east-1" + druid.s3.endpoint.url: "http://myminio-hl.demo.svc.cluster.local:9000/" +``` + +Let’s create the `deep-storage-config` Secret shown above: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/application-level/examples/deep-storage-config.yaml +secret/deep-storage-config created +``` + +Let's deploy a sample `Druid` database and insert some data into it. + +**Create Druid CR:** + +Below is the YAML of a sample `Druid` CR that we are going to create for this tutorial: + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: sample-druid + namespace: demo +spec: + version: 30.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: WipeOut +``` + +Here, +- `.spec.topology` specifies about the clustering configuration of Druid. +- `.Spec.topology.routers` specifies that 1 replica of routers node will get provisioned alongside the essential nodes. + +Create the above `Druid` CR, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/application-level/examples/sample-druid.yaml +druid.kubedb.com/sample-druid created +``` + +KubeDB will deploy a Druid database according to the above specification. It will also create the necessary Secrets and Services to access the database. + +Let's check if the database is ready to use, + +```bash +$ kubectl get druids.kubedb.com -n demo +NAME TYPE VERSION STATUS AGE +sample-druid kubedb.com/v1alpha2 30.0.1 Ready 4m14s +``` + +The database is `Ready`. Verify that KubeDB has created a `Secret` and a `Service` for this database using the following commands, + +```bash +$ kubectl get secret -n demo -l=app.kubernetes.io/instance=sample-druid +NAME TYPE DATA AGE +sample-druid-admin-cred kubernetes.io/basic-auth 2 2m34s +sample-druid-config Opaque 11 2m34s + +$ kubectl get service -n demo -l=app.kubernetes.io/instance=sample-druid +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +sample-druid-brokers ClusterIP 10.128.135.115 8082/TCP 2m53s +sample-druid-coordinators ClusterIP 10.128.16.222 8081/TCP 2m53s +sample-druid-pods ClusterIP None 8081/TCP,8090/TCP,8083/TCP,8091/TCP,8082/TCP,8888/TCP 2m53s +sample-druid-routers ClusterIP 10.128.191.186 8888/TCP 2m53s +``` + +Here, we have to use service `sample-druid-routers` and secret `sample-druid-admin-cred` to connect with the database. `KubeDB` creates an [AppBinding](/docs/guides/druid/concepts/appbinding.md) CR that holds the necessary information to connect with the database. + +**Verify Internal Dependencies:** + +```bash +kubectl get mysql,zk -n demo +NAME VERSION STATUS AGE +mysql.kubedb.com/sample-druid-mysql-metadata 9.1.0 Ready 6m31s + +NAME TYPE VERSION STATUS AGE +zookeeper.kubedb.com/sample-druid-zk kubedb.com/v1alpha2 3.7.2 Ready 6m31s +``` +We can see that KubeDB has deployed a `MySQL` and a `ZooKeeper` instance as [External dependencies](https://druid.apache.org/docs/latest/design/architecture/#external-dependencies) of the `Druid` cluster. 
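+If you want a closer look at these dependency workloads, you can list the pods KubeDB created for them. This is only a quick sanity check; the pod names follow the `sample-druid-mysql-metadata` / `sample-druid-zk` naming convention shown above, and the exact output will vary in your cluster.
+
+```bash
+# List all pods belonging to the sample-druid deployment and its MySQL/ZooKeeper dependencies
+$ kubectl get pods -n demo | grep sample-druid
+```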
+ +**Verify AppBinding:** + +Verify that the `AppBinding` has been created successfully using the following command, + +```bash +$ kubectl get appbindings -n demo +NAME TYPE VERSION AGE +sample-druid kubedb.com/druid 30.0.1 4m7s +sample-druid-mysql-metadata kubedb.com/mysql 9.1.0 6m31s +sample-druid-zk kubedb.com/zookeeper 3.7.2 6m34s +``` + +Here `sample-druid` is the `AppBinding` of Druid, while `sample-druid-mysql-metadata` and `sample-druid-zk` are the `AppBinding` of `MySQL` and `ZooKeeper` instances that `KubeDB` has deployed as the [External dependencies](https://druid.apache.org/docs/latest/design/architecture/#external-dependencies) of `Druid` + +Let's check the YAML of the `AppBinding` of druid, + +```bash +$ kubectl get appbindings -n demo sample-druid -o yaml +``` + +```yaml +apiVersion: appcatalog.appscode.com/v1alpha1 +kind: AppBinding +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"kubedb.com/v1alpha2","kind":"Druid","metadata":{"annotations":{},"name":"sample-druid","namespace":"demo"},"spec":{"deepStorage":{"configSecret":{"name":"deep-storage-config"},"type":"s3"},"deletionPolicy":"WipeOut","topology":{"routers":{"replicas":1}},"version":"30.0.1"}} + creationTimestamp: "2024-09-19T13:02:20Z" + generation: 1 + labels: + app.kubernetes.io/component: database + app.kubernetes.io/instance: sample-druid + app.kubernetes.io/managed-by: kubedb.com + app.kubernetes.io/name: druids.kubedb.com + name: sample-druid + namespace: demo + ownerReferences: + - apiVersion: kubedb.com/v1alpha2 + blockOwnerDeletion: true + controller: true + kind: Druid + name: sample-druid + uid: cdbc2414-0dd1-4573-9532-e96b9094a443 + resourceVersion: "1610820" + uid: 8430d22d-e715-454a-8a83-e30e40cbeb14 +spec: + appRef: + apiGroup: kubedb.com + kind: Druid + name: sample-druid + namespace: demo + clientConfig: + service: + name: sample-druid-pods + port: 8888 + scheme: http + url: http://sample-druid-coordinators-0.sample-druid-pods.demo.svc.cluster.local:8081,http://sample-druid-overlords-0.sample-druid-pods.demo.svc.cluster.local:8090,http://sample-druid-middlemanagers-0.sample-druid-pods.demo.svc.cluster.local:8091,http://sample-druid-historicals-0.sample-druid-pods.demo.svc.cluster.local:8083,http://sample-druid-brokers-0.sample-druid-pods.demo.svc.cluster.local:8082,http://sample-druid-routers-0.sample-druid-pods.demo.svc.cluster.local:8888 + secret: + name: sample-druid-admin-cred + type: kubedb.com/druid + version: 30.0.1 +``` + +KubeStash uses the `AppBinding` CR to connect with the target database. It requires the following two fields to be set in AppBinding's `.spec` section. + +- `.spec.clientConfig.service.name` specifies the name of the Service that connects to the database. +- `.spec.secret` specifies the name of the Secret that holds necessary credentials to access the database. +- `spec.type` specifies the types of the app that this AppBinding is pointing to. KubeDB generated AppBinding follows the following format: `/`. + +**Insert Sample Data:** + +We can access the [web console](https://druid.apache.org/docs/latest/operations/web-console) of Druid database from any browser by port-forwarding the routers. Let’s port-forward the port `8888` to local machine: +```bash +kubectl port-forward -n demo svc/sample-druid-routers 8888 +Forwarding from 127.0.0.1:8888 -> 8888 +Forwarding from [::1]:8888 -> 8888 +``` + +Now hit the `http://localhost:8888` from any browser, and you will be prompted to provide the credential of the druid database. 
By following the steps discussed below, you can get the credential generated by the KubeDB operator for your Druid database. + +**Connection information:** + +- Username: + + ```bash + $ kubectl get secret -n demo sample-druid-admin-cred -o jsonpath='{.data.username}' | base64 -d + admin + ``` + +- Password: + + ```bash + $ kubectl get secret -n demo sample-druid-admin-cred -o jsonpath='{.data.password}' | base64 -d + DqG5E63NtklAkxqC + ``` + +After providing the credentials correctly, you should be able to access the web console like shown below. + +

+
+[Screenshot: the Druid web console]
+

+ +Now select the `Load Data` option and then select `Batch - classic` from the drop-down menu. +

+
+[Screenshot: the Load Data menu with `Batch - classic` selected]
+

+ +Select `Example data` and click `Load example` to insert the example `Wikipedia Edits` datasource. + +

+
+[Screenshot: loading the example `Wikipedia Edits` data]
+

+
+
+Keep clicking `Next` through the remaining steps, then click `Submit`.
+
+

+
+[Screenshot: submitting the ingestion task]
+

+
+
+Within a minute, the status of the ingestion task should become `SUCCESS`.
+

+
+[Screenshot: ingestion task status showing `SUCCESS`]
+
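+
+If you prefer the command line, you can also confirm the ingested data through Druid's SQL API on the router (a rough sketch; it assumes the `kubectl port-forward` to `sample-druid-routers` shown earlier is still running):
+
+```bash
+$ PASSWORD=$(kubectl get secret -n demo sample-druid-admin-cred -o jsonpath='{.data.password}' | base64 -d)
+$ curl -s -u "admin:$PASSWORD" \
+    -H 'Content-Type: application/json' \
+    -d '{"query": "SELECT COUNT(*) AS edits FROM wikipedia"}' \
+    http://localhost:8888/druid/v2/sql
+```
+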

+ +Now, we are ready to backup the database. + +### Prepare Backend + +We are going to store our backed up data into a GCS bucket. We have to create a Secret with necessary credentials and a `BackupStorage` CR to use this backend. If you want to use a different backend, please read the respective backend configuration doc from [here](https://kubestash.com/docs/latest/guides/backends/overview/). + +**Create Secret:** + +Let's create a secret called `gcs-secret` with access credentials to our desired GCS bucket, + +```bash +$ echo -n '' > GOOGLE_PROJECT_ID +$ cat /path/to/downloaded-sa-key.json > GOOGLE_SERVICE_ACCOUNT_JSON_KEY +$ kubectl create secret generic -n demo gcs-secret \ + --from-file=./GOOGLE_PROJECT_ID \ + --from-file=./GOOGLE_SERVICE_ACCOUNT_JSON_KEY +secret/gcs-secret created +``` + +**Create BackupStorage:** + +Now, create a `BackupStorage` using this secret. Below is the YAML of `BackupStorage` CR we are going to create, + +```yaml +apiVersion: storage.kubestash.com/v1alpha1 +kind: BackupStorage +metadata: + name: gcs-storage + namespace: demo +spec: + storage: + provider: gcs + gcs: + bucket: kubestash-qa + prefix: druid + secretName: gcs-secret + usagePolicy: + allowedNamespaces: + from: All + default: true + deletionPolicy: Delete +``` + +Let's create the BackupStorage we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/application-level/examples/backupstorage.yaml +backupstorage.storage.kubestash.com/gcs-storage created +``` + +Now, we are ready to backup our database to our desired backend. + +**Create RetentionPolicy:** + +Now, let's create a `RetentionPolicy` to specify how the old Snapshots should be cleaned up. + +Below is the YAML of the `RetentionPolicy` object that we are going to create, + +```yaml +apiVersion: storage.kubestash.com/v1alpha1 +kind: RetentionPolicy +metadata: + name: demo-retention + namespace: demo +spec: + default: true + failedSnapshots: + last: 2 + maxRetentionPeriod: 2mo + successfulSnapshots: + last: 5 + usagePolicy: + allowedNamespaces: + from: All +``` + +Let’s create the above `RetentionPolicy`, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/application-level/examples/retentionpolicy.yaml +retentionpolicy.storage.kubestash.com/demo-retention created +``` + +### Backup + +We have to create a `BackupConfiguration` targeting respective `sample-druid` Druid database. Then, KubeStash will create a `CronJob` for each session to take periodic backup of that database. + +At first, we need to create a secret with a Restic password for backup data encryption. 
+ +**Create Secret:** + +Let's create a secret called `encrypt-secret` with the Restic password, + +```bash +$ echo -n 'changeit' > RESTIC_PASSWORD +$ kubectl create secret generic -n demo encrypt-secret \ + --from-file=./RESTIC_PASSWORD \ +secret "encrypt-secret" created +``` + +**Create BackupConfiguration:** + +Below is the YAML for `BackupConfiguration` CR to take application-level backup of the `sample-druid` database that we have deployed earlier, + +```yaml +apiVersion: core.kubestash.com/v1alpha1 +kind: BackupConfiguration +metadata: + name: sample-druid-backup + namespace: demo +spec: + target: + apiGroup: kubedb.com + kind: Druid + namespace: demo + name: sample-druid + backends: + - name: gcs-backend + storageRef: + namespace: demo + name: gcs-storage + retentionPolicy: + name: demo-retention + namespace: demo + sessions: + - name: frequent-backup + scheduler: + schedule: "*/5 * * * *" + jobTemplate: + backoffLimit: 1 + repositories: + - name: gcs-druid-repo + backend: gcs-backend + directory: /druid + encryptionSecret: + name: encrypt-secret + namespace: demo + addon: + name: druid-addon + tasks: + - name: manifest-backup + - name: mysql-metadata-storage-backup +``` + +- `.spec.sessions[*].schedule` specifies that we want to backup at `5 minutes` interval. +- `.spec.target` refers to the targeted `sample-druid` Druid database that we created earlier. +- `.spec.sessions[*].addon.tasks[*].name[*]` specifies that both the `manifest-backup` and `mysql-metadata-storage-backup` tasks will be executed. + +> **Note**: +> - To create `BackupConfiguration` for druid with `PostgreSQL` as metadata storage update the `spec.sessions[*].addon.tasks.name` from `mysql-metadata-storage-backup` to `postgres-metadata-storage-restore` +> - When we backup a `Druid`, KubeStash operator will also take backup of the dependency of the `MySQL` and `ZooKeeper` cluster as well. +> - When we backup a `Druid` where `spec.metadatastorage.externallyManaged` is false then KubeStash operator will also take backup of + +Let's create the `BackupConfiguration` CR that we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/application-level/examples/backupconfiguration.yaml +backupconfiguration.core.kubestash.com/sample-druid-backup created +``` + +**Verify Backup Setup Successful** + +If everything goes well, the phase of the `BackupConfiguration` should be `Ready`. The `Ready` phase indicates that the backup setup is successful. Let's verify the `Phase` of the BackupConfiguration, + +```bash +$ kubectl get backupconfiguration -n demo +NAME PHASE PAUSED AGE +sample-druid-backup Ready 2m50s +``` + +Additionally, we can verify that the `Repository` specified in the `BackupConfiguration` has been created using the following command, + +```bash +$ kubectl get repo -n demo +NAME INTEGRITY SNAPSHOT-COUNT SIZE PHASE LAST-SUCCESSFUL-BACKUP AGE +gcs-druid-repo 0 0 B Ready 3m +``` + +KubeStash keeps the backup for `Repository` YAMLs. If we navigate to the GCS bucket, we will see the `Repository` YAML stored in the `demo/druid` directory. + +**Verify CronJob:** + +It will also create a `CronJob` with the schedule specified in `spec.sessions[*].scheduler.schedule` field of `BackupConfiguration` CR. 
+ +Verify that the `CronJob` has been created using the following command, + +```bash +$ kubectl get cronjob -n demo +NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE +trigger-sample-druid-backup-frequent-backup */5 * * * * 0 2m45s 3m25s +``` + +**Verify BackupSession:** + +KubeStash triggers an instant backup as soon as the `BackupConfiguration` is ready. After that, backups are scheduled according to the specified schedule. + +Run the following command to watch `BackupSession` CR, + +```bash +$ kubectl get backupsession -n demo -w + +NAME INVOKER-TYPE INVOKER-NAME PHASE DURATION AGE +sample-druid-backup-frequent-backup-1724065200 BackupConfiguration sample-druid-backup Succeeded 7m22s +``` + +We can see from the above output that the backup session has succeeded. Now, we are going to verify whether the backed up data has been stored in the backend. + +**Verify Backup:** + +Once a backup is complete, KubeStash will update the respective `Repository` CR to reflect the backup. Check that the repository `sample-druid-backup` has been updated by the following command, + +```bash +$ kubectl get repository -n demo gcs-druid-repo +NAME INTEGRITY SNAPSHOT-COUNT SIZE PHASE LAST-SUCCESSFUL-BACKUP AGE +gcs-druid-repo true 4 664.979 KiB Ready 2m55s 4h56m +``` + +At this moment we have one `Snapshot`. Run the following command to check the respective `Snapshot` which represents the state of a backup run for an application. + +```bash +$ kubectl get snapshots -n demo -l=kubestash.com/repo-name=gcs-druid-repo +NAME REPOSITORY SESSION SNAPSHOT-TIME DELETION-POLICY PHASE AGE +gcs-druid-repo-sample-druid-backup-frequent-backup-1726830540 gcs-druid-repo frequent-backup 2024-09-20T11:09:00Z Delete Succeeded 3m13s +``` + +> **Note**: KubeStash creates a `Snapshot` with the following labels: +> - `kubestash.com/app-ref-kind: ` +> - `kubestash.com/app-ref-name: ` +> - `kubestash.com/app-ref-namespace: ` +> - `kubestash.com/repo-name: ` +> +> These labels can be used to watch only the `Snapshot`s related to our target Database or `Repository`. + +If we check the YAML of the `Snapshot`, we can find the information about the backed up components of the Database. 
+ +```bash +$ kubectl get snapshots -n demo gcs-druid-repo-sample-druid-backup-frequent-backup-1725359100 -oyaml +``` + +```yaml +apiVersion: storage.kubestash.com/v1alpha1 +kind: Snapshot +metadata: + annotations: + kubedb.com/db-version: 30.0.1 + creationTimestamp: "2024-09-20T11:09:00Z" + finalizers: + - kubestash.com/cleanup + generation: 1 + labels: + kubestash.com/app-ref-kind: Druid + kubestash.com/app-ref-name: sample-druid + kubestash.com/app-ref-namespace: demo + kubestash.com/repo-name: gcs-druid-repo + name: gcs-druid-repo-sample-druid-backup-frequent-backup-1726830540 + namespace: demo + ownerReferences: + - apiVersion: storage.kubestash.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Repository + name: gcs-druid-repo + uid: d894aad3-ac0d-4c8f-b165-9f9f1085ef3a + resourceVersion: "1720138" + uid: 348fe907-9207-4a71-953c-6cafa80ba3f7 +spec: + appRef: + apiGroup: kubedb.com + kind: Druid + name: sample-druid + namespace: demo + backupSession: sample-druid-backup-frequent-backup-1726830540 + deletionPolicy: Delete + repository: gcs-druid-repo + session: frequent-backup + snapshotID: 01J87HXY4439P70MKGWS8RZM7E + type: FullBackup + version: v1 +status: + components: + dump: + driver: Restic + duration: 10.312603282s + integrity: true + path: repository/v1/frequent-backup/dump + phase: Succeeded + resticStats: + - hostPath: dumpfile.sql + id: 647a7123a66423a81fa21ac77128e46587ddae3e9c9426537a30ad1c9a8e1843 + size: 3.807 MiB + uploaded: 3.807 MiB + size: 652.853 KiB + manifest: + driver: Restic + duration: 10.457007184s + integrity: true + path: repository/v1/frequent-backup/manifest + phase: Succeeded + resticStats: + - hostPath: /kubestash-tmp/manifest + id: 069ad1c6dae59fd086aa9771289fc4dad6d076afbc11180e3b1cd8083cd01691 + size: 13.599 KiB + uploaded: 4.268 KiB + size: 12.127 KiB + conditions: + - lastTransitionTime: "2024-09-20T11:09:00Z" + message: Recent snapshot list updated successfully + reason: SuccessfullyUpdatedRecentSnapshotList + status: "True" + type: RecentSnapshotListUpdated + - lastTransitionTime: "2024-09-20T11:10:07Z" + message: Metadata uploaded to backend successfully + reason: SuccessfullyUploadedSnapshotMetadata + status: "True" + type: SnapshotMetadataUploaded + integrity: true + phase: Succeeded + size: 664.979 KiB + snapshotTime: "2024-09-20T11:09:00Z" + totalComponents: 2 +``` + +> KubeStash uses the `mysqldump`/`postgresdump` command to take backups of the metadata storage of the target Druid databases. Therefore, the component name for `logical backups` is set as `dump`. +> KubeStash set component name as `manifest` for the `manifest backup` of Druid databases. + +Now, if we navigate to the GCS bucket, we will see the backed up data stored in the `demo/druid/repository/v1/frequent-backup/dump` directory. KubeStash also keeps the backup for `Snapshot` YAMLs, which can be found in the `demo/dep/snapshots` directory. + +> **Note**: KubeStash stores all dumped data encrypted in the backup directory, meaning it remains unreadable until decrypted. + +## Restore + + +In this section, we are going to restore the entire database from the backup that we have taken in the previous section. For this tutorial, we will restore the database in a separate namespace called `dev`. +Now, create the namespace by running the following command: + +```bash +$ kubectl create ns dev +namespace/dev created +``` + +#### Create RestoreSession: + +We need to create a RestoreSession CR. 
+ +Below, is the contents of YAML file of the `RestoreSession` CR that we are going to create to restore the entire database. + +```yaml +apiVersion: core.kubestash.com/v1alpha1 +kind: RestoreSession +metadata: + name: restore-sample-druid + namespace: demo +spec: + manifestOptions: + druid: + restoreNamespace: dev + db: true + dataSource: + repository: gcs-druid-repo + snapshot: latest + encryptionSecret: + name: encrypt-secret + namespace: demo + addon: + name: druid-addon + tasks: + - name: mysql-metadata-storage-restore + - name: manifest-restore +``` + +Here, + +- `.spec.manifestOptions.druid.db` specifies whether to restore the DB manifest or not. +- `.spec.dataSource.repository` specifies the Repository object that holds the backed up data. +- `.spec.dataSource.snapshot` specifies to restore from latest `Snapshot`. +- `.spec.addon.tasks[*]` specifies that both the `manifest-restore` and `logical-backup-restore` tasks. + +> **Note**: +> - When we restore a `Druid` with `spec.metadataStorage.externallyManaged` set to `false` (which is `false` by default), then KubeStash operator will also restore the metadataStorage automatically. +> - Similarly, if `spec.zooKeeper.externallyManaged` is `false` (which is also `false` by default) then KubeStash operator will also restore the zookeeper instance automatically. +> - For externally managed metadata storage and zookeeper however, user needs to specify it in `spec.manifestOptions.mySQL`/`spec.manifestOptions.postgres`/`spec.manifestOptions.zooKeeper` to restore those. + +Let's create the RestoreSession CRD object we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/application-level/examples/restoresession.yaml +restoresession.core.kubestash.com/restore-sample-druid created +``` + +Once, you have created the `RestoreSession` object, KubeStash will create restore Job. Run the following command to watch the phase of the `RestoreSession` object, + +```bash +$ watch kubectl get restoresession -n demo +Every 2.0s: kubectl get restores... AppsCode-PC-03: Wed Aug 21 10:44:05 2024 + +NAME REPOSITORY FAILURE-POLICY PHASE DURATION AGE +sample-restore gcs-demo-repo Succeeded 3s 53s +``` +The `Succeeded` phase means that the restore process has been completed successfully. + +#### Verify Restored Druid Manifest: + +In this section, we will verify whether the desired `Druid` database manifest has been successfully applied to the cluster. + +```bash +$ kubectl get druids.kubedb.com -n dev +NAME VERSION STATUS AGE +restored-druid 30.0.1 Ready 39m +``` + +The output confirms that the `Druid` database has been successfully created with the same configuration as it had at the time of backup. + +Verify the dependencies have been restored: +```bash +$ kubectl get mysql,zk -n dev +NAME VERSION STATUS AGE +mysql.kubedb.com/restored-druid-mysql-metadata 9.1.0 Ready 2m52s + +NAME TYPE VERSION STATUS AGE +zookeeper.kubedb.com/restored-druid-zk kubedb.com/v1alpha2 3.7.2 Ready 2m42s +``` + +The output confirms that the `MySQL` and `ZooKeper` databases have been successfully created with the same configuration as it had at the time of backup. + +#### Verify Restored Data: + +In this section, we are going to verify whether the desired data has been restored successfully. We are going to connect to the database server and check whether the database and the table we created earlier in the original database are restored. 
+ +At first, check if the database has gone into `Ready` state by the following command, + +```bash +$ kubectl get druid -n dev restored-druid +NAME VERSION STATUS AGE +restored-druid 30.0.1 Ready 34m +``` + +Now, let's verify if our datasource `wikipedia` exists or not. For that, first find out the database `Sevices` by the following command, + +Now access the [web console](https://druid.apache.org/docs/latest/operations/web-console) of Druid database from any browser by port-forwarding the routers. Let’s port-forward the port `8888` to local machine: +```bash +$ kubectl get svc -n dev --selector="app.kubernetes.io/instance=restored-druid" +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +restored-druid-brokers ClusterIP 10.128.74.54 8082/TCP 10m +restored-druid-coordinators ClusterIP 10.128.30.124 8081/TCP 10m +restored-druid-pods ClusterIP None 8081/TCP,8090/TCP,8083/TCP,8091/TCP,8082/TCP,8888/TCP 10m +restored-druid-routers ClusterIP 10.128.228.193 8888/TCP 10m +``` +```bash +kubectl port-forward -n dev svc/restored-druid-routers 8888 +Forwarding from 127.0.0.1:8888 -> 8888 +Forwarding from [::1]:8888 -> 8888 +``` + +Then hit the `http://localhost:8888` from any browser, and you will be prompted to provide the credential of the druid database. By following the steps discussed below, you can get the credential generated by the KubeDB operator for your Druid database. +**Connection information:** +- Username: + + ```bash + $ kubectl get secret -n dev restored-druid-admin-cred -o jsonpath='{.data.username}' | base64 -d + admin + ``` + +- Password: + + ```bash + $ kubectl get secret -n dev restored-druid-admin-cred -o jsonpath='{.data.password}' | base64 -d + DqG5E63NtklAkxqC + ``` +After providing the credentials correctly, you should be able to access the web console like shown below. Now if you go to the `Datasources` section, you will see that our ingested datasource `wikipedia` exists in the list. +

+
+[Screenshot: the Datasources view listing the `wikipedia` datasource]
+
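+
+You can also verify this from the terminal by listing the datasources over Druid's SQL API (a sketch; it assumes the port-forward to `restored-druid-routers` above is still active and that segment loading has finished). The output should include the restored `wikipedia` datasource:
+
+```bash
+$ PASSWORD=$(kubectl get secret -n dev restored-druid-admin-cred -o jsonpath='{.data.password}' | base64 -d)
+$ curl -s -u "admin:$PASSWORD" \
+    -H 'Content-Type: application/json' \
+    -d '{"query": "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES"}' \
+    http://localhost:8888/druid/v2/sql
+```
+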

+ +So, from the above screenshot, we can see that the `wikipedia` datasource we have ingested earlier in the original database and now, it is restored successfully. + +## Cleanup + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete backupconfigurations.core.kubestash.com -n demo sample-druid-backup +kubectl delete backupstorage -n demo gcs-storage +kubectl delete secret -n demo gcs-secret +kubectl delete secret -n demo encrypt-secret +kubectl delete retentionpolicies.storage.kubestash.com -n demo demo-retention +kubectl delete restoresessions.core.kubestash.com -n demo restore-sample-druid +kubectl delete druid -n demo sample-druid +kubectl delete druid -n dev restored-druid +``` diff --git a/docs/guides/druid/backup/auto-backup/examples/sample-druid-2.yaml b/docs/guides/druid/backup/auto-backup/examples/sample-druid-2.yaml index a9808332e8..ceceb9d87c 100644 --- a/docs/guides/druid/backup/auto-backup/examples/sample-druid-2.yaml +++ b/docs/guides/druid/backup/auto-backup/examples/sample-druid-2.yaml @@ -15,8 +15,8 @@ spec: version: 30.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/backup/auto-backup/examples/sample-druid-2.yaml.bak b/docs/guides/druid/backup/auto-backup/examples/sample-druid-2.yaml.bak new file mode 100644 index 0000000000..a9808332e8 --- /dev/null +++ b/docs/guides/druid/backup/auto-backup/examples/sample-druid-2.yaml.bak @@ -0,0 +1,23 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: sample-druid-2 + namespace: demo + annotations: + blueprint.kubestash.com/name: druid-customize-backup-blueprint + blueprint.kubestash.com/namespace: demo + variables.kubestash.com/schedule: "*/10 * * * *" + variables.kubestash.com/repoName: customize-blueprint + variables.kubestash.com/namespace: demo + variables.kubestash.com/targetName: sample-druid-2 + variables.kubestash.com/targetedDatabases: druid +spec: + version: 30.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/guides/druid/backup/auto-backup/index.md b/docs/guides/druid/backup/auto-backup/index.md index 159e9feee8..27fe73251d 100644 --- a/docs/guides/druid/backup/auto-backup/index.md +++ b/docs/guides/druid/backup/auto-backup/index.md @@ -269,8 +269,8 @@ spec: version: 30.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 @@ -589,8 +589,8 @@ spec: version: 30.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/backup/auto-backup/index.md.bak b/docs/guides/druid/backup/auto-backup/index.md.bak new file mode 100644 index 0000000000..159e9feee8 --- /dev/null +++ b/docs/guides/druid/backup/auto-backup/index.md.bak @@ -0,0 +1,817 @@ +--- +title: Druid Auto-Backup | KubeStash +description: Backup Druid database using KubeStash +menu: + docs_{{ .version }}: + identifier: guides-druid-backup-auto-backup + name: Auto Backup + parent: guides-druid-backup + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +# Backup Druid using KubeStash Auto-Backup + +[KubeStash](https://kubestash.com) can automatically be configured to backup any `Druid` databases in your 
cluster. KubeStash enables cluster administrators to deploy backup `blueprints` ahead of time so database owners can easily backup any `Druid` database with a few annotations. + +In this tutorial, we are going to show how you can configure a backup blueprint for `Druid` databases in your cluster and backup them with a few annotations. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using `Minikube` or `Kind`. +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md) and make sure to include the flags `--set global.featureGates.Druid=true` to ensure **Druid CRD** and `--set global.featureGates.ZooKeeper=true` to ensure **ZooKeeper CRD** as Druid depends on ZooKeeper for external dependency with helm command. +- Install `KubeStash` in your cluster following the steps [here](https://kubestash.com/docs/latest/setup/install/kubestash). +- Install KubeStash `kubectl` plugin following the steps [here](https://kubestash.com/docs/latest/setup/install/kubectl-plugin/). +- If you are not familiar with how KubeStash backup and restore Druid databases, please check the following guide [here](/docs/guides/druid/backup/overview/index.md). + +You should be familiar with the following `KubeStash` concepts: + +- [BackupStorage](https://kubestash.com/docs/latest/concepts/crds/backupstorage/) +- [BackupConfiguration](https://kubestash.com/docs/latest/concepts/crds/backupconfiguration/) +- [BackupSession](https://kubestash.com/docs/latest/concepts/crds/backupsession/) +- [RestoreSession](https://kubestash.com/docs/latest/concepts/crds/restoresession/) +- [Addon](https://kubestash.com/docs/latest/concepts/crds/addon/) +- [Function](https://kubestash.com/docs/latest/concepts/crds/function/) +- [Task](https://kubestash.com/docs/latest/concepts/crds/addon/#task-specification) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +### Prepare Backend + +We are going to store our backed up data into a GCS bucket. We have to create a Secret with necessary credentials and a `BackupStorage` CR to use this backend. If you want to use a different backend, please read the respective backend configuration doc from [here](https://kubestash.com/docs/latest/guides/backends/overview/). + +**Create Secret:** + +Let's create a secret called `gcs-secret` with access credentials to our desired GCS bucket, + +```bash +$ echo -n '' > GOOGLE_PROJECT_ID +$ cat /path/to/downloaded-sa-key.json > GOOGLE_SERVICE_ACCOUNT_JSON_KEY +$ kubectl create secret generic -n demo gcs-secret \ + --from-file=./GOOGLE_PROJECT_ID \ + --from-file=./GOOGLE_SERVICE_ACCOUNT_JSON_KEY +secret/gcs-secret created +``` + +**Create BackupStorage:** + +Now, create a `BackupStorage` using this secret. 
Below is the YAML of `BackupStorage` CR we are going to create, + +```yaml +apiVersion: storage.kubestash.com/v1alpha1 +kind: BackupStorage +metadata: + name: gcs-storage + namespace: demo +spec: + storage: + provider: gcs + gcs: + bucket: kubestash-qa + prefix: blueprint + secretName: gcs-secret + usagePolicy: + allowedNamespaces: + from: All + default: true + deletionPolicy: Delete +``` + +Let's create the BackupStorage we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/auto-backup/examples/backupstorage.yaml +backupstorage.storage.kubestash.com/gcs-storage created +``` + +**Create RetentionPolicy:** + +Now, let's create a `RetentionPolicy` to specify how the old Snapshots should be cleaned up. + +Below is the YAML of the `RetentionPolicy` object that we are going to create, + +```yaml +apiVersion: storage.kubestash.com/v1alpha1 +kind: RetentionPolicy +metadata: + name: demo-retention + namespace: demo +spec: + default: true + failedSnapshots: + last: 2 + maxRetentionPeriod: 2mo + successfulSnapshots: + last: 5 + usagePolicy: + allowedNamespaces: + from: All +``` + +Let’s create the above `RetentionPolicy`, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/auto-backup/examples/retentionpolicy.yaml +retentionpolicy.storage.kubestash.com/demo-retention created +``` + +**Create Secret:** + +We also need to create a secret with a `Restic` password for backup data encryption. + +Let's create a secret called `encrypt-secret` with the Restic password, + +```bash +$ echo -n 'changeit' > RESTIC_PASSWORD +$ kubectl create secret generic -n demo encrypt-secret \ + --from-file=./RESTIC_PASSWORD +secret "encrypt-secret" created +``` + +## Auto-backup with default configurations + +In this section, we are going to backup a `Druid` database of `demo` namespace. We are going to use the default configurations which will be specified in the `Backup Blueprint` CR. + +**Prepare Backup Blueprint** + +A `BackupBlueprint` allows you to specify a template for the `Repository`,`Session` or `Variables` of `BackupConfiguration` in a Kubernetes native way. + +Now, we have to create a `BackupBlueprint` CR with a blueprint for `BackupConfiguration` object. + +```yaml +apiVersion: core.kubestash.com/v1alpha1 +kind: BackupBlueprint +metadata: + name: druid-default-backup-blueprint + namespace: demo +spec: + usagePolicy: + allowedNamespaces: + from: All + backupConfigurationTemplate: + deletionPolicy: OnDelete + backends: + - name: gcs-backend + storageRef: + namespace: demo + name: gcs-storage + retentionPolicy: + name: demo-retention + namespace: demo + sessions: + - name: frequent-backup + sessionHistoryLimit: 3 + scheduler: + schedule: "*/5 * * * *" + jobTemplate: + backoffLimit: 1 + repositories: + - name: default-blueprint + backend: gcs-backend + directory: /default-blueprint + encryptionSecret: + name: encrypt-secret + namespace: demo + addon: + name: druid-addon + tasks: + - name: mysql-metadata-storage-backup +``` + +Here, + +- `.spec.backupConfigurationTemplate.backends[*].storageRef` refers to our earlier created `gcs-storage` backupStorage. +- `.spec.backupConfigurationTemplate.sessions[*].schedule` specifies that we want to backup the database at `5 minutes` interval. 
+ +Let's create the `BackupBlueprint` we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/auto-backup/examples/default-backupblueprint.yaml +backupblueprint.core.kubestash.com/druid-default-backup-blueprint created +``` + +Now, we are ready to backup our `Druid` databases using a few annotations. + +## Deploy Sample Druid Database + + +**Create External Dependency (Deep Storage):** + +One of the external dependency of Druid is deep storage where the segments are stored. It is a storage mechanism that Apache Druid does not provide. **Amazon S3**, **Google Cloud Storage**, or **Azure Blob Storage**, **S3-compatible storage** (like **Minio**), or **HDFS** are generally convenient options for deep storage. + +In this tutorial, we will run a `minio-server` as deep storage in our local `kind` cluster using `minio-operator` and create a bucket named `druid` in it, which the deployed druid database will use. + +```bash + +$ helm repo add minio https://operator.min.io/ +$ helm repo update minio +$ helm upgrade --install --namespace "minio-operator" --create-namespace "minio-operator" minio/operator --set operator.replicaCount=1 + +$ helm upgrade --install --namespace "demo" --create-namespace druid-minio minio/tenant \ +--set tenant.pools[0].servers=1 \ +--set tenant.pools[0].volumesPerServer=1 \ +--set tenant.pools[0].size=1Gi \ +--set tenant.certificate.requestAutoCert=false \ +--set tenant.buckets[0].name="druid" \ +--set tenant.pools[0].name="default" + +``` + +Now we need to create a `Secret` named `deep-storage-config`. It contains the necessary connection information using which the druid database will connect to the deep storage. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: deep-storage-config + namespace: demo +stringData: + druid.storage.type: "s3" + druid.storage.bucket: "druid" + druid.storage.baseKey: "druid/segments" + druid.s3.accessKey: "minio" + druid.s3.secretKey: "minio123" + druid.s3.protocol: "http" + druid.s3.enablePathStyleAccess: "true" + druid.s3.endpoint.signingRegion: "us-east-1" + druid.s3.endpoint.url: "http://myminio-hl.demo.svc.cluster.local:9000/" +``` + +Let’s create the `deep-storage-config` Secret shown above: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/auto-backup/examples/deep-storage-config.yaml +secret/deep-storage-config created +``` + +Let's deploy a sample `Druid` database and insert some data into it. + +**Create Druid CR:** + +Below is the YAML of a sample `Druid` CRD that we are going to create for this tutorial: + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: sample-druid + namespace: demo + annotations: + blueprint.kubestash.com/name: druid-default-backup-blueprint + blueprint.kubestash.com/namespace: demo +spec: + version: 30.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: WipeOut +``` +Here, + +- `.spec.annotations.blueprint.kubestash.com/name: druid-default-backup-blueprint` specifies the name of the `BackupBlueprint` that will use in backup. +- `.spec.annotations.blueprint.kubestash.com/namespace: demo` specifies the name of the `namespace` where the `BackupBlueprint` resides. 
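+
+If the `Druid` object already exists in your cluster, you can usually attach the blueprint without re-applying its full YAML by adding the same annotations in place (a sketch; substitute your own object name):
+
+```bash
+$ kubectl annotate druid sample-druid -n demo \
+    blueprint.kubestash.com/name=druid-default-backup-blueprint \
+    blueprint.kubestash.com/namespace=demo
+```
+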
+ +Create the above `Druid` CR, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/auto-backup/examples/sample-druid.yaml +druid.kubedb.com/sample-druid created +``` + +**Verify BackupConfiguration** + +If everything goes well, KubeStash should create a `BackupConfiguration` for our Druid in demo namespace and the phase of that `BackupConfiguration` should be `Ready`. Verify the `BackupConfiguration` object by the following command, + +```bash +$ kubectl get backupconfiguration -n demo +NAME PHASE PAUSED AGE +appbinding-sample-druid Ready 8m48s +``` + +Now, let’s check the YAML of the `BackupConfiguration`. + +```bash +$ kubectl get backupconfiguration -n demo appbinding-sample-druid -o yaml +``` + +```yaml +apiVersion: core.kubestash.com/v1alpha1 +kind: BackupConfiguration +metadata: + creationTimestamp: "2024-09-19T10:30:46Z" + finalizers: + - kubestash.com/cleanup + generation: 1 + labels: + app.kubernetes.io/managed-by: kubestash.com + kubestash.com/invoker-name: druid-default-backup-blueprint + kubestash.com/invoker-namespace: demo + name: appbinding-sample-druid + namespace: demo + resourceVersion: "1594861" + uid: 8c5a21cd-780b-4b67-b95a-d6338d038dd4 +spec: + backends: + - name: gcs-backend + retentionPolicy: + name: demo-retention + namespace: demo + storageRef: + name: gcs-storage + namespace: demo + sessions: + - addon: + name: druid-addon + tasks: + - name: mysql-metadata-storage-backup + name: frequent-backup + repositories: + - backend: gcs-backend + directory: /default-blueprint + encryptionSecret: + name: encrypt-secret + namespace: demo + name: default-blueprint + scheduler: + jobTemplate: + backoffLimit: 1 + template: + controller: {} + metadata: {} + spec: + resources: {} + schedule: '*/5 * * * *' + sessionHistoryLimit: 3 + target: + apiGroup: kubedb.com + kind: Druid + name: sample-druid + namespace: demo +status: + backends: + - name: gcs-backend + ready: true + retentionPolicy: + found: true + ref: + name: demo-retention + namespace: demo + storage: + phase: Ready + ref: + name: gcs-storage + namespace: demo +``` + +Notice the `spec.backends`, `spec.sessions` and `spec.target` sections, KubeStash automatically resolved those info from the `BackupBluePrint` and created above `BackupConfiguration`. + +**Verify BackupSession:** + +KubeStash triggers an instant backup as soon as the `BackupConfiguration` is ready. After that, backups are scheduled according to the specified schedule. + +```bash +$ kubectl get backupsession -n demo -w + +NAME INVOKER-TYPE INVOKER-NAME PHASE DURATION AGE +appbinding-sample-druid-frequent-backup-1726741846 BackupConfiguration appbinding-sample-druid Succeeded 28s 10m +appbinding-sample-druid-frequent-backup-1726742101 BackupConfiguration appbinding-sample-druid Succeeded 35s 6m37s +appbinding-sample-druid-frequent-backup-1726742400 BackupConfiguration appbinding-sample-druid Succeeded 29s 98s +``` + +We can see from the above output that the backup session has succeeded. Now, we are going to verify whether the backed up data has been stored in the backend. + +**Verify Backup:** + +Once a backup is complete, KubeStash will update the respective `Repository` CR to reflect the backup. 
Check that the repository `default-blueprint` has been updated by the following command, + +```bash +$ kubectl get repository -n demo default-blueprint +NAME INTEGRITY SNAPSHOT-COUNT SIZE PHASE LAST-SUCCESSFUL-BACKUP AGE +default-blueprint true 3 1.757 MiB Ready 2m23s 11m +``` + +At this moment we have one `Snapshot`. Run the following command to check the respective `Snapshot` which represents the state of a backup run for an application. + +```bash +$ kubectl get snapshots -n demo -l=kubestash.com/repo-name=default-blueprint +NAME REPOSITORY SESSION SNAPSHOT-TIME DELETION-POLICY PHASE AGE +default-blueprint-appbinding-samruid-frequent-backup-1726741846 default-blueprint frequent-backup 2024-09-19T10:30:56Z Delete Succeeded 11m +default-blueprint-appbinding-samruid-frequent-backup-1726742101 default-blueprint frequent-backup 2024-09-19T10:35:01Z Delete Succeeded 7m49s +default-blueprint-appbinding-samruid-frequent-backup-1726742400 default-blueprint frequent-backup 2024-09-19T10:40:00Z Delete Succeeded 2m50s +``` + +> **Note**: KubeStash creates a `Snapshot` with the following labels: +> - `kubestash.com/app-ref-kind: ` +> - `kubestash.com/app-ref-name: ` +> - `kubestash.com/app-ref-namespace: ` +> - `kubestash.com/repo-name: ` +> +> These labels can be used to watch only the `Snapshot`s related to our target Database or `Repository`. + +If we check the YAML of the `Snapshot`, we can find the information about the backed up components of the Database. + +```bash +$ kubectl get snapshots -n demo default-blueprint-appbinding-samruid-frequent-backup-1726741846 -oyaml +``` + +```yaml +apiVersion: storage.kubestash.com/v1alpha1 +kind: Snapshot +metadata: + creationTimestamp: "2024-09-19T10:30:56Z" + finalizers: + - kubestash.com/cleanup + generation: 1 + labels: + kubestash.com/app-ref-kind: Druid + kubestash.com/app-ref-name: sample-druid + kubestash.com/app-ref-namespace: demo + kubestash.com/repo-name: default-blueprint + annotations: + kubedb.com/db-version: 30.0.1 + name: default-blueprint-appbinding-samruid-frequent-backup-1726741846 + namespace: demo + ownerReferences: + - apiVersion: storage.kubestash.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Repository + name: default-blueprint + uid: 7ced6866-349b-48c0-821d-d1ecfee1c80e + resourceVersion: "1594964" + uid: 8ec9bb0c-590c-47b8-944b-22af92d62470 +spec: + appRef: + apiGroup: kubedb.com + kind: Druid + name: sample-druid + namespace: demo + backupSession: appbinding-sample-druid-frequent-backup-1726741846 + deletionPolicy: Delete + repository: default-blueprint + session: frequent-backup + snapshotID: 01J84XBGGY0JKG7JKTRCGV3HYM + type: FullBackup + version: v1 +status: + components: + dump: + driver: Restic + duration: 9.614587405s + integrity: true + path: repository/v1/frequent-backup/dump + phase: Succeeded + resticStats: + - hostPath: dumpfile.sql + id: 8f2b5f5d8a7a18304917e2d4c5a3636f8927085b15c652c35d5fca4a9988515d + size: 3.750 MiB + uploaded: 3.751 MiB + size: 674.017 KiB +``` + +> KubeStash uses the `mysqldump`/`postgresdump` command to take backups of metadata storage of target Druid databases. Therefore, the component name for `logical backups` is set as `dump`. + +Now, if we navigate to the GCS bucket, we will see the backed up data stored in the `/blueprint/default-blueprint/repository/v1/frequent-backup/dump` directory. KubeStash also keeps the backup for `Snapshot` YAMLs, which can be found in the `blueprint/default-blueprintrepository/snapshots` directory. 
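+
+If you have the `gsutil` CLI configured for the same Google Cloud project, you can list the uploaded objects directly instead of browsing the console (a sketch; the bucket and prefix come from the `BackupStorage` above):
+
+```bash
+$ gsutil ls -r gs://kubestash-qa/blueprint/default-blueprint/
+```
+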
+ +> **Note**: KubeStash stores all dumped data encrypted in the backup directory, meaning it remains unreadable until decrypted. + +## Auto-backup with custom configurations + +In this section, we are going to backup a `Druid` database of `demo` namespace. We are going to use the custom configurations which will be specified in the `BackupBlueprint` CR. + +**Prepare Backup Blueprint** + +A `BackupBlueprint` allows you to specify a template for the `Repository`,`Session` or `Variables` of `BackupConfiguration` in a Kubernetes native way. + +Now, we have to create a `BackupBlueprint` CR with a blueprint for `BackupConfiguration` object. + +```yaml +apiVersion: core.kubestash.com/v1alpha1 +kind: BackupBlueprint +metadata: + name: druid-customize-backup-blueprint + namespace: demo +spec: + usagePolicy: + allowedNamespaces: + from: All + backupConfigurationTemplate: + deletionPolicy: OnDelete + # ============== Blueprint for Backends of BackupConfiguration ================= + backends: + - name: gcs-backend + storageRef: + namespace: demo + name: gcs-storage + retentionPolicy: + name: demo-retention + namespace: demo + # ============== Blueprint for Sessions of BackupConfiguration ================= + sessions: + - name: frequent-backup + sessionHistoryLimit: 3 + scheduler: + schedule: ${schedule} + jobTemplate: + backoffLimit: 1 + repositories: + - name: ${repoName} + backend: gcs-backend + directory: ${namespace}/${targetName} + encryptionSecret: + name: encrypt-secret + namespace: demo + addon: + name: druid-addon + tasks: + - name: mysql-metadata-storage-backup + params: + databases: ${targetedDatabases} +``` + +Note that we have used some variables (format: `${}`) in different fields. KubeStash will substitute these variables with values from the respective target’s annotations. You’re free to use any variables you like. + +Here, + +- `.spec.backupConfigurationTemplate.backends[*].storageRef` refers our earlier created `gcs-storage` backupStorage. +- `.spec.backupConfigurationTemplate.sessions[*]`: + - `.schedule` defines `${schedule}` variable, which determines the time interval for the backup. + - `.repositories[*].name` defines the `${repoName}` variable, which specifies the name of the backup `Repository`. + - `.repositories[*].directory` defines two variables, `${namespace}` and `${targetName}`, which are used to determine the path where the backup will be stored. + - `.addon.tasks[*]databases` defines `${targetedDatabases}` variable, which identifies list of databases to backup. + +> **Note**: To create `BackupBlueprint` for druid with `PostgreSQL` as metadata storage just update `spec.sessions[*].addon.tasks.name` to `postgres-metadata-storage-restore` + +Let's create the `BackupBlueprint` we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/auto-backup/examples/customize-backupblueprint.yaml +backupblueprint.core.kubestash.com/druid-customize-backup-blueprint created +``` + +Now, we are ready to backup our `Druid` databases using few annotations. You can check available auto-backup annotations for a databases from [here](https://kubestash.com/docs/latest/concepts/crds/backupblueprint/). 
+ +**Create Database** + +Before proceeding to creating a new `Druid` database, let us clean up the resources of the previous step: +```bash +kubectl delete backupblueprints.core.kubestash.com -n demo druid-default-backup-blueprint +kubectl delete druid -n demo sample-druid +``` + +Now, we are going to create a new `Druid` CR in demo namespace. Below is the YAML of the Druid object that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: sample-druid-2 + namespace: demo + annotations: + blueprint.kubestash.com/name: druid-customize-backup-blueprint + blueprint.kubestash.com/namespace: demo + variables.kubestash.com/schedule: "*/10 * * * *" + variables.kubestash.com/repoName: customize-blueprint + variables.kubestash.com/namespace: demo + variables.kubestash.com/targetName: sample-druid-2 + variables.kubestash.com/targetedDatabases: druid +spec: + version: 30.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: WipeOut +``` + +Notice the `metadata.annotations` field, where we have defined the annotations related to the automatic backup configuration. Specifically, we've set the `BackupBlueprint` name as `druid-customize-backup-blueprint` and the namespace as `demo`. We have also provided values for the blueprint template variables, such as the backup `schedule`, `repositoryName`, `namespace`, `targetName`, and `targetedDatabases`. These annotations will be used to create a `BackupConfiguration` for this `Druid` database. + +Let's create the `Druid` we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/auto-backup/examples/sample-druid-2.yaml +druid.kubedb.com/sample-druid-2 created +``` + +**Verify BackupConfiguration** + +If everything goes well, KubeStash should create a `BackupConfiguration` for our Druid in demo namespace and the phase of that `BackupConfiguration` should be `Ready`. Verify the `BackupConfiguration` object by the following command, + +```bash +$ kubectl get backupconfiguration -n demo +NAME PHASE PAUSED AGE +appbinding-sample-druid-2 Ready 2m50m +``` + +Now, let’s check the YAML of the `BackupConfiguration`. 
+ +```bash +$ kubectl get backupconfiguration -n demo appbinding-sample-druid-2 -o yaml +``` + +```yaml +apiVersion: core.kubestash.com/v1alpha1 +kind: BackupConfiguration +metadata: + creationTimestamp: "2024-09-19T11:00:56Z" + finalizers: + - kubestash.com/cleanup + generation: 1 + labels: + app.kubernetes.io/managed-by: kubestash.com + kubestash.com/invoker-name: druid-customize-backup-blueprint + kubestash.com/invoker-namespace: demo + name: appbinding-sample-druid-2 + namespace: demo + resourceVersion: "1599083" + uid: 1c979902-33cd-4212-ae6d-ea4e4198bcaf +spec: + backends: + - name: gcs-backend + retentionPolicy: + name: demo-retention + namespace: demo + storageRef: + name: gcs-storage + namespace: demo + sessions: + - addon: + name: druid-addon + tasks: + - name: mysql-metadata-storage-backup + params: + databases: druid + name: frequent-backup + repositories: + - backend: gcs-backend + directory: demo/sample-druid-2 + encryptionSecret: + name: encrypt-secret + namespace: demo + name: customize-blueprint + scheduler: + jobTemplate: + backoffLimit: 1 + template: + controller: {} + metadata: {} + spec: + resources: {} + schedule: '*/10 * * * *' + sessionHistoryLimit: 3 + target: + apiGroup: kubedb.com + kind: Druid + name: sample-druid-2 + namespace: demo +status: + backends: + - name: gcs-backend + ready: true + retentionPolicy: + found: true + ref: + name: demo-retention + namespace: demo + storage: + phase: Ready + ref: + name: gcs-storage + namespace: demo +``` + +Notice the `spec.backends`, `spec.sessions` and `spec.target` sections, KubeStash automatically resolved those info from the `BackupBluePrint` and created above `BackupConfiguration`. + +**Verify BackupSession:** + +KubeStash triggers an instant backup as soon as the `BackupConfiguration` is ready. After that, backups are scheduled according to the specified schedule. + +```bash +$ kubectl get backupsession -n demo -w + +NAME INVOKER-TYPE INVOKER-NAME PHASE DURATION AGE +appbinding-sample-druid-2-frequent-backup-1726743656 BackupConfiguration appbinding-sample-druid-2 Succeeded 30s 2m32s +``` + +We can see from the above output that the backup session has succeeded. Now, we are going to verify whether the backed up data has been stored in the backend. + +**Verify Backup:** + +Once a backup is complete, KubeStash will update the respective `Repository` CR to reflect the backup. Check that the repository `customize-blueprint` has been updated by the following command, + +```bash +$ kubectl get repository -n demo customize-blueprint +NAME INTEGRITY SNAPSHOT-COUNT SIZE PHASE LAST-SUCCESSFUL-BACKUP AGE +customize-blueprint true 1 806 B Ready 8m27s 9m18s +``` + +At this moment we have one `Snapshot`. Run the following command to check the respective `Snapshot` which represents the state of a backup run for an application. + +```bash +$ kubectl get snapshots -n demo -l=kubestash.com/repo-name=customize-blueprint +NAME REPOSITORY SESSION SNAPSHOT-TIME DELETION-POLICY PHASE AGE +customize-blueprint-appbinding-sid-2-frequent-backup-1726743656 customize-blueprint frequent-backup 2024-09-19T11:01:06Z Delete Succeeded 2m56s +``` + +> **Note**: KubeStash creates a `Snapshot` with the following labels: +> - `kubestash.com/app-ref-kind: ` +> - `kubestash.com/app-ref-name: ` +> - `kubestash.com/app-ref-namespace: ` +> - `kubestash.com/repo-name: ` +> +> These labels can be used to watch only the `Snapshot`s related to our target Database or `Repository`. 
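+
+For example, you could list only the `Snapshot`s belonging to this particular database by combining those labels (a sketch):
+
+```bash
+$ kubectl get snapshots -n demo \
+    -l kubestash.com/app-ref-name=sample-druid-2,kubestash.com/app-ref-namespace=demo
+```
+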
+ +If we check the YAML of the `Snapshot`, we can find the information about the backed up components of the Database. + +```bash +$ kubectl get snapshots -n demo customize-blueprint-appbinding-sid-2-frequent-backup-1726743656 -oyaml +``` + +```yaml +apiVersion: storage.kubestash.com/v1alpha1 +kind: Snapshot +metadata: + creationTimestamp: "2024-09-19T11:01:06Z" + finalizers: + - kubestash.com/cleanup + generation: 1 + labels: + kubestash.com/app-ref-kind: Druid + kubestash.com/app-ref-name: sample-druid-2 + kubestash.com/app-ref-namespace: demo + kubestash.com/repo-name: customize-blueprint + annotations: + kubedb.com/db-version: 30.0.1 + name: customize-blueprint-appbinding-sid-2-frequent-backup-1726743656 + namespace: demo + ownerReferences: + - apiVersion: storage.kubestash.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Repository + name: customize-blueprint + uid: 5eaccae6-046c-4c6a-9b76-087d040f001a + resourceVersion: "1599190" + uid: 014c050d-0e91-43eb-b60a-36eefbd4b048 +spec: + appRef: + apiGroup: kubedb.com + kind: Druid + name: sample-druid-2 + namespace: demo + backupSession: appbinding-sample-druid-2-frequent-backup-1726743656 + deletionPolicy: Delete + repository: customize-blueprint + session: frequent-backup + snapshotID: 01J84Z2R6R64FH8E7QYNNZGC1S + type: FullBackup + version: v1 +status: + components: + dump: + driver: Restic + duration: 9.132887467s + integrity: true + path: repository/v1/frequent-backup/dump + phase: Succeeded + resticStats: + - hostPath: dumpfile.sql + id: a1061e74f1ad398a9fe85bcbae34f540f2437a97061fd26c5b3e6bde3b5b7642 + size: 10.859 KiB + uploaded: 11.152 KiB + size: 2.127 KiB +``` + +> KubeStash uses the `mysqldump`/`postgresdump` command to take backups of the metadata storage of the target Druid databases. Therefore, the component name for `logical backups` is set as `dump`. + +Now, if we navigate to the GCS bucket, we will see the backed up data stored in the `/blueprint/custom-blueprint/repository/v1/frequent-backup/dump` directory. KubeStash also keeps the backup for `Snapshot` YAMLs, which can be found in the `blueprint/custom-blueprint/snapshots` directory. + +> **Note**: KubeStash stores all dumped data encrypted in the backup directory, meaning it remains unreadable until decrypted. 
+ +## Cleanup + +To cleanup the resources crated by this tutorial, run the following commands, + +```bash +kubectl delete backupblueprints.core.kubestash.com -n demo druid-default-backup-blueprint +kubectl delete backupblueprints.core.kubestash.com -n demo druid-customize-backup-blueprint +kubectl delete backupstorage -n demo gcs-storage +kubectl delete secret -n demo gcs-secret +kubectl delete secret -n demo encrypt-secret +kubectl delete retentionpolicies.storage.kubestash.com -n demo demo-retention +kubectl delete druid -n demo sample-druid +kubectl delete druid -n demo sample-druid-2 +``` diff --git a/docs/guides/druid/backup/cross-ns-dependencies/examples/sample-druid.yaml b/docs/guides/druid/backup/cross-ns-dependencies/examples/sample-druid.yaml index c21629db96..2297fb6e9d 100644 --- a/docs/guides/druid/backup/cross-ns-dependencies/examples/sample-druid.yaml +++ b/docs/guides/druid/backup/cross-ns-dependencies/examples/sample-druid.yaml @@ -13,8 +13,8 @@ spec: namespace: dev1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/backup/cross-ns-dependencies/examples/sample-druid.yaml.bak b/docs/guides/druid/backup/cross-ns-dependencies/examples/sample-druid.yaml.bak new file mode 100644 index 0000000000..c21629db96 --- /dev/null +++ b/docs/guides/druid/backup/cross-ns-dependencies/examples/sample-druid.yaml.bak @@ -0,0 +1,21 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: sample-druid + namespace: demo +spec: + version: 30.0.1 + zookeeperRef: + name: zk-dev + namespace: dev + metadataStorage: + name: my-dev1 + namespace: dev1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/guides/druid/backup/cross-ns-dependencies/index.md b/docs/guides/druid/backup/cross-ns-dependencies/index.md index acae14fffa..7071cb6e73 100644 --- a/docs/guides/druid/backup/cross-ns-dependencies/index.md +++ b/docs/guides/druid/backup/cross-ns-dependencies/index.md @@ -130,8 +130,8 @@ spec: namespace: dev1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/backup/cross-ns-dependencies/index.md.bak b/docs/guides/druid/backup/cross-ns-dependencies/index.md.bak new file mode 100644 index 0000000000..acae14fffa --- /dev/null +++ b/docs/guides/druid/backup/cross-ns-dependencies/index.md.bak @@ -0,0 +1,891 @@ +--- +title: Cross-Namespace Dependencies Backup & Restore Druid | KubeStash +description: Druid Backup and Restore for Cross Namespace Dependencies using KubeStash +menu: + docs_{{ .version }}: + identifier: guides-druid-backup-cross-ns-dependencies + name: Cross-Namespace Dependencies Backup + parent: guides-druid-backup + weight: 60 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +# Druid Backup and Restore for Cross Namespace Dependencies using KubeStash + +[KubeStash](https://kubestash.com) offers backup and restore functionality for `Druid` with dependencies in different namespaces. + +This guide will give you how you can take [Application Level Backup](https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/application-level/) and restore your `Druid` databases, with dependencies in different namespaces in using `Kubestash`. 
In application level backup both manifest and logical data backups of any `Druid` database are captured in a single snapshot. During the restore process, KubeStash first applies the `Druid` manifest to the cluster and then restores the data into it. + +> **Note:** You can also apply the same updates in your `BackupConfiguration` and `RestoreSession` resources for other types of backup such as [Logical Backup](https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/logical/) or [Auto Backup](https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/auto-backup/) + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using `Minikube` or `Kind`. +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md) and make sure to include the flags `--set global.featureGates.Druid=true` to ensure **Druid CRD** and `--set global.featureGates.ZooKeeper=true` to ensure **ZooKeeper CRD** as Druid depends on ZooKeeper for external dependency with helm command. +- Install `KubeStash` in your cluster following the steps [here](https://kubestash.com/docs/latest/setup/install/kubestash). +- Install KubeStash `kubectl` plugin following the steps [here](https://kubestash.com/docs/latest/setup/install/kubectl-plugin/). +- If you are not familiar with how KubeStash backup and restore Druid databases, please check the following guide [here](/docs/guides/druid/backup/overview/index.md). + +You should be familiar with the following `KubeStash` concepts: + +- [BackupStorage](https://kubestash.com/docs/latest/concepts/crds/backupstorage/) +- [BackupConfiguration](https://kubestash.com/docs/latest/concepts/crds/backupconfiguration/) +- [BackupSession](https://kubestash.com/docs/latest/concepts/crds/backupsession/) +- [RestoreSession](https://kubestash.com/docs/latest/concepts/crds/restoresession/) +- [Addon](https://kubestash.com/docs/latest/concepts/crds/addon/) +- [Function](https://kubestash.com/docs/latest/concepts/crds/function/) +- [Task](https://kubestash.com/docs/latest/concepts/crds/addon/#task-specification) + +To keep everything isolated, we are going to use a separate namespace called `demo`, `dev` and `dev1` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +$ kubectl create ns dev +namespace/dev created +$ kubectl create ns dev1 +namespace/dev1 created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/guides/druid/backup/application-level/examples](https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/cross-ns-dependencies/examples) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Backup Druid + +KubeStash supports backups for `Druid` instances for various Cluster setups. In this demonstration, we'll focus on a `Druid` database with 5 type of nodes (coordinators, historicals, brokers, middlemanagers and routers). The backup and restore process is similar for other Cluster setup as well. + +This section will demonstrate how to take application-level backup of a `Druid` database. Here, we are going to deploy a `Druid` database using KubeDB. Then, we are going to back up the database at the application level to a `GCS` bucket. Finally, we will restore the entire `Druid` database. 
+ +## Deploy Sample Druid Database + + +**Create External Dependency (Deep Storage):** + +One of the external dependency of Druid is deep storage where the segments are stored. It is a storage mechanism that Apache Druid does not provide. **Amazon S3**, **Google Cloud Storage**, or **Azure Blob Storage**, **S3-compatible storage** (like **Minio**), or **HDFS** are generally convenient options for deep storage. + +In this tutorial, we will run a `minio-server` as deep storage in our local `kind` cluster using `minio-operator` and create a bucket named `druid` in it, which the deployed druid database will use. + +```bash + +$ helm repo add minio https://operator.min.io/ +$ helm repo update minio +$ helm upgrade --install --namespace "minio-operator" --create-namespace "minio-operator" minio/operator --set operator.replicaCount=1 + +$ helm upgrade --install --namespace "demo" --create-namespace druid-minio minio/tenant \ +--set tenant.pools[0].servers=1 \ +--set tenant.pools[0].volumesPerServer=1 \ +--set tenant.pools[0].size=1Gi \ +--set tenant.certificate.requestAutoCert=false \ +--set tenant.buckets[0].name="druid" \ +--set tenant.pools[0].name="default" + +``` + +Now we need to create a `Secret` named `deep-storage-config`. It contains the necessary connection information using which the druid database will connect to the deep storage. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: deep-storage-config + namespace: demo +stringData: + druid.storage.type: "s3" + druid.storage.bucket: "druid" + druid.storage.baseKey: "druid/segments" + druid.s3.accessKey: "minio" + druid.s3.secretKey: "minio123" + druid.s3.protocol: "http" + druid.s3.enablePathStyleAccess: "true" + druid.s3.endpoint.signingRegion: "us-east-1" + druid.s3.endpoint.url: "http://myminio-hl.demo.svc.cluster.local:9000/" +``` + +Let’s create the `deep-storage-config` Secret shown above: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/application-level/examples/deep-storage-config.yaml +secret/deep-storage-config created +``` + +Let's deploy a sample `Druid` database and insert some data into it. + +**Create Druid CR:** + +Below is the YAML of a sample `Druid` CR that we are going to create for this tutorial: + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: sample-druid + namespace: demo +spec: + version: 30.0.1 + zookeeperRef: + name: zk-dev + namespace: dev + metadataStorage: + name: my-dev1 + namespace: dev1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: WipeOut +``` + +Here, +- `.spec.topology` specifies about the clustering configuration of Druid. +- `.spec.topology.routers` specifies that 1 replica of routers node will get provisioned alongside the essential nodes. +- `.spec.metadataStoage` specifies the `name` and `namespace` of the `MySQL` that the `KubeDB` operator will deploy as metadata storage alongside `Druid`. +- `.spec.zookeeperRef` specifies the `name` and `namespace` of the `ZooKeeper` that the `KubeDB` operator will deploy alongside `Druid`. + +Create the above `Druid` CR, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/application-level/examples/sample-druid.yaml +druid.kubedb.com/sample-druid created +``` + +KubeDB will deploy a Druid database according to the above specification. 
It will also create the necessary Secrets and Services to access the database. + +Let's check if the database is ready to use, + +```bash +$ kubectl get druids.kubedb.com -n demo +NAME TYPE VERSION STATUS AGE +sample-druid kubedb.com/v1alpha2 30.0.1 Ready 4m14s +``` + +The database is `Ready`. Verify that KubeDB has created a `Secret` and a `Service` for this database using the following commands, + +```bash +$ kubectl get secret -n demo -l=app.kubernetes.io/instance=sample-druid +NAME TYPE DATA AGE +sample-druid-admin-cred kubernetes.io/basic-auth 2 2m34s +sample-druid-config Opaque 11 2m34s + +$ kubectl get service -n demo -l=app.kubernetes.io/instance=sample-druid +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +sample-druid-brokers ClusterIP 10.128.135.115 8082/TCP 2m53s +sample-druid-coordinators ClusterIP 10.128.16.222 8081/TCP 2m53s +sample-druid-pods ClusterIP None 8081/TCP,8090/TCP,8083/TCP,8091/TCP,8082/TCP,8888/TCP 2m53s +sample-druid-routers ClusterIP 10.128.191.186 8888/TCP 2m53s +``` + +Here, we have to use service `sample-druid-routers` and secret `sample-druid-admin-cred` to connect with the database. `KubeDB` creates an [AppBinding](/docs/guides/druid/concepts/appbinding.md) CR that holds the necessary information to connect with the database. + +**Verify Internal Dependencies:** + +```bash +$ kubectl get mysql -n dev1 +NAME VERSION STATUS AGE +mysql.kubedb.com/my-dev1 9.1.0 Ready 6m31s + +$ kubectl get zk -n dev +NAME TYPE VERSION STATUS AGE +zookeeper.kubedb.com/zk-dev kubedb.com/v1alpha2 3.7.2 Ready 6m31s +``` +We can see that KubeDB has deployed a `MySQL` and a `ZooKeeper` instance as [External dependencies](https://druid.apache.org/docs/latest/design/architecture/#external-dependencies) of the `Druid` cluster. + +**Verify AppBinding:** + +Verify that the `AppBinding` has been created successfully using the following command, + +```bash +$ kubectl get appbindings -n demo +NAME TYPE VERSION AGE +sample-druid kubedb.com/druid 30.0.1 4m7s + +$ kubectl get appbindings -n dev1 +NAME TYPE VERSION AGE +my-dev1 kubedb.com/mysql 9.1.0 6m31s + +$ kubectl get appbindings -n dev +NAME TYPE VERSION AGE +zk-dev kubedb.com/zookeeper 3.7.2 6m34s +``` + +Here `sample-druid` is the `AppBinding` of Druid, while `my-dev1` and `zk-dev` are the `AppBinding` of `MySQL` and `ZooKeeper` instances that `KubeDB` has deployed as the [External dependencies](https://druid.apache.org/docs/latest/design/architecture/#external-dependencies) of `Druid` + +Let's check the YAML of the `AppBinding` of druid, + +```bash +$ kubectl get appbindings -n demo sample-druid -o yaml +``` + +```yaml +apiVersion: appcatalog.appscode.com/v1alpha1 +kind: AppBinding +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"kubedb.com/v1alpha2","kind":"Druid","metadata":{"annotations":{},"name":"sample-druid","namespace":"demo"},"spec":{"deepStorage":{"configSecret":{"name":"deep-storage-config"},"type":"s3"},"deletionPolicy":"WipeOut","topology":{"routers":{"replicas":1}},"version":"30.0.1"}} + creationTimestamp: "2024-09-19T13:02:20Z" + generation: 1 + labels: + app.kubernetes.io/component: database + app.kubernetes.io/instance: sample-druid + app.kubernetes.io/managed-by: kubedb.com + app.kubernetes.io/name: druids.kubedb.com + name: sample-druid + namespace: demo + ownerReferences: + - apiVersion: kubedb.com/v1alpha2 + blockOwnerDeletion: true + controller: true + kind: Druid + name: sample-druid + uid: cdbc2414-0dd1-4573-9532-e96b9094a443 + resourceVersion: "1610820" + uid: 
8430d22d-e715-454a-8a83-e30e40cbeb14 +spec: + appRef: + apiGroup: kubedb.com + kind: Druid + name: sample-druid + namespace: demo + clientConfig: + service: + name: sample-druid-pods + port: 8888 + scheme: http + url: http://sample-druid-coordinators-0.sample-druid-pods.demo.svc.cluster.local:8081,http://sample-druid-overlords-0.sample-druid-pods.demo.svc.cluster.local:8090,http://sample-druid-middlemanagers-0.sample-druid-pods.demo.svc.cluster.local:8091,http://sample-druid-historicals-0.sample-druid-pods.demo.svc.cluster.local:8083,http://sample-druid-brokers-0.sample-druid-pods.demo.svc.cluster.local:8082,http://sample-druid-routers-0.sample-druid-pods.demo.svc.cluster.local:8888 + secret: + name: sample-druid-admin-cred + type: kubedb.com/druid + version: 30.0.1 +``` + +KubeStash uses the `AppBinding` CR to connect with the target database. It requires the following two fields to set in AppBinding's `.spec` section. + +- `.spec.clientConfig.service.name` specifies the name of the Service that connects to the database. +- `.spec.secret` specifies the name of the Secret that holds necessary credentials to access the database. +- `spec.type` specifies the types of the app that this AppBinding is pointing to. KubeDB generated AppBinding follows the following format: `/`. + +**Insert Sample Data:** + +We can access the [web console](https://druid.apache.org/docs/latest/operations/web-console) of Druid database from any browser by port-forwarding the routers. Let’s port-forward the port `8888` to local machine: +```bash +kubectl port-forward -n demo svc/sample-druid-routers 8888 +Forwarding from 127.0.0.1:8888 -> 8888 +Forwarding from [::1]:8888 -> 8888 +``` + +Now hit the `http://localhost:8888` from any browser, and you will be prompted to provide the credential of the druid database. By following the steps discussed below, you can get the credential generated by the KubeDB operator for your Druid database. + +**Connection information:** + +- Username: + + ```bash + $ kubectl get secret -n demo sample-druid-admin-cred -o jsonpath='{.data.username}' | base64 -d + admin + ``` + +- Password: + + ```bash + $ kubectl get secret -n demo sample-druid-admin-cred -o jsonpath='{.data.password}' | base64 -d + DqG5E63NtklAkxqC + ``` + +After providing the credentials correctly, you should be able to access the web console like shown below. + +

+<!-- screenshot: Druid web console after login (original image alt: "lifecycle") -->
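+
+If you prefer to verify connectivity from the command line before opening the browser, the router also serves Druid's standard `/status/health` endpoint. The snippet below is only a sketch: it assumes the `kubectl port-forward` session above is still running and reuses the admin credential retrieved in the previous step.
+
+```bash
+$ export DRUID_PASSWORD=$(kubectl get secret -n demo sample-druid-admin-cred -o jsonpath='{.data.password}' | base64 -d)
+
+# A healthy router is expected to answer with: true
+$ curl -s -u "admin:$DRUID_PASSWORD" http://localhost:8888/status/health
+```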

+ +Now select the `Load Data` option and then select `Batch - classic` from the drop-down menu. +

+<!-- screenshot: the "Load Data" menu with "Batch - classic" selected (original image alt: "lifecycle") -->

+ +Select `Example data` and click `Load example` to insert the example `Wikipedia Edits` datasource. + +

+<!-- screenshot: loading the example "Wikipedia Edits" data (original image alt: "lifecycle") -->

+
+After clicking `Next` multiple times, click `Submit`.

+<!-- screenshot: submitting the ingestion spec (original image alt: "lifecycle") -->

+
+Within a minute, the status of the ingestion task should become `SUCCESS`.

+<!-- screenshot: the ingestion task in SUCCESS state (original image alt: "lifecycle") -->
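+
+Once the task succeeds, you can optionally confirm the ingested datasource from the command line as well, using the SQL API (`/druid/v2/sql`) that the router proxies. This is only a sketch and assumes the port-forward and the `DRUID_PASSWORD` variable from the earlier steps:
+
+```bash
+# Count the rows of the freshly ingested "wikipedia" datasource;
+# the response is a JSON array containing the row count.
+$ curl -s -u "admin:$DRUID_PASSWORD" \
+    -H 'Content-Type: application/json' \
+    -d '{"query": "SELECT COUNT(*) AS edits FROM wikipedia"}' \
+    http://localhost:8888/druid/v2/sql
+```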

+ +Now, we are ready to backup the database. + +### Prepare Backend + +We are going to store our backed up data into a GCS bucket. We have to create a Secret with necessary credentials and a `BackupStorage` CR to use this backend. If you want to use a different backend, please read the respective backend configuration doc from [here](https://kubestash.com/docs/latest/guides/backends/overview/). + +**Create Secret:** + +Let's create a secret called `gcs-secret` with access credentials to our desired GCS bucket, + +```bash +$ echo -n '' > GOOGLE_PROJECT_ID +$ cat /path/to/downloaded-sa-key.json > GOOGLE_SERVICE_ACCOUNT_JSON_KEY +$ kubectl create secret generic -n demo gcs-secret \ + --from-file=./GOOGLE_PROJECT_ID \ + --from-file=./GOOGLE_SERVICE_ACCOUNT_JSON_KEY +secret/gcs-secret created +``` + +**Create BackupStorage:** + +Now, create a `BackupStorage` using this secret. Below is the YAML of `BackupStorage` CR we are going to create, + +```yaml +apiVersion: storage.kubestash.com/v1alpha1 +kind: BackupStorage +metadata: + name: gcs-storage + namespace: demo +spec: + storage: + provider: gcs + gcs: + bucket: kubestash-qa + prefix: druid + secretName: gcs-secret + usagePolicy: + allowedNamespaces: + from: All + default: true + deletionPolicy: Delete +``` + +Let's create the BackupStorage we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/application-level/examples/backupstorage.yaml +backupstorage.storage.kubestash.com/gcs-storage created +``` + +Now, we are ready to backup our database to our desired backend. + +**Create RetentionPolicy:** + +Now, let's create a `RetentionPolicy` to specify how the old Snapshots should be cleaned up. + +Below is the YAML of the `RetentionPolicy` object that we are going to create, + +```yaml +apiVersion: storage.kubestash.com/v1alpha1 +kind: RetentionPolicy +metadata: + name: demo-retention + namespace: demo +spec: + default: true + failedSnapshots: + last: 2 + maxRetentionPeriod: 2mo + successfulSnapshots: + last: 5 + usagePolicy: + allowedNamespaces: + from: All +``` + +Let’s create the above `RetentionPolicy`, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/application-level/examples/retentionpolicy.yaml +retentionpolicy.storage.kubestash.com/demo-retention created +``` + +### Backup + +We have to create a `BackupConfiguration` targeting respective `sample-druid` Druid database. Then, KubeStash will create a `CronJob` for each session to take periodic backup of that database. + +At first, we need to create a secret with a Restic password for backup data encryption. + +**Create Secret:** + +Let's create a secret called `encrypt-secret` with the Restic password, + +```bash +$ echo -n 'changeit' > RESTIC_PASSWORD +$ kubectl create secret generic -n demo encrypt-secret \ + --from-file=./RESTIC_PASSWORD \ +secret "encrypt-secret" created +``` + +**Create RBAC** + +To take backup of the Druid Database alongside with its dependencies, KubeStash creates a backup `Job`. Consequently, if the dependencies are in different namespaces, then this `Job` requires `read`, `list`, `watch` and `create` permission for some of the cluster resources. This includes resources for the dependencies (`MySQL`, `ZooKeeper` and `PostgreSQL`) as well as `Appbinding`, `Secrets` and `Configmaps`. By default, KubeStash does not grant such cluster-wide permissions. We have to provide the necessary permissions manually. 
+ +Here, is the YAML of the `ServiceAccount`, `ClusterRole`, and `RoleBinding` that we are going to use for granting the necessary permissions. We will create two `RoleBinding` in both `dev` and `dev1` because we are going to deploy `ZooKeeper` in `dev` and `MySQL` in `dev1` namespace. + +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cluster-resource-reader + namespace: demo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cluster-resource-reader +rules: + - apiGroups: ["kubedb.com"] + resources: ["zookeepers", "mysqls", "postgreses"] + verbs: ["get", "list", "watch", "create"] + - apiGroups: ["appcatalog.appscode.com"] + resources: ["appbindings"] + verbs: ["get", "list", "watch", "create"] + - apiGroups: [""] + resources: ["secrets", "configmaps"] + verbs: ["get", "list", "watch", "create"] +--- +# RoleBinding for the dev namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cluster-resource-reader + namespace: dev +subjects: + - kind: ServiceAccount + name: cluster-resource-reader + namespace: demo +roleRef: + kind: ClusterRole + name: cluster-resource-reader + apiGroup: rbac.authorization.k8s.io +--- +# RoleBinding for the dev1 namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cluster-resource-reader + namespace: dev1 +subjects: + - kind: ServiceAccount + name: cluster-resource-reader + namespace: demo +roleRef: + kind: ClusterRole + name: cluster-resource-reader + apiGroup: rbac.authorization.k8s.io +``` + +Let’s create the RBAC resources we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/cross-ns-dependencies/examples/rbac.yaml +serviceaccount/cluster-resource-reader created +clusterrole.rbac.authorization.k8s.io/cluster-resource-reader created +rolebinding.rbac.authorization.k8s.io/cluster-resource-reader created +rolebinding.rbac.authorization.k8s.io/cluster-resource-reader created +``` + +**Create BackupConfiguration:** + +Below is the YAML for `BackupConfiguration` CR to take application-level backup of the `sample-druid` database that we have deployed earlier, + +```yaml +apiVersion: core.kubestash.com/v1alpha1 +kind: BackupConfiguration +metadata: + name: sample-druid-backup + namespace: demo +spec: + target: + apiGroup: kubedb.com + kind: Druid + namespace: demo + name: sample-druid + backends: + - name: gcs-backend + storageRef: + namespace: demo + name: gcs-storage + retentionPolicy: + name: demo-retention + namespace: demo + sessions: + - name: frequent-backup + scheduler: + schedule: "*/5 * * * *" + jobTemplate: + backoffLimit: 1 + repositories: + - name: gcs-druid-repo + backend: gcs-backend + directory: /druid + encryptionSecret: + name: encrypt-secret + namespace: demo + addon: + name: druid-addon + tasks: + - name: manifest-backup + - name: mysql-metadata-storage-backup + jobTemplate: + spec: + serviceAccountName: cluster-resource-reader +``` + +- `.spec.sessions[*].schedule` specifies that we want to backup at `5 minutes` interval. +- `.spec.target` refers to the targeted `sample-druid` Druid database that we created earlier. +- `.spec.sessions[*].addon.tasks[*].name[*]` specifies that both the `manifest-backup` and `mysql-metadata-storage-backup` tasks will be executed. 
+- `spec.sessions[*].addon.jobTemplate.spec.serviceAccountName` specifies the `ServiceAccount` name that we have created earlier with sufficient permission in `dev` and `dev1` namespace. + +> **Note**: +> - To create `BackupConfiguration` for druid with `PostgreSQL` as metadata storage update the `spec.sessions[*].addon.tasks.name` from `mysql-metadata-storage-backup` to `postgres-metadata-storage-restore`. +> - When we backup a `Druid`, KubeStash operator will also take backup of the dependency of the `MySQL` and `ZooKeeper` cluster as well. + +Let's create the `BackupConfiguration` CR that we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/application-level/examples/backupconfiguration.yaml +backupconfiguration.core.kubestash.com/sample-druid-backup created +``` + +**Verify Backup Setup Successful** + +If everything goes well, the phase of the `BackupConfiguration` should be `Ready`. The `Ready` phase indicates that the backup setup is successful. Let's verify the `Phase` of the BackupConfiguration, + +```bash +$ kubectl get backupconfiguration -n demo +NAME PHASE PAUSED AGE +sample-druid-backup Ready 2m50s +``` + +Additionally, we can verify that the `Repository` specified in the `BackupConfiguration` has been created using the following command, + +```bash +$ kubectl get repo -n demo +NAME INTEGRITY SNAPSHOT-COUNT SIZE PHASE LAST-SUCCESSFUL-BACKUP AGE +gcs-druid-repo 0 0 B Ready 3m +``` + +KubeStash keeps the backup for `Repository` YAMLs. If we navigate to the GCS bucket, we will see the `Repository` YAML stored in the `demo/druid` directory. + +**Verify CronJob:** + +It will also create a `CronJob` with the schedule specified in `spec.sessions[*].scheduler.schedule` field of `BackupConfiguration` CR. + +Verify that the `CronJob` has been created using the following command, + +```bash +$ kubectl get cronjob -n demo +NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE +trigger-sample-druid-backup-frequent-backup */5 * * * * 0 2m45s 3m25s +``` + +**Verify BackupSession:** + +KubeStash triggers an instant backup as soon as the `BackupConfiguration` is ready. After that, backups are scheduled according to the specified schedule. + +Run the following command to watch `BackupSession` CR, + +```bash +$ kubectl get backupsession -n demo -w + +NAME INVOKER-TYPE INVOKER-NAME PHASE DURATION AGE +sample-druid-backup-frequent-backup-1724065200 BackupConfiguration sample-druid-backup Succeeded 7m22s +``` + +We can see from the above output that the backup session has succeeded. Now, we are going to verify whether the backed up data has been stored in the backend. + +**Verify Backup:** + +Once a backup is complete, KubeStash will update the respective `Repository` CR to reflect the backup. Check that the repository `sample-druid-backup` has been updated by the following command, + +```bash +$ kubectl get repository -n demo gcs-druid-repo +NAME INTEGRITY SNAPSHOT-COUNT SIZE PHASE LAST-SUCCESSFUL-BACKUP AGE +gcs-druid-repo true 4 664.979 KiB Ready 2m55s 4h56m +``` + +At this moment we have one `Snapshot`. Run the following command to check the respective `Snapshot` which represents the state of a backup run for an application. 
+ +```bash +$ kubectl get snapshots -n demo -l=kubestash.com/repo-name=gcs-druid-repo +NAME REPOSITORY SESSION SNAPSHOT-TIME DELETION-POLICY PHASE AGE +gcs-druid-repo-sample-druid-backup-frequent-backup-1726830540 gcs-druid-repo frequent-backup 2024-09-20T11:09:00Z Delete Succeeded 3m13s +``` + +> **Note**: KubeStash creates a `Snapshot` with the following labels: +> - `kubestash.com/app-ref-kind: ` +> - `kubestash.com/app-ref-name: ` +> - `kubestash.com/app-ref-namespace: ` +> - `kubestash.com/repo-name: ` +> +> These labels can be used to watch only the `Snapshot`s related to our target Database or `Repository`. + +If we check the YAML of the `Snapshot`, we can find the information about the backed up components of the Database. + +```bash +$ kubectl get snapshots -n demo gcs-druid-repo-sample-druid-backup-frequent-backup-1725359100 -oyaml +``` + +```yaml +apiVersion: storage.kubestash.com/v1alpha1 +kind: Snapshot +metadata: + annotations: + kubedb.com/db-version: 30.0.1 + creationTimestamp: "2024-09-20T11:09:00Z" + finalizers: + - kubestash.com/cleanup + generation: 1 + labels: + kubestash.com/app-ref-kind: Druid + kubestash.com/app-ref-name: sample-druid + kubestash.com/app-ref-namespace: demo + kubestash.com/repo-name: gcs-druid-repo + name: gcs-druid-repo-sample-druid-backup-frequent-backup-1726830540 + namespace: demo + ownerReferences: + - apiVersion: storage.kubestash.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Repository + name: gcs-druid-repo + uid: d894aad3-ac0d-4c8f-b165-9f9f1085ef3a + resourceVersion: "1720138" + uid: 348fe907-9207-4a71-953c-6cafa80ba3f7 +spec: + appRef: + apiGroup: kubedb.com + kind: Druid + name: sample-druid + namespace: demo + backupSession: sample-druid-backup-frequent-backup-1726830540 + deletionPolicy: Delete + repository: gcs-druid-repo + session: frequent-backup + snapshotID: 01J87HXY4439P70MKGWS8RZM7E + type: FullBackup + version: v1 +status: + components: + dump: + driver: Restic + duration: 10.312603282s + integrity: true + path: repository/v1/frequent-backup/dump + phase: Succeeded + resticStats: + - hostPath: dumpfile.sql + id: 647a7123a66423a81fa21ac77128e46587ddae3e9c9426537a30ad1c9a8e1843 + size: 3.807 MiB + uploaded: 3.807 MiB + size: 652.853 KiB + manifest: + driver: Restic + duration: 10.457007184s + integrity: true + path: repository/v1/frequent-backup/manifest + phase: Succeeded + resticStats: + - hostPath: /kubestash-tmp/manifest + id: 069ad1c6dae59fd086aa9771289fc4dad6d076afbc11180e3b1cd8083cd01691 + size: 13.599 KiB + uploaded: 4.268 KiB + size: 12.127 KiB + conditions: + - lastTransitionTime: "2024-09-20T11:09:00Z" + message: Recent snapshot list updated successfully + reason: SuccessfullyUpdatedRecentSnapshotList + status: "True" + type: RecentSnapshotListUpdated + - lastTransitionTime: "2024-09-20T11:10:07Z" + message: Metadata uploaded to backend successfully + reason: SuccessfullyUploadedSnapshotMetadata + status: "True" + type: SnapshotMetadataUploaded + integrity: true + phase: Succeeded + size: 664.979 KiB + snapshotTime: "2024-09-20T11:09:00Z" + totalComponents: 2 +``` + +> KubeStash uses the `mysqldump`/`postgresdump` command to take backups of the metadata storage of the target Druid databases. Therefore, the component name for `logical backups` is set as `dump`. +> KubeStash set component name as `manifest` for the `manifest backup` of Druid databases. + +Now, if we navigate to the GCS bucket, we will see the backed up data stored in the `demo/druid/repository/v1/frequent-backup/dump` directory. 
KubeStash also keeps the backup for `Snapshot` YAMLs, which can be found in the `demo/dep/snapshots` directory. + +> **Note**: KubeStash stores all dumped data encrypted in the backup directory, meaning it remains unreadable until decrypted. + +**Delete Druid** + +Now, we are going to delete the `Druid` cluster that we have deployed and took backup earlier. +```bash +$ kubectl delete druid -n demo sample-druid +druid.kubedb.com "sample-druid" deleted +``` +The dependencies of druid with name `zk-dev` and `my-dev1` will also be deleted from their respective namespaces. + +## Restore + +In this section, we are going to restore the entire database from the backup that we have taken in the previous section. For this tutorial, we will restore the database in the same namespaces that they were in before. + +#### Create RestoreSession: + +We need to create a RestoreSession CR. + +Below, is the contents of YAML file of the `RestoreSession` CR that we are going to create to restore the entire database. + +```yaml +apiVersion: core.kubestash.com/v1alpha1 +kind: RestoreSession +metadata: + name: restore-sample-druid + namespace: demo +spec: + manifestOptions: + druid: + db: true + dataSource: + repository: gcs-druid-repo + snapshot: latest + encryptionSecret: + name: encrypt-secret + namespace: demo + addon: + name: druid-addon + tasks: + - name: mysql-metadata-storage-restore + - name: manifest-restore + jobTemplate: + spec: + serviceAccountName: cluster-resource-reader +``` + +Here, + +- `.spec.manifestOptions.druid.db` specifies whether to restore the DB manifest or not. +- `.spec.dataSource.repository` specifies the Repository object that holds the backed up data. +- `.spec.dataSource.snapshot` specifies to restore from latest `Snapshot`. +- `.spec.addon.tasks[*]` specifies that both the `manifest-restore` and `logical-backup-restore` tasks. +- `spec.sessions[*].addon.jobTemplate.spec.serviceAccountName` specifies the `ServiceAccount` name that we have created earlier with sufficient permission in `dev` and `dev1` namespace. + +> **Note**: +> - When we restore a `Druid` with `spec.metadataStorage.externallyManaged` set to `false` (which is `false` by default), then KubeStash operator will also restore the metadataStorage automatically. +> - Similarly, if `spec.zooKeeper.externallyManaged` is `false` (which is also `false` by default) then KubeStash operator will also restore the zookeeper instance automatically. +> - For externally managed metadata storage and zookeeper however, user needs to specify it in `spec.manifestOptions.mySQL`/`spec.manifestOptions.postgres`/`spec.manifestOptions.zooKeeper` to restore those. + +Let's create the RestoreSession CRD object we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/application-level/examples/restoresession.yaml +restoresession.core.kubestash.com/restore-sample-druid created +``` + +Once, you have created the `RestoreSession` object, KubeStash will create restore Job. Run the following command to watch the phase of the `RestoreSession` object, + +```bash +$ watch kubectl get restoresession -n demo +Every 2.0s: kubectl get restores... AppsCode-PC-03: Wed Aug 21 10:44:05 2024 + +NAME REPOSITORY FAILURE-POLICY PHASE DURATION AGE +sample-restore gcs-demo-repo Succeeded 3s 53s +``` +The `Succeeded` phase means that the restore process has been completed successfully. 
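+
+If the `RestoreSession` instead stays in `Running` or ends up in `Failed`, describing it usually reveals which component failed (for example, missing cross-namespace RBAC for the dependency restore). A quick inspection sketch, assuming the resource names used in this guide:
+
+```bash
+$ kubectl describe restoresessions.core.kubestash.com -n demo restore-sample-druid
+
+# The restore Job Pods created by KubeStash typically include the RestoreSession
+# name, so their logs can be checked as well:
+$ kubectl get pods -n demo | grep restore-sample-druid
+$ kubectl logs -n demo <restore-pod-name>
+```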
+ +#### Verify Restored Druid Manifest: + +In this section, we will verify whether the desired `Druid` database manifest has been successfully applied to the cluster. + +```bash +$ kubectl get druids.kubedb.com -n demo +NAME VERSION STATUS AGE +restored-druid 30.0.1 Ready 6m26s +``` + +The output confirms that the `Druid` database has been successfully created with the same configuration as it had at the time of backup. + +Verify the dependencies have been restored: +```bash +$ $ kubectl get mysql -n dev1 +NAME VERSION STATUS AGE +mysql.kubedb.com/my-dev1 9.1.0 Ready 6m30s + +$ kubectl get zk -n dev +NAME TYPE VERSION STATUS AGE +zookeeper.kubedb.com/zk-dev kubedb.com/v1alpha2 3.7.2 Ready 6m30s +``` + +The output confirms that the `MySQL` and `ZooKeper` databases have been successfully created with the same configuration as it had at the time of backup. + +#### Verify Restored Data: + +In this section, we are going to verify whether the desired data has been restored successfully. We are going to connect to the database server and check whether the database and the table we created earlier in the original database are restored. + +At first, check if the database has gone into `Ready` state by the following command, + +```bash +$ kubectl get druid -n demo restored-druid +NAME VERSION STATUS AGE +restored-druid 30.0.1 Ready 34m +``` + +Now, let's verify if our datasource `wikipedia` exists or not. For that, first find out the database `Sevices` by the following command, + +Now access the [web console](https://druid.apache.org/docs/latest/operations/web-console) of Druid database from any browser by port-forwarding the routers. Let’s port-forward the port `8888` to local machine: +```bash +$ kubectl get svc -n demo --selector="app.kubernetes.io/instance=restored-druid" +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +restored-druid-brokers ClusterIP 10.128.74.54 8082/TCP 10m +restored-druid-coordinators ClusterIP 10.128.30.124 8081/TCP 10m +restored-druid-pods ClusterIP None 8081/TCP,8090/TCP,8083/TCP,8091/TCP,8082/TCP,8888/TCP 10m +restored-druid-routers ClusterIP 10.128.228.193 8888/TCP 10m +``` +```bash +kubectl port-forward -n demo svc/restored-druid-routers 8888 +Forwarding from 127.0.0.1:8888 -> 8888 +Forwarding from [::1]:8888 -> 8888 +``` + +Then hit the `http://localhost:8888` from any browser, and you will be prompted to provide the credential of the druid database. By following the steps discussed below, you can get the credential generated by the KubeDB operator for your Druid database. +**Connection information:** +- Username: + + ```bash + $ kubectl get secret -n demo restored-druid-admin-cred -o jsonpath='{.data.username}' | base64 -d + admin + ``` + +- Password: + + ```bash + $ kubectl get secret -n demo restored-druid-admin-cred -o jsonpath='{.data.password}' | base64 -d + DqG5E63NtklAkxqC + ``` +After providing the credentials correctly, you should be able to access the web console like shown below. Now if you go to the `Datasources` section, you will see that our ingested datasource `wikipedia` exists in the list. +

+<!-- screenshot: the Datasources list showing the restored "wikipedia" datasource (original image alt: "lifecycle") -->

+ +So, from the above screenshot, we can see that the `wikipedia` datasource we have ingested earlier in the original database and now, it is restored successfully. + +## Cleanup + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete backupconfigurations.core.kubestash.com -n demo sample-druid-backup +kubectl delete backupstorage -n demo gcs-storage +kubectl delete secret -n demo gcs-secret +kubectl delete secret -n demo encrypt-secret +kubectl delete retentionpolicies.storage.kubestash.com -n demo demo-retention +kubectl delete restoresessions.core.kubestash.com -n demo restore-sample-druid +kubectl delete druid -n demo sample-druid +kubectl delete druid -n dev restored-druid +``` \ No newline at end of file diff --git a/docs/guides/druid/backup/customization/examples/common/sample-druid.yaml b/docs/guides/druid/backup/customization/examples/common/sample-druid.yaml index cca8bd8797..a107788960 100644 --- a/docs/guides/druid/backup/customization/examples/common/sample-druid.yaml +++ b/docs/guides/druid/backup/customization/examples/common/sample-druid.yaml @@ -7,8 +7,8 @@ spec: version: 30.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/backup/customization/examples/common/sample-druid.yaml.bak b/docs/guides/druid/backup/customization/examples/common/sample-druid.yaml.bak new file mode 100644 index 0000000000..cca8bd8797 --- /dev/null +++ b/docs/guides/druid/backup/customization/examples/common/sample-druid.yaml.bak @@ -0,0 +1,15 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: sample-druid + namespace: demo +spec: + version: 30.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/guides/druid/backup/logical/examples/restored-druid.yaml b/docs/guides/druid/backup/logical/examples/restored-druid.yaml index ed4937b8a6..3649b7bddb 100644 --- a/docs/guides/druid/backup/logical/examples/restored-druid.yaml +++ b/docs/guides/druid/backup/logical/examples/restored-druid.yaml @@ -9,8 +9,8 @@ spec: version: 30.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/backup/logical/examples/restored-druid.yaml.bak b/docs/guides/druid/backup/logical/examples/restored-druid.yaml.bak new file mode 100644 index 0000000000..ed4937b8a6 --- /dev/null +++ b/docs/guides/druid/backup/logical/examples/restored-druid.yaml.bak @@ -0,0 +1,17 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: restored-druid + namespace: demo +spec: + init: + waitForInitialRestore: true + version: 30.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/guides/druid/backup/logical/examples/sample-druid.yaml b/docs/guides/druid/backup/logical/examples/sample-druid.yaml index cca8bd8797..a107788960 100644 --- a/docs/guides/druid/backup/logical/examples/sample-druid.yaml +++ b/docs/guides/druid/backup/logical/examples/sample-druid.yaml @@ -7,8 +7,8 @@ spec: version: 30.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git 
a/docs/guides/druid/backup/logical/examples/sample-druid.yaml.bak b/docs/guides/druid/backup/logical/examples/sample-druid.yaml.bak new file mode 100644 index 0000000000..cca8bd8797 --- /dev/null +++ b/docs/guides/druid/backup/logical/examples/sample-druid.yaml.bak @@ -0,0 +1,15 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: sample-druid + namespace: demo +spec: + version: 30.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/guides/druid/backup/logical/index.md b/docs/guides/druid/backup/logical/index.md index eb6e88118f..34e3512777 100644 --- a/docs/guides/druid/backup/logical/index.md +++ b/docs/guides/druid/backup/logical/index.md @@ -118,8 +118,8 @@ spec: version: 30.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 @@ -602,8 +602,8 @@ spec: version: 30.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/backup/logical/index.md.bak b/docs/guides/druid/backup/logical/index.md.bak new file mode 100644 index 0000000000..eb6e88118f --- /dev/null +++ b/docs/guides/druid/backup/logical/index.md.bak @@ -0,0 +1,750 @@ +--- +title: Backup & Restore Druid | KubeStash +description: Backup Druid database using KubeStash +menu: + docs_{{ .version }}: + identifier: guides-druid-backup-logical + name: Logical Backup + parent: guides-druid-backup + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +# Backup and Restore Druid database using KubeStash + +[KubeStash](https://kubestash.com) allows you to backup and restore `Druid` databases. Specifically backup of external dependency of `Druid` metadata storage (`MySQL` or `PostgreSQL`) is sufficient to restore `Druid` to its previous state if the deep storage is kept intact. KubeStash makes managing your `Druid` backups and restorations more straightforward and efficient. + +This guide will give you how you can take backup and restore your `Druid` databases using `Kubestash`. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using `Minikube` or `Kind`. +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md) and make sure to include the flags `--set global.featureGates.Druid=true` to ensure **Druid CRD** and `--set global.featureGates.ZooKeeper=true` to ensure **ZooKeeper CRD** as Druid depends on ZooKeeper for external dependency with helm command. +- Install `KubeStash` in your cluster following the steps [here](https://kubestash.com/docs/latest/setup/install/kubestash). +- Install KubeStash `kubectl` plugin following the steps [here](https://kubestash.com/docs/latest/setup/install/kubectl-plugin/). +- If you are not familiar with how KubeStash backup and restore Druid databases, please check the following guide [here](/docs/guides/druid/backup/overview/index.md). 
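+
+For the KubeDB installation step above, the two feature gates are simply appended to the Helm command from the setup guide. The following is only a sketch; the chart reference, version, and license flags are placeholders to be taken from the linked setup guide, and only the two `--set` flags are specific to this tutorial:
+
+```bash
+$ helm upgrade -i kubedb <kubedb-chart-ref> \
+    --namespace kubedb --create-namespace \
+    <license-and-version-flags> \
+    --set global.featureGates.Druid=true \
+    --set global.featureGates.ZooKeeper=true
+```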
+ +You should be familiar with the following `KubeStash` concepts: + +- [BackupStorage](https://kubestash.com/docs/latest/concepts/crds/backupstorage/) +- [BackupConfiguration](https://kubestash.com/docs/latest/concepts/crds/backupconfiguration/) +- [BackupSession](https://kubestash.com/docs/latest/concepts/crds/backupsession/) +- [RestoreSession](https://kubestash.com/docs/latest/concepts/crds/restoresession/) +- [Addon](https://kubestash.com/docs/latest/concepts/crds/addon/) +- [Function](https://kubestash.com/docs/latest/concepts/crds/function/) +- [Task](https://kubestash.com/docs/latest/concepts/crds/addon/#task-specification) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/guides/druid/backup/logical/examples](https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/logical/examples) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Backup Druid + +KubeStash supports backups for `Druid` instances with both type of metadata storage (`MySQL` and `PostgreSQL`). In this demonstration, we'll focus on a `Druid` database with a MySQL cluster. The backup and restore process is similar for `Druid` with `PostgreSQL` as metadata storage as well. + +This section will demonstrate how to backup a `Druid` database. Here, we are going to deploy a `Druid` database using KubeDB. Then, we are going to backup this database into a `GCS` bucket. Finally, we are going to restore the backup up data into another `Druid` database. + +## Deploy Sample Druid Database + + +**Create External Dependency (Deep Storage):** + +One of the external dependency of Druid is deep storage where the segments are stored. It is a storage mechanism that Apache Druid does not provide. **Amazon S3**, **Google Cloud Storage**, or **Azure Blob Storage**, **S3-compatible storage** (like **Minio**), or **HDFS** are generally convenient options for deep storage. + +In this tutorial, we will run a `minio-server` as deep storage in our local `kind` cluster using `minio-operator` and create a bucket named `druid` in it, which the deployed druid database will use. + +```bash + +$ helm repo add minio https://operator.min.io/ +$ helm repo update minio +$ helm upgrade --install --namespace "minio-operator" --create-namespace "minio-operator" minio/operator --set operator.replicaCount=1 + +$ helm upgrade --install --namespace "demo" --create-namespace druid-minio minio/tenant \ +--set tenant.pools[0].servers=1 \ +--set tenant.pools[0].volumesPerServer=1 \ +--set tenant.pools[0].size=1Gi \ +--set tenant.certificate.requestAutoCert=false \ +--set tenant.buckets[0].name="druid" \ +--set tenant.pools[0].name="default" + +``` + +Now we need to create a `Secret` named `deep-storage-config`. It contains the necessary connection information using which the druid database will connect to the deep storage. 
+ +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: deep-storage-config + namespace: demo +stringData: + druid.storage.type: "s3" + druid.storage.bucket: "druid" + druid.storage.baseKey: "druid/segments" + druid.s3.accessKey: "minio" + druid.s3.secretKey: "minio123" + druid.s3.protocol: "http" + druid.s3.enablePathStyleAccess: "true" + druid.s3.endpoint.signingRegion: "us-east-1" + druid.s3.endpoint.url: "http://myminio-hl.demo.svc.cluster.local:9000/" +``` + +Let’s create the `deep-storage-config` Secret shown above: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/logical/examples/deep-storage-config.yaml +secret/deep-storage-config created +``` + +Let's deploy a sample `Druid` database and insert some data into it. + +**Create Druid CR:** + +Below is the YAML of a sample `Druid` CRD that we are going to create for this tutorial: + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-quickstart + namespace: demo +spec: + version: 30.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: WipeOut +``` + +Create the above `Druid` CR, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/logical/examples/sample-druid.yaml +druid.kubedb.com/sample-druid created +``` + +KubeDB will deploy a Druid database according to the above specification. It will also create the necessary `Secrets` and `Services` to access the database along with `MySQL` and `ZooKeeper` instance as druid dependencies. + +Let's check if the database is ready to use, + +```bash +$ kubectl get druids.kubedb.com -n demo +NAME TYPE VERSION STATUS AGE +sample-druid kubedb.com/v1alpha2 30.0.1 Ready 113s +``` + +The database is `Ready`. Verify that KubeDB has created the necessary `Secrets` and `Services` to access the database along with `MySQL` and `ZooKeeper` instance for this database using the following commands, + +```bash +$ kubectl get secret -n demo -l=app.kubernetes.io/instance=sample-druid +NAME TYPE DATA AGE +sample-druid-admin-cred kubernetes.io/basic-auth 2 48s + +$ kubectl get service -n demo -l=app.kubernetes.io/instance=sample-druid +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +sample-druid-brokers ClusterIP 10.128.189.77 8082/TCP 72s +sample-druid-coordinators ClusterIP 10.128.175.228 8081/TCP 72s +sample-druid-pods ClusterIP None 8081/TCP,8090/TCP,8083/TCP,8091/TCP,8082/TCP,8888/TCP 72s +sample-druid-routers ClusterIP 10.128.95.51 8888/TCP 72s +``` + +Here, we have to use service `sample-druid-routers` and secret `sample-druid-admin-cred` to connect with the database. `KubeDB` creates an [AppBinding](/docs/guides/druid/concepts/appbinding.md) CR that holds the necessary information to connect with the database. 
+ +**Verify AppBinding:** + +Verify that the `AppBinding` has been created successfully using the following command, + +```bash +$ kubectl get appbindings -n demo +NAME TYPE VERSION AGE +sample-druid kubedb.com/druid 30.0.1 2m26s +sample-druid-mysql-metadata kubedb.com/mysql 9.1.0 5m40s +sample-druid-zk kubedb.com/zookeeper 3.7.2 5m43s +``` + +Let's check the YAML of the above `AppBinding`, + +```bash +$ kubectl get appbindings -n demo sample-druid -o yaml +``` + +```yaml +apiVersion: appcatalog.appscode.com/v1alpha1 +kind: AppBinding +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"kubedb.com/v1alpha2","kind":"Druid","metadata":{"annotations":{},"name":"sample-druid","namespace":"demo"},"spec":{"deepStorage":{"configSecret":{"name":"deep-storage-config"},"type":"s3"},"deletionPolicy":"WipeOut","topology":{"routers":{"replicas":1}},"version":"30.0.1"}} + creationTimestamp: "2024-09-17T12:17:27Z" + generation: 1 + labels: + app.kubernetes.io/component: database + app.kubernetes.io/instance: sample-druid + app.kubernetes.io/managed-by: kubedb.com + app.kubernetes.io/name: druids.kubedb.com + name: sample-druid + namespace: demo + ownerReferences: + - apiVersion: kubedb.com/v1alpha2 + blockOwnerDeletion: true + controller: true + kind: Druid + name: sample-druid + uid: aab70ef0-ff00-437d-be91-68438513552e + resourceVersion: "1372134" + uid: a45b6562-aa0b-4dba-8e6b-139cfc33beb6 +spec: + appRef: + apiGroup: kubedb.com + kind: Druid + name: sample-druid + namespace: demo + clientConfig: + service: + name: sample-druid-pods + port: 8888 + scheme: http + url: http://sample-druid-coordinators-0.sample-druid-pods.demo.svc.cluster.local:8081,http://sample-druid-overlords-0.sample-druid-pods.demo.svc.cluster.local:8090,http://sample-druid-middlemanagers-0.sample-druid-pods.demo.svc.cluster.local:8091,http://sample-druid-historicals-0.sample-druid-pods.demo.svc.cluster.local:8083,http://sample-druid-brokers-0.sample-druid-pods.demo.svc.cluster.local:8082,http://sample-druid-routers-0.sample-druid-pods.demo.svc.cluster.local:8888 + secret: + name: sample-druid-admin-cred + type: kubedb.com/druid + version: 30.0.1 +``` + +KubeStash uses the `AppBinding` CR to connect with the target database. It requires the following two fields to set in AppBinding's `.spec` section. + +- `.spec.clientConfig.service.name` specifies the name of the Service that connects to the database. +- `.spec.secret` specifies the name of the Secret that holds necessary credentials to access the database. +- `spec.type` specifies the types of the app that this AppBinding is pointing to. KubeDB generated AppBinding follows the following format: `/`. + +**Insert Sample Data:** + +We can access the [web console](https://druid.apache.org/docs/latest/operations/web-console) of Druid database from any browser by port-forwarding the routers. Let’s port-forward the port `8888` to local machine: +```bash +kubectl port-forward -n demo svc/sample-druid-routers 8888 +Forwarding from 127.0.0.1:8888 -> 8888 +Forwarding from [::1]:8888 -> 8888 +``` + +Now hit the `http://localhost:8888` from any browser, and you will be prompted to provide the credential of the druid database. By following the steps discussed below, you can get the credential generated by the KubeDB operator for your Druid database. 
+ +**Connection information:** + +- Username: + + ```bash + $ kubectl get secret -n demo sample-druid-admin-cred -o jsonpath='{.data.username}' | base64 -d + admin + ``` + +- Password: + + ```bash + $ kubectl get secret -n demo sample-druid-admin-cred -o jsonpath='{.data.password}' | base64 -d + DqG5E63NtklAkxqC + ``` + +After providing the credentials correctly, you should be able to access the web console like shown below. + +

+<!-- screenshot: Druid web console after login (original image alt: "lifecycle") -->

+ +Now select the `Load Data` option and then select `Batch - classic` from the drop-down menu. +

+<!-- screenshot: the "Load Data" menu with "Batch - classic" selected (original image alt: "lifecycle") -->

+ +Select `Example data` and click `Load example` to insert the example `Wikipedia Edits` datasource. + +

+<!-- screenshot: loading the example "Wikipedia Edits" data (original image alt: "lifecycle") -->

+
+After clicking `Next` multiple times, click `Submit`.

+<!-- screenshot: submitting the ingestion spec (original image alt: "lifecycle") -->

+
+Within a minute, the status of the ingestion task should become `SUCCESS`.

+<!-- screenshot: the ingestion task in SUCCESS state (original image alt: "lifecycle") -->

+ +Now, we are ready to backup the database. + +### Prepare Backend + +We are going to store our backed up data into a GCS bucket. We have to create a Secret with necessary credentials and a `BackupStorage` CR to use this backend. If you want to use a different backend, please read the respective backend configuration doc from [here](https://kubestash.com/docs/latest/guides/backends/overview/). + +**Create Secret:** + +Let's create a secret called `gcs-secret` with access credentials to our desired GCS bucket, + +```bash +$ echo -n '' > GOOGLE_PROJECT_ID +$ cat /path/to/downloaded-sa-key.json > GOOGLE_SERVICE_ACCOUNT_JSON_KEY +$ kubectl create secret generic -n demo gcs-secret \ + --from-file=./GOOGLE_PROJECT_ID \ + --from-file=./GOOGLE_SERVICE_ACCOUNT_JSON_KEY +secret/gcs-secret created +``` + +**Create BackupStorage:** + +Now, create a `BackupStorage` using this secret. Below is the YAML of `BackupStorage` CR we are going to create, + +```yaml +apiVersion: storage.kubestash.com/v1alpha1 +kind: BackupStorage +metadata: + name: gcs-storage + namespace: demo +spec: + storage: + provider: gcs + gcs: + bucket: kubestash-qa + prefix: demo + secretName: gcs-secret + usagePolicy: + allowedNamespaces: + from: All + default: true + deletionPolicy: Delete +``` + +Let's create the BackupStorage we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/logical/examples/backupstorage.yaml +backupstorage.storage.kubestash.com/gcs-storage created +``` + +Now, we are ready to backup our database to our desired backend. + +**Create RetentionPolicy:** + +Now, let's create a `RetentionPolicy` to specify how the old Snapshots should be cleaned up. + +Below is the YAML of the `RetentionPolicy` object that we are going to create, + +```yaml +apiVersion: storage.kubestash.com/v1alpha1 +kind: RetentionPolicy +metadata: + name: demo-retention + namespace: demo +spec: + default: true + failedSnapshots: + last: 2 + maxRetentionPeriod: 2mo + successfulSnapshots: + last: 5 + usagePolicy: + allowedNamespaces: + from: All +``` + +Let’s create the above `RetentionPolicy`, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/logical/examples/retentionpolicy.yaml +retentionpolicy.storage.kubestash.com/demo-retention created +``` + +### Backup + +We have to create a `BackupConfiguration` targeting respective `sample-druid` Druid database. Then, KubeStash will create a `CronJob` for each session to take periodic backup of that database. + +At first, we need to create a secret with a Restic password for backup data encryption. 
+ +**Create Secret:** + +Let's create a secret called `encrypt-secret` with the Restic password, + +```bash +$ echo -n 'changeit' > RESTIC_PASSWORD +$ kubectl create secret generic -n demo encrypt-secret \ + --from-file=./RESTIC_PASSWORD \ +secret "encrypt-secret" created +``` + +**Create BackupConfiguration:** + +Below is the YAML for `BackupConfiguration` CR to backup the `sample-druid` database that we have deployed earlier, + +```yaml +apiVersion: core.kubestash.com/v1alpha1 +kind: BackupConfiguration +metadata: + name: sample-druid-backup + namespace: demo +spec: + target: + apiGroup: kubedb.com + kind: Druid + namespace: demo + name: sample-druid + backends: + - name: gcs-backend + storageRef: + namespace: demo + name: gcs-storage + retentionPolicy: + name: demo-retention + namespace: demo + sessions: + - name: frequent-backup + scheduler: + schedule: "*/5 * * * *" + jobTemplate: + backoffLimit: 1 + repositories: + - name: gcs-druid-repo + backend: gcs-backend + directory: /druid + encryptionSecret: + name: encrypt-secret + namespace: demo + addon: + name: druid-addon + tasks: + - name: mysql-metadata-storage-backup +``` +- `.spec.sessions[*].schedule` specifies that we want to backup the database at `5 minutes` interval. +- `.spec.target` refers to the targeted `sample-druid` Druid database that we created earlier. + +> **Note**: To create `BackupConfiguration` for druid with `PostgreSQL` as metadata storage just update `spec.sessions[*].addon.tasks.name` to `pg-metadata-storage-backup` + +Let's create the `BackupConfiguration` CR that we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/logical/examples/backupconfiguration.yaml +backupconfiguration.core.kubestash.com/sample-druid-backup created +``` + +**Verify Backup Setup Successful** + +If everything goes well, the phase of the `BackupConfiguration` should be `Ready`. The `Ready` phase indicates that the backup setup is successful. Let's verify the `Phase` of the BackupConfiguration, + +```bash +$ kubectl get backupconfiguration -n demo +NAME PHASE PAUSED AGE +sample-druid-backup Ready 2m50s +``` + +Additionally, we can verify that the `Repository` specified in the `BackupConfiguration` has been created using the following command, + +```bash +$ kubectl get repo -n demo +NAME INTEGRITY SNAPSHOT-COUNT SIZE PHASE LAST-SUCCESSFUL-BACKUP AGE +gcs-druid-repo true 1 712.822 KiB Ready 5m 4m +``` + +KubeStash keeps the backup for `Repository` YAMLs. If we navigate to the GCS bucket, we will see the `Repository` YAML stored in the `demo/druid` directory. + +**Verify CronJob:** + +It will also create a `CronJob` with the schedule specified in `spec.sessions[*].scheduler.schedule` field of `BackupConfiguration` CR. + +Verify that the `CronJob` has been created using the following command, + +```bash +$ kubectl get cronjob -n demo +NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE +trigger-sample-druid-backup-frequent-backup */5 * * * * 0 2m45s 3m25s +``` + +**Verify BackupSession:** + +KubeStash triggers an instant backup as soon as the `BackupConfiguration` is ready. After that, backups are scheduled according to the specified schedule. + +```bash +$ kubectl get backupsession -n demo -w + +NAME INVOKER-TYPE INVOKER-NAME PHASE DURATION AGE +sample-druid-backup-frequent-backup-1724065200 BackupConfiguration sample-druid-backup Succeeded 7m22s +``` + +We can see from the above output that the backup session has succeeded. 
Now, we are going to verify whether the backed up data has been stored in the backend. + +**Verify Backup:** + +Once a backup is complete, KubeStash will update the respective `Repository` CR to reflect the backup. Check that the repository `sample-druid-backup` has been updated by the following command, + +```bash +$ kubectl get repository -n demo sample-druid-backup +NAME INTEGRITY SNAPSHOT-COUNT SIZE PHASE LAST-SUCCESSFUL-BACKUP AGE +sample-druid-backup true 1 806 B Ready 8m27s 9m18s +``` + +At this moment we have one `Snapshot`. Run the following command to check the respective `Snapshot` which represents the state of a backup run for an application. + +```bash +$ kubectl get snapshots -n demo -l=kubestash.com/repo-name=gcs-druid-repo +NAME REPOSITORY SESSION SNAPSHOT-TIME DELETION-POLICY PHASE AGE +gcs-druid-repo-sample-druid-backup-frequent-backup-1726656835 gcs-druid-repo frequent-backup 2024-09-18T10:54:07Z Delete Succeeded 11m +``` + +> **Note**: KubeStash creates a `Snapshot` with the following labels: +> - `kubestash.com/app-ref-kind: ` +> - `kubestash.com/app-ref-name: ` +> - `kubestash.com/app-ref-namespace: ` +> - `kubestash.com/repo-name: ` +> +> These labels can be used to watch only the `Snapshot`s related to our target Database or `Repository`. + +If we check the YAML of the `Snapshot`, we can find the information about the backed up components of the Database. + +```bash +$ kubectl get snapshots -n demo gcs-druid-repo-sample-druid-backup-frequent-backup-1724065200 -oyaml +``` + +```yaml +$ kubectl get snapshots -n demo gcs-druid-repo-sample-druid-backup-frequent-backup-1726656835 -oyaml +``` +``` +apiVersion: storage.kubestash.com/v1alpha1 +kind: Snapshot +metadata: + creationTimestamp: "2024-09-18T10:54:07Z" + finalizers: + - kubestash.com/cleanup + generation: 1 + labels: + kubestash.com/app-ref-kind: Druid + kubestash.com/app-ref-name: sample-druid + kubestash.com/app-ref-namespace: demo + kubestash.com/repo-name: gcs-druid-repo + annotations: + kubedb.com/db-version: 30.0.1 + name: gcs-druid-repo-sample-druid-backup-frequent-backup-1726656835 + namespace: demo + ownerReferences: + - apiVersion: storage.kubestash.com/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Repository + name: gcs-druid-repo + uid: 7656c292-4d59-4503-8462-5601823fc531 + resourceVersion: "1477854" + uid: 9a3bbb73-ae71-4fb4-a99b-72af62a95011 +spec: + appRef: + apiGroup: kubedb.com + kind: Druid + name: sample-druid + namespace: demo + backupSession: sample-druid-backup-frequent-backup-1726656835 + deletionPolicy: Delete + repository: gcs-druid-repo + session: frequent-backup + snapshotID: 01J82C980JHJ869SQYMGCH3S44 + type: FullBackup + version: v1 +status: + components: + dump: + driver: Restic + duration: 6.897377973s + integrity: true + path: repository/v1/frequent-backup/dump + phase: Succeeded + resticStats: + - hostPath: dumpfile.sql + id: d10ab158ce2667d03b08cb35573a6f049a2cef9ef2e96be847caed6660bbb904 + size: 4.322 MiB + uploaded: 4.323 MiB + size: 712.824 KiB + ... +``` + +> KubeStash uses the `mysqldump`/`postgresdump` command to take backups of the target metadata storage of Druid databases. Therefore, the component name for logical backups is set as `dump`. + +Now, if we navigate to the GCS bucket, we will see the backed up data stored in the `demo/druid/repository/v1/frequent-backup/dump` directory. KubeStash also keeps the backup for `Snapshot` YAMLs, which can be found in the `demo/dep/snapshots` directory. 
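+
+If you want to double-check from your workstation, you can list that path in the bucket directly. This is a sketch that assumes the Google Cloud SDK (`gsutil`) is installed and authenticated for the same project and bucket referenced in the `BackupStorage`:
+
+```bash
+# List the repository contents created by the frequent-backup session.
+$ gsutil ls -r gs://kubestash-qa/demo/druid/repository/v1/frequent-backup/
+```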
+ +> **Note**: KubeStash stores all dumped data encrypted in the backup directory, meaning it remains unreadable until decrypted. + +## Restore + +In this section, we are going to restore the database from the backup we have taken in the previous section. We are going to deploy a new database and initialize it from the backup. + +#### Deploy Restored Database: + +Now, we have to deploy the restored database similarly as we have deployed the original `sample-druid` database. However, this time there will be the following differences: + +- We are going to specify `.spec.init.waitForInitialRestore` field that tells KubeDB to wait for first restore to complete before marking this database is ready to use. + +Below is the YAML for `Druid` CRD we are going deploy to initialize from backup, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: restored-druid + namespace: demo +spec: + init: + waitForInitialRestore: true + version: 30.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: WipeOut +``` + +Let's create the above database, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/logical/examples/restored-druid.yaml +druid.kubedb.com/restored-druid created +``` + +If you check the database status, you will see it is stuck in `Provisioning` state. + +```bash +$ kubectl get druid -n demo restored-druid +NAME TYPE VERSION STATUS AGE +restored-druid kubedb.com/v1alpha2 30.0.1 Provisioning 22s +``` + +#### Create RestoreSession: + +Now, we need to create a RestoreSession CRD pointing to targeted `Druid` database. + +Below, is the contents of YAML file of the `RestoreSession` object that we are going to create to restore backed up data into the newly created database provisioned by Druid object named `restored-druid`. + +```yaml +apiVersion: core.kubestash.com/v1alpha1 +kind: RestoreSession +metadata: + name: restore-sample-druid + namespace: demo +spec: + target: + apiGroup: kubedb.com + kind: Druid + name: restored-druid + namespace: demo + dataSource: + snapshot: latest + repository: gcs-druid-repo + encryptionSecret: + name: encrypt-secret + namespace: demo + addon: + name: druid-addon + tasks: + - name: mysql-metadata-storage-restore +``` + +Here, + +- `.spec.target` refers to the newly created `restored-druid` Druid object to where we want to restore backup data. +- `.spec.dataSource.repository` specifies the Repository object that holds the backed up data. +- `.spec.dataSource.snapshot` specifies to restore from latest `Snapshot`. + +> **Note**: To create `RestoreSession` for druid with `PostgreSQL` as metadata storage just update `spec.addon.tasks.name` to `postgres-metadata-storage-restore` + +Let's create the RestoreSession CRD object we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/backup/logical/examples/restoresession.yaml +restoresession.core.kubestash.com/sample-druid-restore created +``` + +Once, you have created the `RestoreSession` object, KubeStash will create restore Job. Run the following command to watch the phase of the `RestoreSession` object, + +```bash +$ watch kubectl get restoresession -n demo +Every 2.0s: kubectl get restores... 
AppsCode-PC-03: Wed Aug 21 10:44:05 2024
+
+NAME                   REPOSITORY       FAILURE-POLICY   PHASE       DURATION   AGE
+restore-sample-druid   gcs-druid-repo                    Succeeded   3s         53s
+```
+
+The `Succeeded` phase means that the restore process has been completed successfully.
+
+
+#### Verify Restored Data:
+
+In this section, we are going to verify whether the desired data has been restored successfully. We are going to connect to the restored database and check whether the datasource we ingested earlier into the original database has been restored.
+
+At first, check if the database has gone into `Ready` state by the following command,
+
+```bash
+$ kubectl get druid -n demo restored-druid
+NAME             VERSION   STATUS   AGE
+restored-druid   30.0.1    Ready    34m
+```
+
+Now, let's verify if our datasource `wikipedia` exists or not. For that, first find out the database `Services` by the following command,
+
+```bash
+$ kubectl get svc -n demo --selector="app.kubernetes.io/instance=restored-druid"
+NAME                          TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                                 AGE
+restored-druid-brokers        ClusterIP   10.128.74.54     <none>        8082/TCP                                                10m
+restored-druid-coordinators   ClusterIP   10.128.30.124    <none>        8081/TCP                                                10m
+restored-druid-pods           ClusterIP   None             <none>        8081/TCP,8090/TCP,8083/TCP,8091/TCP,8082/TCP,8888/TCP   10m
+restored-druid-routers        ClusterIP   10.128.228.193   <none>        8888/TCP                                                10m
+```
+
+Now access the [web console](https://druid.apache.org/docs/latest/operations/web-console) of the Druid database from any browser by port-forwarding the routers. Let's port-forward the port `8888` to the local machine:
+
+```bash
+$ kubectl port-forward -n demo svc/restored-druid-routers 8888
+Forwarding from 127.0.0.1:8888 -> 8888
+Forwarding from [::1]:8888 -> 8888
+```
+
+Then hit `http://localhost:8888` from any browser, and you will be prompted to provide the credentials of the Druid database. By following the steps discussed below, you can get the credentials generated by the KubeDB operator for your Druid database.
+
+**Connection information:**
+
+- Username:
+
+  ```bash
+  $ kubectl get secret -n demo restored-druid-admin-cred -o jsonpath='{.data.username}' | base64 -d
+  admin
+  ```
+
+- Password:
+
+  ```bash
+  $ kubectl get secret -n demo restored-druid-admin-cred -o jsonpath='{.data.password}' | base64 -d
+  DqG5E63NtklAkxqC
+  ```
+
+After providing the credentials correctly, you should be able to access the web console like shown below. Now if you go to the `Datasources` section, you will see that our ingested datasource `wikipedia` exists in the list.

+  [Image ("lifecycle"): Druid web console, Datasources view showing the restored `wikipedia` datasource]
+
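+
+If you prefer to verify from the terminal instead of the UI, the router also proxies Druid's SQL endpoint, so the datasource can be checked with `curl` while the `8888` port-forward above is still running. This is only a sketch; it reuses the credentials read from the secret above, and the output line is illustrative.
+
+```bash
+# Ask the router's SQL endpoint which datasources currently have segments
+$ curl -s -u admin:DqG5E63NtklAkxqC \
+    -H 'Content-Type: application/json' \
+    -X POST http://localhost:8888/druid/v2/sql \
+    -d '{"query": "SELECT datasource FROM sys.segments GROUP BY 1"}'
+[{"datasource":"wikipedia"}]
+```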

+ +So, from the above screenshot, we can see that the `wikipedia` datasource we have ingested earlier in the original database and now, it is restored successfully. + +## Cleanup + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete backupconfigurations.core.kubestash.com -n demo sample-druid-backup +kubectl delete restoresessions.core.kubestash.com -n demo restore-sample-druid +kubectl delete retentionpolicies.storage.kubestash.com -n demo demo-retention +kubectl delete backupstorage -n demo gcs-storage +kubectl delete secret -n demo gcs-secret +kubectl delete secret -n demo encrypt-secret +kubectl delete druid -n demo restored-druid +kubectl delete druid -n demo sample-druid +``` \ No newline at end of file diff --git a/docs/guides/druid/concepts/druid.md b/docs/guides/druid/concepts/druid.md index 9ededa0456..9f29c7bac6 100644 --- a/docs/guides/druid/concepts/druid.md +++ b/docs/guides/druid/concepts/druid.md @@ -45,8 +45,8 @@ spec: authSecret: kind: Secret name: druid-admin-cred - configSecret: - name: druid-custom-config + configuration: + secretName: druid-custom-config enableSSL: true healthChecker: failureThreshold: 3 @@ -228,9 +228,9 @@ type: Opaque Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator (version 0.13.0 and higher). -### spec.configSecret +### spec.configuration -`spec.configSecret` is an optional field that points to a Secret used to hold custom Druid configuration. If not set, KubeDB operator will use default configuration for Druid. +`spec.configuration` is an optional field that points to a Secret used to hold custom Druid configuration. If not set, KubeDB operator will use default configuration for Druid. ### spec.topology diff --git a/docs/guides/druid/concepts/druid.md.bak b/docs/guides/druid/concepts/druid.md.bak new file mode 100644 index 0000000000..7b6e96228c --- /dev/null +++ b/docs/guides/druid/concepts/druid.md.bak @@ -0,0 +1,535 @@ +--- +title: Druid CRD +menu: + docs_{{ .version }}: + identifier: guides-druid-concepts-druid + name: Druid + parent: guides-druid-concepts + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Druid + +## What is Druid + +`Druid` is a Kubernetes `Custom Resource Definitions` (CRD). It provides declarative configuration for [Druid](https://druid.apache.org/) in a Kubernetes native way. You only need to describe the desired database configuration in a `Druid`object, and the KubeDB operator will create Kubernetes objects in the desired state for you. + +## Druid Spec + +As with all other Kubernetes objects, a Druid needs `apiVersion`, `kind`, and `metadata` fields. It also needs a `.spec` section. Below is an example Druid object. 
+ +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid + namespace: demo +spec: + deepStorage: + type: s3 + configuration: + secretName: deep-storage-config + metadataStorage: + type: PostgreSQL + name: pg-demo + namespace: demo + externallyManaged: true + zookeeperRef: + name: zk-demo + namespace: demo + externallyManaged: true + authSecret: + kind: Secret + name: druid-admin-cred + configuration: + secretName: druid-custom-config + enableSSL: true + healthChecker: + failureThreshold: 3 + periodSeconds: 20 + timeoutSeconds: 10 + keystoreCredSecret: + kind: Secret + name: druid-keystore-cred + deletionPolicy: DoNotTerminate + tls: + certificates: + - alias: server + secretName: druid-server-cert + - alias: client + secretName: druid-client-cert + issuerRef: + apiGroup: cert-manager.io + kind: Issuer + name: druid-ca-issuer + topology: + coordinators: + podTemplate: + spec: + containers: + - name: druid + resources: + requests: + cpu: 500m + memory: 1024Mi + limits: + cpu: 700m + memory: 2Gi + overlords: + podTemplate: + spec: + containers: + - name: druid + resources: + requests: + cpu: 500m + memory: 1024Mi + limits: + cpu: 700m + memory: 2Gi + brokers: + podTemplate: + spec: + containers: + - name: druid + resources: + requests: + cpu: 500m + memory: 1024Mi + limits: + cpu: 700m + memory: 2Gi + routers: + podTemplate: + spec: + containers: + - name: druid + resources: + requests: + cpu: 500m + memory: 1024Mi + limits: + cpu: 700m + memory: 2Gi + middleManagers: + podTemplate: + spec: + containers: + - name: druid + resources: + requests: + cpu: 500m + memory: 1024Mi + limits: + cpu: 700m + memory: 2Gi + storageType: Durable + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + storageClassName: standard + historicals: + podTemplate: + spec: + containers: + - name: druid + resources: + requests: + cpu: 500m + memory: 1024Mi + limits: + cpu: 700m + memory: 2Gi + storageType: Durable + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + storageClassName: standard + monitor: + agent: prometheus.io/operator + prometheus: + exporter: + port: 56790 + serviceMonitor: + labels: + release: prometheus + interval: 10s + version: 30.0.1 +``` + +### spec.version + +`spec.version` is a required field specifying the name of the [DruidVersion](/docs/guides/druid/concepts/druidversion.md) crd where the docker images are specified. Currently, when you install KubeDB, it creates the following `Druid` resources, + +- `28.0.1` +- `30.0.1` +- `31.0.1` + +### spec.replicas + +`spec.replicas` the number of members in Druid replicaset. + +If `spec.topology` is set, then `spec.replicas` needs to be empty. Instead use `spec.topology.controller.replicas` and `spec.topology.broker.replicas`. You need to set both of them for topology clustering. + +KubeDB uses `PodDisruptionBudget` to ensure that majority of these replicas are available during [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions) so that quorum is maintained. + +### spec.authSecret + +`spec.authSecret` is an optional field that points to a Secret used to hold credentials for `druid` admin user. If not set, KubeDB operator creates a new Secret `{druid-object-name}-auth` for storing the password for `admin` user for each Druid object. + +We can use this field in 3 mode. +1. Using an external secret. 
In this case, You need to create an auth secret first with required fields, then specify the secret name when creating the Druid object using `spec.authSecret.name` & set `spec.authSecret.externallyManaged` to true. +```yaml +authSecret: + name: + externallyManaged: true +``` + +2. Specifying the secret name only. In this case, You need to specify the secret name when creating the Druid object using `spec.authSecret.name`. `externallyManaged` is by default false. +```yaml +authSecret: + name: +``` + +3. Let KubeDB do everything for you. In this case, no work for you. + +AuthSecret contains a `user` key and a `password` key which contains the `username` and `password` respectively for Druid `admin` user. + +Example: + +```bash +$ kubectl create secret generic druid-auth -n demo \ +--from-literal=username=jhon-doe \ +--from-literal=password=6q8u_2jMOW-OOZXk +secret "druid-auth" created +``` + +```yaml +apiVersion: v1 +data: + password: NnE4dV8yak1PVy1PT1pYaw== + username: amhvbi1kb2U= +kind: Secret +metadata: + name: druid-auth + namespace: demo +type: Opaque +``` + +Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator (version 0.13.0 and higher). + +### spec.configSecret + +`spec.configSecret` is an optional field that points to a Secret used to hold custom Druid configuration. If not set, KubeDB operator will use default configuration for Druid. + +### spec.topology + +`spec.topology` represents the topology configuration for Druid cluster in KRaft mode. + +When `spec.topology` is set, the following fields needs to be empty, otherwise validating webhook will throw error. + +- `spec.replicas` +- `spec.podTemplate` +- `spec.storage` + +#### spec.topology.coordinators + +`coordinators` represents configuration for coordinators node of Druid. It is a mandatory node. So, if not mentioned in the `YAML`, this node will be initialized by `KubeDB` operator. + +Available configurable fields: + +- `topology.coordinators`: + - `replicas` (`: "1"`) - is an `optional` field to specify the number of nodes (ie. pods ) that act as the dedicated Druid `coordinators` pods. Defaults to `1`. + - `suffix` (`: "coordinators"`) - is an `optional` field that is added as the suffix of the coordinators PetSet name. Defaults to `coordinators`. + - `resources` (`: "cpu: 500m, memory: 1Gi" `) - is an `optional` field that specifies how much computational resources to request or to limit for each of the `coordinators` pods. + +#### spec.topology.overlords + +`overlords` represents configuration for overlords node of Druid. It is an optional node. So, it is only going to be deployed by the `KubeDB` operator if explicitly mentioned in the `YAML`. Otherwise, `coordinators` node will act as `overlords`. + +Available configurable fields: + +- `topology.overlords`: + - `replicas` (`: "1"`) - is an `optional` field to specify the number of nodes (ie. pods ) that act as the dedicated Druid `overlords` pods. Defaults to `1`. + - `suffix` (`: "overlords"`) - is an `optional` field that is added as the suffix of the overlords PetSet name. Defaults to `overlords`. + - `resources` (`: "cpu: 500m, memory: 1Gi" `) - is an `optional` field that specifies how much computational resources to request or to limit for each of the `overlords` pods. + +#### spec.topology.brokers + +`brokers` represents configuration for brokers node of Druid. It is a mandatory node. So, if not mentioned in the `YAML`, this node will be initialized by `KubeDB` operator. 
+ +Available configurable fields: + +- `topology.brokers`: + - `replicas` (`: "1"`) - is an `optional` field to specify the number of nodes (ie. pods ) that act as the dedicated Druid `brokers` pods. Defaults to `1`. + - `suffix` (`: "brokers"`) - is an `optional` field that is added as the suffix of the brokers PetSet name. Defaults to `brokers`. + - `resources` (`: "cpu: 500m, memory: 1Gi" `) - is an `optional` field that specifies how much computational resources to request or to limit for each of the `brokers` pods. + +#### spec.topology.routers + +`routers` represents configuration for routers node of Druid. It is an optional node. So, it is only going to be deployed by the `KubeDB` operator if explicitly mentioned in the `YAML`. Otherwise, `coordinators` node will act as `routers`. + +Available configurable fields: + +- `topology.routers`: + - `replicas` (`: "1"`) - is an `optional` field to specify the number of nodes (ie. pods ) that act as the dedicated Druid `routers` pods. Defaults to `1`. + - `suffix` (`: "routers"`) - is an `optional` field that is added as the suffix of the routers PetSet name. Defaults to `routers`. + - `resources` (`: "cpu: 500m, memory: 1Gi" `) - is an `optional` field that specifies how much computational resources to request or to limit for each of the `routers` pods. + +#### spec.topology.historicals + +`historicals` represents configuration for historicals node of Druid. It is a mandatory node. So, if not mentioned in the `YAML`, this node will be initialized by `KubeDB` operator. + +Available configurable fields: + +- `topology.historicals`: + - `replicas` (`: "1"`) - is an `optional` field to specify the number of nodes (ie. pods ) that act as the dedicated Druid `historicals` pods. Defaults to `1`. + - `suffix` (`: "historicals"`) - is an `optional` field that is added as the suffix of the controller PetSet name. Defaults to `historicals`. + - `storage` is a `required` field that specifies how much storage to claim for each of the `historicals` pods. + - `resources` (`: "cpu: 500m, memory: 1Gi" `) - is an `optional` field that specifies how much computational resources to request or to limit for each of the `historicals` pods. + +#### spec.topology.middleManagers + +`middleManagers` represents configuration for middleManagers node of Druid. It is a mandatory node. So, if not mentioned in the `YAML`, this node will be initialized by `KubeDB` operator. + +Available configurable fields: + +- `topology.middleManagers`: + - `replicas` (`: "1"`) - is an `optional` field to specify the number of nodes (ie. pods ) that act as the dedicated Druid `middleManagers` pods. Defaults to `1`. + - `suffix` (`: "middleManagers"`) - is an `optional` field that is added as the suffix of the controller PetSet name. Defaults to `middleManagers`. + - `storage` is a `required` field that specifies how much storage to claim for each of the `middleManagers` pods. + - `resources` (`: "cpu: 500m, memory: 1Gi" `) - is an `optional` field that specifies how much computational resources to request or to limit for each of the `middleManagers` pods. + + +### spec.enableSSL + +`spec.enableSSL` is an `optional` field that specifies whether to enable TLS to HTTP layer. The default value of this field is `false`. + +```yaml +spec: + enableSSL: true +``` + +### spec.tls + +`spec.tls` specifies the TLS/SSL configurations. The KubeDB operator supports TLS management by using the [cert-manager](https://cert-manager.io/). 
Currently, the operator only supports the `PKCS#8` encoded certificates. + +```yaml +spec: + tls: + issuerRef: + apiGroup: "cert-manager.io" + kind: Issuer + name: druid-issuer + certificates: + - alias: server + privateKey: + encoding: PKCS8 + secretName: druid-client-cert + subject: + organizations: + - kubedb + - alias: http + privateKey: + encoding: PKCS8 + secretName: druid-server-cert + subject: + organizations: + - kubedb +``` + +The `spec.tls` contains the following fields: + +- `tls.issuerRef` - is an `optional` field that references to the `Issuer` or `ClusterIssuer` custom resource object of [cert-manager](https://cert-manager.io/docs/concepts/issuer/). It is used to generate the necessary certificate secrets for Druid. If the `issuerRef` is not specified, the operator creates a self-signed CA and also creates necessary certificate (valid: 365 days) secrets using that CA. + - `apiGroup` - is the group name of the resource that is being referenced. Currently, the only supported value is `cert-manager.io`. + - `kind` - is the type of resource that is being referenced. The supported values are `Issuer` and `ClusterIssuer`. + - `name` - is the name of the resource ( `Issuer` or `ClusterIssuer` ) that is being referenced. + +- `tls.certificates` - is an `optional` field that specifies a list of certificate configurations used to configure the certificates. It has the following fields: + - `alias` - represents the identifier of the certificate. It has the following possible value: + - `server` - is used for the server certificate configuration. + - `client` - is used for the client certificate configuration. + + - `secretName` - ( `string` | `"-alias-cert"` ) - specifies the k8s secret name that holds the certificates. + + - `subject` - specifies an `X.509` distinguished name (DN). It has the following configurable fields: + - `organizations` ( `[]string` | `nil` ) - is a list of organization names. + - `organizationalUnits` ( `[]string` | `nil` ) - is a list of organization unit names. + - `countries` ( `[]string` | `nil` ) - is a list of country names (ie. Country Codes). + - `localities` ( `[]string` | `nil` ) - is a list of locality names. + - `provinces` ( `[]string` | `nil` ) - is a list of province names. + - `streetAddresses` ( `[]string` | `nil` ) - is a list of street addresses. + - `postalCodes` ( `[]string` | `nil` ) - is a list of postal codes. + - `serialNumber` ( `string` | `""` ) is a serial number. + + For more details, visit [here](https://golang.org/pkg/crypto/x509/pkix/#Name). + + - `duration` ( `string` | `""` ) - is the period during which the certificate is valid. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300m"`, `"1.5h"` or `"20h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + - `renewBefore` ( `string` | `""` ) - is a specifiable time before expiration duration. + - `dnsNames` ( `[]string` | `nil` ) - is a list of subject alt names. + - `ipAddresses` ( `[]string` | `nil` ) - is a list of IP addresses. + - `uris` ( `[]string` | `nil` ) - is a list of URI Subject Alternative Names. + - `emailAddresses` ( `[]string` | `nil` ) - is a list of email Subject Alternative Names. + + +### spec..storageType + +`spec.storageType` is an optional field that specifies the type of storage to use for database. It can be either `Durable` or `Ephemeral`. The default value of this field is `Durable`. 
If `Ephemeral` is used then KubeDB will create Druid cluster using [emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) volume. + +### spec..storage + +If you set `spec..storageType:` to `Durable`, then `spec..storage` is a required field that specifies the StorageClass of PVCs dynamically allocated to store data for the database. This storage spec will be passed to the PetSet created by KubeDB operator to run database pods. You can specify any StorageClass available in your cluster with appropriate resource requests. + +- `spec..storage.storageClassName` is the name of the StorageClass used to provision PVCs. PVCs don’t necessarily have to request a class. A PVC with its storageClassName set equal to "" is always interpreted to be requesting a PV with no class, so it can only be bound to PVs with no class (no annotation or one set equal to ""). A PVC with no storageClassName is not quite the same and is treated differently by the cluster depending on whether the DefaultStorageClass admission plugin is turned on. +- `spec..storage.accessModes` uses the same conventions as Kubernetes PVCs when requesting storage with specific access modes. +- `spec..storage.resources` can be used to request specific quantities of storage. This follows the same resource model used by PVCs. + +To learn how to configure `spec..storage`, please visit the links below: + +- https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims + +### spec.monitor + +Druid managed by KubeDB can be monitored with Prometheus operator out-of-the-box. To learn more, +- [Monitor Apache Druid with Prometheus operator](/docs/guides/druid/monitoring/using-prometheus-operator.md) +- [Monitor Apache Druid with Built-in Prometheus](/docs/guides/druid/monitoring/using-builtin-prometheus.md) + +### spec..podTemplate + +KubeDB allows providing a template for database pod through `spec..podTemplate`. KubeDB operator will pass the information provided in `spec..podTemplate` to the PetSet created for Druid cluster. + +KubeDB accept following fields to set in `spec..podTemplate:` + +- metadata: + - annotations (pod's annotation) + - labels (pod's labels) +- controller: + - annotations (petset's annotation) + - labels (petset's labels) +- spec: + - containers + - volumes + - podPlacementPolicy + - initContainers + - containers + - imagePullSecrets + - nodeSelector + - serviceAccountName + - schedulerName + - tolerations + - priorityClassName + - priority + - securityContext + +You can check out the full list [here](https://github.com/kmodules/offshoot-api/blob/master/api/v2/types.go#L26C1-L279C1). +Uses of some field of `spec..podTemplate` is described below, + +#### spec..podTemplate.spec.tolerations + +The `spec.podTemplate.spec.tolerations` is an optional field. This can be used to specify the pod's tolerations. + +#### spec..podTemplate.spec.volumes + +The `spec..podTemplate..volumes` is an optional field. This can be used to provide the list of volumes that can be mounted by containers belonging to the pod. + +#### spec..podTemplate.spec.podPlacementPolicy + +`spec.podTemplate.spec.podPlacementPolicy` is an optional field. This can be used to provide the reference of the `podPlacementPolicy`. `name` of the podPlacementPolicy is referred under this attribute. This will be used by our Petset controller to place the db pods throughout the region, zone & nodes according to the policy. It utilizes kubernetes affinity & podTopologySpreadContraints feature to do so. 
+```yaml +spec: + podPlacementPolicy: + name: default +``` + +#### spec..podTemplate.spec.nodeSelector + +`spec..podTemplate.spec.nodeSelector` is an optional field that specifies a map of key-value pairs. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). To learn more, see [here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) . + +### spec.serviceTemplates + +You can also provide template for the services created by KubeDB operator for Druid cluster through `spec.serviceTemplates`. This will allow you to set the type and other properties of the services. + +KubeDB allows following fields to set in `spec.serviceTemplates`: +- `alias` represents the identifier of the service. It has the following possible value: + - `stats` for is used for the `exporter` service identification. + +Druid comes with four services for `coordinators`, `overlords`, `routers` and `brokers`. There are two options for providing serviceTemplates: + - To provide `serviceTemplates` for a specific service, the `serviceTemplates.ports.port` should be equal to the port of that service and `serviceTemplate` will be used for that particular service only. + - However, to provide a common `serviceTemplates`, `serviceTemplates.ports.port` should be empty. + +- metadata: + - labels + - annotations +- spec: + - type + - ports + - clusterIP + - externalIPs + - loadBalancerIP + - loadBalancerSourceRanges + - externalTrafficPolicy + - healthCheckNodePort + - sessionAffinityConfig + +See [here](https://github.com/kmodules/offshoot-api/blob/kubernetes-1.21.1/api/v1/types.go#L237) to understand these fields in detail. + + +#### spec..podTemplate.spec.containers + +The `spec..podTemplate.spec.containers` can be used to provide the list containers and their configurations for to the database pod. some of the fields are described below, + +##### spec..podTemplate.spec.containers[].name +The `spec..podTemplate.spec.containers[].name` field used to specify the name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + +##### spec..podTemplate.spec.containers[].args +`spec..podTemplate.spec.containers[].args` is an optional field. This can be used to provide additional arguments to database installation. + +##### spec..podTemplate.spec.containers[].env + +`spec..podTemplate.spec.containers[].env` is an optional field that specifies the environment variables to pass to the Redis containers. + +##### spec..podTemplate.spec.containers[].resources + +`spec..podTemplate.spec.containers[].resources` is an optional field. This can be used to request compute resources required by containers of the database pods. To learn more, visit [here](http://kubernetes.io/docs/user-guide/compute-resources/). + +### spec.deletionPolicy + +`deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `Druid` crd or which resources KubeDB should keep or delete when you delete `Druid` crd. KubeDB provides following four deletion policies: + +- DoNotTerminate +- WipeOut +- Halt +- Delete + +When `deletionPolicy` is `DoNotTerminate`, KubeDB takes advantage of `ValidationWebhook` feature in Kubernetes 1.9.0 or later clusters to implement `DoNotTerminate` feature. If admission webhook is enabled, `DoNotTerminate` prevents users from deleting the database as long as the `spec.deletionPolicy` is set to `DoNotTerminate`. 
+ +> For more details you can visit [here](https://appscode.com/blog/post/deletion-policy/) + +## spec.healthChecker +It defines the attributes for the health checker. +- `spec.healthChecker.periodSeconds` specifies how often to perform the health check. +- `spec.healthChecker.timeoutSeconds` specifies the number of seconds after which the probe times out. +- `spec.healthChecker.failureThreshold` specifies minimum consecutive failures for the healthChecker to be considered failed. +- `spec.healthChecker.disableWriteCheck` specifies whether to disable the writeCheck or not. + +Know details about KubeDB Health checking from this [blog post](https://appscode.com/blog/post/kubedb-health-checker/). + +## Next Steps + +- Learn how to use KubeDB to run Apache Druid cluster [here](/docs/guides/druid/README.md). +- Deploy [dedicated topology cluster](/docs/guides/druid/clustering/guide/index.md) for Apache Druid +- Monitor your Druid cluster with KubeDB using [`out-of-the-box` Prometheus operator](/docs/guides/druid/monitoring/using-prometheus-operator.md). +- Detail concepts of [DruidVersion object](/docs/guides/druid/concepts/druidversion.md). + +[//]: # (- Learn to use KubeDB managed Druid objects using [CLIs](/docs/guides/druid/cli/cli.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/druid/configuration/config-file/yamls/druid-with-config.yaml b/docs/guides/druid/configuration/config-file/yamls/druid-with-config.yaml index b2225f22b1..4d6dc601ec 100644 --- a/docs/guides/druid/configuration/config-file/yamls/druid-with-config.yaml +++ b/docs/guides/druid/configuration/config-file/yamls/druid-with-config.yaml @@ -5,12 +5,12 @@ metadata: namespace: demo spec: version: 28.0.1 - configSecret: - name: config-secret + configuration: + secretName: config-secret deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/configuration/config-file/yamls/druid-with-config.yaml.bak b/docs/guides/druid/configuration/config-file/yamls/druid-with-config.yaml.bak new file mode 100644 index 0000000000..b2225f22b1 --- /dev/null +++ b/docs/guides/druid/configuration/config-file/yamls/druid-with-config.yaml.bak @@ -0,0 +1,17 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-with-config + namespace: demo +spec: + version: 28.0.1 + configSecret: + name: config-secret + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: WipeOut diff --git a/docs/guides/druid/configuration/podtemplating/yamls/druid-cluster.yaml b/docs/guides/druid/configuration/podtemplating/yamls/druid-cluster.yaml index 2004002096..08e703c19f 100644 --- a/docs/guides/druid/configuration/podtemplating/yamls/druid-cluster.yaml +++ b/docs/guides/druid/configuration/podtemplating/yamls/druid-cluster.yaml @@ -5,12 +5,12 @@ metadata: namespace: demo spec: version: 28.0.1 - configSecret: - name: config-secret + configuration: + secretName: config-secret deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: coordinators: replicas: 1 diff --git a/docs/guides/druid/configuration/podtemplating/yamls/druid-cluster.yaml.bak b/docs/guides/druid/configuration/podtemplating/yamls/druid-cluster.yaml.bak new file mode 100644 index 0000000000..2004002096 --- /dev/null +++ 
b/docs/guides/druid/configuration/podtemplating/yamls/druid-cluster.yaml.bak @@ -0,0 +1,43 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-cluster + namespace: demo +spec: + version: 28.0.1 + configSecret: + name: config-secret + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + coordinators: + replicas: 1 + podTemplate: + spec: + containers: + - name: druid + resources: + limits: + memory: "2Gi" + cpu: "600m" + requests: + memory: "2Gi" + cpu: "600m" + brokers: + replicas: 1 + podTemplate: + spec: + containers: + - name: druid + resources: + limits: + memory: "2Gi" + cpu: "600m" + requests: + memory: "2Gi" + cpu: "600m" + routers: + replicas: 1 + deletionPolicy: WipeOut diff --git a/docs/guides/druid/configuration/podtemplating/yamls/druid-node-selector.yaml b/docs/guides/druid/configuration/podtemplating/yamls/druid-node-selector.yaml index 7ad2eae717..34a22d6fb7 100644 --- a/docs/guides/druid/configuration/podtemplating/yamls/druid-node-selector.yaml +++ b/docs/guides/druid/configuration/podtemplating/yamls/druid-node-selector.yaml @@ -7,8 +7,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/configuration/podtemplating/yamls/druid-node-selector.yaml.bak b/docs/guides/druid/configuration/podtemplating/yamls/druid-node-selector.yaml.bak new file mode 100644 index 0000000000..7ad2eae717 --- /dev/null +++ b/docs/guides/druid/configuration/podtemplating/yamls/druid-node-selector.yaml.bak @@ -0,0 +1,20 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-node-selector + namespace: demo +spec: + version: 28.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + coordinators: + podTemplate: + spec: + nodeSelector: + disktype: ssd + deletionPolicy: Delete \ No newline at end of file diff --git a/docs/guides/druid/configuration/podtemplating/yamls/druid-with-tolerations.yaml b/docs/guides/druid/configuration/podtemplating/yamls/druid-with-tolerations.yaml index 4ef158f85a..e4c085d07a 100644 --- a/docs/guides/druid/configuration/podtemplating/yamls/druid-with-tolerations.yaml +++ b/docs/guides/druid/configuration/podtemplating/yamls/druid-with-tolerations.yaml @@ -7,8 +7,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: podTemplate: diff --git a/docs/guides/druid/configuration/podtemplating/yamls/druid-with-tolerations.yaml.bak b/docs/guides/druid/configuration/podtemplating/yamls/druid-with-tolerations.yaml.bak new file mode 100644 index 0000000000..4ef158f85a --- /dev/null +++ b/docs/guides/druid/configuration/podtemplating/yamls/druid-with-tolerations.yaml.bak @@ -0,0 +1,58 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-cluster + namespace: demo +spec: + version: 28.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + podTemplate: + spec: + tolerations: + - key: "key1" + operator: "Equal" + value: "node1" + effect: "NoSchedule" + replicas: 1 + coordinators: + podTemplate: + spec: + tolerations: + - key: "key1" + operator: "Equal" + value: "node1" + effect: "NoSchedule" + replicas: 1 + brokers: + podTemplate: + spec: + tolerations: + - key: "key1" + operator: "Equal" + value: "node1" + effect: "NoSchedule" + replicas: 1 + 
historicals: + podTemplate: + spec: + tolerations: + - key: "key1" + operator: "Equal" + value: "node1" + effect: "NoSchedule" + replicas: 1 + middleManagers: + podTemplate: + spec: + tolerations: + - key: "key1" + operator: "Equal" + value: "node1" + effect: "NoSchedule" + replicas: 1 + deletionPolicy: Delete diff --git a/docs/guides/druid/configuration/podtemplating/yamls/druid-without-tolerations.yaml b/docs/guides/druid/configuration/podtemplating/yamls/druid-without-tolerations.yaml index 1098f3d70d..31e2345e72 100644 --- a/docs/guides/druid/configuration/podtemplating/yamls/druid-without-tolerations.yaml +++ b/docs/guides/druid/configuration/podtemplating/yamls/druid-without-tolerations.yaml @@ -7,8 +7,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/configuration/podtemplating/yamls/druid-without-tolerations.yaml.bak b/docs/guides/druid/configuration/podtemplating/yamls/druid-without-tolerations.yaml.bak new file mode 100644 index 0000000000..1098f3d70d --- /dev/null +++ b/docs/guides/druid/configuration/podtemplating/yamls/druid-without-tolerations.yaml.bak @@ -0,0 +1,15 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-without-tolerations + namespace: demo +spec: + version: 28.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: Delete diff --git a/docs/guides/druid/monitoring/using-builtin-prometheus.md b/docs/guides/druid/monitoring/using-builtin-prometheus.md index 69ec2aa62e..027748ad76 100644 --- a/docs/guides/druid/monitoring/using-builtin-prometheus.md +++ b/docs/guides/druid/monitoring/using-builtin-prometheus.md @@ -52,8 +52,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/monitoring/using-builtin-prometheus.md.bak b/docs/guides/druid/monitoring/using-builtin-prometheus.md.bak new file mode 100644 index 0000000000..69ec2aa62e --- /dev/null +++ b/docs/guides/druid/monitoring/using-builtin-prometheus.md.bak @@ -0,0 +1,372 @@ +--- +title: Monitor Druid using Builtin Prometheus Discovery +menu: + docs_{{ .version }}: + identifier: guides-druid-monitoring-builtin-monitoring + name: Builtin Prometheus + parent: guides-druid-monitoring + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Monitoring Druid with builtin Prometheus + +This tutorial will show you how to monitor Druid cluster using builtin [Prometheus](https://github.com/prometheus/prometheus) scraper. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- If you are not familiar with how to configure Prometheus to scrape metrics from various Kubernetes resources, please read the tutorial from [here](https://github.com/appscode/third-party-tools/tree/master/monitoring/prometheus/builtin). 
+ +- To learn how Prometheus monitoring works with KubeDB in general, please visit [here](/docs/guides/druid/monitoring/overview.md). + +- To keep Prometheus resources isolated, we are going to use a separate namespace called `monitoring` to deploy respective monitoring resources. We are going to deploy database in `demo` namespace. + + ```bash + $ kubectl create ns monitoring + namespace/monitoring created + + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/druid](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/druid) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Deploy Druid with Monitoring Enabled + +At first, let's deploy a Druid cluster with monitoring enabled. Below is the Druid object that we are going to create. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-with-monitoring + namespace: demo +spec: + version: 28.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + monitor: + agent: prometheus.io/builtin + prometheus: + exporter: + port: 56790 + serviceMonitor: + labels: + release: prometheus + interval: 10s + deletionPolicy: WipeOut +``` + +Here, + +- `spec.monitor.agent: prometheus.io/builtin` specifies that we are going to monitor this server using builtin Prometheus scraper. +- `spec.monitor.prometheus.exporter.port: 56790` specifies the port where the exporter is running. + +Let's create the Druid crd we have shown above. + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/monitoring/yamls/druid-builtin-monitoring.yaml +druid.kubedb.com/druid-with-monitoring created +``` + +Now, wait for the cluster to go into `Ready` state. + +```bash +NAME TYPE VERSION STATUS AGE +druid-with-monitoring kubedb.com/v1alpha2 28.0.1 Ready 31s +``` + +KubeDB will create a separate stats service with name `{Druid crd name}-stats` for monitoring purpose. + +```bash +$ kubectl get svc -n demo --selector="app.kubernetes.io/instance=druid-with-monitoring" +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +druid-with-monitoring-brokers ClusterIP 10.96.28.252 8082/TCP 2m13s +druid-with-monitoring-coordinators ClusterIP 10.96.52.186 8081/TCP 2m13s +druid-with-monitoring-pods ClusterIP None 8081/TCP,8090/TCP,8083/TCP,8091/TCP,8082/TCP,8888/TCP 2m13s +druid-with-monitoring-routers ClusterIP 10.96.134.202 8888/TCP 2m13s +druid-with-monitoring-stats ClusterIP 10.96.222.96 56790/TCP 2m13s +``` + +Here, `druid-with-monitoring-stats` service has been created for monitoring purpose. Let's describe the service. 
+ +```bash +$ kubectl describe svc -n druid-demo builtin-prom-stats +Name: druid-with-monitoring-stats +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=druid-with-monitoring + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=druids.kubedb.com + kubedb.com/role=stats +Annotations: monitoring.appscode.com/agent: prometheus.io/builtin + prometheus.io/path: /metrics + prometheus.io/port: 56790 + prometheus.io/scrape: true +Selector: app.kubernetes.io/instance=druid-with-monitoring,app.kubernetes.io/managed-by=kubedb.com,app.kubernetes.io/name=druids.kubedb.com +Type: ClusterIP +IP Family Policy: SingleStack +IP Families: IPv4 +IP: 10.96.222.96 +IPs: 10.96.222.96 +Port: metrics 56790/TCP +TargetPort: metrics/TCP +Endpoints: 10.244.0.31:56790,10.244.0.33:56790 +Session Affinity: None +Events: +``` + +You can see that the service contains following annotations. + +```bash +prometheus.io/path: /metrics +prometheus.io/port: 56790 +prometheus.io/scrape: true +``` + +The Prometheus server will discover the service endpoint using these specifications and will scrape metrics from the exporter. + +## Configure Prometheus Server + +Now, we have to configure a Prometheus scraping job to scrape the metrics using this service. We are going to configure scraping job similar to this [kubernetes-service-endpoints](https://github.com/appscode/third-party-tools/tree/master/monitoring/prometheus/builtin#kubernetes-service-endpoints) job that scrapes metrics from endpoints of a service. + +Let's configure a Prometheus scraping job to collect metrics from this service. + +```yaml +- job_name: 'kubedb-databases' + honor_labels: true + scheme: http + kubernetes_sd_configs: + - role: endpoints + # by default Prometheus server select all Kubernetes services as possible target. + # relabel_config is used to filter only desired endpoints + relabel_configs: + # keep only those services that has "prometheus.io/scrape","prometheus.io/path" and "prometheus.io/port" anootations + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape, __meta_kubernetes_service_annotation_prometheus_io_port] + separator: ; + regex: true;(.*) + action: keep + # currently KubeDB supported databases uses only "http" scheme to export metrics. so, drop any service that uses "https" scheme. + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: drop + regex: https + # only keep the stats services created by KubeDB for monitoring purpose which has "-stats" suffix + - source_labels: [__meta_kubernetes_service_name] + separator: ; + regex: (.*-stats) + action: keep + # service created by KubeDB will have "app.kubernetes.io/name" and "app.kubernetes.io/instance" annotations. keep only those services that have these annotations. 
+ - source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_name] + separator: ; + regex: (.*) + action: keep + # read the metric path from "prometheus.io/path: " annotation + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + # read the port from "prometheus.io/port: " annotation and update scraping address accordingly + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + # add service namespace as label to the scraped metrics + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: namespace + replacement: $1 + action: replace + # add service name as a label to the scraped metrics + - source_labels: [__meta_kubernetes_service_name] + separator: ; + regex: (.*) + target_label: service + replacement: $1 + action: replace + # add stats service's labels to the scraped metrics + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) +``` + +### Configure Existing Prometheus Server + +If you already have a Prometheus server running, you have to add above scraping job in the `ConfigMap` used to configure the Prometheus server. Then, you have to restart it for the updated configuration to take effect. + +>If you don't use a persistent volume for Prometheus storage, you will lose your previously scraped data on restart. + +### Deploy New Prometheus Server + +If you don't have any existing Prometheus server running, you have to deploy one. In this section, we are going to deploy a Prometheus server in `monitoring` namespace to collect metrics using this stats service. + +**Create ConfigMap:** + +At first, create a ConfigMap with the scraping configuration. Bellow, the YAML of ConfigMap that we are going to create in this tutorial. + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: prometheus-config + labels: + app: prometheus-demo + namespace: monitoring +data: + prometheus.yml: |- + global: + scrape_interval: 5s + evaluation_interval: 5s + scrape_configs: + - job_name: 'kubedb-databases' + honor_labels: true + scheme: http + kubernetes_sd_configs: + - role: endpoints + # by default Prometheus server select all Kubernetes services as possible target. + # relabel_config is used to filter only desired endpoints + relabel_configs: + # keep only those services that has "prometheus.io/scrape","prometheus.io/path" and "prometheus.io/port" anootations + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape, __meta_kubernetes_service_annotation_prometheus_io_port] + separator: ; + regex: true;(.*) + action: keep + # currently KubeDB supported databases uses only "http" scheme to export metrics. so, drop any service that uses "https" scheme. + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: drop + regex: https + # only keep the stats services created by KubeDB for monitoring purpose which has "-stats" suffix + - source_labels: [__meta_kubernetes_service_name] + separator: ; + regex: (.*-stats) + action: keep + # service created by KubeDB will have "app.kubernetes.io/name" and "app.kubernetes.io/instance" annotations. keep only those services that have these annotations. 
+ - source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_name] + separator: ; + regex: (.*) + action: keep + # read the metric path from "prometheus.io/path: " annotation + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + # read the port from "prometheus.io/port: " annotation and update scraping address accordingly + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + # add service namespace as label to the scraped metrics + - source_labels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + target_label: namespace + replacement: $1 + action: replace + # add service name as a label to the scraped metrics + - source_labels: [__meta_kubernetes_service_name] + separator: ; + regex: (.*) + target_label: service + replacement: $1 + action: replace + # add stats service's labels to the scraped metrics + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) +``` + +Let's create above `ConfigMap`, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/monitoring/builtin-prometheus/prom-config.yaml +configmap/prometheus-config created +``` + +**Create RBAC:** + +If you are using an RBAC enabled cluster, you have to give necessary RBAC permissions for Prometheus. Let's create necessary RBAC stuffs for Prometheus, + +```bash +$ kubectl apply -f https://github.com/appscode/third-party-tools/raw/master/monitoring/prometheus/builtin/artifacts/rbac.yaml +clusterrole.rbac.authorization.k8s.io/prometheus created +serviceaccount/prometheus created +clusterrolebinding.rbac.authorization.k8s.io/prometheus created +``` + +>YAML for the RBAC resources created above can be found [here](https://github.com/appscode/third-party-tools/blob/master/monitoring/prometheus/builtin/artifacts/rbac.yaml). + +**Deploy Prometheus:** + +Now, we are ready to deploy Prometheus server. We are going to use following [deployment](https://github.com/appscode/third-party-tools/blob/master/monitoring/prometheus/builtin/artifacts/deployment.yaml) to deploy Prometheus server. + +Let's deploy the Prometheus server. + +```bash +$ kubectl apply -f https://github.com/appscode/third-party-tools/raw/master/monitoring/prometheus/builtin/artifacts/deployment.yaml +deployment.apps/prometheus created +``` + +### Verify Monitoring Metrics + +Prometheus server is listening to port `9090`. We are going to use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access Prometheus dashboard. + +At first, let's check if the Prometheus pod is in `Running` state. + +```bash +$ kubectl get pod -n monitoring -l=app=prometheus +NAME READY STATUS RESTARTS AGE +prometheus-7bd56c6865-8dlpv 1/1 Running 0 28s +``` + +Now, run following command on a separate terminal to forward 9090 port of `prometheus-7bd56c6865-8dlpv` pod, + +```bash +$ kubectl port-forward -n monitoring prometheus-7bd56c6865-8dlpv 9090 +Forwarding from 127.0.0.1:9090 -> 9090 +Forwarding from [::1]:9090 -> 9090 +``` + +Now, we can access the dashboard at `localhost:9090`. Open [http://localhost:9090](http://localhost:9090) in your browser. You should see the endpoint of `druid-with-monitoring-stats` service as one of the targets. + +

+  [Image ("Prometheus Target"): Prometheus targets page listing the endpoints of the druid-with-monitoring-stats service]
+
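+
+You can also confirm the scrape target without the UI by querying the Prometheus HTTP API through the same port-forward. This is only a sketch; `jq` is used purely for readability and is not required.
+
+```bash
+# List active targets and filter for the Druid stats service (assumes the 9090 port-forward is still running)
+$ curl -s http://localhost:9090/api/v1/targets | jq -r '.data.activeTargets[].labels.service' | grep druid-with-monitoring-stats
+druid-with-monitoring-stats
+```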

+ +Check the labels marked with red rectangle. These labels confirm that the metrics are coming from `Druid` cluster `druid-with-monitoring` through stats service `druid-with-monitoring-stats`. + +Now, you can view the collected metrics and create a graph from homepage of this Prometheus dashboard. You can also use this Prometheus server as data source for [Grafana](https://grafana.com/) and create beautiful dashboard with collected metrics. + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run following commands + +```bash +kubectl delete -n demo druid/druid-with-monitoring + +kubectl delete -n monitoring deployment.apps/prometheus + +kubectl delete -n monitoring clusterrole.rbac.authorization.k8s.io/prometheus +kubectl delete -n monitoring serviceaccount/prometheus +kubectl delete -n monitoring clusterrolebinding.rbac.authorization.k8s.io/prometheus + +kubectl delete ns demo +kubectl delete ns monitoring +``` + +## Next Steps + +- Learn how to configure [Druid Topology](/docs/guides/druid/clustering/overview/index.md). +- Monitor your Druid database with KubeDB using [`out-of-the-box` Prometheus operator](/docs/guides/druid/monitoring/using-prometheus-operator.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/druid/reconfigure/guide.md b/docs/guides/druid/reconfigure/guide.md index 88f5847a76..69b939c8c4 100644 --- a/docs/guides/druid/reconfigure/guide.md +++ b/docs/guides/druid/reconfigure/guide.md @@ -248,15 +248,15 @@ spec: databaseRef: name: druid-cluster configuration: - configSecret: - name: new-config + configuration: + secretName: new-config ``` Here, - `spec.databaseRef.name` specifies that we are reconfiguring `druid-prod` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configSecret.name` specifies the name of the new secret. +- `spec.configuration.secretName` specifies the name of the new secret. Let's create the `DruidOpsRequest` CR we have shown above, diff --git a/docs/guides/druid/reconfigure/guide.md.bak b/docs/guides/druid/reconfigure/guide.md.bak new file mode 100644 index 0000000000..cc7074c222 --- /dev/null +++ b/docs/guides/druid/reconfigure/guide.md.bak @@ -0,0 +1,704 @@ +--- +title: Reconfigure Druid Topology +menu: + docs_{{ .version }}: + identifier: guides-druid-reconfigure-guide + name: Reconfigure Druid + parent: guides-druid-reconfigure + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure Druid Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a Druid Topology cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Druid](/docs/guides/druid/concepts/druid.md) + - [Topology](/docs/guides/druid/clustering/overview/index.md) + - [DruidOpsRequest](/docs/guides/druid/concepts/druidopsrequest.md) + - [Reconfigure Overview](/docs/guides/druid/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. 
+ +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [/docs/guides/druid/reconfigure/yamls](/docs/guides/druid/reconfigure/yamls) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `Druid` cluster using a supported version by `KubeDB` operator. Then we are going to apply `DruidOpsRequest` to reconfigure its configuration. + +### Prepare Druid Cluster + +Now, we are going to deploy a `Druid` topology cluster with version `28.0.1`. + +#### Create External Dependency (Deep Storage) + +Before proceeding further, we need to prepare deep storage, which is one of the external dependency of Druid and used for storing the segments. It is a storage mechanism that Apache Druid does not provide. **Amazon S3**, **Google Cloud Storage**, or **Azure Blob Storage**, **S3-compatible storage** (like **Minio**), or **HDFS** are generally convenient options for deep storage. + +In this tutorial, we will run a `minio-server` as deep storage in our local `kind` cluster using `minio-operator` and create a bucket named `druid` in it, which the deployed druid database will use. + +```bash + +$ helm repo add minio https://operator.min.io/ +$ helm repo update minio +$ helm upgrade --install --namespace "minio-operator" --create-namespace "minio-operator" minio/operator --set operator.replicaCount=1 + +$ helm upgrade --install --namespace "demo" --create-namespace druid-minio minio/tenant \ +--set tenant.pools[0].servers=1 \ +--set tenant.pools[0].volumesPerServer=1 \ +--set tenant.pools[0].size=1Gi \ +--set tenant.certificate.requestAutoCert=false \ +--set tenant.buckets[0].name="druid" \ +--set tenant.pools[0].name="default" + +``` + +Now we need to create a `Secret` named `deep-storage-config`. It contains the necessary connection information using which the druid database will connect to the deep storage. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: deep-storage-config + namespace: demo +stringData: + druid.storage.type: "s3" + druid.storage.bucket: "druid" + druid.storage.baseKey: "druid/segments" + druid.s3.accessKey: "minio" + druid.s3.secretKey: "minio123" + druid.s3.protocol: "http" + druid.s3.enablePathStyleAccess: "true" + druid.s3.endpoint.signingRegion: "us-east-1" + druid.s3.endpoint.url: "http://myminio-hl.demo.svc.cluster.local:9000/" +``` + +Let’s create the `deep-storage-config` Secret shown above: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/restart/yamls/deep-storage-config.yaml +secret/deep-storage-config created +``` + +Now, lets go ahead and create a druid database. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-cluster + namespace: demo +spec: + version: 28.0.1 + deepStorage: + type: s3 + configuration: + secretName: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: Delete +``` + +Let's create the `Druid` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/update-version/yamls/druid-cluster.yaml +druid.kubedb.com/druid-cluster created +``` + +### Reconfigure using config secret + +Say we want to change the default maximum number of tasks the MiddleManager can accept. Let's create the `middleManagers.properties` file with our desire configurations. 
+ +**middleManagers.properties:** + +```properties +druid.worker.capacity=5 +``` + +**historicals.properties:** + +```properties +druid.processing.numThreads=3 +``` + +Then, we will create a new secret with this configuration file. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: config-secret + namespace: demo +stringData: + middleManagers.properties: |- + druid.worker.capacity=5 + historicals.properties: |- + druid.processing.numThreads=3 +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/update-version/yamls/config-secret.yaml +secret/new-config created +``` + +### Check Current Configuration + +Before creating the druidOpsRequest, first +Lets exec into one of the druid middleManagers pod that we have created and check the default configuration: + +Exec into the Druid middleManagers: + +```bash +$ kubectl exec -it -n demo druid-cluster-middleManagers-0 -- bash +Defaulted container "druid" out of: druid, init-druid (init) +bash-5.1$ +``` + +Now, execute the following commands to see the configurations: +```bash +bash-5.1$ cat conf/druid/cluster/data/middleManager/runtime.properties | grep druid.worker.capacity +druid.worker.capacity=2 +``` +Here, we can see that our given configuration is applied to the Druid cluster for all brokers. + +Now, lets exec into one of the druid historicals pod that we have created and check the configurations are applied or not: + +Exec into the Druid historicals: + +```bash +$ kubectl exec -it -n demo druid-cluster-historicals-0 -- bash +Defaulted container "druid" out of: druid, init-druid (init) +bash-5.1$ +``` + +Now, execute the following commands to see the metadata storage directory: +```bash +bash-5.1$ cat conf/druid/cluster/data/historical/runtime.properties | grep druid.processing.numThreads +druid.processing.numThreads=2 +``` + +Here, we can see that our given configuration is applied to the historicals. + +### Check Configuration from Druid UI + +You can also see the configuration changes from the druid ui. For that, follow the following steps: + +First port-forward the port `8888` to local machine: + +```bash +$ kubectl port-forward -n demo svc/druid-cluster-routers 8888 +Forwarding from 127.0.0.1:8888 -> 8888 +Forwarding from [::1]:8888 -> 8888 +``` + + +Now hit the `http://localhost:8888` from any browser, and you will be prompted to provide the credential of the druid database. By following the steps discussed below, you can get the credential generated by the KubeDB operator for your Druid database. + +**Connection information:** + +- Username: + + ```bash + $ kubectl get secret -n demo druid-cluster-auth -o jsonpath='{.data.username}' | base64 -d + admin + ``` + +- Password: + + ```bash + $ kubectl get secret -n demo druid-cluster-auth -o jsonpath='{.data.password}' | base64 -d + LzJtVRX5E8MorFaf + ``` + +After providing the credentials correctly, you should be able to access the web console like shown below. + +

+  *(Screenshot: Druid web console, "lifecycle")*
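+
+If you prefer the command line over the UI, you can also check the current task slots through the same router port-forward. This is only a sketch: it assumes the router proxies the Overlord API (Druid's default behaviour), that `jq` is installed locally, and it reuses the admin credentials retrieved above; adjust these to your setup.
+
+```bash
+# List MiddleManager workers via the Overlord API (proxied by the router) and print each worker's task capacity.
+$ curl -s -u admin:LzJtVRX5E8MorFaf "http://localhost:8888/druid/indexer/v1/workers" | jq '.[].worker.capacity'
+2
+```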

+
+You can see that there are 2 task slots, reflecting the default configuration `druid.worker.capacity=2`.
+
+
+#### Create DruidOpsRequest
+
+Now, we will use this secret to replace the previous secret using a `DruidOpsRequest` CR. The `DruidOpsRequest` yaml is given below,
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: DruidOpsRequest
+metadata:
+  name: reconfigure-drops
+  namespace: demo
+spec:
+  type: Reconfigure
+  databaseRef:
+    name: druid-cluster
+  configuration:
+    secretName: new-config
+```
+
+Here,
+
+- `spec.databaseRef.name` specifies that we are reconfiguring the `druid-cluster` database.
+- `spec.type` specifies that we are performing `Reconfigure` on our database.
+- `spec.configuration.secretName` specifies the name of the new secret.
+
+Let's create the `DruidOpsRequest` CR we have shown above,
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/reconfigure/yamls/reconfigure-druid-ops.yaml
+druidopsrequest.ops.kubedb.com/reconfigure-drops created
+```
+
+#### Check new configuration
+
+If everything goes well, the `KubeDB` Ops-manager operator will update the configuration secret of the `Druid` object.
+
+Let's wait for `DruidOpsRequest` to be `Successful`. Run the following command to watch `DruidOpsRequest` CR,
+
+```bash
+$ kubectl get druidopsrequests -n demo
+NAME                TYPE          STATUS       AGE
+reconfigure-drops   Reconfigure   Successful   4m55s
+```
+
+We can see from the above output that the `DruidOpsRequest` has succeeded. If we describe the `DruidOpsRequest` we will get an overview of the steps that were followed to reconfigure the database.
+
+```bash
+$ kubectl describe druidopsrequest -n demo reconfigure-drops
+Name:         reconfigure-drops
+Namespace:    demo
+Labels:       <none>
+Annotations:  <none>
+API Version:  ops.kubedb.com/v1alpha1
+Kind:         DruidOpsRequest
+Metadata:
+  Creation Timestamp:  2024-08-02T05:08:37Z
+  Generation:          1
+  Resource Version:    332491
+  UID:                 b6e8cb1b-d29f-445e-bb01-60d29012c7eb
+Spec:
+  Apply:  IfReady
+  Configuration:
+    Config Secret:
+      Name:  new-kf-topology-custom-config
+  Database Ref:
+    Name:   druid-prod
+  Timeout:  5m
+  Type:     Reconfigure
+Status:
+  Conditions:
+    Last Transition Time:  2024-08-02T05:08:37Z
+    Message:               Druid ops-request has started to reconfigure druid nodes
+    Observed Generation:   1
+    Reason:                Reconfigure
+    Status:                True
+    Type:                  Reconfigure
+    Last Transition Time:  2024-08-02T05:08:45Z
+    Message:               check reconcile; ConditionStatus:False
+    Observed Generation:   1
+    Status:                False
+    Type:                  CheckReconcile
+    Last Transition Time:  2024-08-02T05:09:42Z
+    Message:               successfully reconciled the Druid with new configure
+    Observed Generation:   1
+    Reason:                UpdatePetSets
+    Status:                True
+    Type:                  UpdatePetSets
+    Last Transition Time:  2024-08-02T05:09:47Z
+    Message:               get pod; ConditionStatus:True; PodName:druid-prod-historicals-0
+    Observed Generation:   1
+    Status:                True
+    Type:                  GetPod--druid-prod-historicals-0
+    Last Transition Time:  2024-08-02T05:09:47Z
+    Message:               evict pod; ConditionStatus:True; PodName:druid-prod-historicals-0
+    Observed Generation:   1
+    Status:                True
+    Type:                  EvictPod--druid-prod-historicals-0
+    Last Transition Time:  2024-08-02T05:10:02Z
+    Message:               check pod running; ConditionStatus:True; PodName:druid-prod-historicals-0
+    Observed Generation:   1
+    Status:                True
+    Type:                  CheckPodRunning--druid-prod-historicals-0
+    Last Transition Time:  2024-08-02T05:10:07Z
+    Message:               get pod; ConditionStatus:True; PodName:druid-prod-historicals-1
+    Observed Generation:   1
+    Status:                True
+    Type:                  GetPod--druid-prod-historicals-1
+    Last Transition
Time: 2024-08-02T05:10:07Z + Message: evict pod; ConditionStatus:True; PodName:druid-prod-historicals-1 + Observed Generation: 1 + Status: True + Type: EvictPod--druid-prod-historicals-1 + Last Transition Time: 2024-08-02T05:10:22Z + Message: check pod running; ConditionStatus:True; PodName:druid-prod-historicals-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--druid-prod-historicals-1 + Last Transition Time: 2024-08-02T05:10:27Z + Message: get pod; ConditionStatus:True; PodName:druid-prod-middleManagers-0 + Observed Generation: 1 + Status: True + Type: GetPod--druid-prod-middleManagers-0 + Last Transition Time: 2024-08-02T05:10:27Z + Message: evict pod; ConditionStatus:True; PodName:druid-prod-middleManagers-0 + Observed Generation: 1 + Status: True + Type: EvictPod--druid-prod-middleManagers-0 + Last Transition Time: 2024-08-02T05:11:12Z + Message: check pod running; ConditionStatus:True; PodName:druid-prod-middleManagers-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--druid-prod-middleManagers-0 + Last Transition Time: 2024-08-02T05:11:17Z + Message: get pod; ConditionStatus:True; PodName:druid-prod-middleManagers-1 + Observed Generation: 1 + Status: True + Type: GetPod--druid-prod-middleManagers-1 + Last Transition Time: 2024-08-02T05:11:17Z + Message: evict pod; ConditionStatus:True; PodName:druid-prod-middleManagers-1 + Observed Generation: 1 + Status: True + Type: EvictPod--druid-prod-middleManagers-1 + Last Transition Time: 2024-08-02T05:11:32Z + Message: check pod running; ConditionStatus:True; PodName:druid-prod-middleManagers-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--druid-prod-middleManagers-1 + Last Transition Time: 2024-08-02T05:11:37Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-08-02T05:11:39Z + Message: Successfully completed reconfigure druid + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 3m7s KubeDB Ops-manager Operator Start processing for DruidOpsRequest: demo/reconfigure-drops + Normal Starting 3m7s KubeDB Ops-manager Operator Pausing Druid databse: demo/druid-prod + Normal Successful 3m7s KubeDB Ops-manager Operator Successfully paused Druid database: demo/druid-prod for DruidOpsRequest: reconfigure-drops + Warning check reconcile; ConditionStatus:False 2m59s KubeDB Ops-manager Operator check reconcile; ConditionStatus:False + Normal UpdatePetSets 2m2s KubeDB Ops-manager Operator successfully reconciled the Druid with new configure + Warning get pod; ConditionStatus:True; PodName:druid-prod-historicals-0 117s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-prod-historicals-0 + Warning evict pod; ConditionStatus:True; PodName:druid-prod-historicals-0 117s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-prod-historicals-0 + Warning check pod running; ConditionStatus:False; PodName:druid-prod-historicals-0 112s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:druid-prod-historicals-0 + Warning check pod running; ConditionStatus:True; PodName:druid-prod-historicals-0 102s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-prod-historicals-0 + Warning get pod; ConditionStatus:True; PodName:druid-prod-historicals-1 97s KubeDB 
Ops-manager Operator  get pod; ConditionStatus:True; PodName:druid-prod-historicals-1
+  Warning  evict pod; ConditionStatus:True; PodName:druid-prod-historicals-1          97s   KubeDB Ops-manager Operator  evict pod; ConditionStatus:True; PodName:druid-prod-historicals-1
+  Warning  check pod running; ConditionStatus:False; PodName:druid-prod-historicals-1  92s   KubeDB Ops-manager Operator  check pod running; ConditionStatus:False; PodName:druid-prod-historicals-1
+  Warning  check pod running; ConditionStatus:True; PodName:druid-prod-historicals-1   82s   KubeDB Ops-manager Operator  check pod running; ConditionStatus:True; PodName:druid-prod-historicals-1
+  Warning  get pod; ConditionStatus:True; PodName:druid-prod-middleManagers-0          77s   KubeDB Ops-manager Operator  get pod; ConditionStatus:True; PodName:druid-prod-middleManagers-0
+  Warning  evict pod; ConditionStatus:True; PodName:druid-prod-middleManagers-0        77s   KubeDB Ops-manager Operator  evict pod; ConditionStatus:True; PodName:druid-prod-middleManagers-0
+  Warning  check pod running; ConditionStatus:False; PodName:druid-prod-middleManagers-0  72s   KubeDB Ops-manager Operator  check pod running; ConditionStatus:False; PodName:druid-prod-middleManagers-0
+  Warning  check pod running; ConditionStatus:True; PodName:druid-prod-middleManagers-0   32s   KubeDB Ops-manager Operator  check pod running; ConditionStatus:True; PodName:druid-prod-middleManagers-0
+  Warning  get pod; ConditionStatus:True; PodName:druid-prod-middleManagers-1          27s   KubeDB Ops-manager Operator  get pod; ConditionStatus:True; PodName:druid-prod-middleManagers-1
+  Warning  evict pod; ConditionStatus:True; PodName:druid-prod-middleManagers-1        27s   KubeDB Ops-manager Operator  evict pod; ConditionStatus:True; PodName:druid-prod-middleManagers-1
+  Warning  check pod running; ConditionStatus:False; PodName:druid-prod-middleManagers-1  22s   KubeDB Ops-manager Operator  check pod running; ConditionStatus:False; PodName:druid-prod-middleManagers-1
+  Warning  check pod running; ConditionStatus:True; PodName:druid-prod-middleManagers-1   12s   KubeDB Ops-manager Operator  check pod running; ConditionStatus:True; PodName:druid-prod-middleManagers-1
+  Normal   RestartNodes                                                                 7s    KubeDB Ops-manager Operator  Successfully restarted all nodes
+  Normal   Starting                                                                     5s    KubeDB Ops-manager Operator  Resuming Druid database: demo/druid-prod
+  Normal   Successful                                                                   5s    KubeDB Ops-manager Operator  Successfully resumed Druid database: demo/druid-prod for DruidOpsRequest: reconfigure-drops
+```
+
+Now, let's exec into one of the middleManagers pods and check that the new configuration we provided has been applied:
+
+```bash
+$ kubectl exec -it -n demo druid-cluster-middleManagers-0 -- bash
+Defaulted container "druid" out of: druid, init-druid (init)
+bash-5.1$ cat conf/druid/cluster/data/middleManager/runtime.properties | grep druid.worker.capacity
+druid.worker.capacity=5
+```
+
+As we can see from the runtime properties of the running Druid, the value of `druid.worker.capacity` has been changed from `2` to `5`. So the reconfiguration of the cluster using the config secret is successful.
+
+
+### Reconfigure using apply config
+
+Now we will reconfigure this cluster again to set `log.retention.hours` to `150`. This time we won't use a new secret. We will use the `applyConfig` field of the `DruidOpsRequest`.
This will merge the new config in the existing secret. + +#### Create DruidOpsRequest + +Now, we will use the new configuration in the `applyConfig` field in the `DruidOpsRequest` CR. The `DruidOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: DruidOpsRequest +metadata: + name: kfops-reconfigure-apply-topology + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: druid-prod + configuration: + applyConfig: + middleManagers.properties: |- + log.retention.hours=150 + historicals.properties: |- + historicals.quorum.election.timeout.ms=4000 + historicals.quorum.fetch.timeout.ms=5000 + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `druid-prod` cluster. +- `spec.type` specifies that we are performing `Reconfigure` on druid. +- `spec.configuration.applyConfig` specifies the new configuration that will be merged in the existing secret. + +Let's create the `DruidOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/druid/reconfigure/druid-reconfigure-apply-topology.yaml +druidopsrequest.ops.kubedb.com/kfops-reconfigure-apply-topology created +``` + +#### Verify new configuration + +If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. + +Let's wait for `DruidOpsRequest` to be `Successful`. Run the following command to watch `DruidOpsRequest` CR, + +```bash +$ kubectl get druidopsrequests -n demo kfops-reconfigure-apply-topology +NAME TYPE STATUS AGE +kfops-reconfigure-apply-topology Reconfigure Successful 55s +``` + +We can see from the above output that the `DruidOpsRequest` has succeeded. If we describe the `DruidOpsRequest` we will get an overview of the steps that were followed to reconfigure the cluster. 
+ +```bash +$ kubectl describe druidopsrequest -n demo kfops-reconfigure-apply-topology +Name: kfops-reconfigure-apply-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: DruidOpsRequest +Metadata: + Creation Timestamp: 2024-08-02T05:14:42Z + Generation: 1 + Resource Version: 332996 + UID: 551d2c92-9431-47a7-a699-8f8115131b49 +Spec: + Apply: IfReady + Configuration: + Apply Config: + middleManagers.properties: log.retention.hours=150 + historicals.properties: historicals.quorum.election.timeout.ms=4000 +historicals.quorum.fetch.timeout.ms=5000 + Database Ref: + Name: druid-prod + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-08-02T05:14:42Z + Message: Druid ops-request has started to reconfigure druid nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-08-02T05:14:45Z + Message: Successfully prepared user provided custom config secret + Observed Generation: 1 + Reason: PrepareCustomConfig + Status: True + Type: PrepareCustomConfig + Last Transition Time: 2024-08-02T05:14:52Z + Message: successfully reconciled the Druid with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-08-02T05:14:57Z + Message: get pod; ConditionStatus:True; PodName:druid-prod-historicals-0 + Observed Generation: 1 + Status: True + Type: GetPod--druid-prod-historicals-0 + Last Transition Time: 2024-08-02T05:14:57Z + Message: evict pod; ConditionStatus:True; PodName:druid-prod-historicals-0 + Observed Generation: 1 + Status: True + Type: EvictPod--druid-prod-historicals-0 + Last Transition Time: 2024-08-02T05:15:07Z + Message: check pod running; ConditionStatus:True; PodName:druid-prod-historicals-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--druid-prod-historicals-0 + Last Transition Time: 2024-08-02T05:15:12Z + Message: get pod; ConditionStatus:True; PodName:druid-prod-historicals-1 + Observed Generation: 1 + Status: True + Type: GetPod--druid-prod-historicals-1 + Last Transition Time: 2024-08-02T05:15:12Z + Message: evict pod; ConditionStatus:True; PodName:druid-prod-historicals-1 + Observed Generation: 1 + Status: True + Type: EvictPod--druid-prod-historicals-1 + Last Transition Time: 2024-08-02T05:15:27Z + Message: check pod running; ConditionStatus:True; PodName:druid-prod-historicals-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--druid-prod-historicals-1 + Last Transition Time: 2024-08-02T05:15:32Z + Message: get pod; ConditionStatus:True; PodName:druid-prod-middleManagers-0 + Observed Generation: 1 + Status: True + Type: GetPod--druid-prod-middleManagers-0 + Last Transition Time: 2024-08-02T05:15:32Z + Message: evict pod; ConditionStatus:True; PodName:druid-prod-middleManagers-0 + Observed Generation: 1 + Status: True + Type: EvictPod--druid-prod-middleManagers-0 + Last Transition Time: 2024-08-02T05:16:07Z + Message: check pod running; ConditionStatus:True; PodName:druid-prod-middleManagers-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--druid-prod-middleManagers-0 + Last Transition Time: 2024-08-02T05:16:12Z + Message: get pod; ConditionStatus:True; PodName:druid-prod-middleManagers-1 + Observed Generation: 1 + Status: True + Type: GetPod--druid-prod-middleManagers-1 + Last Transition Time: 2024-08-02T05:16:12Z + Message: evict pod; ConditionStatus:True; PodName:druid-prod-middleManagers-1 + Observed Generation: 1 + Status: True + 
Type: EvictPod--druid-prod-middleManagers-1 + Last Transition Time: 2024-08-02T05:16:27Z + Message: check pod running; ConditionStatus:True; PodName:druid-prod-middleManagers-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--druid-prod-middleManagers-1 + Last Transition Time: 2024-08-02T05:16:32Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-08-02T05:16:35Z + Message: Successfully completed reconfigure druid + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2m6s KubeDB Ops-manager Operator Start processing for DruidOpsRequest: demo/kfops-reconfigure-apply-topology + Normal Starting 2m6s KubeDB Ops-manager Operator Pausing Druid databse: demo/druid-prod + Normal Successful 2m6s KubeDB Ops-manager Operator Successfully paused Druid database: demo/druid-prod for DruidOpsRequest: kfops-reconfigure-apply-topology + Normal UpdatePetSets 116s KubeDB Ops-manager Operator successfully reconciled the Druid with new configure + Warning get pod; ConditionStatus:True; PodName:druid-prod-historicals-0 111s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-prod-historicals-0 + Warning evict pod; ConditionStatus:True; PodName:druid-prod-historicals-0 111s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-prod-historicals-0 + Warning check pod running; ConditionStatus:False; PodName:druid-prod-historicals-0 106s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:druid-prod-historicals-0 + Warning check pod running; ConditionStatus:True; PodName:druid-prod-historicals-0 101s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-prod-historicals-0 + Warning get pod; ConditionStatus:True; PodName:druid-prod-historicals-1 96s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-prod-historicals-1 + Warning evict pod; ConditionStatus:True; PodName:druid-prod-historicals-1 96s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-prod-historicals-1 + Warning check pod running; ConditionStatus:False; PodName:druid-prod-historicals-1 91s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:druid-prod-historicals-1 + Warning check pod running; ConditionStatus:True; PodName:druid-prod-historicals-1 81s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-prod-historicals-1 + Warning get pod; ConditionStatus:True; PodName:druid-prod-middleManagers-0 76s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-prod-middleManagers-0 + Warning evict pod; ConditionStatus:True; PodName:druid-prod-middleManagers-0 76s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-prod-middleManagers-0 + Warning check pod running; ConditionStatus:False; PodName:druid-prod-middleManagers-0 71s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:druid-prod-middleManagers-0 + Warning check pod running; ConditionStatus:True; PodName:druid-prod-middleManagers-0 41s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-prod-middleManagers-0 + Warning get pod; ConditionStatus:True; PodName:druid-prod-middleManagers-1 36s KubeDB Ops-manager Operator get pod; 
ConditionStatus:True; PodName:druid-prod-middleManagers-1
+  Warning  evict pod; ConditionStatus:True; PodName:druid-prod-middleManagers-1        36s   KubeDB Ops-manager Operator  evict pod; ConditionStatus:True; PodName:druid-prod-middleManagers-1
+  Warning  check pod running; ConditionStatus:False; PodName:druid-prod-middleManagers-1  31s   KubeDB Ops-manager Operator  check pod running; ConditionStatus:False; PodName:druid-prod-middleManagers-1
+  Warning  check pod running; ConditionStatus:True; PodName:druid-prod-middleManagers-1   21s   KubeDB Ops-manager Operator  check pod running; ConditionStatus:True; PodName:druid-prod-middleManagers-1
+  Normal   RestartNodes                                                                 15s   KubeDB Ops-manager Operator  Successfully restarted all nodes
+  Normal   Starting                                                                     14s   KubeDB Ops-manager Operator  Resuming Druid database: demo/druid-prod
+  Normal   Successful                                                                   14s   KubeDB Ops-manager Operator  Successfully resumed Druid database: demo/druid-prod for DruidOpsRequest: kfops-reconfigure-apply-topology
+```
+
+Let's exec into one of the updated Druid middleManagers pods and check whether the new configuration has been applied.
+
+Exec into the Druid middleManagers:
+
+```bash
+$ kubectl exec -it -n demo druid-cluster-middleManagers-0 -- bash
+Defaulted container "druid" out of: druid, init-druid (init)
+bash-5.1$
+```
+
+Now, execute the following command to see the configuration:
+```bash
+bash-5.1$ cat conf/druid/cluster/data/middleManager/runtime.properties | grep druid.worker.capacity
+druid.worker.capacity=5
+```
+Here, we can see that our given configuration is applied to the middleManagers.
+
+Now, let's exec into one of the updated Druid historicals pods and check whether the new configuration has been applied.
+
+Exec into the Druid historicals:
+
+```bash
+$ kubectl exec -it -n demo druid-cluster-historicals-0 -- bash
+Defaulted container "druid" out of: druid, init-druid (init)
+bash-5.1$
+```
+
+Now, execute the following command to see the configuration:
+```bash
+bash-5.1$ cat conf/druid/cluster/data/historical/runtime.properties | grep druid.processing.numThreads
+druid.processing.numThreads=3
+```
+
+Here, we can see that our given configuration is applied to the historicals.
+
+### Verify Configuration Change from Druid UI
+
+You can also verify the configuration change from the Druid UI. To access the UI, do the port-forward as mentioned in [Check Configuration from Druid UI](/docs/guides/druid/reconfigure/#CheckConfigurationfromDruidUI).
+
+You should be able to see the following changes in the UI:
+

+  *(Screenshot: Druid web console, "lifecycle")*

+
+You can see that there are 5 task slots, reflecting our provided custom configuration of `druid.worker.capacity=5`.
+
+## Cleaning Up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete dr -n demo druid-cluster
+kubectl delete druidopsrequest -n demo reconfigure-drops kfops-reconfigure-apply-topology
+kubectl delete secret -n demo new-config
+kubectl delete ns demo
+```
+
+## Next Steps
+
+- Detail concepts of [Druid object](/docs/guides/druid/concepts/druid.md).
+- Different Druid topology clustering modes [here](/docs/guides/druid/clustering/_index.md).
+- Monitor your Druid database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/druid/monitoring/using-prometheus-operator.md).
+
+[//]: # (- Monitor your Druid database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/druid/monitoring/using-builtin-prometheus.md).)
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/druid/restart/guide.md b/docs/guides/druid/restart/guide.md
index b172825913..83750e076d 100644
--- a/docs/guides/druid/restart/guide.md
+++ b/docs/guides/druid/restart/guide.md
@@ -96,8 +96,8 @@ spec:
   version: 28.0.1
   deepStorage:
     type: s3
-    configSecret:
-      name: deep-storage-config
+    configuration:
+      secretName: deep-storage-config
   topology:
     routers:
       replicas: 1
diff --git a/docs/guides/druid/restart/guide.md.bak b/docs/guides/druid/restart/guide.md.bak
new file mode 100644
index 0000000000..b172825913
--- /dev/null
+++ b/docs/guides/druid/restart/guide.md.bak
@@ -0,0 +1,283 @@
+---
+title: Restart Druid
+menu:
+  docs_{{ .version }}:
+    identifier: guides-druid-restart-guide
+    name: Restart Druid
+    parent: guides-druid-restart
+    weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Restart Druid
+
+KubeDB supports restarting the Druid database via a DruidOpsRequest. Restarting is useful if some pods are got stuck in some phase, or they are not working correctly. This tutorial will show you how to use that.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
+
+- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial.
+
+```bash
+  $ kubectl create ns demo
+  namespace/demo created
+  ```
+
+> Note: YAML files used in this tutorial are stored in [docs/examples/druid](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/druid) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs).
+
+## Deploy Druid
+
+In this section, we are going to deploy a Druid database using KubeDB.
+
+### Create External Dependency (Deep Storage)
+
+Before proceeding further, we need to prepare deep storage, which is one of the external dependency of Druid and used for storing the segments. It is a storage mechanism that Apache Druid does not provide. **Amazon S3**, **Google Cloud Storage**, or **Azure Blob Storage**, **S3-compatible storage** (like **Minio**), or **HDFS** are generally convenient options for deep storage.
+ +In this tutorial, we will run a `minio-server` as deep storage in our local `kind` cluster using `minio-operator` and create a bucket named `druid` in it, which the deployed druid database will use. + +```bash + +$ helm repo add minio https://operator.min.io/ +$ helm repo update minio +$ helm upgrade --install --namespace "minio-operator" --create-namespace "minio-operator" minio/operator --set operator.replicaCount=1 + +$ helm upgrade --install --namespace "demo" --create-namespace druid-minio minio/tenant \ +--set tenant.pools[0].servers=1 \ +--set tenant.pools[0].volumesPerServer=1 \ +--set tenant.pools[0].size=1Gi \ +--set tenant.certificate.requestAutoCert=false \ +--set tenant.buckets[0].name="druid" \ +--set tenant.pools[0].name="default" + +``` + +Now we need to create a `Secret` named `deep-storage-config`. It contains the necessary connection information using which the druid database will connect to the deep storage. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: deep-storage-config + namespace: demo +stringData: + druid.storage.type: "s3" + druid.storage.bucket: "druid" + druid.storage.baseKey: "druid/segments" + druid.s3.accessKey: "minio" + druid.s3.secretKey: "minio123" + druid.s3.protocol: "http" + druid.s3.enablePathStyleAccess: "true" + druid.s3.endpoint.signingRegion: "us-east-1" + druid.s3.endpoint.url: "http://myminio-hl.demo.svc.cluster.local:9000/" +``` + +Let’s create the `deep-storage-config` Secret shown above: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/restart/yamls/deep-storage-config.yaml +secret/deep-storage-config created +``` + +Now, lets go ahead and create a druid database. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-cluster + namespace: demo +spec: + version: 28.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: Delete +``` + +Let's create the `Druid` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/update-version/yamls/druid-cluster.yaml +druid.kubedb.com/druid-cluster created +``` + +## Apply Restart opsRequest + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: DruidOpsRequest +metadata: + name: restart + namespace: demo +spec: + type: Restart + databaseRef: + name: druid-cluster + timeout: 5m + apply: Always +``` + +- `spec.type` specifies the Type of the ops Request +- `spec.databaseRef` holds the name of the Druid CR. It should be available in the same namespace as the opsRequest +- The meaning of `spec.timeout` & `spec.apply` fields will be found [here](/docs/guides/druid/concepts/druidopsrequest.md#spectimeout) + +Let's create the `DruidOpsRequest` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/restart/restart.yaml +druidopsrequest.ops.kubedb.com/restart created +``` + +Now the Ops-manager operator will first restart the controller pods, then broker of the referenced druid. 
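+
+You can watch the `DruidOpsRequest` until it reaches the `Successful` phase. For example (a minimal sketch using the CR name created above):
+
+```bash
+$ watch kubectl get druidopsrequest -n demo restart
+```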
+ +```shell +$ kubectl get drops -n demo +NAME TYPE STATUS AGE +restart Restart Successful 2m11s + +$ kubectl get drops -n demo restart -oyaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: DruidOpsRequest +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"ops.kubedb.com/v1alpha1","kind":"DruidOpsRequest","metadata":{"annotations":{},"name":"restart","namespace":"demo"},"spec":{"apply":"Always","databaseRef":{"name":"druid-cluster"},"timeout":"5m","type":"Restart"}} + creationTimestamp: "2024-10-21T10:30:53Z" + generation: 1 + name: restart + namespace: demo + resourceVersion: "83200" + uid: 0fcbc7d4-593f-45f7-8631-7483805efe1e +spec: + apply: Always + databaseRef: + name: druid-cluster + timeout: 5m + type: Restart +status: + conditions: + - lastTransitionTime: "2024-10-21T10:30:53Z" + message: Druid ops-request has started to restart druid nodes + observedGeneration: 1 + reason: Restart + status: "True" + type: Restart + - lastTransitionTime: "2024-10-21T10:31:51Z" + message: Successfully Restarted Druid nodes + observedGeneration: 1 + reason: RestartNodes + status: "True" + type: RestartNodes + - lastTransitionTime: "2024-10-21T10:31:01Z" + message: get pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 + observedGeneration: 1 + status: "True" + type: GetPod--druid-cluster-historicals-0 + - lastTransitionTime: "2024-10-21T10:31:01Z" + message: evict pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 + observedGeneration: 1 + status: "True" + type: EvictPod--druid-cluster-historicals-0 + - lastTransitionTime: "2024-10-21T10:31:06Z" + message: check pod running; ConditionStatus:True; PodName:druid-cluster-historicals-0 + observedGeneration: 1 + status: "True" + type: CheckPodRunning--druid-cluster-historicals-0 + - lastTransitionTime: "2024-10-21T10:31:11Z" + message: get pod; ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 + observedGeneration: 1 + status: "True" + type: GetPod--druid-cluster-middlemanagers-0 + - lastTransitionTime: "2024-10-21T10:31:11Z" + message: evict pod; ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 + observedGeneration: 1 + status: "True" + type: EvictPod--druid-cluster-middlemanagers-0 + - lastTransitionTime: "2024-10-21T10:31:16Z" + message: check pod running; ConditionStatus:True; PodName:druid-cluster-middlemanagers-0 + observedGeneration: 1 + status: "True" + type: CheckPodRunning--druid-cluster-middlemanagers-0 + - lastTransitionTime: "2024-10-21T10:31:21Z" + message: get pod; ConditionStatus:True; PodName:druid-cluster-brokers-0 + observedGeneration: 1 + status: "True" + type: GetPod--druid-cluster-brokers-0 + - lastTransitionTime: "2024-10-21T10:31:21Z" + message: evict pod; ConditionStatus:True; PodName:druid-cluster-brokers-0 + observedGeneration: 1 + status: "True" + type: EvictPod--druid-cluster-brokers-0 + - lastTransitionTime: "2024-10-21T10:31:26Z" + message: check pod running; ConditionStatus:True; PodName:druid-cluster-brokers-0 + observedGeneration: 1 + status: "True" + type: CheckPodRunning--druid-cluster-brokers-0 + - lastTransitionTime: "2024-10-21T10:31:31Z" + message: get pod; ConditionStatus:True; PodName:druid-cluster-routers-0 + observedGeneration: 1 + status: "True" + type: GetPod--druid-cluster-routers-0 + - lastTransitionTime: "2024-10-21T10:31:31Z" + message: evict pod; ConditionStatus:True; PodName:druid-cluster-routers-0 + observedGeneration: 1 + status: "True" + type: EvictPod--druid-cluster-routers-0 + - lastTransitionTime: 
"2024-10-21T10:31:36Z" + message: check pod running; ConditionStatus:True; PodName:druid-cluster-routers-0 + observedGeneration: 1 + status: "True" + type: CheckPodRunning--druid-cluster-routers-0 + - lastTransitionTime: "2024-10-21T10:31:41Z" + message: get pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + observedGeneration: 1 + status: "True" + type: GetPod--druid-cluster-coordinators-0 + - lastTransitionTime: "2024-10-21T10:31:41Z" + message: evict pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + observedGeneration: 1 + status: "True" + type: EvictPod--druid-cluster-coordinators-0 + - lastTransitionTime: "2024-10-21T10:31:46Z" + message: check pod running; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + observedGeneration: 1 + status: "True" + type: CheckPodRunning--druid-cluster-coordinators-0 + - lastTransitionTime: "2024-10-21T10:31:51Z" + message: Controller has successfully restart the Druid replicas + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete druidopsrequest -n demo restart +kubectl delete druid -n demo druid-cluster +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Druid object](/docs/guides/druid/concepts/druid.md). +- Different Druid topology clustering modes [here](/docs/guides/druid/clustering/_index.md). +- Monitor your Druid database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/druid/monitoring/using-prometheus-operator.md). + +[//]: # (- Monitor your Druid database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/druid/monitoring/using-builtin-prometheus.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). 
diff --git a/docs/guides/druid/restart/yamls/druid-cluster.yaml b/docs/guides/druid/restart/yamls/druid-cluster.yaml index 6351c2ddda..fde0e4ebed 100644 --- a/docs/guides/druid/restart/yamls/druid-cluster.yaml +++ b/docs/guides/druid/restart/yamls/druid-cluster.yaml @@ -7,8 +7,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/restart/yamls/druid-cluster.yaml.bak b/docs/guides/druid/restart/yamls/druid-cluster.yaml.bak new file mode 100644 index 0000000000..6351c2ddda --- /dev/null +++ b/docs/guides/druid/restart/yamls/druid-cluster.yaml.bak @@ -0,0 +1,16 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-cluster + namespace: demo +spec: + version: 28.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: Delete + diff --git a/docs/guides/druid/scaling/horizontal-scaling/guide.md b/docs/guides/druid/scaling/horizontal-scaling/guide.md index 6e6648b4f8..7b8d857f26 100644 --- a/docs/guides/druid/scaling/horizontal-scaling/guide.md +++ b/docs/guides/druid/scaling/horizontal-scaling/guide.md @@ -109,8 +109,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/scaling/horizontal-scaling/guide.md.bak b/docs/guides/druid/scaling/horizontal-scaling/guide.md.bak new file mode 100644 index 0000000000..6e6648b4f8 --- /dev/null +++ b/docs/guides/druid/scaling/horizontal-scaling/guide.md.bak @@ -0,0 +1,603 @@ +--- +title: Horizontal Scaling Druid Cluster +menu: + docs_{{ .version }}: + identifier: guides-druid-scaling-horizontal-scaling-guide + name: Druid Horizontal Scaling + parent: guides-druid-scaling-horizontal-scaling + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Horizontal Scale Druid Topology Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to scale the Druid topology cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Druid](/docs/guides/druid/concepts/druid.md) + - [Topology](/docs/guides/druid/clustering/overview/index.md) + - [DruidOpsRequest](/docs/guides/druid/concepts/druidopsrequest.md) + - [Horizontal Scaling Overview](/docs/guides/druid/scaling/horizontal-scaling/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/druid](/docs/examples/druid) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Apply Horizontal Scaling on Druid Cluster + +Here, we are going to deploy a `Druid` cluster using a supported version by `KubeDB` operator. Then we are going to apply horizontal scaling on it. 
+ +### Prepare Druid Topology cluster + +Now, we are going to deploy a `Druid` topology cluster with version `28.0.1`. + +### Create External Dependency (Deep Storage) + +Before proceeding further, we need to prepare deep storage, which is one of the external dependency of Druid and used for storing the segments. It is a storage mechanism that Apache Druid does not provide. **Amazon S3**, **Google Cloud Storage**, or **Azure Blob Storage**, **S3-compatible storage** (like **Minio**), or **HDFS** are generally convenient options for deep storage. + +In this tutorial, we will run a `minio-server` as deep storage in our local `kind` cluster using `minio-operator` and create a bucket named `druid` in it, which the deployed druid database will use. + +```bash + +$ helm repo add minio https://operator.min.io/ +$ helm repo update minio +$ helm upgrade --install --namespace "minio-operator" --create-namespace "minio-operator" minio/operator --set operator.replicaCount=1 + +$ helm upgrade --install --namespace "demo" --create-namespace druid-minio minio/tenant \ +--set tenant.pools[0].servers=1 \ +--set tenant.pools[0].volumesPerServer=1 \ +--set tenant.pools[0].size=1Gi \ +--set tenant.certificate.requestAutoCert=false \ +--set tenant.buckets[0].name="druid" \ +--set tenant.pools[0].name="default" + +``` + +Now we need to create a `Secret` named `deep-storage-config`. It contains the necessary connection information using which the druid database will connect to the deep storage. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: deep-storage-config + namespace: demo +stringData: + druid.storage.type: "s3" + druid.storage.bucket: "druid" + druid.storage.baseKey: "druid/segments" + druid.s3.accessKey: "minio" + druid.s3.secretKey: "minio123" + druid.s3.protocol: "http" + druid.s3.enablePathStyleAccess: "true" + druid.s3.endpoint.signingRegion: "us-east-1" + druid.s3.endpoint.url: "http://myminio-hl.demo.svc.cluster.local:9000/" +``` + +Let’s create the `deep-storage-config` Secret shown above: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/scaling/horizontal-scaling/yamls/deep-storage-config.yaml +secret/deep-storage-config created +``` + +### Deploy Druid topology cluster + +In this section, we are going to deploy a Druid topology cluster. Then, in the next section we will scale the cluster using `DruidOpsRequest` CRD. Below is the YAML of the `Druid` CR that we are going to create, + + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-cluster + namespace: demo +spec: + version: 28.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: Delete +``` + +Let's create the `Druid` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/druid/scaling/horizontal-scaling/yamls/druid-topology.yaml +druid.kubedb.com/druid-cluster created +``` + +Now, wait until `druid-cluster` has status `Ready`. i.e, + +```bash +$ kubectl get dr -n demo -w +NAME TYPE VERSION STATUS AGE +druid-cluster kubedb.com/v1aplha2 28.0.1 Provisioning 0s +druid-cluster kubedb.com/v1aplha2 28.0.1 Provisioning 24s +. +. 
+druid-cluster kubedb.com/v1aplha2 28.0.1 Ready 92s +``` + +Let's check the number of replicas has from druid object, number of pods the petset have, + +**Coordinators Replicas** + +```bash +$ kubectl get druid -n demo druid-cluster -o json | jq '.spec.topology.coordinators.replicas' +1 + +$ kubectl get petset -n demo druid-cluster-coordinators -o json | jq '.spec.replicas' +1 +``` + +**Historicals Replicas** + +```bash +$ kubectl get druid -n demo druid-cluster -o json | jq '.spec.topology.historicals.replicas' +1 + +$ kubectl get petset -n demo druid-cluster-historicals -o json | jq '.spec.replicas' +1 +``` + +We can see from commands that the cluster has 1 replicas for both coordinators and historicals. + +### Check Replica Count from Druid UI + +You can also see the replica count of each node from the druid ui. For that, follow the following steps: + +First port-forward the port `8888` to local machine: + +```bash +$ kubectl port-forward -n demo svc/druid-cluster-routers 8888 +Forwarding from 127.0.0.1:8888 -> 8888 +Forwarding from [::1]:8888 -> 8888 +``` + + +Now hit the `http://localhost:8888` from any browser, and you will be prompted to provide the credential of the druid database. By following the steps discussed below, you can get the credential generated by the KubeDB operator for your Druid database. + +**Connection information:** + +- Username: + + ```bash + $ kubectl get secret -n demo druid-cluster-admin-cred -o jsonpath='{.data.username}' | base64 -d + admin + ``` + +- Password: + + ```bash + $ kubectl get secret -n demo druid-cluster-admin-cred -o jsonpath='{.data.password}' | base64 -d + LzJtVRX5E8MorFaf + ``` + +After providing the credentials correctly, you should be able to access the web console like shown below. + +

+  *(Screenshot: Druid web console, "lifecycle")*

+ + +Here, we can see that there is 1 replica of each node including `coordinators` and `historicals`. + +We are now ready to apply the `DruidOpsRequest` CR to scale this cluster. + +## Scale Up Replicas + +Here, we are going to scale up the replicas of the topology cluster to meet the desired number of replicas after scaling. + +### Create DruidOpsRequest + +In order to scale up the replicas of the topology cluster, we have to create a `DruidOpsRequest` CR with our desired replicas. Below is the YAML of the `DruidOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: DruidOpsRequest +metadata: + name: druid-hscale-up + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: druid-cluster + horizontalScaling: + topology: + coordinators: 2 + historicals: 2 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling operation on `druid-cluster` cluster. +- `spec.type` specifies that we are performing `HorizontalScaling` on druid. +- `spec.horizontalScaling.topology.coordinators` specifies the desired replicas after scaling for coordinators. +- `spec.horizontalScaling.topology.historicals` specifies the desired replicas after scaling for historicals. + +> **Note:** Similarly you can scale other druid nodes horizontally by specifying the following fields: + > - For `overlords` use `spec.horizontalScaling.topology.overlords`. + > - For `brokers` use `spec.horizontalScaling.topology.brokers`. + > - For `middleManagers` use `spec.horizontalScaling.topology.middleManagers`. + > - For `routers` use `spec.horizontalScaling.topology.routers`. + +Let's create the `DruidOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/scaling/horizontal-scaling/yamls/druid-hscale-up.yaml +druidopsrequest.ops.kubedb.com/druid-hscale-up created +``` + +### Verify Topology cluster replicas scaled up successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `Druid` object and related `PetSets` and `Pods`. + +Let's wait for `DruidOpsRequest` to be `Successful`. Run the following command to watch `DruidOpsRequest` CR, + +```bash +$ watch kubectl get druidopsrequest -n demo +NAME TYPE STATUS AGE +druid-hscale-up HorizontalScaling Successful 106s +``` + +We can see from the above output that the `DruidOpsRequest` has succeeded. If we describe the `DruidOpsRequest` we will get an overview of the steps that were followed to scale the cluster. 
+ +```bash +$ kubectl describe druidopsrequests -n demo druid-hscale-up +Name: druid-hscale-up +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: DruidOpsRequest +Metadata: + Creation Timestamp: 2024-10-21T11:32:51Z + Generation: 1 + Managed Fields: + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:apply: + f:databaseRef: + f:horizontalScaling: + .: + f:topology: + .: + f:coordinators: + f:historicals: + f:type: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2024-10-21T11:32:51Z + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:status: + .: + f:conditions: + f:observedGeneration: + f:phase: + Manager: kubedb-ops-manager + Operation: Update + Subresource: status + Time: 2024-10-21T11:34:02Z + Resource Version: 91877 + UID: 824356ca-eafc-4266-8af1-c372b27f6ce7 +Spec: + Apply: IfReady + Database Ref: + Name: druid-cluster + Horizontal Scaling: + Topology: + Coordinators: 2 + Historicals: 2 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2024-10-21T11:32:51Z + Message: Druid ops-request has started to horizontally scaling the nodes + Observed Generation: 1 + Reason: HorizontalScaling + Status: True + Type: HorizontalScaling + Last Transition Time: 2024-10-21T11:33:17Z + Message: Successfully Scaled Up Broker + Observed Generation: 1 + Reason: ScaleUpCoordinators + Status: True + Type: ScaleUpCoordinators + Last Transition Time: 2024-10-21T11:33:02Z + Message: patch pet setdruid-cluster-coordinators; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPetSetdruid-cluster-coordinators + Last Transition Time: 2024-10-21T11:33:57Z + Message: node in cluster; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: NodeInCluster + Last Transition Time: 2024-10-21T11:34:02Z + Message: Successfully Scaled Up Broker + Observed Generation: 1 + Reason: ScaleUpHistoricals + Status: True + Type: ScaleUpHistoricals + Last Transition Time: 2024-10-21T11:33:22Z + Message: patch pet setdruid-cluster-historicals; ConditionStatus:True + Observed Generation: 1 + Status: True + Type: PatchPetSetdruid-cluster-historicals + Last Transition Time: 2024-10-21T11:34:02Z + Message: Successfully completed horizontally scale druid cluster + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 95s KubeDB Ops-manager Operator Start processing for DruidOpsRequest: demo/druid-hscale-up + Normal Starting 95s KubeDB Ops-manager Operator Pausing Druid databse: demo/druid-cluster + Normal Successful 95s KubeDB Ops-manager Operator Successfully paused Druid database: demo/druid-cluster for DruidOpsRequest: druid-hscale-up + Warning patch pet setdruid-cluster-coordinators; ConditionStatus:True 84s KubeDB Ops-manager Operator patch pet setdruid-cluster-coordinators; ConditionStatus:True + Warning node in cluster; ConditionStatus:False 76s KubeDB Ops-manager Operator node in cluster; ConditionStatus:False + Warning node in cluster; ConditionStatus:True 74s KubeDB Ops-manager Operator node in cluster; ConditionStatus:True + Normal ScaleUpCoordinators 69s KubeDB Ops-manager Operator Successfully Scaled Up Broker + Warning patch pet setdruid-cluster-historicals; ConditionStatus:True 64s KubeDB Ops-manager 
Operator patch pet setdruid-cluster-historicals; ConditionStatus:True + Warning node in cluster; ConditionStatus:False 56s KubeDB Ops-manager Operator node in cluster; ConditionStatus:False + Warning node in cluster; ConditionStatus:True 29s KubeDB Ops-manager Operator node in cluster; ConditionStatus:True + Normal ScaleUpHistoricals 24s KubeDB Ops-manager Operator Successfully Scaled Up Broker + Normal Starting 24s KubeDB Ops-manager Operator Resuming Druid database: demo/druid-cluster + Normal Successful 24s KubeDB Ops-manager Operator Successfully resumed Druid database: demo/druid-cluster for DruidOpsRequest: druid-hscale-up +``` + + +Now, we are going to verify the number of replicas this cluster has from the Druid object, number of pods the petset have, + +**Coordinators Replicas** + +```bash +$ kubectl get druid -n demo druid-cluster -o json | jq '.spec.topology.coordinators.replicas' +2 + +$ kubectl get petset -n demo druid-cluster-coordinators -o json | jq '.spec.replicas' +2 +``` + +**Historicals Replicas** + +```bash +$ kubectl get druid -n demo druid-cluster -o json | jq '.spec.topology.historicals.replicas' +2 + +$ kubectl get petset -n demo druid-cluster-historicals -o json | jq '.spec.replicas' +2 +``` + +Now, we are going to verify the number of replicas this cluster has from the Druid UI. + +### Verify Replica Count from Druid UI + +Verify the scaled replica count of nodes from the druid ui. To access the UI follow the steps described in the first part of this guide. [(Check Replica Count from Druid UI)](/docs/guides/druid/scaling/horizontal-scaling/#Check-Replica-Count-from-Druid-UI) + +If you follow the steps properly, you should be able to see that the replica count of both `coordinators` and `historicals` has become 2. Also as the `coordinators` is serving as the `overlords`, the count of `overlords` has also become 2. + +

+  *(Screenshot: Druid web console, "lifecycle")*
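+
+If the UI is not reachable, a rough command-line alternative is to ask the Coordinator which data servers it currently sees. This is only a sketch: it assumes the router port-forward from earlier is still active, that the router proxies the Coordinator API (Druid's default behaviour), and it reuses the admin credentials retrieved above.
+
+```bash
+# List the data servers known to the Coordinator; after scaling up, both historicals should appear here.
+$ curl -s -u admin:LzJtVRX5E8MorFaf "http://localhost:8888/druid/coordinator/v1/servers?simple"
+```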

+ +## Scale Down Replicas + +Here, we are going to scale down the replicas of the druid topology cluster to meet the desired number of replicas after scaling. + +### Create DruidOpsRequest + +In order to scale down the replicas of the druid topology cluster, we have to create a `DruidOpsRequest` CR with our desired replicas. Below is the YAML of the `DruidOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: DruidOpsRequest +metadata: + name: druid-hscale-down + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: druid-cluster + horizontalScaling: + topology: + coordinators: 1 + historicals: 1 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling down operation on `druid-cluster` cluster. +- `spec.type` specifies that we are performing `HorizontalScaling` on druid. +- `spec.horizontalScaling.topology.coordinators` specifies the desired replicas after scaling for the coordinators nodes. +- `spec.horizontalScaling.topology.historicals` specifies the desired replicas after scaling for the historicals nodes. + +> **Note:** Similarly you can scale other druid nodes by specifying the following fields: +> - For `overlords` use `spec.horizontalScaling.topology.overlords`. +> - For `brokers` use `spec.horizontalScaling.topology.brokers`. +> - For `middleManagers` use `spec.horizontalScaling.topology.middleManagers`. +> - For `routers` use `spec.horizontalScaling.topology.routers`. + +Let's create the `DruidOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/druid/scaling/horizontal-scaling/druid-hscale-down-topology.yaml +druidopsrequest.ops.kubedb.com/druid-hscale-down created +``` + +#### Verify Topology cluster replicas scaled down successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `Druid` object and related `PetSets` and `Pods`. + +Let's wait for `DruidOpsRequest` to be `Successful`. Run the following command to watch `DruidOpsRequest` CR, + +```bash +$ watch kubectl get druidopsrequest -n demo +NAME TYPE STATUS AGE +druid-hscale-down HorizontalScaling Successful 2m32s +``` + +We can see from the above output that the `DruidOpsRequest` has succeeded. If we describe the `DruidOpsRequest` we will get an overview of the steps that were followed to scale the cluster. 
+ +```bash +$ kubectl get druidopsrequest -n demo druid-hscale-down -oyaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: DruidOpsRequest +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"ops.kubedb.com/v1alpha1","kind":"DruidOpsRequest","metadata":{"annotations":{},"name":"druid-hscale-down","namespace":"demo"},"spec":{"databaseRef":{"name":"druid-cluster"},"horizontalScaling":{"topology":{"coordinators":1,"historicals":1}},"type":"HorizontalScaling"}} + creationTimestamp: "2024-10-21T12:42:09Z" + generation: 1 + name: druid-hscale-down + namespace: demo + resourceVersion: "99500" + uid: b3a81d07-be44-4adf-a8a7-36bb825f26a8 +spec: + apply: IfReady + databaseRef: + name: druid-cluster + horizontalScaling: + topology: + coordinators: 1 + historicals: 1 + type: HorizontalScaling +status: + conditions: + - lastTransitionTime: "2024-10-21T12:42:09Z" + message: Druid ops-request has started to horizontally scaling the nodes + observedGeneration: 1 + reason: HorizontalScaling + status: "True" + type: HorizontalScaling + - lastTransitionTime: "2024-10-21T12:42:33Z" + message: Successfully Scaled Down Broker + observedGeneration: 1 + reason: ScaleDownCoordinators + status: "True" + type: ScaleDownCoordinators + - lastTransitionTime: "2024-10-21T12:42:23Z" + message: reassign partitions; ConditionStatus:True + observedGeneration: 1 + status: "True" + type: ReassignPartitions + - lastTransitionTime: "2024-10-21T12:42:23Z" + message: is pet set patched; ConditionStatus:True + observedGeneration: 1 + status: "True" + type: IsPetSetPatched + - lastTransitionTime: "2024-10-21T12:42:28Z" + message: get pod; ConditionStatus:True + observedGeneration: 1 + status: "True" + type: GetPod + - lastTransitionTime: "2024-10-21T12:42:53Z" + message: Successfully Scaled Down Broker + observedGeneration: 1 + reason: ScaleDownHistoricals + status: "True" + type: ScaleDownHistoricals + - lastTransitionTime: "2024-10-21T12:42:43Z" + message: delete pvc; ConditionStatus:True + observedGeneration: 1 + status: "True" + type: DeletePvc + - lastTransitionTime: "2024-10-21T12:42:43Z" + message: get pvc; ConditionStatus:False + observedGeneration: 1 + status: "False" + type: GetPvc + - lastTransitionTime: "2024-10-21T12:42:53Z" + message: Successfully completed horizontally scale druid cluster + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +Now, we are going to verify the number of replicas this cluster has from the Druid object, number of pods the petset have, + +**Coordinators Replicas** + +```bash +$ kubectl get druid -n demo druid-cluster -o json | jq '.spec.topology.coordinators.replicas' +1 + +$ kubectl get petset -n demo druid-cluster-coordinators -o json | jq '.spec.replicas' +1 +``` + +**Historicals Replicas** + +```bash +$ kubectl get druid -n demo druid-cluster -o json | jq '.spec.topology.historicals.replicas' +1 + +$ kubectl get petset -n demo druid-cluster-historicals -o json | jq '.spec.replicas' +1 +``` + +Now, we are going to verify the number of replicas this cluster has from the Druid UI. + +### Verify Replica Count from Druid UI + +Verify the scaled replica count of nodes from the druid ui. To access the UI follow the steps described in the first part of this guide. 
[(Check Replica Count from Druid UI)](/docs/guides/druid/scaling/horizontal-scaling/#Check-Replica-Count-from-Druid-UI) + +If you follow the steps properly, you should be able to see that the replica count of both `coordinators` and `historicals` has become 1. Also as the `coordinators` is serving as the `overlords`, the count of `overlords` has also become 1. + +


+ + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete dr -n demo druid-cluster +kubectl delete druidopsrequest -n demo druid-hscale-up druid-hscale-down +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Druid object](/docs/guides/druid/concepts/druid.md). +- Different Druid topology clustering modes [here](/docs/guides/druid/clustering/_index.md). +- Monitor your Druid with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/druid/monitoring/using-prometheus-operator.md). + +[//]: # (- Monitor your Druid with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/druid/monitoring/using-builtin-prometheus.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/druid/scaling/horizontal-scaling/yamls/druid-cluster.yaml b/docs/guides/druid/scaling/horizontal-scaling/yamls/druid-cluster.yaml index 6351c2ddda..fde0e4ebed 100644 --- a/docs/guides/druid/scaling/horizontal-scaling/yamls/druid-cluster.yaml +++ b/docs/guides/druid/scaling/horizontal-scaling/yamls/druid-cluster.yaml @@ -7,8 +7,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/scaling/horizontal-scaling/yamls/druid-cluster.yaml.bak b/docs/guides/druid/scaling/horizontal-scaling/yamls/druid-cluster.yaml.bak new file mode 100644 index 0000000000..6351c2ddda --- /dev/null +++ b/docs/guides/druid/scaling/horizontal-scaling/yamls/druid-cluster.yaml.bak @@ -0,0 +1,16 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-cluster + namespace: demo +spec: + version: 28.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: Delete + diff --git a/docs/guides/druid/scaling/vertical-scaling/guide.md b/docs/guides/druid/scaling/vertical-scaling/guide.md index cedf35b076..bf36322ce0 100644 --- a/docs/guides/druid/scaling/vertical-scaling/guide.md +++ b/docs/guides/druid/scaling/vertical-scaling/guide.md @@ -108,8 +108,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/scaling/vertical-scaling/guide.md.bak b/docs/guides/druid/scaling/vertical-scaling/guide.md.bak new file mode 100644 index 0000000000..cedf35b076 --- /dev/null +++ b/docs/guides/druid/scaling/vertical-scaling/guide.md.bak @@ -0,0 +1,454 @@ +--- +title: Vertical Scaling Druid Cluster +menu: + docs_{{ .version }}: + identifier: guides-druid-scaling-vertical-scaling-guide + name: Druid Vertical Scaling + parent: guides-druid-scaling-vertical-scaling + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Vertical Scale Druid Topology Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to update the resources of a Druid topology cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). 
+ +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Druid](/docs/guides/druid/concepts/druid.md) + - [Topology](/docs/guides/druid/clustering/overview/index.md) + - [DruidOpsRequest](/docs/guides/druid/concepts/druidopsrequest.md) + - [Vertical Scaling Overview](/docs/guides/druid/scaling/vertical-scaling/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/druid](/docs/examples/druid) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +## Apply Vertical Scaling on Topology Cluster + +Here, we are going to deploy a `Druid` topology cluster using a supported version by `KubeDB` operator. Then we are going to apply vertical scaling on it. + +### Prepare Druid Topology Cluster + +Now, we are going to deploy a `Druid` topology cluster database with version `28.0.1`. + +### Create External Dependency (Deep Storage) + +Before proceeding further, we need to prepare deep storage, which is one of the external dependency of Druid and used for storing the segments. It is a storage mechanism that Apache Druid does not provide. **Amazon S3**, **Google Cloud Storage**, or **Azure Blob Storage**, **S3-compatible storage** (like **Minio**), or **HDFS** are generally convenient options for deep storage. + +In this tutorial, we will run a `minio-server` as deep storage in our local `kind` cluster using `minio-operator` and create a bucket named `druid` in it, which the deployed druid database will use. + +```bash + +$ helm repo add minio https://operator.min.io/ +$ helm repo update minio +$ helm upgrade --install --namespace "minio-operator" --create-namespace "minio-operator" minio/operator --set operator.replicaCount=1 + +$ helm upgrade --install --namespace "demo" --create-namespace druid-minio minio/tenant \ +--set tenant.pools[0].servers=1 \ +--set tenant.pools[0].volumesPerServer=1 \ +--set tenant.pools[0].size=1Gi \ +--set tenant.certificate.requestAutoCert=false \ +--set tenant.buckets[0].name="druid" \ +--set tenant.pools[0].name="default" + +``` + +Now we need to create a `Secret` named `deep-storage-config`. It contains the necessary connection information using which the druid database will connect to the deep storage. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: deep-storage-config + namespace: demo +stringData: + druid.storage.type: "s3" + druid.storage.bucket: "druid" + druid.storage.baseKey: "druid/segments" + druid.s3.accessKey: "minio" + druid.s3.secretKey: "minio123" + druid.s3.protocol: "http" + druid.s3.enablePathStyleAccess: "true" + druid.s3.endpoint.signingRegion: "us-east-1" + druid.s3.endpoint.url: "http://myminio-hl.demo.svc.cluster.local:9000/" +``` + +Let’s create the `deep-storage-config` Secret shown above: + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/scaling/vertical-scaling/yamls/deep-storage-config.yaml +secret/deep-storage-config created +``` + +### Deploy Druid Cluster + +In this section, we are going to deploy a Druid topology cluster. Then, in the next section we will update the resources of the database using `DruidOpsRequest` CRD. 
Below is the YAML of the `Druid` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-cluster + namespace: demo +spec: + version: 28.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: Delete +``` + +Let's create the `Druid` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/druid/scaling/vertical-scaling/yamls/druid-cluster.yaml +druid.kubedb.com/druid-cluster created +``` + +Now, wait until `druid-cluster` has status `Ready`. i.e, + +```bash +$ kubectl get dr -n demo -w +NAME TYPE VERSION STATUS AGE +druid-cluster kubedb.com/v1aplha2 28.0.1 Provisioning 0s +druid-cluster kubedb.com/v1aplha2 28.0.1 Provisioning 24s +. +. +druid-cluster kubedb.com/v1aplha2 28.0.1 Ready 92s +``` + +Let's check the Pod containers resources for both `coordinators` and `historicals` of the Druid topology cluster. Run the following command to get the resources of the `coordinators` and `historicals` containers of the Druid topology cluster + +```bash +$ kubectl get pod -n demo druid-cluster-coordinators-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "1Gi" + }, + "requests": { + "cpu": "500m", + "memory": "1Gi" + } +} +``` + +```bash +$ kubectl get pod -n demo druid-cluster-historicals-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "memory": "1Gi" + }, + "requests": { + "cpu": "500m", + "memory": "1Gi" + } +} +``` +This is the default resources of the Druid topology cluster set by the `KubeDB` operator. + +We are now ready to apply the `DruidOpsRequest` CR to update the resources of this database. + +### Vertical Scaling + +Here, we are going to update the resources of the topology cluster to meet the desired resources after scaling. + +#### Create DruidOpsRequest + +In order to update the resources of the database, we have to create a `DruidOpsRequest` CR with our desired resources. Below is the YAML of the `DruidOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: DruidOpsRequest +metadata: + name: druid-vscale + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: druid-cluster + verticalScaling: + coordinators: + resources: + requests: + memory: "1.2Gi" + cpu: "0.6" + limits: + memory: "1.2Gi" + cpu: "0.6" + historicals: + resources: + requests: + memory: "1.1Gi" + cpu: "0.6" + limits: + memory: "1.1Gi" + cpu: "0.6" + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `druid-cluster` cluster. +- `spec.type` specifies that we are performing `VerticalScaling` on druid. +- `spec.VerticalScaling.coordinators` specifies the desired resources of `coordinators` node after scaling. +- `spec.VerticalScaling.historicals` specifies the desired resources of `historicals` node after scaling. + +> **Note:** Similarly you can scale other druid nodes vertically by specifying the following fields: +> - For `overlords` use `spec.verticalScaling.overlords`. +> - For `brokers` use `spec.verticalScaling.brokers`. +> - For `middleManagers` use `spec.verticalScaling.middleManagers`. +> - For `routers` use `spec.verticalScaling.routers`. 
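+
+For illustration only (we will not apply it in this tutorial), a sketch of a `DruidOpsRequest` that vertically scales the `brokers` and `routers` instead could look like the following; the name and resource values here are arbitrary placeholders:
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: DruidOpsRequest
+metadata:
+  name: druid-vscale-brokers-routers
+  namespace: demo
+spec:
+  type: VerticalScaling
+  databaseRef:
+    name: druid-cluster
+  verticalScaling:
+    brokers:
+      resources:
+        requests:
+          memory: "1.2Gi"
+          cpu: "0.6"
+        limits:
+          memory: "1.2Gi"
+          cpu: "0.6"
+    routers:
+      resources:
+        requests:
+          memory: "1.2Gi"
+          cpu: "0.6"
+        limits:
+          memory: "1.2Gi"
+          cpu: "0.6"
+  apply: IfReady
+```
+
+The `druid-vscale` request defined earlier is the one we actually apply below.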
+ +Let's create the `DruidOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/druid/scaling/vertical-scaling/yamls/druid-vscale.yaml +druidopsrequest.ops.kubedb.com/druid-vscale created +``` + +#### Verify Druid cluster resources have been updated successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the resources of `Druid` object and related `PetSets` and `Pods`. + +Let's wait for `DruidOpsRequest` to be `Successful`. Run the following command to watch `DruidOpsRequest` CR, + +```bash +$ kubectl get druidopsrequest -n demo +NAME TYPE STATUS AGE +druid-vscale VerticalScaling Successful 3m56s +``` + +We can see from the above output that the `DruidOpsRequest` has succeeded. If we describe the `DruidOpsRequest` we will get an overview of the steps that were followed to scale the cluster. + +```bash +$ kubectl describe druidopsrequest -n demo druid-vscale +Name: druid-vscale +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: DruidOpsRequest +Metadata: + Creation Timestamp: 2024-10-21T12:53:55Z + Generation: 1 + Managed Fields: + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:apply: + f:databaseRef: + f:timeout: + f:type: + f:verticalScaling: + .: + f:coordinators: + .: + f:resources: + .: + f:limits: + .: + f:cpu: + f:memory: + f:requests: + .: + f:cpu: + f:memory: + f:historicals: + .: + f:resources: + .: + f:limits: + .: + f:cpu: + f:memory: + f:requests: + .: + f:cpu: + f:memory: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2024-10-21T12:53:55Z + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:status: + .: + f:conditions: + f:observedGeneration: + f:phase: + Manager: kubedb-ops-manager + Operation: Update + Subresource: status + Time: 2024-10-21T12:54:23Z + Resource Version: 102002 + UID: fe8bb22f-02e8-4a10-9a78-fc211371d581 +Spec: + Apply: IfReady + Database Ref: + Name: druid-cluster + Timeout: 5m + Type: VerticalScaling + Vertical Scaling: + Coordinators: + Resources: + Limits: + Cpu: 0.6 + Memory: 1.2Gi + Requests: + Cpu: 0.6 + Memory: 1.2Gi + Historicals: + Resources: + Limits: + Cpu: 0.6 + Memory: 1.1Gi + Requests: + Cpu: 0.6 + Memory: 1.1Gi +Status: + Conditions: + Last Transition Time: 2024-10-21T12:53:55Z + Message: Druid ops-request has started to vertically scale the Druid nodes + Observed Generation: 1 + Reason: VerticalScaling + Status: True + Type: VerticalScaling + Last Transition Time: 2024-10-21T12:53:58Z + Message: Successfully updated PetSets Resources + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-10-21T12:54:23Z + Message: Successfully Restarted Pods With Resources + Observed Generation: 1 + Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2024-10-21T12:54:03Z + Message: get pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + Observed Generation: 1 + Status: True + Type: GetPod--druid-cluster-coordinators-0 + Last Transition Time: 2024-10-21T12:54:03Z + Message: evict pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + Observed Generation: 1 + Status: True + Type: EvictPod--druid-cluster-coordinators-0 + Last Transition Time: 2024-10-21T12:54:08Z + Message: check pod running; ConditionStatus:True; 
PodName:druid-cluster-coordinators-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--druid-cluster-coordinators-0 + Last Transition Time: 2024-10-21T12:54:13Z + Message: get pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 + Observed Generation: 1 + Status: True + Type: GetPod--druid-cluster-historicals-0 + Last Transition Time: 2024-10-21T12:54:13Z + Message: evict pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 + Observed Generation: 1 + Status: True + Type: EvictPod--druid-cluster-historicals-0 + Last Transition Time: 2024-10-21T12:54:18Z + Message: check pod running; ConditionStatus:True; PodName:druid-cluster-historicals-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--druid-cluster-historicals-0 + Last Transition Time: 2024-10-21T12:54:23Z + Message: Successfully completed the vertical scaling for RabbitMQ + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 67s KubeDB Ops-manager Operator Start processing for DruidOpsRequest: demo/druid-vscale + Normal Starting 67s KubeDB Ops-manager Operator Pausing Druid databse: demo/druid-cluster + Normal Successful 67s KubeDB Ops-manager Operator Successfully paused Druid database: demo/druid-cluster for DruidOpsRequest: druid-vscale + Normal UpdatePetSets 64s KubeDB Ops-manager Operator Successfully updated PetSets Resources + Warning get pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 59s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + Warning evict pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 59s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + Warning check pod running; ConditionStatus:True; PodName:druid-cluster-coordinators-0 54s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-cluster-coordinators-0 + Warning get pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 49s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 + Warning evict pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 49s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:druid-cluster-historicals-0 + Warning check pod running; ConditionStatus:True; PodName:druid-cluster-historicals-0 44s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:druid-cluster-historicals-0 + Normal RestartPods 39s KubeDB Ops-manager Operator Successfully Restarted Pods With Resources + Normal Starting 39s KubeDB Ops-manager Operator Resuming Druid database: demo/druid-cluster + Normal Successful 39s KubeDB Ops-manager Operator Successfully resumed Druid database: demo/druid-cluster for DruidOpsRequest: druid-vscale +``` +Now, we are going to verify from one of the Pod yaml whether the resources of the topology cluster has updated to meet up the desired state, Let's check, + +```bash +$ kubectl get pod -n demo druid-cluster-coordinators-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "600m", + "memory": "1288490188800m" + }, + "requests": { + "cpu": "600m", + "memory": "1288490188800m" + } +} +$ kubectl get pod -n demo druid-cluster-historicals-1 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "600m", + "memory": "1181116006400m" + }, + "requests": { + 
"cpu": "600m", + "memory": "1181116006400m" + } +} +``` + +The above output verifies that we have successfully scaled up the resources of the Druid topology cluster. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete dr -n demo druid-cluster +kubectl delete druidopsrequest -n demo druid-vscale +kubectl delete ns demo +``` + +## Next Steps + +- Detail concepts of [Druid object](/docs/guides/druid/concepts/druid.md). +- Different Druid topology clustering modes [here](/docs/guides/druid/clustering/_index.md). +- Monitor your Druid database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/druid/monitoring/using-prometheus-operator.md). + +[//]: # (- Monitor your Druid database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/druid/monitoring/using-builtin-prometheus.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/druid/scaling/vertical-scaling/yamls/druid-cluster.yaml b/docs/guides/druid/scaling/vertical-scaling/yamls/druid-cluster.yaml index 7a89d0dc91..1b37cfe269 100644 --- a/docs/guides/druid/scaling/vertical-scaling/yamls/druid-cluster.yaml +++ b/docs/guides/druid/scaling/vertical-scaling/yamls/druid-cluster.yaml @@ -7,8 +7,8 @@ spec: version: 28.0.1 deepStorage: type: s3 - configSecret: - name: deep-storage-config + configuration: + secretName: deep-storage-config topology: routers: replicas: 1 diff --git a/docs/guides/druid/scaling/vertical-scaling/yamls/druid-cluster.yaml.bak b/docs/guides/druid/scaling/vertical-scaling/yamls/druid-cluster.yaml.bak new file mode 100644 index 0000000000..7a89d0dc91 --- /dev/null +++ b/docs/guides/druid/scaling/vertical-scaling/yamls/druid-cluster.yaml.bak @@ -0,0 +1,15 @@ +apiVersion: kubedb.com/v1alpha2 +kind: Druid +metadata: + name: druid-cluster + namespace: demo +spec: + version: 28.0.1 + deepStorage: + type: s3 + configSecret: + name: deep-storage-config + topology: + routers: + replicas: 1 + deletionPolicy: Delete diff --git a/docs/guides/elasticsearch/concepts/elasticsearch-dashboard/index.md b/docs/guides/elasticsearch/concepts/elasticsearch-dashboard/index.md index 1870d54879..3d95337dd4 100644 --- a/docs/guides/elasticsearch/concepts/elasticsearch-dashboard/index.md +++ b/docs/guides/elasticsearch/concepts/elasticsearch-dashboard/index.md @@ -35,8 +35,8 @@ spec: enableSSL: true authSecret: name: es-cluster-user-cred - configSecret: - name: custom-configuration + configuration: + secretName: custom-configuration databaseRef: name: es-cluster podTemplate: @@ -81,9 +81,9 @@ The k8s secret must be of type: kubernetes.io/basic-auth with the following keys - `password`: Password for the `elastic`/`admin` user. If `spec.authSecret` is not set, dashboard operator will use the authSecret from referred database object. -### spec.configSecret +### spec.configuration -`spec.configSecret` is an optional field that allows users to provide custom configuration for `ElasticsearchDashboard`. It contains a k8s secret name that holds the configuration files for `ElasticsearchDashboard`. If not provided, operator generated configurations will be applied to dashboard. If `configSecret` is provided, it will be merged with the operator-generated configuration. The user-provided configuration has higher precedence over the operator-generated configuration. The configuration file names are used as secret keys. 
+`spec.configuration` is an optional field that allows users to provide custom configuration for `ElasticsearchDashboard`. It contains a k8s secret name that holds the configuration files for `ElasticsearchDashboard`. If not provided, operator generated configurations will be applied to dashboard. If `configSecret` is provided, it will be merged with the operator-generated configuration. The user-provided configuration has higher precedence over the operator-generated configuration. The configuration file names are used as secret keys. - Kibana: - `kibana.yml` for configuring Kibana diff --git a/docs/guides/elasticsearch/concepts/elasticsearch-dashboard/index.md.bak b/docs/guides/elasticsearch/concepts/elasticsearch-dashboard/index.md.bak new file mode 100644 index 0000000000..f2aac711e0 --- /dev/null +++ b/docs/guides/elasticsearch/concepts/elasticsearch-dashboard/index.md.bak @@ -0,0 +1,137 @@ +--- +title: ElasticsearchDashboard +menu: + docs_{{ .version }}: + identifier: es-dashboard-concepts + name: ElasticsearchDashboard + parent: es-concepts-elasticsearch + weight: 21 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# ElasticsearchDashboard + +## What is ElasticsearchDashboard + +`ElasticsearchDashboard` is a Kubernetes `Custom Resource Definitions` (CRD). It provides a declarative configuration to specify the docker images to be used for Elasticsearch Dashboard (`Kibana`, `Opensearch_Dashboards`) deployed with KubeDB in Kubernetes native way. When you install KubeDB, an `ElasticsearchVersion` custom resource will be created automatically for every supported `ElasticsearchDashboard` version. +Suppose you have a KubeDB-managed [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md) provisioned in your cluster. You have to specify the name of `Elasticsearch` CRD in `spec.databaseRef.name` field of `ElasticsearchDashboard` CRD. Then, KubeDB will use the docker images specified in the `ElasticsearchVersion` CRD to create your expected dashboard. + + +## ElasticsearchDashboard Specification + +As with all other Kubernetes objects, an `ElasticsearchDashboard` needs `apiVersion`, `kind`, and `metadata` fields. It also needs a `spec` section. + +```yaml +apiVersion: elasticsearch.kubedb.com/v1alpha1 +kind: ElasticsearchDashboard +metadata: + name: es-cluster-dashboard + namespace: demo +spec: + replicas: 1 + enableSSL: true + authSecret: + name: es-cluster-user-cred + configuration: + secretName: custom-configuration + databaseRef: + name: es-cluster + podTemplate: + spec: + containers: + - name: elasticsearch + resources: + limits: + memory: 1.5Gi + requests: + cpu: 500m + memory: 1.5Gi + serviceTemplates: + - alias: primary + spec: + ports: + - port: 5601 + tls: + certificates: + - alias: database-client + secretName: es-cluster-client-cert + deletionPolicy: WipeOut +``` + + + +### spec.replicas + +`spec.replicas` is an optional field that can be used if `spec.topology` is not specified. This field specifies the number of nodes (ie. pods) in the Elasticsearch cluster. The default value of this field is 1. + +### spec.enableSSL + +`spec.enableSSL` is an `optional` field that specifies whether to enable TLS to HTTP layer. The default value of this field is `false`. Enabling TLS from `ElasticsearchDashboard` CRD ensures secure connectivity with dashboard. In order to enable TLS in HTTP layer, the `spec.enableSSL` field in `elasticsearch` CRD has to be set to `true`. 
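+
+For example, a minimal sketch of the matching fields on both objects (only the relevant lines are shown):
+
+```yaml
+# Elasticsearch CR referred by the dashboard
+spec:
+  enableSSL: true
+---
+# ElasticsearchDashboard CR
+spec:
+  enableSSL: true
+```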
+ +### spec.authSecret + +`spec.authSecret` is an `optional` field that points to a k8s secret used to hold the Elasticsearch `elastic`/`admin` user credentials. In order to access elastic search dashboard these credentials will be required. + +The k8s secret must be of type: kubernetes.io/basic-auth with the following keys: + +- `username`: Must be `elastic` for `x-pack`, and `admin` for `OpenSearch`. +- `password`: Password for the `elastic`/`admin` user. + If `spec.authSecret` is not set, dashboard operator will use the authSecret from referred database object. + +### spec.configSecret + +`spec.configSecret` is an optional field that allows users to provide custom configuration for `ElasticsearchDashboard`. It contains a k8s secret name that holds the configuration files for `ElasticsearchDashboard`. If not provided, operator generated configurations will be applied to dashboard. If `configSecret` is provided, it will be merged with the operator-generated configuration. The user-provided configuration has higher precedence over the operator-generated configuration. The configuration file names are used as secret keys. + +- Kibana: + - `kibana.yml` for configuring Kibana + +- Opensearch_dashboards: + - `opensearch_dashboards.yml` for configuring OpenSearch_Dashboards + +### spec.databaseRef + +`spec.databaseRef` specifies the database name to which `ElasticsearchDashboard` is pointing. Referenced Elasticsearch instance must be deployed in the same namespace with dashboard. The dashboard will not become ready until database is ready and accepting connection requests. + +### spec.podTemplate + +KubeDB allows providing a template for database pod through `spec.podTemplate`. KubeDB operator will pass the information provided in `spec.podTemplate` to the PetSet created for the Elasticsearch database. + +KubeDB accepts the following fields to set in `spec.podTemplate`: + +- metadata + - annotations (pod’s annotation) + +- controller + - annotations (deployment's annotation) + +- spec: + - env + - resources + - initContainers + - imagePullSecrets + - nodeSelector + - serviceAccountName + - schedulerName + - tolerations + - priorityClassName + - priority + - securityContext + + +### spec.serviceTemplates + +`spec.serviceTemplates` is an optional field that contains a list of the `serviceTemplate`. The templates are identified by the alias. For Dashboard, the only configurable service alias is `primary`. + +### spec.tls + +`spec.tls` specifies the TLS/SSL configurations. User can provide custom TLS certificates using k8s secrets with allowed certificate aliases.`ElasticsearchDashboard` supports certificate with alias `database-client` to securely communicate with elasticsearch, alias `ca` to provide ca certificates and alias `server` for securely communicating with dashboard server. If `spec.tls` is not set the operator generated self-signed certificates will be used for secure connectivity with database and dashboard server. + + +## Next Steps + +- Learn about Elasticsearch CRD [here](/docs/guides/elasticsearch/concepts/elasticsearch/index.md). +- Deploy your first Elasticsearch database with KubeDB by following the guide [here](/docs/guides/elasticsearch/quickstart/overview/elasticsearch/index.md). +- Deploy your first OpenSearch database with KubeDB by following the guide [here](/docs/guides/elasticsearch/quickstart/overview/opensearch/index.md). 
diff --git a/docs/guides/elasticsearch/concepts/elasticsearch/index.md b/docs/guides/elasticsearch/concepts/elasticsearch/index.md index 240f8f52dc..b8513632b8 100644 --- a/docs/guides/elasticsearch/concepts/elasticsearch/index.md +++ b/docs/guides/elasticsearch/concepts/elasticsearch/index.md @@ -635,14 +635,14 @@ Elasticsearch managed by KubeDB can be monitored with builtin-Prometheus and Pro - [Monitor Elasticsearch with builtin Prometheus](/docs/guides/elasticsearch/monitoring/using-builtin-prometheus.md) - [Monitor Elasticsearch with Prometheus operator](/docs/guides/elasticsearch/monitoring/using-prometheus-operator.md) -### spec.configSecret +### spec.configuration -`spec.configSecret` is an `optional` field that allows users to provide custom configuration for Elasticsearch. It contains a k8s secret name that holds the configuration files for both Elasticsearch and the security plugins (ie. x-pack, SearchGuard, and openDistro). +`spec.configuration` is an `optional` field that allows users to provide custom configuration for Elasticsearch. It contains a k8s secret name that holds the configuration files for both Elasticsearch and the security plugins (ie. x-pack, SearchGuard, and openDistro). ```yaml spec: - configSecret: - name: es-custom-config + configuration: + secretName: es-custom-config ``` The configuration file names are used as secret keys. diff --git a/docs/guides/elasticsearch/concepts/elasticsearch/index.md.bak b/docs/guides/elasticsearch/concepts/elasticsearch/index.md.bak new file mode 100644 index 0000000000..b48ac6fee7 --- /dev/null +++ b/docs/guides/elasticsearch/concepts/elasticsearch/index.md.bak @@ -0,0 +1,917 @@ +--- +title: Elasticsearch CRD +menu: + docs_{{ .version }}: + identifier: es-elasticsearch-concepts + name: Elasticsearch + parent: es-concepts-elasticsearch + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Elasticsearch + +## What is Elasticsearch + +`Elasticsearch` is a Kubernetes `Custom Resource Definitions` (CRD). It provides a declarative configuration for [Elasticsearch](https://www.elastic.co/products/elasticsearch) and [OpenSearch](https://opensearch.org/) in a Kubernetes native way. You only need to describe the desired database configuration in an Elasticsearch object, and the KubeDB operator will create Kubernetes objects in the desired state for you. + +## Elasticsearch Spec + +As with all other Kubernetes objects, an Elasticsearch needs `apiVersion`, `kind`, and `metadata` fields. It also needs a `.spec` section. Below is an example Elasticsearch object. 
+ +```yaml +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: myes + namespace: demo +spec: + autoOps: + disabled: true + authSecret: + kind: Secret + name: es-admin-cred + externallyManaged: false + configuration: + secretName: es-custom-config + enableSSL: true + internalUsers: + metrics_exporter: {} + rolesMapping: + SGS_READALL_AND_MONITOR: + users: + - metrics_exporter + kernelSettings: + privileged: true + sysctls: + - name: vm.max_map_count + value: "262144" + maxUnavailable: 1 + monitor: + agent: prometheus.io + prometheus: + exporter: + port: 56790 + podTemplate: + controller: + annotations: + passTo: petSets + metadata: + annotations: + passTo: pods + spec: + nodeSelector: + kubernetes.io/os: linux + containers: + - name: elasticsearch + env: + - name: node.processors + value: "2" + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 500m + memory: 512Mi + serviceAccountName: es + replicas: 3 + serviceTemplates: + - alias: primary + metadata: + annotations: + passTo: service + spec: + type: NodePort + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut + tls: + issuerRef: + apiGroup: "cert-manager.io" + kind: Issuer + name: es-issuer + certificates: + - alias: transport + privateKey: + encoding: PKCS8 + secretName: es-transport-cert + subject: + organizations: + - kubedb + - alias: http + privateKey: + encoding: PKCS8 + secretName: es-http-cert + subject: + organizations: + - kubedb + - alias: admin + privateKey: + encoding: PKCS8 + secretName: es-admin-cert + subject: + organizations: + - kubedb + - alias: metrics-exporter + privateKey: + encoding: PKCS8 + secretName: es-metrics-exporter-cert + subject: + organizations: + - kubedb + healthChecker: + periodSeconds: 15 + timeoutSeconds: 10 + failureThreshold: 2 + disableWriteCheck: false + version: xpack-8.11.1 +``` +### spec.autoOps +AutoOps is an optional field to control the generation of versionUpdate & TLS-related recommendations. + +### spec.version +`spec.version` is a `required` field that specifies the name of the [ElasticsearchVersion](/docs/guides/elasticsearch/concepts/catalog/index.md) CRD where the docker images are specified. + +- Name format: `{Security Plugin Name}-{Application Version}-{Modification Tag}` + +- Samples: `xpack-8.2.3`, `xpack-8.11.1`, `opensearch-1.3.0`, etc. + +```yaml +spec: + version: xpack-8.11.1 +``` + +### spec.kernelSettings + +`spec.kernelSettings` is an `optional` field that is used to configure the k8s-cluster node's kernel settings. It let users to perform `sysctl -w key=value` commands to the node's kernel. These commands are performed from an `initContainer`. If any of those commands require `privileged` access, you need to set the `kernelSettings.privileged` to `true` resulting in the `initContainer` running in `privileged` mode. + +```yaml +spec: + kernelSettings: + privileged: true + sysctls: + - name: vm.max_map_count + value: "262144" +``` + +To disable the kernetSetting `initContainer`, set the `kernelSettings.disableDefaults` to `true` . + +```yaml +spec: + kernelSettings: + disableDefaults: true +``` + +> Note: Make sure that `vm.max_map_count` is greater or equal to `262144`, otherwise the Elasticsearch may fail to bootstrap. + + +### spec.disableSecurity + +`spec.disableSecurity` is an `optional` field that allows a user to run the Elasticsearch with the security plugin `disabled`. Default to `false`. 
+ +```yaml +spec: + disableSecurity: true +``` + +### spec.internalUsers + +`spec.internalUsers` provides an alternative way to configure the existing internal users or create new users without using the `internal_users.yml` file. This field expects the input format to be in the `map[username]ElasticsearchUserSpec` format. The KubeDB operator creates and synchronizes secure passwords for those users and stores in k8s secrets. The k8s secret names are formed by the following format: `{Elasticsearch Instance Name}-{Username}-cred`. + +The `ElasticsearchUserSpec` contains the following fields: +- `hash` ( `string` | `""` ) - Specifies the hash of the password. +- `full_name` ( `string` | `""` ) - Specifies The full name of the user. Only applicable for xpack authplugin. +- `metadata` ( `map[string]string` | `""` ) - Specifies Arbitrary metadata that you want to associate with the user. Only applicable for xpack authplugin. +- `secretName` ( `string` | `""` ) - Specifies the k8s secret name that holds the user credentials. Defaults to "--cred". +- `roles` ( `[]string` | `nil` ) - A set of roles the user has. The roles determine the user’s access permissions. To create a user without any roles, specify an empty list: []. Only applicable for xpack authplugin. +- `email` ( `string` | `""` ) - Specifies the email of the user. Only applicable for xpack authplugin. +- `reserved` ( `bool` | `false` ) - specifies the reserved status. The resources that have this set to `true` cannot be changed using the REST API or Kibana. +- `hidden` ( `bool` | `false` ) - specifies the hidden status. The resources that have this set to true are not returned by the REST API and not visible in Kibana. +- `backendRoles` (`[]string` | `nil`) - specifies a list of backend roles assigned to this user. The backend roles can come from the internal user database, LDAP groups, JSON web token claims, or SAML assertions. +- `searchGuardRoles` ( `[]string` | `nil` ) - specifies a list of SearchGuard security plugin roles assigned to this user. +- `opendistroSecurityRoles` ( `[]string` | `nil` ) - specifies a list of opendistro security plugin roles assigned to this user. +- `attributes` ( `map[string]string` | `nil` )- specifies one or more custom attributes which can be used in index names and DLS queries. +- `description` ( `string` | `""` ) - specifies the description of the user. + +Here's how `.spec.internalUsers` can be configured for `searchguard` or `opendistro` auth plugins. + +```yaml +spec: + internalUsers: + # update the attribute of default kibanaro user + kibanaro: + attributes: + attribute1: "value-a" + attribute2: "value-b" + attribute3: "value-c" + # update the desciption of snapshotrestore user + snapshotrestore: + description: "This is the new description" + # Create a new readall user + custom_readall_user: + backend_roles: + - "readall" + description: "Custom readall user" +``` + +Here's how `.spec.internalUsers` can be configured for `xpack` auth plugins. 
+ +```yaml +spec: + internalUsers: + apm_system: + backendRoles: + - apm_system + secretName: es-cluster-apm-system-cred + beats_system: + backendRoles: + - beats_system + secretName: es-cluster-beats-system-cred + elastic: + backendRoles: + - superuser + secretName: es-cluster-elastic-cred + kibana_system: + backendRoles: + - kibana_system + secretName: es-cluster-kibana-system-cred + logstash_system: + backendRoles: + - logstash_system + secretName: es-cluster-logstash-system-cred + remote_monitoring_user: + backendRoles: + - remote_monitoring_collector + - remote_monitoring_agent + secretName: es-cluster-remote-monitoring-user-cred +``` +**ElasticStack:** + +Default Users: [Official Docs](https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html) + +- `elastic` - Has direct read-only access to restricted indices, such as .security. This user also has the ability to manage security and create roles with unlimited privileges +- `kibana_system` - The user Kibana uses to connect and communicate with Elasticsearch. +- `logstash_system` - The user Logstash uses when storing monitoring information in Elasticsearch. +- `beats_system` - The user the Beats use when storing monitoring information in Elasticsearch. +- `apm_system` - The user the APM server uses when storing monitoring information in Elasticsearch. +- `remote_monitoring_user` - The user Metricbeat uses when collecting and storing monitoring information in Elasticsearch. It has the remote_monitoring_agent and remote_monitoring_collector built-in roles. + +**SearchGuard:** + +Default Users: [Official Docs](https://docs.search-guard.com/latest/demo-users-roles) + +- `admin` - Full access to the cluster and all indices. +- `kibanaserver` - Has all permissions on the `.kibana` index. +- `kibanaro` - Has `SGS_READ` access to all indices and all permissions on the `.kibana` index. +- `logstash` - Has `SGS_CRUD` and `SGS_CREATE_INDEX` permissions on all logstash and beats indices. +- `readall` - Has read access to all indices. +- `snapshotrestore` - Has permissions to perform snapshot and restore operations. + +**OpenDistro:** + +Default Users: [Official Docs](https://opendistro.github.io/for-elasticsearch-docs/docs/security/access-control/users-roles/) + +- `admin` - Grants full access to the cluster: all cluster-wide operations, write to all indices, write to all tenants. +- `kibanaserver` - Has all permissions on the `.kibana` index +- `kibanaro` - Grants permissions to use Kibana: cluster-wide searches, index monitoring, and write to various Kibana indices. +- `logstash` - Grants permissions for Logstash to interact with the cluster: cluster-wide searches, cluster monitoring, and write to the various Logstash indices. +- `readall` - Grants permissions for cluster-wide searches like msearch and search permissions for all indices. +- `snapshotrestore` - Grants permissions to manage snapshot repositories, take snapshots, and restore snapshots. + +### spec.rolesMapping + +`spec.rolesMapping` provides an alternative way to map backend roles, hosts and users to roles without using the `roles_mapping.yml` file. Only works with `SearchGurad` and `OpenDistro` security plugins. This field expects the input format to be in the `map[rolename]RoleSpec` format. + +The `RoleSpec` contains the following fields: + +- `reserved` ( `bool` | `false` ) - specifies the reserved status. The resources that have this set to `true`, cannot be changed using the REST API or Kibana. 
+- `hidden` ( `bool` | `false` ) - specifies the hidden status. The resources that have this field set to `true` are not returned by the REST API and not visible in Kibana. +- `backendRoles` ( `[]string` | `nil` )- specifies a list of backend roles assigned to this role. The backend roles can come from the internal user database, LDAP groups, JSON web token-claims or SAML assertions. +- `hosts` ( `[]string` | `nil` ) - specifies a list of hosts assigned to this role. +- `users` ( `[]string` | `nil` ) - specifies a list of users assigned to this role. +- ` + +```yaml +spec: + rolesMapping: + # create role mapping for the custom readall user + readall: + users: + - custom_readall_user +``` + +For the default roles visit the [SearchGurad docs](https://docs.search-guard.com/latest/roles-permissions), [OpenDistro docs](https://opendistro.github.io/for-elasticsearch-docs/docs/security/access-control/users-roles/#create-roles). + +### spec.topology + +`spec.topology` is an `optional` field that provides a way to configure different types of nodes for the Elasticsearch cluster. This field enables you to specify how many nodes you want to act as `master`, `data`, `ingest` or other node roles for Elasticsearch. You can also specify how much storage and resources to allocate for each type of node independently. + +Currently supported node types are - +- **data**: Data nodes hold the shards that contain the documents you have indexed. Data nodes handle data related operations like CRUD, search, and aggregations +- **ingest**: Ingest nodes can execute pre-processing pipelines, composed of one or more ingest processors +- **master**: The master node is responsible for lightweight cluster-wide actions such as creating or deleting an index, tracking which nodes are part of the cluster, and deciding which shards to allocate to which nodes. It is important for cluster health to have a stable master node. +- **dataHot**: Hot data nodes are part of the hot tier. The hot tier is the Elasticsearch entry point for time series data and holds your most-recent, most-frequently-searched time series data. +- **dataWarm**: Warm data nodes are part of the warm tier. Time series data can move to the warm tier once it is being queried less frequently than the recently-indexed data in the hot tier. +- **dataCold**: Cold data nodes are part of the cold tier. When you no longer need to search time series data regularly, it can move from the warm tier to the cold tier. +- **dataFrozen**: Frozen data nodes are part of the frozen tier. Once data is no longer being queried, or being queried rarely, it may move from the cold tier to the frozen tier where it stays for the rest of its life. +- **dataContent**: Content data nodes are part of the content tier. Data stored in the content tier is generally a collection of items such as a product catalog or article archive. Unlike time series data, the value of the content remains relatively constant over time, so it doesn’t make sense to move it to a tier with different performance characteristics as it ages. +- **ml**: Machine learning nodes run jobs and handle machine learning API requests. +- **transform**: Transform nodes run transforms and handle transform API requests. +- **coordinating**: The coordinating node forwards the request to the data nodes which hold the data. 
+ +```yaml + topology: + data: + maxUnavailable: 1 + replicas: 3 + podTemplate: + spec: + containers: + - name: "elasticsearch" + resources: + requests: + cpu: "500m" + limits: + cpu: "600m" + memory: "1.5Gi" + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + suffix: data + ingest: + maxUnavailable: 1 + replicas: 3 + podTemplate: + spec: + containers: + - name: "elasticsearch" + resources: + requests: + cpu: "500m" + limits: + cpu: "600m" + memory: "1.5Gi" + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + suffix: ingest + master: + maxUnavailable: 1 + replicas: 2 + podTemplate: + spec: + containers: + - name: "elasticsearch" + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 500m + memory: 1Gi + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + suffix: master +``` + +The `spec.topology` contains the following fields: + +- `topology.master`: + - `replicas` (`: "1"`) - is an `optional` field to specify the number of nodes (ie. pods ) that act as the `master` nodes. Defaults to `1`. + - `suffix` (`: "master"`) - is an `optional` field that is added as the suffix of the master PetSet name. Defaults to `master`. + - `storage` is a `required` field that specifies how much storage to claim for each of the `master` nodes. + - `resources` (`: "cpu: 500m, memory: 1Gi" `) - is an `optional` field that specifies how much computational resources to request or to limit for each of the `master` nodes. + - `maxUnavailable` is an `optional` field that specifies the exact number of master nodes (ie. pods) that can be safely evicted before the pod disruption budget (PDB) kicks in. KubeDB uses Pod Disruption Budget to ensure that desired number of replicas are available during [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions) so that no data loss occurs. + +- `topology.data`: + - `replicas` (`: "1"`) - is an `optional` field to specify the number of nodes (ie. pods ) that act as the `data` nodes. Defaults to `1`. + - `suffix` (`: "data"`) - is an `optional` field that is added as the suffix of the data PetSet name. Defaults to `data`. + - `storage` is a `required` field that specifies how much storage to claim for each of the `data` nodes. + - `resources` (` cpu: 500m, memory: 1Gi `) - is an `optional` field that specifies which amount of computational resources to request or to limit for each of the `data` nodes. + - `maxUnavailable` is an `optional` field that specifies the exact number of data nodes (ie. pods) that can be safely evicted before the pod disruption budget (PDB) kicks in. KubeDB uses Pod Disruption Budget to ensure that desired number of replicas are available during [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions) so that no data loss occurs. + +- `topology.ingest`: + - `replicas` (`: "1"`) - is an `optional` field to specify the number of nodes (ie. pods ) that act as the `ingest` nodes. Defaults to `1`. + - `suffix` (`: "ingest"`) - is an `optional` field that is added as the suffix of the data PetSet name. Defaults to `ingest`. + - `storage` is a `required` field that specifies how much storage to claim for each of the `ingest` nodes. 
+ - `resources` (` cpu: 500m, memory: 1Gi `) - is an `optional` field that specifies which amount of computational resources to request or to limit for each of the `data` nodes. + - `maxUnavailable` is an `optional` field that specifies the exact number of ingest nodes (ie. pods) that can be safely evicted before the pod disruption budget (PDB) kicks in. KubeDB uses Pod Disruption Budget to ensure that desired number of replicas are available during [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions) so that no data loss is occurs. + +> Note: Any two types of nodes can't have the same `suffix`. + +If you specify `spec.topology` field then you **do not need** to specify the following fields in Elasticsearch CRD. + +- `spec.replicas` +- `spec.storage` +- `spec.podTemplate.spec.resources` + +If you do not specify `spec.topology` field, the Elasticsearch Cluster runs in combined mode. + +> Combined Mode: all nodes of the Elasticsearch cluster will work as `master`, `data` and `ingest` nodes simultaneously. + +### spec.replicas + +`spec.replicas` is an `optional` field that can be used if `spec.topology` is not specified. This field specifies the number of nodes (ie. pods) in the Elasticsearch cluster. The default value of this field is `1`. + +```yaml +spec: + replicas: 3 +``` + +### spec.maxUnavailable + +`spec.maxUnavailable` is an `optional` field that is used to specify the exact number of cluster replicas that can be safely evicted before the pod disruption budget kicks in to prevent unwanted data loss. + +```yaml +spec: + maxUnavailable: 1 +``` + +### spec.enableSSL + +`spec.enableSSL` is an `optional` field that specifies whether to enable TLS to HTTP layer. The default value of this field is `false`. + +```yaml +spec: + enableSSL: true +``` + +> Note: The `transport` layer of an Elasticsearch cluster is always secured with certificates. If you want to disable it, you need to disable the security plugin by setting the `spec.disableSecurity` to `true`. + +### spec.tls + +`spec.tls` specifies the TLS/SSL configurations. The KubeDB operator supports TLS management by using the [cert-manager](https://cert-manager.io/). Currently, the operator only supports the `PKCS#8` encoded certificates. + +```yaml +spec: + tls: + issuerRef: + apiGroup: "cert-manager.io" + kind: Issuer + name: es-issuer + certificates: + - alias: transport + privateKey: + encoding: PKCS8 + secretName: es-transport-cert + subject: + organizations: + - kubedb + - alias: http + privateKey: + encoding: PKCS8 + secretName: es-http-cert + subject: + organizations: + - kubedb +``` + +The `spec.tls` contains the following fields: + +- `tls.issuerRef` - is an `optional` field that references to the `Issuer` or `ClusterIssuer` custom resource object of [cert-manager](https://cert-manager.io/docs/concepts/issuer/). It is used to generate the necessary certificate secrets for Elasticsearch. If the `issuerRef` is not specified, the operator creates a self-signed CA and also creates necessary certificate (valid: 365 days) secrets using that CA. + - `apiGroup` - is the group name of the resource that is being referenced. Currently, the only supported value is `cert-manager.io`. + - `kind` - is the type of resource that is being referenced. The supported values are `Issuer` and `ClusterIssuer`. + - `name` - is the name of the resource ( `Issuer` or `ClusterIssuer` ) that is being referenced. 
+ +- `tls.certificates` - is an `optional` field that specifies a list of certificate configurations used to configure the certificates. It has the following fields: + - `alias` - represents the identifier of the certificate. It has the following possible value: + - `transport` - is used for the transport layer certificate configuration. + - `http` - is used for the HTTP layer certificate configuration. + - `admin` - is used for the admin certificate configuration. Available for the `SearchGuard` and the `OpenDistro` auth-plugins. + - `metrics-exporter` - is used for the metrics-exporter sidecar certificate configuration. + + - `secretName` - ( `string` | `"-alias-cert"` ) - specifies the k8s secret name that holds the certificates. + + - `subject` - specifies an `X.509` distinguished name (DN). It has the following configurable fields: + - `organizations` ( `[]string` | `nil` ) - is a list of organization names. + - `organizationalUnits` ( `[]string` | `nil` ) - is a list of organization unit names. + - `countries` ( `[]string` | `nil` ) - is a list of country names (ie. Country Codes). + - `localities` ( `[]string` | `nil` ) - is a list of locality names. + - `provinces` ( `[]string` | `nil` ) - is a list of province names. + - `streetAddresses` ( `[]string` | `nil` ) - is a list of street addresses. + - `postalCodes` ( `[]string` | `nil` ) - is a list of postal codes. + - `serialNumber` ( `string` | `""` ) is a serial number. + + For more details, visit [here](https://golang.org/pkg/crypto/x509/pkix/#Name). + + - `duration` ( `string` | `""` ) - is the period during which the certificate is valid. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300m"`, `"1.5h"` or `"20h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + - `renewBefore` ( `string` | `""` ) - is a specifiable time before expiration duration. + - `dnsNames` ( `[]string` | `nil` ) - is a list of subject alt names. + - `ipAddresses` ( `[]string` | `nil` ) - is a list of IP addresses. + - `uris` ( `[]string` | `nil` ) - is a list of URI Subject Alternative Names. + - `emailAddresses` ( `[]string` | `nil` ) - is a list of email Subject Alternative Names. + +### spec.authSecret + +`spec.authSecret` is an `optional` field that points to a k8s secret used to hold the Elasticsearch `elastic`/`admin` user credentials. + +```yaml +spec: + authSecret: + name: es-admin-cred +``` + +The k8s secret must be of `type: kubernetes.io/basic-auth` with the following keys: + +- `username`: Must be `elastic` for x-pack, or `admin` for searchGuard and OpenDistro. +- `password`: Password for the `elastic`/`admin` user. + +If not set, the KubeDB operator creates a new Secret `{Elasticsearch name}-{UserName}-cred` with randomly generated secured credentials. + +We can use this field in 3 mode. +1. Using an external secret. In this case, You need to create an auth secret first with required fields, then specify the secret name when creating the Elasticsearch object using `spec.authSecret.name` & set `spec.authSecret.externallyManaged` to true. +```yaml +authSecret: + name: + externallyManaged: true +``` + +2. Specifying the secret name only. In this case, You need to specify the secret name when creating the Elasticsearch object using `spec.authSecret.name`. `externallyManaged` is by default false. +```yaml +authSecret: + name: +``` + +3. Let KubeDB do everything for you. In this case, no work for you. 
+ +AuthSecret contains a `user` key and a `password` key which contains the `username` and `password` respectively for `elastic` superuser. + +Example: + +```bash +$ kubectl create secret generic elastic-auth -n demo \ +--from-literal=username=jhon-doe \ +--from-literal=password=6q8u_2jMOW-OOZXk +secret "elastic-auth" created +``` + +```yaml +apiVersion: v1 +data: + password: NnE4dV8yak1PVy1PT1pYaw== + username: amhvbi1kb2U= +kind: Secret +metadata: + name: elastic-auth + namespace: demo +type: Opaque +``` + +Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator (version 0.13.0 and higher). + +### spec.storageType + +`spec.storageType` is an `optional` field that specifies the type of storage to use for the database. It can be either `Durable` or `Ephemeral`. The default value of this field is `Durable`. If `Ephemeral` is used then KubeDB will create Elasticsearch database using [emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) volume. In this case, you don't have to specify `spec.storage` field. + +```yaml +spec: + storageType: Durable +``` + +### spec.storage + +If the `spec.storageType` is not set to `Ephemeral` and if the `spec.topology` field also is not set then `spec.storage` field is `required`. This field specifies the StorageClass of the PVCs dynamically allocated to store data for the database. This storage spec will be passed to the PetSet created by the KubeDB operator to run database pods. You can specify any StorageClass available in your cluster with appropriate resource requests. + +```yaml +spec: + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard +``` + +- `storage.storageClassName` - is the name of the StorageClass used to provision the PVCs. The PVCs don’t necessarily have to request a class. A PVC with the storageClassName set equal to "" is always interpreted to be requesting a PV with no class, so it can only be bound to PVs with no class (no annotation or one set equal to ""). A PVC with no storageClassName is not quite the same and is treated differently by the cluster depending on whether the DefaultStorageClass admission plugin is turned on. +- `storage.accessModes` uses the same conventions as Kubernetes PVCs when requesting storage with specific access modes. +- `spec.storage.resources` can be used to request specific quantities of storage. This follows the same resource model used by PVCs. + +To learn how to configure `spec.storage`, please visit the links below: + +- https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims + +### spec.init + +`spec.init` is an `optional` section that can be used to initialize a newly created Elasticsearch cluster from prior snapshots, taken by [Stash](/docs/guides/elasticsearch/backup/stash/overview/index.md). + +```yaml +spec: + init: + waitForInitialRestore: true +``` + +When the `waitForInitialRestore` is set to true, the Elasticsearch instance will be stack in the `Provisioning` state until the initial backup is completed. On completion of the very first restore operation, the Elasticsearch instance will go to the `Ready` state. + +For detailed tutorial on how to initialize Elasticsearch from Stash backup, please visit [here](/docs/guides/elasticsearch/backup/stash/overview/index.md). + +### spec.monitor + +Elasticsearch managed by KubeDB can be monitored with builtin-Prometheus and Prometheus operator out-of-the-box. 
To learn more, + +- [Monitor Elasticsearch with builtin Prometheus](/docs/guides/elasticsearch/monitoring/using-builtin-prometheus.md) +- [Monitor Elasticsearch with Prometheus operator](/docs/guides/elasticsearch/monitoring/using-prometheus-operator.md) + +### spec.configSecret + +`spec.configSecret` is an `optional` field that allows users to provide custom configuration for Elasticsearch. It contains a k8s secret name that holds the configuration files for both Elasticsearch and the security plugins (ie. x-pack, SearchGuard, and openDistro). + +```yaml +spec: + configuration: + secretName: es-custom-config +``` + +The configuration file names are used as secret keys. + +**Elasticsearch:** + +- `elasticsearch.yml` - for configuring Elasticsearch +- `jvm.options` - for configuring Elasticsearch JVM settings +- `log4j2.properties` - for configuring Elasticsearch logging + +**X-Pack:** + +- `roles.yml` - define roles and the associated permissions. +- `role_mapping.yml` - define which roles should be assigned to each user based on their username, groups, or other metadata. + +**SearchGuard:** + +- `sg_config.yml` - configure authenticators and authorization backends. +- `sg_roles.yml` - define roles and the associated permissions. +- `sg_roles_mapping.yml` - map backend roles, hosts, and users to roles. +- `sg_internal_users.yml` - stores users, and hashed passwords in the internal user database. +- `sg_action_groups.yml` - define named permission groups. +- `sg_tenants.yml` - defines tenants for configuring the Kibana access. +- `sg_blocks.yml` - defines blocked users and IP addresses. + +**OpenDistro:** + +- `internal_users.yml` - contains any initial users that you want to add to the security plugin’s internal user database. +- `roles.yml` - contains any initial roles that you want to add to the security plugin. +- `roles_mapping.yml` - maps backend roles, hosts and users to roles. +- `action_groups.yml` - contains any initial action groups that you want to add to the security plugin. +- `tenants.yml` - contains the tenant configurations. +- `nodes_dn.yml` - contains nodesDN mapping name and corresponding values. + +**How the resultant configuration files are generated?** + +- `YML`: The default configuration file pre-stored at config directories is overwritten by the operator-generated configuration file (if any). Then the resultant configuration file is overwritten by the user-provided custom configuration file (if any). The [yq](https://github.com/mikefarah/yq) tool is used to merge two YAML files. + + ```bash + $ yq merge -i --overwrite file1.yml file2.yml + ``` + +- `Non-YML`: The default configuration file is replaced by the operator-generated one (if any). Then the resultant configuration file is replaced by the user-provided custom configuration file (if any). + + ```bash + $ cp -f file2 file1 + ``` + +**How to provide node-role specific configurations?** + +If an Elasticsearch cluster is running in the topology mode (ie. `spec.topology` is set), a user may want to provide node-role specific configurations, say configurations that will only be merged to `master` nodes. To achieve this, users need to add the node role as a prefix to the file name. + +- Format: `-.extension` +- Samples: + - `data-elasticsearch.yml`: Only applied to `data` nodes. + - `master-jvm.options`: Only applied to `master` nodes. + - `ingest-log4j2.properties`: Only applied to `ingest` nodes. 
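+For example, a config secret that applies one setting to every node and another setting only to `master` nodes might look like the following sketch (the setting values are illustrative only):
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: es-role-specific-config
+  namespace: demo
+stringData:
+  # no role prefix: merged into every node's elasticsearch.yml
+  elasticsearch.yml: |-
+    node.processors: 2
+  # `master-` prefix: merged only into the master nodes' elasticsearch.yml
+  master-elasticsearch.yml: |-
+    path:
+      logs: "/usr/share/elasticsearch/data/master-logs-dir"
+```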
+
+**How to provide additional files that are referenced from the configurations?**
+
+All the files provided via `configSecret` are stored in each Elasticsearch node (i.e. pod) at the `ES_CONFIG_DIR/custom_config/` (i.e. `/usr/share/elasticsearch/config/custom_config/`) directory. So, users can use this path while configuring Elasticsearch.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: es-custom-config
+  namespace: demo
+stringData:
+  elasticsearch.yml: |-
+    logger.org.elasticsearch.discovery: DEBUG
+```
+
+### spec.podTemplate
+
+KubeDB allows providing a template for the database pods through `spec.podTemplate`. The KubeDB operator will pass the information provided in `spec.podTemplate` to the PetSet created for the Elasticsearch database.
+
+KubeDB accepts the following fields to set in `spec.podTemplate`:
+
+- metadata:
+  - annotations (pod's annotation)
+  - labels (pod's labels)
+- controller:
+  - annotations (petset's annotation)
+  - labels (petset's labels)
+- spec:
+  - containers
+  - volumes
+  - podPlacementPolicy
+  - initContainers
+  - imagePullSecrets
+  - nodeSelector
+  - serviceAccountName
+  - schedulerName
+  - tolerations
+  - priorityClassName
+  - priority
+  - securityContext
+
+You can check out the full list [here](https://github.com/kmodules/offshoot-api/blob/master/api/v2/types.go#L26C1-L279C1).
+
+Uses of some fields of `spec.podTemplate` are described below.
+
+#### spec.podTemplate.spec.tolerations
+
+The `spec.podTemplate.spec.tolerations` is an optional field. This can be used to specify the pod's tolerations.
+
+#### spec.podTemplate.spec.volumes
+
+The `spec.podTemplate.spec.volumes` is an optional field. This can be used to provide the list of volumes that can be mounted by containers belonging to the pod.
+
+#### spec.podTemplate.spec.podPlacementPolicy
+
+`spec.podTemplate.spec.podPlacementPolicy` is an optional field. This can be used to provide a reference to a `podPlacementPolicy`; the `name` of the podPlacementPolicy is referred to under this attribute. This will be used by our PetSet controller to place the db pods across regions, zones, and nodes according to the policy. It utilizes the Kubernetes affinity and podTopologySpreadConstraints features to do so.
+```yaml
+spec:
+  podPlacementPolicy:
+    name: default
+```
+
+#### spec.podTemplate.spec.imagePullSecrets
+
+`spec.podTemplate.spec.imagePullSecrets` is an optional field that points to secrets to be used for pulling docker images when you are using a private docker registry. For more details on how to use a private docker registry, please visit [here](/docs/guides/elasticsearch/private-registry/using-private-registry.md).
+
+#### spec.podTemplate.spec.nodeSelector
+
+`spec.podTemplate.spec.nodeSelector` is an `optional` field that specifies a map of key-value pairs. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). To learn more, see [here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector).
+
+#### spec.podTemplate.spec.serviceAccountName
+
+`serviceAccountName` is an optional field supported by KubeDB Operator (version 0.13.0 and higher) that can be used to specify a custom service account to fine-tune role-based access control.
+
+If this field is left empty, the KubeDB operator will create a service account with a name matching the Elasticsearch instance name. Role and RoleBinding that provide necessary access permissions will also be generated automatically for this service account.
+
+If a service account name is given, but there's no existing service account by that name, the KubeDB operator will create one, and Role and RoleBinding that provide necessary access permissions will also be generated for this service account.
+
+If a service account name is given, and there's an existing service account by that name, the KubeDB operator will use that existing service account. Since this service account is not managed by KubeDB, users are responsible for providing necessary access permissions manually. Follow the guide [here](/docs/guides/elasticsearch/custom-rbac/using-custom-rbac.md) to grant necessary permissions in this scenario.
+
+```yaml
+spec:
+  podTemplate:
+    spec:
+      serviceAccountName: es
+```
+
+#### spec.podTemplate.spec.containers
+
+The `spec.podTemplate.spec.containers` field can be used to provide the list of containers and their configurations for the database pod. Some of the fields are described below.
+
+##### spec.podTemplate.spec.containers[].name
+The `spec.podTemplate.spec.containers[].name` field is used to specify the name of the container, specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
+
+##### spec.podTemplate.spec.containers[].args
+`spec.podTemplate.spec.containers[].args` is an optional field. This can be used to provide additional arguments to the database installation.
+
+##### spec.podTemplate.spec.containers[].env
+
+`spec.podTemplate.spec.containers[].env` is an `optional` field that specifies the environment variables to pass to the Elasticsearch containers.
+
+You are not allowed to pass the following `env`:
+- `node.name`
+- `node.ingest`
+- `node.master`
+- `node.data`
+
+If you try to set any of these, the validating webhook rejects the Elasticsearch object with an error like the following:
+
+```ini
+Error from server (Forbidden): error when creating "./elasticsearch.yaml": admission webhook "elasticsearch.validators.kubedb.com" denied the request: environment variable node.name is forbidden to use in Elasticsearch spec
+```
+
+##### spec.podTemplate.spec.containers[].resources
+
+`spec.podTemplate.spec.containers[].resources` is an `optional` field that can be used to request or limit computational resources required by the database pods. To learn more, visit [here](http://kubernetes.io/docs/user-guide/compute-resources/).
+
+```yaml
+spec:
+  podTemplate:
+    spec:
+      containers:
+        - name: "elasticsearch"
+          resources:
+            limits:
+              cpu: 500m
+              memory: 1Gi
+            requests:
+              cpu: 500m
+              memory: 1Gi
+```
+
+### spec.serviceTemplates
+
+`spec.serviceTemplates` is an `optional` field that contains a list of serviceTemplates. The templates are identified by the `alias`. For Elasticsearch, the configurable services' aliases are `primary` and `stats`.
+
+You can also provide a template for the services created by the KubeDB operator for the Elasticsearch database through `spec.serviceTemplates`. This will allow you to set the type and other properties of the services.
+
+KubeDB allows the following fields to be set in `spec.serviceTemplates`:
+- metadata:
+  - labels
+  - annotations
+- spec:
+  - type
+  - ports
+  - clusterIP
+  - externalIPs
+  - loadBalancerIP
+  - loadBalancerSourceRanges
+  - externalTrafficPolicy
+  - healthCheckNodePort
+  - sessionAffinityConfig
+
+See [here](https://github.com/kmodules/offshoot-api/blob/kubernetes-1.21.1/api/v1/types.go#L237) to understand these fields in detail.
+
+```yaml
+spec:
+  serviceTemplates:
+    - alias: primary
+      metadata:
+        annotations:
+          passTo: service
+      spec:
+        type: NodePort
+    - alias: stats
+      # stats service configurations
+```
+
+See [here](https://github.com/kmodules/offshoot-api/blob/kubernetes-1.18.9/api/v1/types.go#L192) to understand these fields in detail.
+
+### spec.deletionPolicy
+
+`deletionPolicy` gives flexibility whether to `nullify` (reject) the delete operation of the `Elasticsearch` CRD or which resources KubeDB should keep or delete when you delete the `Elasticsearch` CRD. The KubeDB operator provides the following deletion policies:
+
+- DoNotTerminate
+- Halt
+- Delete (`Default`)
+- WipeOut
+
+When `deletionPolicy` is `DoNotTerminate`, KubeDB takes advantage of the `ValidationWebhook` feature in Kubernetes v1.9+ to provide safety from accidental deletion of the database. If the admission webhook is enabled, KubeDB prevents users from deleting the database as long as the `spec.deletionPolicy` is set to `DoNotTerminate`.
+
+The following table shows what KubeDB does when you delete the Elasticsearch CRD for different deletion policies,
+
+| Behavior                          | DoNotTerminate | Halt | Delete | WipeOut |
+| --------------------------------- | :------------: | :--: | :----: | :-----: |
+| 1. Block Delete operation         |       ✓        |  ✗   |   ✗    |    ✗    |
+| 2. Delete PetSet                  |       ✗        |  ✓   |   ✓    |    ✓    |
+| 3. Delete Services                |       ✗        |  ✓   |   ✓    |    ✓    |
+| 4. Delete TLS Credential Secrets  |       ✗        |  ✓   |   ✓    |    ✓    |
+| 5. Delete PVCs                    |       ✗        |  ✗   |   ✓    |    ✓    |
+| 6. Delete User Credential Secrets |       ✗        |  ✗   |   ✗    |    ✓    |
+
+If the `spec.deletionPolicy` is not specified, the KubeDB operator defaults it to `Delete`.
+
+> For more details, you can visit [here](https://appscode.com/blog/post/deletion-policy/).
+
+## spec.healthChecker
+
+It defines the attributes for the health checker.
+- `spec.healthChecker.periodSeconds` specifies how often to perform the health check.
+- `spec.healthChecker.timeoutSeconds` specifies the number of seconds after which the probe times out.
+- `spec.healthChecker.failureThreshold` specifies the minimum consecutive failures for the healthChecker to be considered failed.
+- `spec.healthChecker.disableWriteCheck` specifies whether to disable the writeCheck or not.
+
+Learn more about KubeDB health checking from this [blog post](https://appscode.com/blog/post/kubedb-health-checker/).
+
+## Next Steps
+
+- Learn how to use KubeDB to run an Elasticsearch database [here](/docs/guides/elasticsearch/README.md).
+- Learn how to use ElasticsearchOpsRequest [here](/docs/guides/elasticsearch/concepts/elasticsearch-ops-request/index.md).
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/elasticsearch/configuration/combined-cluster/index.md b/docs/guides/elasticsearch/configuration/combined-cluster/index.md
index 253aaaa124..65873f13d1 100644
--- a/docs/guides/elasticsearch/configuration/combined-cluster/index.md
+++ b/docs/guides/elasticsearch/configuration/combined-cluster/index.md
@@ -104,8 +104,8 @@ spec:
   version: xpack-8.11.1
   enableSSL: true
   replicas: 3
-  configSecret:
-    name: es-custom-config # mentioned here!
+  configuration:
+    secretName: es-custom-config # mentioned here!
storageType: Durable storage: storageClassName: "standard" diff --git a/docs/guides/elasticsearch/configuration/combined-cluster/index.md.bak b/docs/guides/elasticsearch/configuration/combined-cluster/index.md.bak new file mode 100644 index 0000000000..253aaaa124 --- /dev/null +++ b/docs/guides/elasticsearch/configuration/combined-cluster/index.md.bak @@ -0,0 +1,514 @@ +--- +title: Configuring Elasticsearch Combined Cluster +menu: + docs_{{ .version }}: + identifier: es-configuration-combined-cluster + name: Combined Cluster + parent: es-configuration + weight: 15 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Configure Elasticsearch Combined Cluster + +In Elasticsearch combined cluster, every node can perform as master, data, and ingest nodes simultaneously. In this tutorial, we will see how to configure a combined cluster. + +## Before You Begin + +At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +Now, install the KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create namespace demo +namespace/demo created + +$ kubectl get namespace +NAME STATUS AGE +demo Active 9s +``` + +> Note: YAML files used in this tutorial are stored in [here](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/guides/elasticsearch/configuration/combined-cluster/yamls +) in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Find Available StorageClass + +We will have to provide `StorageClass` in Elasticsearch CR specification. Check available `StorageClass` in your cluster using the following command, + +```bash +$ kubectl get storageclass +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +standard (default) rancher.io/local-path Delete WaitForFirstConsumer false 1h +``` + +Here, we have `standard` StorageClass in our cluster from [Local Path Provisioner](https://github.com/rancher/local-path-provisioner). + +## Use Custom Configuration + +Say we want to change the default log directory for our cluster and want to configure disk-based shard allocation. Let's create the `elasticsearch.yml` file with our desire configurations. 
+ +**elasticsearch.yml:** + +```yaml +path: + logs: "/usr/share/elasticsearch/data/new-logs-dir" +# For 100gb node space: +# Enable disk-based shard allocation +cluster.routing.allocation.disk.threshold_enabled: true +# prevent Elasticsearch from allocating shards to the node if less than the 15gb of space is available +cluster.routing.allocation.disk.watermark.low: 15gb +# relocate shards away from the node if the node has less than 10gb of free space +cluster.routing.allocation.disk.watermark.high: 10gb +# enforce a read-only index block if the node has less than 5gb of free space +cluster.routing.allocation.disk.watermark.flood_stage: 5gb +``` + +Let's create a k8s secret containing the above configuration where the file name will be the key and the file-content as the value: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: es-custom-config + namespace: demo +stringData: + elasticsearch.yml: |- + path: + logs: "/usr/share/elasticsearch/data/new-logs-dir" + cluster.routing.allocation.disk.threshold_enabled: true + cluster.routing.allocation.disk.watermark.low: 15gb + cluster.routing.allocation.disk.watermark.high: 10gb + cluster.routing.allocation.disk.watermark.flood_stage: 5gb +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/elasticsearch/configuration/combined-cluster/yamls/config-secret.yaml +secret/es-custom-config created +``` + +Now that the config secret is created, it needs to be mention in the [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md) object's yaml: + + +```yaml +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: es-multinode + namespace: demo +spec: + version: xpack-8.11.1 + enableSSL: true + replicas: 3 + configSecret: + name: es-custom-config # mentioned here! + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + deletionPolicy: WipeOut +``` + +Now, create the Elasticsearch object by the following command: + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/elasticsearch/configuration/combined-cluster/yamls/es-combined.yaml +elasticsearch.kubedb.com/es-multinode created +``` + +Now, wait for the Elasticsearch to become ready: + +```bash +$ kubectl get es -n demo -w +NAME VERSION STATUS AGE +es-multinode xpack-8.11.1 Provisioning 18s +es-multinode xpack-8.11.1 Provisioning 2m5s +es-multinode xpack-8.11.1 Ready 2m5s +``` + +## Verify Configuration + +Let's connect to the Elasticsearch cluster that we have created and check the node settings to verify whether our configurations are applied or not: + +Connect to the Cluster: + +```bash +# Port-forward the service to local machine +$ kubectl port-forward -n demo svc/es-multinode 9200 +Forwarding from 127.0.0.1:9200 -> 9200 +Forwarding from [::1]:9200 -> 9200 +``` + +Now, our Elasticsearch cluster is accessible at `localhost:9200`. 
+ +**Connection information:** + +- Address: `localhost:9200` +- Username: + + ```bash + $ kubectl get secret -n demo es-multinode-elastic-cred -o jsonpath='{.data.username}' | base64 -d + elastic + ``` + +- Password: + + ```bash + $ kubectl get secret -n demo es-multinode-elastic-cred -o jsonpath='{.data.password}' | base64 -d + ehG7*7SJZ0o9PA05 + ``` + +Now, we will query for settings of all nodes in an Elasticsearch cluster, + +```bash +$ curl -XGET -k -u 'elastic:ehG7*7SJZ0o9PA05' "https://localhost:9200/_nodes/_all/settings?pretty" + +``` + +This will return a large JSON with node settings. Here is the prettified JSON response, + +```json +{ + "_nodes" : { + "total" : 3, + "successful" : 3, + "failed" : 0 + }, + "cluster_name" : "es-multinode", + "nodes" : { + "_xWvqAU4QJeMaV4MayTgeg" : { + "name" : "es-multinode-0", + "transport_address" : "10.244.0.25:9300", + "host" : "10.244.0.25", + "ip" : "10.244.0.25", + "version" : "7.9.1", + "build_flavor" : "default", + "build_type" : "docker", + "build_hash" : "083627f112ba94dffc1232e8b42b73492789ef91", + "roles" : [ + "data", + "ingest", + "master", + "ml", + "remote_cluster_client", + "transform" + ], + "attributes" : { + "ml.machine_memory" : "1073741824", + "xpack.installed" : "true", + "transform.node" : "true", + "ml.max_open_jobs" : "20" + }, + "settings" : { + "cluster" : { + "name" : "es-multinode", + "routing" : { + "allocation" : { + "disk" : { + "threshold_enabled" : "true", + "watermark" : { + "low" : "15gb", + "flood_stage" : "5gb", + "high" : "10gb" + } + } + } + }, + "election" : { + "strategy" : "supports_voting_only" + }, + "initial_master_nodes" : "es-multinode-0,es-multinode-1,es-multinode-2" + }, + "node" : { + "name" : "es-multinode-0", + "attr" : { + "transform" : { + "node" : "true" + }, + "xpack" : { + "installed" : "true" + }, + "ml" : { + "machine_memory" : "1073741824", + "max_open_jobs" : "20" + } + }, + "data" : "true", + "ingest" : "true", + "master" : "true" + }, + "path" : { + "logs" : "/usr/share/elasticsearch/data/new-logs-dir", + "home" : "/usr/share/elasticsearch" + }, + "discovery" : { + "seed_hosts" : "es-multinode-master" + }, + "client" : { + "type" : "node" + }, + "http" : { + "compression" : "false", + "type" : "security4", + "type.default" : "netty4" + }, + "transport" : { + "type" : "security4", + "features" : { + "x-pack" : "true" + }, + "type.default" : "netty4" + }, + "xpack" : { + "security" : { + "http" : { + "ssl" : { + "enabled" : "true" + } + }, + "enabled" : "true", + "transport" : { + "ssl" : { + "enabled" : "true" + } + } + } + }, + "network" : { + "host" : "0.0.0.0" + } + } + }, + "0q1IcSSARwu9HrQmtvjDGA" : { + "name" : "es-multinode-1", + "transport_address" : "10.244.0.27:9300", + "host" : "10.244.0.27", + "ip" : "10.244.0.27", + "version" : "7.9.1", + "build_flavor" : "default", + "build_type" : "docker", + "build_hash" : "083627f112ba94dffc1232e8b42b73492789ef91", + "roles" : [ + "data", + "ingest", + "master", + "ml", + "remote_cluster_client", + "transform" + ], + "attributes" : { + "ml.machine_memory" : "1073741824", + "ml.max_open_jobs" : "20", + "xpack.installed" : "true", + "transform.node" : "true" + }, + "settings" : { + "cluster" : { + "name" : "es-multinode", + "routing" : { + "allocation" : { + "disk" : { + "threshold_enabled" : "true", + "watermark" : { + "low" : "15gb", + "flood_stage" : "5gb", + "high" : "10gb" + } + } + } + }, + "election" : { + "strategy" : "supports_voting_only" + }, + "initial_master_nodes" : "es-multinode-0,es-multinode-1,es-multinode-2" + }, + 
"node" : { + "name" : "es-multinode-1", + "attr" : { + "transform" : { + "node" : "true" + }, + "xpack" : { + "installed" : "true" + }, + "ml" : { + "machine_memory" : "1073741824", + "max_open_jobs" : "20" + } + }, + "data" : "true", + "ingest" : "true", + "master" : "true" + }, + "path" : { + "logs" : "/usr/share/elasticsearch/data/new-logs-dir", + "home" : "/usr/share/elasticsearch" + }, + "discovery" : { + "seed_hosts" : "es-multinode-master" + }, + "client" : { + "type" : "node" + }, + "http" : { + "compression" : "false", + "type" : "security4", + "type.default" : "netty4" + }, + "transport" : { + "type" : "security4", + "features" : { + "x-pack" : "true" + }, + "type.default" : "netty4" + }, + "xpack" : { + "security" : { + "http" : { + "ssl" : { + "enabled" : "true" + } + }, + "enabled" : "true", + "transport" : { + "ssl" : { + "enabled" : "true" + } + } + } + }, + "network" : { + "host" : "0.0.0.0" + } + } + }, + "ITvdnOcERwuG0qBmBJLaww" : { + "name" : "es-multinode-2", + "transport_address" : "10.244.0.29:9300", + "host" : "10.244.0.29", + "ip" : "10.244.0.29", + "version" : "7.9.1", + "build_flavor" : "default", + "build_type" : "docker", + "build_hash" : "083627f112ba94dffc1232e8b42b73492789ef91", + "roles" : [ + "data", + "ingest", + "master", + "ml", + "remote_cluster_client", + "transform" + ], + "attributes" : { + "ml.machine_memory" : "1073741824", + "ml.max_open_jobs" : "20", + "xpack.installed" : "true", + "transform.node" : "true" + }, + "settings" : { + "cluster" : { + "name" : "es-multinode", + "routing" : { + "allocation" : { + "disk" : { + "threshold_enabled" : "true", + "watermark" : { + "low" : "15gb", + "flood_stage" : "5gb", + "high" : "10gb" + } + } + } + }, + "election" : { + "strategy" : "supports_voting_only" + }, + "initial_master_nodes" : "es-multinode-0,es-multinode-1,es-multinode-2" + }, + "node" : { + "name" : "es-multinode-2", + "attr" : { + "transform" : { + "node" : "true" + }, + "xpack" : { + "installed" : "true" + }, + "ml" : { + "machine_memory" : "1073741824", + "max_open_jobs" : "20" + } + }, + "data" : "true", + "ingest" : "true", + "master" : "true" + }, + "path" : { + "logs" : "/usr/share/elasticsearch/data/new-logs-dir", + "home" : "/usr/share/elasticsearch" + }, + "discovery" : { + "seed_hosts" : "es-multinode-master" + }, + "client" : { + "type" : "node" + }, + "http" : { + "compression" : "false", + "type" : "security4", + "type.default" : "netty4" + }, + "transport" : { + "type" : "security4", + "features" : { + "x-pack" : "true" + }, + "type.default" : "netty4" + }, + "xpack" : { + "security" : { + "http" : { + "ssl" : { + "enabled" : "true" + } + }, + "enabled" : "true", + "transport" : { + "ssl" : { + "enabled" : "true" + } + } + } + }, + "network" : { + "host" : "0.0.0.0" + } + } + } + } +} +``` + +Here we can see that our given configuration is merged to the default configurations. 
+ +## Cleanup + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +$ kubectl delete elasticsearch -n demo es-multinode + +$ kubectl delete secret -n demo es-custom-config + +$ kubectl delete namespace demo +``` + +## Next Steps diff --git a/docs/guides/elasticsearch/configuration/combined-cluster/yamls/es-combined.yaml b/docs/guides/elasticsearch/configuration/combined-cluster/yamls/es-combined.yaml index 36ee6c3b9d..3a1cc8bf78 100644 --- a/docs/guides/elasticsearch/configuration/combined-cluster/yamls/es-combined.yaml +++ b/docs/guides/elasticsearch/configuration/combined-cluster/yamls/es-combined.yaml @@ -7,8 +7,8 @@ spec: version: xpack-8.11.1 enableSSL: true replicas: 3 - configSecret: - name: es-custom-config + configuration: + secretName: es-custom-config storageType: Durable storage: storageClassName: "standard" diff --git a/docs/guides/elasticsearch/configuration/combined-cluster/yamls/es-combined.yaml.bak b/docs/guides/elasticsearch/configuration/combined-cluster/yamls/es-combined.yaml.bak new file mode 100644 index 0000000000..36ee6c3b9d --- /dev/null +++ b/docs/guides/elasticsearch/configuration/combined-cluster/yamls/es-combined.yaml.bak @@ -0,0 +1,20 @@ +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: es-multinode + namespace: demo +spec: + version: xpack-8.11.1 + enableSSL: true + replicas: 3 + configSecret: + name: es-custom-config + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/guides/elasticsearch/configuration/jvm-options/index.md b/docs/guides/elasticsearch/configuration/jvm-options/index.md index 0d8f0fb5db..eadb8b0c6f 100644 --- a/docs/guides/elasticsearch/configuration/jvm-options/index.md +++ b/docs/guides/elasticsearch/configuration/jvm-options/index.md @@ -82,8 +82,8 @@ metadata: namespace: demo spec: # Make sure that you've mentioned the config secret name here - configSecret: - name: es-custom-config + configuration: + secretName: es-custom-config enableSSL: false version: opensearch-2.8.0 storageType: Durable diff --git a/docs/guides/elasticsearch/configuration/jvm-options/index.md.bak b/docs/guides/elasticsearch/configuration/jvm-options/index.md.bak new file mode 100644 index 0000000000..0d8f0fb5db --- /dev/null +++ b/docs/guides/elasticsearch/configuration/jvm-options/index.md.bak @@ -0,0 +1,139 @@ +--- +title: Configuring Elasticsearch JVM Options +menu: + docs_{{ .version }}: + identifier: es-configuration-jvm-options + name: JVM Options + parent: es-configuration + weight: 25 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +# Configure Elasticsearch JVM Options + +The Elasticsearch offers users to configure the JVM settings by using `jvm.options` file. The `jvm.options` file located at the `$ES_HOME/config` (ie. `/usr/share/elasticsearch/config`) directory. + +## Deploy Elasticsearch with Custom jvm.options File + +Before deploying the Elasticsearch instance, you need to create a k8s secret with the custom config files (here: `jvm.options`). 
+ +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: es-custom-config + namespace: demo +stringData: + jvm.options: |- + ## G1GC Configuration + + 10-:-XX:+UseG1GC + 10-13:-XX:-UseConcMarkSweepGC + 10-13:-XX:-UseCMSInitiatingOccupancyOnly + 10-:-XX:G1ReservePercent=25 + 10-:-XX:InitiatingHeapOccupancyPercent=30 + + ## JVM temporary directory + -Djava.io.tmpdir=${ES_TMPDIR} + + ## heap dumps + + # generate a heap dump when an allocation from the Java heap fails + # heap dumps are created in the working directory of the JVM + -XX:+HeapDumpOnOutOfMemoryError + + # specify an alternative path for heap dumps; ensure the directory exists and + # has sufficient space + -XX:HeapDumpPath=data + + # specify an alternative path for JVM fatal error logs + -XX:ErrorFile=logs/hs_err_pid%p.log + + # JDK 9+ GC logging + 9-:-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m +``` + +If you want to provide node-role specific settings, say you want to configure ingest nodes with a different setting than others in a topology cluster, add node `role` as a prefix in the file name. + +```yaml +stringData: + ingest-jvm.options: |- + ... ... + master-jvm.options: |- + ... ... + ... ... +``` + +Deploy the k8s secret: + +```bash +kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/elasticsearch/configuration/jvm-options/yamls/custom-config.yaml +secret/es-custom-config created +``` + +Now Deploy the Elasticsearch Cluster with the custom `jvm.options` file: + +```yaml +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: es-test + namespace: demo +spec: + # Make sure that you've mentioned the config secret name here + configSecret: + name: es-custom-config + enableSSL: false + version: opensearch-2.8.0 + storageType: Durable + deletionPolicy: WipeOut + topology: + master: + suffix: master + replicas: 3 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + suffix: data + replicas: 2 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + ingest: + suffix: ingest + replicas: 2 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` + +Deploy Elasticsearch: + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/elasticsearch/configuration/jvm-options/yamls/elasticsearch.yaml +elasticsearch/es-test created +``` + +Wait for the Elasticsearch to become ready: + +```bash +$ kubectl get elasticsearch -n demo -w +NAME VERSION STATUS AGE +es-test opensearch-2.8.0 Provisioning 12s +es-test opensearch-2.8.0 Provisioning 2m2s +es-test opensearch-2.8.0 Ready 2m2s +``` diff --git a/docs/guides/elasticsearch/configuration/jvm-options/yamls/elasticsearch.yaml b/docs/guides/elasticsearch/configuration/jvm-options/yamls/elasticsearch.yaml index c11f4fd81e..b0de5a9b82 100644 --- a/docs/guides/elasticsearch/configuration/jvm-options/yamls/elasticsearch.yaml +++ b/docs/guides/elasticsearch/configuration/jvm-options/yamls/elasticsearch.yaml @@ -4,8 +4,8 @@ metadata: name: es-test namespace: demo spec: - configSecret: - name: es-custom-config + configuration: + secretName: es-custom-config enableSSL: false version: opensearch-2.8.0 storageType: Durable diff --git a/docs/guides/elasticsearch/configuration/jvm-options/yamls/elasticsearch.yaml.bak 
b/docs/guides/elasticsearch/configuration/jvm-options/yamls/elasticsearch.yaml.bak new file mode 100644 index 0000000000..c11f4fd81e --- /dev/null +++ b/docs/guides/elasticsearch/configuration/jvm-options/yamls/elasticsearch.yaml.bak @@ -0,0 +1,43 @@ +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: es-test + namespace: demo +spec: + configSecret: + name: es-custom-config + enableSSL: false + version: opensearch-2.8.0 + storageType: Durable + deletionPolicy: WipeOut + topology: + master: + suffix: master + replicas: 3 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + suffix: data + replicas: 2 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + ingest: + suffix: ingest + replicas: 2 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/docs/guides/elasticsearch/configuration/overview/index.md b/docs/guides/elasticsearch/configuration/overview/index.md index 39edc93ff1..5f0e944286 100644 --- a/docs/guides/elasticsearch/configuration/overview/index.md +++ b/docs/guides/elasticsearch/configuration/overview/index.md @@ -52,7 +52,7 @@ The `OpenDistro` security plugin has the following configuration files: ## Custom Config Seceret -The custom configuration files are passed via a Kubernetes secret. The **file names are the keys** of the Secret with the **file-contents as the values**. The secret name needs to be mentioned in `spec.configSecret.name` of the [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md) object. +The custom configuration files are passed via a Kubernetes secret. The **file names are the keys** of the Secret with the **file-contents as the values**. The secret name needs to be mentioned in `spec.configuration.secretName` of the [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md) object. ```yaml apiVersion: kubedb.com/v1 @@ -62,8 +62,8 @@ metadata: namespace: demo spec: version: xpack-8.11.1 - configSecret: - name: es-custom-config + configuration: + secretName: es-custom-config ``` ```yaml diff --git a/docs/guides/elasticsearch/configuration/overview/index.md.bak b/docs/guides/elasticsearch/configuration/overview/index.md.bak new file mode 100644 index 0000000000..2bd7c0d164 --- /dev/null +++ b/docs/guides/elasticsearch/configuration/overview/index.md.bak @@ -0,0 +1,112 @@ +--- +title: Run Elasticsearch with Custom Configuration +menu: + docs_{{ .version }}: + identifier: es-overview-configuration + name: Overview + parent: es-configuration + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Elasticsearch with Custom Configuration Files + +The KubeDB operator allows a user to deploy an Elasticsearch cluster with custom configuration files. The operator also allows the user to configure the security plugins such as X-Pack, SearchGurad, and OpenDistro. If the custom configuration files are not provided, the operator will start the cluster with default configurations. 
+ +## Overview + +Elasticsearch has three configuration files: + +- `elasticsearch.yml`: for configuring Elasticsearch +- `jvm.options`: for configuring Elasticsearch JVM settings +- `log4j2.properties`: for configuring Elasticsearch logging + +In KubeDB managed Elasticsearch cluster, the configuration files are located at `/usr/share/elasticsearch/config` directory of Elasticsearch pods. To know more about configuring the Elasticsearch cluster see [here](https://www.elastic.co/guide/en/elasticsearch/reference/7.10/settings.html). + +The `X-Pack` security plugin has the following configuration files: + +- `roles.yml` - define roles and the associated permissions. +- `role_mapping.yml` - define which roles should be assigned to each user based on their username, groups, or other metadata. + +The `SearchGuard` security plugin has the following configuration files: + +- `sg_config.yml` - configure authenticators and authorization backends. +- `sg_roles.yml` - define roles and the associated permissions. +- `sg_roles_mapping.yml` - map backend roles, hosts, and users to roles. +- `sg_internal_users.yml` - stores users, and hashed passwords in the internal user database. +- `sg_action_groups.yml` - define named permission groups. +- `sg_tenants.yml` - defines tenants for configuring the Kibana access. +- `sg_blocks.yml` - defines blocked users and IP addresses. + +The `OpenDistro` security plugin has the following configuration files: + +- `internal_users.yml` - contains any initial users that you want to add to the security plugin’s internal user database. +- `roles.yml` - contains any initial roles that you want to add to the security plugin. +- `roles_mapping.yml` - maps backend roles, hosts, and users to roles. +- `action_groups.yml` - contains any initial action groups that you want to add to the security plugin. +- `tenants.yml` - contains the tenant configurations. +- `nodes_dn.yml` - contains nodesDN mapping name and corresponding values. + +## Custom Config Seceret + +The custom configuration files are passed via a Kubernetes secret. The **file names are the keys** of the Secret with the **file-contents as the values**. The secret name needs to be mentioned in `spec.configSecret.name` of the [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md) object. + +```yaml +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: es-custom-config + namespace: demo +spec: + version: xpack-8.11.1 + configuration: + secretName: es-custom-config +``` + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: es-custom-config + namespace: demo +stringData: + elasticsearch.yml: |- + logger.org.elasticsearch.discovery: DEBUG +``` + +**How the resultant configuration files are generated?** + +- `YML`: The default configuration file pre-stored at config directories is overwritten by the operator-generated configuration file (if any). Then the resultant configuration file is overwritten by the user-provided custom configuration file (if any). The [yq](https://github.com/mikefarah/yq) tool is used to merge two YAML files. + + ```bash + $ yq merge -i --overwrite file1.yml file2.yml + ``` + +- `Non-YML`: The default configuration file is replaced by the operator-generated one (if any). Then the resultant configuration file is replaced by the user-provided custom configuration file (if any). + + ```bash + $ cp -f file2 file1 + ``` + +**How to provide node-role specific configurations?** + +If an Elasticsearch cluster is running in the topology mode (ie. 
`spec.topology` is set), a user may want to provide node-role specific configurations, say configurations that will only be merged to `master` nodes. To achieve this, users need to add the node role as a prefix to the file name. + +- Format: `-.extension` +- Samples: + - `data-elasticsearch.yml`: Only applied to `data` nodes. + - `master-jvm.options`: Only applied to `master` nodes. + - `ingest-log4j2.properties`: Only applied to `ingest` nodes. + - `elasticsearch.yml`: applied to all nodes. + +**How to provide additional files that are referenced from the configurations?** + +All these files provided via `configSecret` is stored in each Elasticsearch node (i.e. pod) at `ES_CONFIG_DIR/custom_config/` ( i.e. `/usr/share/elasticsearch/config/custom_config/`) directory. So, user can use this path while configuring the Elasticsearch. + +## Next Steps + +- Learn how to use custom configuration in combined cluster from [here](/docs/guides/elasticsearch/configuration/combined-cluster/index.md). +- Learn how to use custom configuration in topology cluster from [here](/docs/guides/elasticsearch/configuration/topology-cluster/index.md). diff --git a/docs/guides/elasticsearch/configuration/topology-cluster/index.md b/docs/guides/elasticsearch/configuration/topology-cluster/index.md index e44707edbb..de85c1a666 100644 --- a/docs/guides/elasticsearch/configuration/topology-cluster/index.md +++ b/docs/guides/elasticsearch/configuration/topology-cluster/index.md @@ -141,8 +141,8 @@ metadata: spec: enableSSL: true version: xpack-8.11.1 - configSecret: - name: es-custom-config # mentioned here! + configuration: + secretName: es-custom-config # mentioned here! storageType: Durable deletionPolicy: WipeOut topology: diff --git a/docs/guides/elasticsearch/configuration/topology-cluster/index.md.bak b/docs/guides/elasticsearch/configuration/topology-cluster/index.md.bak new file mode 100644 index 0000000000..e44707edbb --- /dev/null +++ b/docs/guides/elasticsearch/configuration/topology-cluster/index.md.bak @@ -0,0 +1,540 @@ +--- +title: Configuring Elasticsearch Topology Cluster +menu: + docs_{{ .version }}: + identifier: es-configuration-topology-cluster + name: Topology Cluster + parent: es-configuration + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Configure Elasticsearch Topology Cluster + +In an Elasticsearch topology cluster, each node is assigned with a dedicated role such as master, data, and ingest. The cluster must have at least one master node, one data node, and one ingest node. In this tutorial, we will see how to configure a topology cluster. + +## Before You Begin + +At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +Now, install the KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. 
+ +```bash +$ kubectl create namespace demo +namespace/demo created + +$ kubectl get namespace +NAME STATUS AGE +demo Active 9s +``` + +> Note: YAML files used in this tutorial are stored in [here](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/guides/elasticsearch/configuration/combined-cluster/yamls +) in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Find Available StorageClass + +We will have to provide `StorageClass` in Elasticsearch CR specification. Check available `StorageClass` in your cluster using the following command, + +```bash +$ kubectl get storageclass +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +standard (default) rancher.io/local-path Delete WaitForFirstConsumer false 1h +``` + +Here, we have `standard` StorageClass in our cluster from [Local Path Provisioner](https://github.com/rancher/local-path-provisioner). + +## Use Custom Configuration + +Say we want to change the default log directories for our cluster and want to configure disk-based shard allocation. We also want that the log directory name should have node-role in it (ie. demonstrating node-role specific configurations). + +If a user may want to provide node-role specific configurations, say configurations that will only be merged to master nodes. To achieve this, users need to add the node role as a prefix to the file name. + +- Format: `-.extension` +- Samples: + - `data-elasticsearch.yml`: Only applied to data nodes. + - `master-jvm.options`: Only applied to master nodes. + - `ingest-log4j2.properties`: Only applied to ingest nodes. + - `elasticsearch.yml`: Empty node-role means it will be applied to all nodes. + +Let's create the `elasticsearch.yml` files with our desire configurations. + +**elasticsearch.yml** is for all nodes: + +```yaml +node.processors: 2 +``` + +**master-elasticsearch.yml** is for master nodes: + +```yaml +path: + logs: "/usr/share/elasticsearch/data/master-logs-dir" +``` + +**data-elasticsearch.yml** is for data nodes: + +```yaml +path: + logs: "/usr/share/elasticsearch/data/data-logs-dir" +# For 100gb node space: +# Enable disk-based shard allocation +cluster.routing.allocation.disk.threshold_enabled: true +# prevent Elasticsearch from allocating shards to the node if less than the 15gb of space is available +cluster.routing.allocation.disk.watermark.low: 15gb +# relocate shards away from the node if the node has less than 10gb of free space +cluster.routing.allocation.disk.watermark.high: 10gb +# enforce a read-only index block if the node has less than 5gb of free space +cluster.routing.allocation.disk.watermark.flood_stage: 5gb +``` + +**ingest-elasticsearch.yml** is for ingest nodes: + +```yaml +path: + logs: "/usr/share/elasticsearch/data/ingest-logs-dir" +``` + +Let's create a k8s secret containing the above configurations where the file name will be the key and the file-content as the value: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: es-custom-config + namespace: demo +stringData: + elasticsearch.yml: |- + node.processors: 2 + master-elasticsearch.yml: |- + path: + logs: "/usr/share/elasticsearch/data/master-logs-dir" + ingest-elasticsearch.yml: |- + path: + logs: "/usr/share/elasticsearch/data/ingest-logs-dir" + data-elasticsearch.yml: |- + path: + logs: "/usr/share/elasticsearch/data/data-logs-dir" + cluster.routing.allocation.disk.threshold_enabled: true + cluster.routing.allocation.disk.watermark.low: 15gb + cluster.routing.allocation.disk.watermark.high: 10gb + 
cluster.routing.allocation.disk.watermark.flood_stage: 5gb +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/elasticsearch/configuration/topology-cluster/yamls/config-secret.yaml +secret/es-custom-config created +``` + +Now that the config secret is created, it needs to be mention in the [Elasticsearch](/docs/guides/elasticsearch/concepts/elasticsearch/index.md) object's yaml: + +```yaml +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: es-topology + namespace: demo +spec: + enableSSL: true + version: xpack-8.11.1 + configSecret: + name: es-custom-config # mentioned here! + storageType: Durable + deletionPolicy: WipeOut + topology: + master: + replicas: 1 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 1 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + ingest: + replicas: 1 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` + +Now, create the Elasticsearch object by the following command: + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/elasticsearch/configuration/topology-cluster/yamls/es-topology.yaml +elasticsearch.kubedb.com/es-topology created +``` + +Now, wait for the Elasticsearch to become ready: + +```bash +$ kubectl get elasticsearch -n demo -w +NAME VERSION STATUS AGE +es-topology xpack-8.11.1 Provisioning 12s +es-topology xpack-8.11.1 Provisioning 2m2s +es-topology xpack-8.11.1 Ready 2m2s +``` + +## Verify Configuration + +Let's connect to the Elasticsearch cluster that we have created and check the node settings to verify whether our configurations are applied or not: + +Connect to the Cluster: + +```bash +# Port-forward the service to local machine +$ kubectl port-forward -n demo svc/es-topology 9200 +Forwarding from 127.0.0.1:9200 -> 9200 +Forwarding from [::1]:9200 -> 9200 +``` + +Now, our Elasticsearch cluster is accessible at `localhost:9200`. + +**Connection information:** + +- Address: `localhost:9200` +- Username: + + ```bash + $ kubectl get secret -n demo es-topology-elastic-cred -o jsonpath='{.data.username}' | base64 -d + elastic + ``` + +- Password: + + ```bash + $ kubectl get secret -n demo es-topology-elastic-cred -o jsonpath='{.data.password}' | base64 -d + F2sIde1TbZqOR_gF + ``` + +Now, we will query for settings of all nodes in an Elasticsearch cluster, + +```bash +$ curl -XGET -k -u 'elastic:F2sIde1TbZqOR_gF' "https://localhost:9200/_nodes/_all/settings?pretty" +``` + +This will return a large JSON with node settings. 
Here is the prettified JSON response, + +```json +{ + "_nodes" : { + "total" : 3, + "successful" : 3, + "failed" : 0 + }, + "cluster_name" : "es-topology", + "nodes" : { + "PnvWHS4tTZaNLX8yiUykEg" : { + "name" : "es-topology-data-0", + "transport_address" : "10.244.0.37:9300", + "host" : "10.244.0.37", + "ip" : "10.244.0.37", + "version" : "7.9.1", + "build_flavor" : "default", + "build_type" : "docker", + "build_hash" : "083627f112ba94dffc1232e8b42b73492789ef91", + "roles" : [ + "data", + "ml", + "remote_cluster_client", + "transform" + ], + "attributes" : { + "ml.machine_memory" : "1073741824", + "ml.max_open_jobs" : "20", + "xpack.installed" : "true", + "transform.node" : "true" + }, + "settings" : { + "cluster" : { + "name" : "es-topology", + "routing" : { + "allocation" : { + "disk" : { + "threshold_enabled" : "true", + "watermark" : { + "low" : "15gb", + "flood_stage" : "5gb", + "high" : "10gb" + } + } + } + }, + "election" : { + "strategy" : "supports_voting_only" + } + }, + "node" : { + "name" : "es-topology-data-0", + "processors" : "2", + "attr" : { + "transform" : { + "node" : "true" + }, + "xpack" : { + "installed" : "true" + }, + "ml" : { + "machine_memory" : "1073741824", + "max_open_jobs" : "20" + } + }, + "data" : "true", + "ingest" : "false", + "master" : "false" + }, + "path" : { + "logs" : "/usr/share/elasticsearch/data/data-logs-dir", + "home" : "/usr/share/elasticsearch" + }, + "discovery" : { + "seed_hosts" : "es-topology-master" + }, + "client" : { + "type" : "node" + }, + "http" : { + "compression" : "false", + "type" : "security4", + "type.default" : "netty4" + }, + "transport" : { + "type" : "security4", + "features" : { + "x-pack" : "true" + }, + "type.default" : "netty4" + }, + "xpack" : { + "security" : { + "http" : { + "ssl" : { + "enabled" : "true" + } + }, + "enabled" : "true", + "transport" : { + "ssl" : { + "enabled" : "true" + } + } + } + }, + "network" : { + "host" : "0.0.0.0" + } + } + }, + "5EeawayWTa6aw9D8pcYlGQ" : { + "name" : "es-topology-ingest-0", + "transport_address" : "10.244.0.36:9300", + "host" : "10.244.0.36", + "ip" : "10.244.0.36", + "version" : "7.9.1", + "build_flavor" : "default", + "build_type" : "docker", + "build_hash" : "083627f112ba94dffc1232e8b42b73492789ef91", + "roles" : [ + "ingest", + "ml", + "remote_cluster_client" + ], + "attributes" : { + "ml.machine_memory" : "1073741824", + "xpack.installed" : "true", + "transform.node" : "false", + "ml.max_open_jobs" : "20" + }, + "settings" : { + "cluster" : { + "name" : "es-topology", + "election" : { + "strategy" : "supports_voting_only" + } + }, + "node" : { + "name" : "es-topology-ingest-0", + "processors" : "2", + "attr" : { + "transform" : { + "node" : "false" + }, + "xpack" : { + "installed" : "true" + }, + "ml" : { + "machine_memory" : "1073741824", + "max_open_jobs" : "20" + } + }, + "data" : "false", + "ingest" : "true", + "master" : "false" + }, + "path" : { + "logs" : "/usr/share/elasticsearch/data/ingest-logs-dir", + "home" : "/usr/share/elasticsearch" + }, + "discovery" : { + "seed_hosts" : "es-topology-master" + }, + "client" : { + "type" : "node" + }, + "http" : { + "compression" : "false", + "type" : "security4", + "type.default" : "netty4" + }, + "transport" : { + "type" : "security4", + "features" : { + "x-pack" : "true" + }, + "type.default" : "netty4" + }, + "xpack" : { + "security" : { + "http" : { + "ssl" : { + "enabled" : "true" + } + }, + "enabled" : "true", + "transport" : { + "ssl" : { + "enabled" : "true" + } + } + } + }, + "network" : { + "host" : "0.0.0.0" 
+ } + } + }, + "d2YO9jGNRzuPczGpITuxNA" : { + "name" : "es-topology-master-0", + "transport_address" : "10.244.0.38:9300", + "host" : "10.244.0.38", + "ip" : "10.244.0.38", + "version" : "7.9.1", + "build_flavor" : "default", + "build_type" : "docker", + "build_hash" : "083627f112ba94dffc1232e8b42b73492789ef91", + "roles" : [ + "master", + "ml", + "remote_cluster_client" + ], + "attributes" : { + "ml.machine_memory" : "1073741824", + "ml.max_open_jobs" : "20", + "xpack.installed" : "true", + "transform.node" : "false" + }, + "settings" : { + "cluster" : { + "initial_master_nodes" : "es-topology-master-0", + "name" : "es-topology", + "election" : { + "strategy" : "supports_voting_only" + } + }, + "node" : { + "name" : "es-topology-master-0", + "processors" : "2", + "attr" : { + "transform" : { + "node" : "false" + }, + "xpack" : { + "installed" : "true" + }, + "ml" : { + "machine_memory" : "1073741824", + "max_open_jobs" : "20" + } + }, + "data" : "false", + "ingest" : "false", + "master" : "true" + }, + "path" : { + "logs" : "/usr/share/elasticsearch/data/master-logs-dir", + "home" : "/usr/share/elasticsearch" + }, + "discovery" : { + "seed_hosts" : "es-topology-master" + }, + "client" : { + "type" : "node" + }, + "http" : { + "compression" : "false", + "type" : "security4", + "type.default" : "netty4" + }, + "transport" : { + "type" : "security4", + "features" : { + "x-pack" : "true" + }, + "type.default" : "netty4" + }, + "xpack" : { + "security" : { + "http" : { + "ssl" : { + "enabled" : "true" + } + }, + "enabled" : "true", + "transport" : { + "ssl" : { + "enabled" : "true" + } + } + } + }, + "network" : { + "host" : "0.0.0.0" + } + } + } + } +} +``` + +Here we can see that our given configuration is merged to the default configurations. The common configuration `node.processors` is merged to all types of nodes. The node role-specific log directories are also configured. The disk-based shard allocation setting merged to data nodes. 
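+The same kind of spot check works here; a short `jq` filter (assuming `jq` is installed locally) can list each node's role-specific log directory along with the common `node.processors` value:
+
+```bash
+$ curl -s -XGET -k -u 'elastic:F2sIde1TbZqOR_gF' "https://localhost:9200/_nodes/_all/settings" \
+  | jq -r '.nodes[] | "\(.name)  processors=\(.settings.node.processors)  logs=\(.settings.path.logs)"'
+```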
+ +## Cleanup + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +$ kubectl delete elasticsearch -n demo es-topology + +$ kubectl delete secret -n demo es-custom-config + +$ kubectl delete namespace demo +``` + +## Next Steps diff --git a/docs/guides/elasticsearch/configuration/topology-cluster/yamls/es-topology.yaml b/docs/guides/elasticsearch/configuration/topology-cluster/yamls/es-topology.yaml index 1a7d4d38b9..275f7f047c 100644 --- a/docs/guides/elasticsearch/configuration/topology-cluster/yamls/es-topology.yaml +++ b/docs/guides/elasticsearch/configuration/topology-cluster/yamls/es-topology.yaml @@ -6,8 +6,8 @@ metadata: spec: enableSSL: true version: xpack-8.11.1 - configSecret: - name: es-custom-config + configuration: + secretName: es-custom-config storageType: Durable deletionPolicy: WipeOut topology: diff --git a/docs/guides/elasticsearch/configuration/topology-cluster/yamls/es-topology.yaml.bak b/docs/guides/elasticsearch/configuration/topology-cluster/yamls/es-topology.yaml.bak new file mode 100644 index 0000000000..1a7d4d38b9 --- /dev/null +++ b/docs/guides/elasticsearch/configuration/topology-cluster/yamls/es-topology.yaml.bak @@ -0,0 +1,41 @@ +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: es-topology + namespace: demo +spec: + enableSSL: true + version: xpack-8.11.1 + configSecret: + name: es-custom-config + storageType: Durable + deletionPolicy: WipeOut + topology: + master: + replicas: 1 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + data: + replicas: 1 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + ingest: + replicas: 1 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + diff --git a/docs/guides/elasticsearch/plugins-backup/s3-repository/index.md b/docs/guides/elasticsearch/plugins-backup/s3-repository/index.md index 1faa887da4..be35af9689 100644 --- a/docs/guides/elasticsearch/plugins-backup/s3-repository/index.md +++ b/docs/guides/elasticsearch/plugins-backup/s3-repository/index.md @@ -109,8 +109,8 @@ metadata: namespace: demo spec: # Custom configuration, which will update elasticsearch.yml - configSecret: - name: es-custom-config + configuration: + secretName: es-custom-config # Secure settings which will be stored in elasticsearch.keystore secureConfigSecret: name: es-secure-settings diff --git a/docs/guides/elasticsearch/plugins-backup/s3-repository/index.md.bak b/docs/guides/elasticsearch/plugins-backup/s3-repository/index.md.bak new file mode 100644 index 0000000000..1faa887da4 --- /dev/null +++ b/docs/guides/elasticsearch/plugins-backup/s3-repository/index.md.bak @@ -0,0 +1,409 @@ +--- +title: Snapshot and Restore Using S3 Repository Plugin +description: Snapshot and Restore of Elasticsearch Cluster Using S3 Repository Plugin +menu: + docs_{{ .version }}: + identifier: guides-es-plugins-backup-s3-repository + name: S3 Repository Plugin + parent: guides-es-plugins-backup + weight: 15 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Snapshot and Restore Using S3 Repository Plugin + +The [S3 repository](https://www.elastic.co/guide/en/elasticsearch/plugins/7.14/repository-s3.html) plugin adds support for using AWS S3 as a repository for Snapshot/Restore. 
It also works with S3 compatible other mediums such as [Linode Object Storage](https://www.linode.com/docs/guides/how-to-use-object-storage/). + +For the demo, we are going to show you how to snapshot a KubeDB managed Elasticsearch and restore data from previously taken snapshot. + +## Before You Begin + +At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +Now, install the KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create namespace demo +namespace/demo created + +$ kubectl get namespace +NAME STATUS AGE +demo Active 9s +``` + +> Note: YAML files used in this tutorial are stored in [guides/elasticsearch/quickstart/overview/yamls](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/guides/elasticsearch/plugins-backup/s3-repository/yamls) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs) + +## Create S3 Compatible Storage + +We are going to use the [Linode Object Storage](https://www.linode.com/docs/guides/how-to-use-object-storage/) which is S3 compatible. But you can any S3 compatible storage which suits you best. Let's [create](https://cloud.linode.com/object-storage/buckets/create) a `sample-s3-bucket` to store snapshot and later restore from it. + +![create sample s3 bucket](images/create-s3-bucket.png) + +You also need to [create](https://cloud.linode.com/object-storage/access-keys) `access_key` and `secret_key` so that your Elasticsearch Cluster can connect to the bucket. + +## Deploy Elasticsearch Cluster and Populate Data + +For the demo, we are going to use Elasticsearch docker images from KubeDB distribution with the pre-installed S3 repository plugin. + +### Secure Client Settings + +To make the plugin works we need to create a k8s secret with the Elastisearch secure settings: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: es-secure-settings + namespace: demo +stringData: + password: strong-password + s3.client.default.access_key: 6BU5GFIIUC2******** + s3.client.default.secret_key: DD1FS5NAiPf******** +``` + +> N.B.: Here, the `password` is the Elasticsearch `KEYSTROE_PASSWORD`, if you do not provide it, default to empty string (`""`). + +Let's create the k8s secret with secure settings: + +```bash +$ kubectl apply -f secure-settings-secret.yaml +secret/es-secure-settings created +``` + +In [S3 Client Settings](https://www.elastic.co/guide/en/elasticsearch/plugins/7.14/repository-s3-client.html), If you do not configure the `endpoint`, it default to `s3.amazonaws.com`. Since we are using Linode Bucket instead of AWS S3, we need to configure the endpoint too. Let's create another secret with custom client configurations: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: es-custom-config + namespace: demo +stringData: + elasticsearch.yml: |- + s3.client.default.endpoint: us-east-1.linodeobjects.com +``` + +> N.B.: In Elasticsearch, only secure setting goes to `elasticsearch.keystore`, others are put into `elasticsearch.yml` config file. That's why two different k8s secrets are used. 
+ +Let's create the k8s secret with custom configurations: + +```bash +$ kubectl apply -f custom-configuration.yaml +secret/es-custom-config created +``` + +### Deploy Elasticsearch Cluster + +Now that we have deployed our configuration secrets, it's time to deploy our Elasticsearch instance. + +```yaml +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: sample-es + namespace: demo +spec: + # Custom configuration, which will update elasticsearch.yml + configSecret: + name: es-custom-config + # Secure settings which will be stored in elasticsearch.keystore + secureConfigSecret: + name: es-secure-settings + enableSSL: true + # we are using ElasticsearchVersion with pre-installed s3 repository plugin + version: xpack-8.11.1 + storageType: Durable + replicas: 3 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` + +Let's deploy the Elasticsearch and wait for it to become ready to use: + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/elasticsearch/plugins-backup/s3-repository/yamls/elasticsearch.yaml +elasticsearch.kubedb.com/sample-es created +``` + +```bash +$ kubectl get es -n demo -w +NAME VERSION STATUS AGE +sample-es xpack-8.11.1 0s +sample-es xpack-8.11.1 Provisioning 19s +sample-es xpack-8.11.1 Ready 41s +``` + +### Populate Data + +To connect to our Elasticsearch cluster, let's port-forward the Elasticsearch service to local machine: + +```bash +$ kubectl port-forward -n demo svc/sample-es 9200 +Forwarding from 127.0.0.1:9200 -> 9200 +Forwarding from [::1]:9200 -> 9200 +``` + +Keep it like that and switch to another terminal window: + +```bash +$ export ELASTIC_USER=$(kubectl get secret -n demo sample-es-elastic-cred -o jsonpath='{.data.username}' | base64 -d) + +$ export ELASTIC_PASSWORD=$(kubectl get secret -n demo sample-es-elastic-cred -o jsonpath='{.data.password}' | base64 -d) + +$ curl -XGET -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/_cluster/health?pretty" +{ + "cluster_name" : "sample-es", + "status" : "green", + "timed_out" : false, + "number_of_nodes" : 3, + "number_of_data_nodes" : 3, + "active_primary_shards" : 1, + "active_shards" : 2, + "relocating_shards" : 0, + "initializing_shards" : 0, + "unassigned_shards" : 0, + "delayed_unassigned_shards" : 0, + "number_of_pending_tasks" : 0, + "number_of_in_flight_fetch" : 0, + "task_max_waiting_in_queue_millis" : 0, + "active_shards_percent_as_number" : 100.0 +} +``` + +So, our cluster status is green. Let's create some indices with dummy data: + +```bash +$ curl -XPOST -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/products/_doc?pretty" -H 'Content-Type: application/json' -d ' +{ + "name": "KubeDB", + "vendor": "AppsCode Inc.", + "description": "Database Operator for Kubernetes" +} +' + +$ curl -XPOST -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/companies/_doc?pretty" -H 'Content-Type: application/json' -d ' +{ + "name": "AppsCode Inc.", + "mission": "Accelerate the transition to Containers by building a Kubernetes-native Data Platform", + "products": ["KubeDB", "Stash", "KubeVault", "Kubeform", "ByteBuilders"] +} +' +``` + +Now, let’s verify that the indexes have been created successfully. 
+ +```bash +$ curl -XGET -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/_cat/indices?v&s=index&pretty" +health status index uuid pri rep docs.count docs.deleted store.size pri.store.size +green open .geoip_databases oiaZfJA8Q5CihQon0oR8hA 1 1 42 0 81.6mb 40.8mb +green open companies GuGisWJ8Tkqnq8vhREQ2-A 1 1 1 0 11.5kb 5.7kb +green open products wyu-fImDRr-Hk_GXVF7cDw 1 1 1 0 10.6kb 5.3kb +``` + +### Repository Settings + +The s3 repository type supports a [number of settings](https://www.elastic.co/guide/en/elasticsearch/plugins/7.14/repository-s3-repository.html#repository-s3-repository) to customize how data is stored in S3. These can be specified when creating the repository. + +Let's create the `_snapshot` repository `sample_s3_repo` with our bucket name `sample-s3-bucket`: + +```bash +$ curl -k -X PUT -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/_snapshot/sample_s3_repo?pretty" -H 'Content-Type: application/json' -d' +{ + "type": "s3", + "settings": { + "bucket": "sample-s3-bucket" + } +} +' +{ + "acknowledged" : true +} +``` + +We've successfully created our repository. Ready to take our first snapshot. + +## Create a Snapshot + +A repository can contain multiple snapshots of the same cluster. Snapshots are identified by unique names within the cluster. For more details, visit [Create a snapshot](https://www.elastic.co/guide/en/elasticsearch/reference/7.14/snapshots-take-snapshot.html). + +```bash +$ curl -k -X PUT -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/_snapshot/sample_s3_repo/snapshot_1?wait_for_completion=true&pretty" + +{ + "snapshot" : { + "snapshot" : "snapshot_1", + "uuid" : "JKoF5sgtS3WPBQ8A_OvWbw", + "repository" : "sample_s3_repo", + "version_id" : 7140099, + "version" : "7.14.0", + "indices" : [ + ".geoip_databases", + "companies", + "products" + ], + "data_streams" : [ ], + "include_global_state" : true, + "state" : "SUCCESS", + "start_time" : "2021-08-24T14:45:38.930Z", + "start_time_in_millis" : 1629816338930, + "end_time" : "2021-08-24T14:46:16.946Z", + "end_time_in_millis" : 1629816376946, + "duration_in_millis" : 38016, + "failures" : [ ], + "shards" : { + "total" : 3, + "failed" : 0, + "successful" : 3 + }, + "feature_states" : [ + { + "feature_name" : "geoip", + "indices" : [ + ".geoip_databases" + ] + } + ] + } +} +``` + +We've successfully taken our first snapshot. + +## Delete Data and Restore a Snapshot + +Let's delete all the indices: + +```bash +$ curl -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" -X DELETE "https://localhost:9200/_all?pretty" +{ + "acknowledged" : true +} +``` + +List and varify the deletion: + +```bash +$ curl -XGET -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/_cat/indices?v&s=index&pretty" +health status index uuid pri rep docs.count docs.deleted store.size pri.store.size +green open .geoip_databases oiaZfJA8Q5CihQon0oR8hA 1 1 42 0 81.6mb 40.8mb +``` + +For more details about restore, visit [Restore a snapshot](https://www.elastic.co/guide/en/elasticsearch/reference/7.14/snapshots-restore-snapshot.html#snapshots-restore-snapshot). + +Let's restore the data from our `snapshot_1`: + +```bash +$ curl -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" -X POST "https://localhost:9200/_snapshot/sample_s3_repo/snapshot_1/_restore?pretty" -H 'Content-Type: application/json' -d' +{ + "indices": "companies,products" +} +' + +{ + "accepted" : true +} +``` + +We've successfully restored our indices. 
+ +> N.B.: We only wanted to restore the indices we created, but if you want to overwrite everything with the snapshot data, you can do it by setting `include_global_state` to `true` while restoring. + +### Varify Data + +To varify our data, let's list the indices: + +```bash +$ curl -XGET -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/_cat/indices?v&s=index&pretty" +health status index uuid pri rep docs.count docs.deleted store.size pri.store.size +green open .geoip_databases oiaZfJA8Q5CihQon0oR8hA 1 1 42 0 81.6mb 40.8mb +green open companies drsv-5tvQwCcte7bkUT0uQ 1 1 1 0 11.7kb 5.8kb +green open products 7TXoXy5kRFiVgZDuyqffQA 1 1 1 0 10.6kb 5.3kb +``` + +Check the content inside: + +```bash +$ curl -XGET -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/products/_search?pretty" +{ + "took" : 3, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" + }, + "max_score" : 1.0, + "hits" : [ + { + "_index" : "products", + "_type" : "_doc", + "_id" : "36SEeHsBS6UMHADkEvJw", + "_score" : 1.0, + "_source" : { + "name" : "KubeDB", + "vendor" : "AppsCode Inc.", + "description" : "Database Operator for Kubernetes" + } + } + ] + } +} +``` + +```bash +$ curl -XGET -k -u "$ELASTIC_USER:$ELASTIC_PASSWORD" "https://localhost:9200/companies/_search?pretty" +{ + "took" : 3, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" + }, + "max_score" : 1.0, + "hits" : [ + { + "_index" : "companies", + "_type" : "_doc", + "_id" : "4KSFeHsBS6UMHADkGvL5", + "_score" : 1.0, + "_source" : { + "name" : "AppsCode Inc.", + "mission" : "Accelerate the transition to Containers by building a Kubernetes-native Data Platform", + "products" : [ + "KubeDB", + "Stash", + "KubeVault", + "Kubeform", + "ByteBuilders" + ] + } + } + ] + } +} +``` + +So, we have successfully retored our data from the snapshot. 
diff --git a/docs/guides/elasticsearch/plugins-backup/s3-repository/yamls/elasticsearch.yaml b/docs/guides/elasticsearch/plugins-backup/s3-repository/yamls/elasticsearch.yaml index 1eff98f913..fa38f9ac6b 100644 --- a/docs/guides/elasticsearch/plugins-backup/s3-repository/yamls/elasticsearch.yaml +++ b/docs/guides/elasticsearch/plugins-backup/s3-repository/yamls/elasticsearch.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: # Custom configuration, which will update elasticsearch.yml - configSecret: - name: es-custom-config + configuration: + secretName: es-custom-config # Secure settings which will be stored in elasticsearch.keystore secureConfigSecret: name: es-secure-settings diff --git a/docs/guides/elasticsearch/plugins-backup/s3-repository/yamls/elasticsearch.yaml.bak b/docs/guides/elasticsearch/plugins-backup/s3-repository/yamls/elasticsearch.yaml.bak new file mode 100644 index 0000000000..1eff98f913 --- /dev/null +++ b/docs/guides/elasticsearch/plugins-backup/s3-repository/yamls/elasticsearch.yaml.bak @@ -0,0 +1,24 @@ +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: sample-es + namespace: demo +spec: + # Custom configuration, which will update elasticsearch.yml + configSecret: + name: es-custom-config + # Secure settings which will be stored in elasticsearch.keystore + secureConfigSecret: + name: es-secure-settings + enableSSL: true + # we are using ElasticsearchVersion with pre-installed s3 repository plugin + version: xpack-8.11.1 + storageType: Durable + replicas: 3 + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/docs/guides/elasticsearch/plugins/search-guard/x-pack-monitoring.md b/docs/guides/elasticsearch/plugins/search-guard/x-pack-monitoring.md index 15e1251e20..10f4030c8c 100644 --- a/docs/guides/elasticsearch/plugins/search-guard/x-pack-monitoring.md +++ b/docs/guides/elasticsearch/plugins/search-guard/x-pack-monitoring.md @@ -284,7 +284,7 @@ metadata: ... ``` -Now, create Elasticsearch crd specifying `spec.authSecret` and `spec.configSecret` field. +Now, create Elasticsearch crd specifying `spec.authSecret` and `spec.configuration` field. ```bash $ kubectl apply -f kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/x-pack/es-mon-demo.yaml @@ -305,8 +305,8 @@ spec: authSecret: kind: Secret name: es-auth - configSecret: - name: es-custom-config + configuration: + secretName: es-custom-config storage: storageClassName: "standard" accessModes: diff --git a/docs/guides/elasticsearch/plugins/search-guard/x-pack-monitoring.md.bak b/docs/guides/elasticsearch/plugins/search-guard/x-pack-monitoring.md.bak new file mode 100644 index 0000000000..28b8fa618d --- /dev/null +++ b/docs/guides/elasticsearch/plugins/search-guard/x-pack-monitoring.md.bak @@ -0,0 +1,505 @@ +--- +title: X-Pack Monitoring of Elasticsearch Cluster with SearchGuard Auth +menu: + docs_{{ .version }}: + identifier: es-x-pack-monitoring-with-searchguard + name: Monitoring + parent: es-search-guard-elasticsearch + weight: 50 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# X-Pack Monitoring with KubeDB Elasticsearch + +This tutorial will show you how to use X-Pack monitoring in an Elasticsearch cluster deployed with KubeDB. 
+ +## Before You Begin + +At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +As KubeDB uses [Search Guard](https://search-guard.com/) plugin for authentication and authorization, you have to know how to configure Search Guard for both Elasticsearch cluster and Kibana. If you don't know, please visit [here](https://docs.search-guard.com/latest/main-concepts). + +To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created + +$ kubectl get ns demo +NAME STATUS AGE +demo Active 5s +``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/elasticsearch](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/elasticsearch) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +At first, we will create some necessary Search Guard configuration and roles to give a user permission to monitor an Elasticsearch cluster from Kibana. We will create a secret with this configuration files. Then we will provide this secret in `spec.authSecret` field of Elasticsearch crd so that our Elasticsearch cluster start with this configuration. We are going to configure Elasticsearch cluster to collect and send x-pack monitoring data over [HTTP Exporters](https://www.elastic.co/guide/en/elasticsearch/reference/current/http-exporter.html) using a [custom configuration](/docs/guides/elasticsearch/configuration/overview/index.md) file. + +Then, we will deploy Kibana with Search Guard plugin installed. We will configure Kibana to connect with our Elasticsearch cluster and view monitoring data from it. + +For this tutorial, we will use Elasticsearch 6.3.0 with Search Guard plugin 23.1 and Kibana 6.3.0 with Search Guard plugin 14 installed. + +## Deploy Elasticsearch Cluster + +Let's create necessary Search Guard configuration files. Here, we will create two users `admin` and `monitor`. User `admin` will have all permissions on the cluster and user `monitor` will have some limited permission to view only monitoring data. 
Here, are the contents of Search Guard configuration files, + +**sg_action_groups.yml:** + +```yaml +###### UNLIMITED ###### +UNLIMITED: + readonly: true + permissions: + - "*" + +###### CLUSTER LEVEL ##### +CLUSTER_MONITOR: + readonly: true + permissions: + - "cluster:monitor/*" + +CLUSTER_COMPOSITE_OPS_RO: + readonly: true + permissions: + - "indices:data/read/mget" + - "indices:data/read/msearch" + - "indices:data/read/mtv" + - "indices:data/read/coordinate-msearch*" + - "indices:admin/aliases/exists*" + - "indices:admin/aliases/get*" + - "indices:data/read/scroll" + +CLUSTER_COMPOSITE_OPS: + readonly: true + permissions: + - "indices:data/write/bulk" + - "indices:admin/aliases*" + - "indices:data/write/reindex" + - CLUSTER_COMPOSITE_OPS_RO + +###### INDEX LEVEL ###### +INDICES_ALL: + readonly: true + permissions: + - "indices:*" + +READ: + readonly: true + permissions: + - "indices:data/read*" + - "indices:admin/mappings/fields/get*" + - "indices:admin/mappings/get*" +``` + +**sg_roles.yaml:** + +```yaml +### Admin +sg_all_access: + readonly: true + cluster: + - UNLIMITED + indices: + '*': + '*': + - UNLIMITED + tenants: + admin_tenant: RW + +### X-Pack Monitoring +sg_xp_monitoring: + cluster: + - cluster:admin/xpack/monitoring/* + - cluster:admin/ingest/pipeline/put + - cluster:admin/ingest/pipeline/get + - indices:admin/template/get + - indices:admin/template/put + - CLUSTER_MONITOR + - CLUSTER_COMPOSITE_OPS + indices: + '?monitor*': + '*': + - INDICES_ALL + '?marvel*': + '*': + - INDICES_ALL + '?kibana*': + '*': + - READ + '*': + '*': + - indices:data/read/field_caps +``` + +**sg_internal_users.yml:** + +```yaml +#password is: admin@secret +admin: + readonly: true + hash: $2y$12$skma87wuFFtxtGWegeAiIeTtUH1nnOfIRZzwwhBlzXjg0DdM4gLeG + roles: + - admin + +#password is: monitor@secret +monitor: + readonly: true + hash: $2y$12$JDTXih3AqV/1MDRYQ.KIY.u68CkzCIq.xiiqwtRJx3cjN0YmFavTe + roles: + - monitor +``` + +Here, we have used `admin@secret` password for `admin` user and `monitor@secret` password for `monitor` user. You can use `htpasswd` to generate the bcrypt encrypted password hashes. + +```bash +$htpasswd -bnBC 12 "" | tr -d ':\n' +``` + +**sg_roles_mapping.yml:** + +```yaml +sg_all_access: + readonly: true + backendroles: + - admin + +sg_xp_monitoring: + readonly: true + backendroles: + - monitor +``` + +**sg_config.yml:** + +```yaml +searchguard: + dynamic: + authc: + kibana_auth_domain: + enabled: true + order: 0 + http_authenticator: + type: basic + challenge: false + authentication_backend: + type: internal + basic_internal_auth_domain: + http_enabled: true + transport_enabled: true + order: 1 + http_authenticator: + type: basic + challenge: true + authentication_backend: + type: internal +``` + +Now, create a secret with these Search Guard configuration files. + +```bash + $ kubectl create secret generic -n demo es-auth \ + --from-literal=ADMIN_USERNAME=admin \ + --from-literal=ADMIN_PASSWORD=admin@secret \ + --from-file=./sg_action_groups.yml \ + --from-file=./sg_config.yml \ + --from-file=./sg_internal_users.yml \ + --from-file=./sg_roles_mapping.yml \ + --from-file=./sg_roles.yml +secret/es-auth created +``` + +Verify the secret has desired configuration files, + +```yaml +$ kubectl get secret -n demo es-auth -o yaml +apiVersion: v1 +data: + sg_action_groups.yml: + sg_config.yml: + sg_internal_users.yml: + sg_roles.yml: + sg_roles_mapping.yml: +kind: Secret +metadata: + ... + name: es-auth + namespace: demo + ... 
+type: Opaque +``` + +As we are using Search Guard plugin for authentication, we need to ensure that `x-pack` security is not enabled. We will ensure that by providing `xpack.security.enabled: false` in `common-config.yml` file and we will use this file to configure our Elasticsearch cluster. As Search Guard does not support `local` exporter, we will use `http` exporter and set `host` filed to `http://127.0.0.1:9200` to store monitoring data in same cluster. + + Let's create `common-config.yml` with following configuration, + +```yaml +xpack.security.enabled: false +xpack.monitoring.enabled: true +xpack.monitoring.collection.enabled: true +xpack.monitoring.exporters: + my-http-exporter: + type: http + host: ["http://127.0.0.1:9200"] + auth: + username: monitor + password: monitor@secret +``` + +Create a ConfigMap using this file, + +```bash +$ kubectl create configmap -n demo es-custom-config \ + --from-file=./common-config.yaml +configmap/es-custom-config created +``` + +Verify that the ConfigMap has desired configuration, + +```yaml +$ kubectl get configmap -n demo es-custom-config -o yaml +apiVersion: v1 +data: + common-config.yaml: |- + xpack.security.enabled: false + xpack.monitoring.enabled: true + xpack.monitoring.collection.enabled: true + xpack.monitoring.exporters: + my-http-exporter: + type: http + host: ["http://127.0.0.1:9200"] + auth: + username: monitor + password: monitor@secret +kind: ConfigMap +metadata: + ... + name: es-custom-config + namespace: demo + ... +``` + +Now, create Elasticsearch crd specifying `spec.authSecret` and `spec.configSecret` field. + +```bash +$ kubectl apply -f kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/x-pack/es-mon-demo.yaml +elasticsearch.kubedb.com/es-mon-demo created +``` + +Below is the YAML for the Elasticsearch crd we just created. + +```yaml +apiVersion: kubedb.com/v1 +kind: Elasticsearch +metadata: + name: es-mon-demo + namespace: demo +spec: + version: searchguard-7.9.3 + replicas: 1 + authSecret: + kind: Secret + name: es-auth + configuration: + secretName: es-custom-config + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` + +Now, wait for few minutes. KubeDB will create necessary secrets, services, and petsets. + +Check resources created in demo namespace by KubeDB, + +```bash +$ kubectl get all -n demo -l=app.kubernetes.io/instance=es-mon-demo +NAME READY STATUS RESTARTS AGE +pod/es-mon-demo-0 1/1 Running 0 37s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/es-mon-demo ClusterIP 10.110.227.143 9200/TCP 40s +service/es-mon-demo-master ClusterIP 10.104.12.90 9300/TCP 40s + +NAME DESIRED CURRENT AGE +petset.apps/es-mon-demo 1 1 39s +``` + +Once everything is created, Elasticsearch will go to Running state. Check that Elasticsearch is in running state. + +```bash +$ kubectl get es -n demo es-mon-demo +NAME VERSION STATUS AGE +es-mon-demo 7.3.2 Running 1m +``` + +Now, check elasticsearch log to see if the cluster is ready to accept requests, + +```bash +$ kubectl logs -n demo es-mon-demo-0 -f +... +Starting runit... +... +Elasticsearch Version: 6.3.0 +Search Guard Version: 6.3.0-23.0 +Connected as CN=sgadmin,O=Elasticsearch Operator +Contacting elasticsearch cluster 'elasticsearch' and wait for YELLOW clusterstate ... +Clustername: es-mon-demo +Clusterstate: GREEN +Number of nodes: 1 +Number of data nodes: 1 +... +Done with success +... 
+``` + +Once you see `Done with success` success line in the log, the cluster is ready to accept requests. Now, it is time to connect with Kibana. + +## Deploy Kibana + +In order to view monitoring data from Kibana, we need to configure `kibana.yml` with appropriate configuration. + +KubeDB has created a service with name `es-mon-demo` in `demo` namespace for the Elasticsearch cluster. We will use this service in `elasticsearch.url` field. Kibana will use this service to connect with the Elasticsearch cluster. + +Let's, configure `kibana.yml` as below, + +```yaml +xpack.security.enabled: false +xpack.monitoring.enabled: true +xpack.monitoring.kibana.collection.enabled: true +xpack.monitoring.ui.enabled: true + +server.host: 0.0.0.0 + +elasticsearch.url: "http://es-mon-demo.demo.svc:9200" +elasticsearch.username: "monitor" +elasticsearch.password: "monitor@secret" + +searchguard.auth.type: "basicauth" +searchguard.cookie.secure: false + +``` + +Notice the `elasticsearch.username` and `elasticsearch.password` field. Kibana will connect to Elasticsearch cluster with this credentials. They must match with the credentials we have provided in `sg_internal_users.yml` file for `monitor` user while creating the cluster. + +Now, create a ConfigMap with `kibana.yml` file. We will mount this ConfigMap in Kibana deployment so that Kibana starts with this configuration. + +```conlose +$ kubectl create configmap -n demo kibana-config \ + --from-file=./kibana.yml +configmap/kibana-config created +``` + +Finally, deploy Kibana deployment, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/elasticsearch/kibana/kibana-deployment.yaml +deployment.apps/kibana created +``` + +Below is the YAML for the Kibana deployment we just created. + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kibana + namespace: demo +spec: + replicas: 1 + selector: + matchLabels: + app: kibana + template: + metadata: + labels: + app: kibana + spec: + containers: + - name: kibana + image: kubedb/kibana:6.3.0 + volumeMounts: + - name: kibana-config + mountPath: /usr/share/kibana/config + volumes: + - name: kibana-config + configMap: + name: kibana-config +``` + +Now, wait for few minutes. Let the Kibana pod go in`Running` state. Check pod is in `Running` using this command, + +```bash + $ kubectl get pods -n demo -l app=kibana +NAME READY STATUS RESTARTS AGE +kibana-84b8cbcf7c-mg699 1/1 Running 0 3m +``` + +Now, watch the Kibana pod's log to see if Kibana is ready to access, + +```bash +$ kubectl logs -n demo kibana-84b8cbcf7c-mg699 -f +... +{"type":"log","@timestamp":"2018-08-27T09:50:47Z","tags":["listening","info"],"pid":1,"message":"Server running at http://0.0.0.0:5601"} +``` + +Once you see `"message":"Server running at http://0.0.0.0:5601"` in the log, Kibana is ready. Now it is time to access Kibana UI. + +Kibana is running on port `5601` in of `kibana-84b8cbcf7c-mg699` pod. In order to access Kibana UI from outside of the cluster, we will use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster). + +First, open a new terminal and run, + +```bash +$ kubectl port-forward -n demo kibana-84b8cbcf7c-mg699 5601 +Forwarding from 127.0.0.1:5601 -> 5601 +Forwarding from [::1]:5601 -> 5601 +``` + +Now, open `localhost:5601` in your browser. When you will open the address, you will be greeted with Search Guard login UI. When you will open the address, you will be greeted with Search Guard login UI. 
+ +Login with following credentials: `username: monitor` and `password: monitor@secret`. After login, go to `Monitoring` tab in Kibana UI. You will see Kibana has connected with the Elasticsearch cluster and showing monitoring data. Some screenshots of monitoring `es-mon-demo` cluster is given below. + +![Kibana Monitoring Home](/docs/images/elasticsearch/x-pack/monitoring-home.png) + +![Kibana Monitoring Node](/docs/images/elasticsearch/x-pack/monitoring-node.png) + +![Kibana Monitoring Overview](/docs/images/elasticsearch/x-pack/monitoring-overview.png) + +## Monitoring Multiple Cluster + +Monitoring multiple cluster is paid feature of X-Pack. If you are interested then follow these steps, + +1. First, create a separate cluster to store monitoring data. Let's say it **monitoring-cluster**. +2. Configure monitoring-cluster to connect with Kibana. +3. Configure Kibana to view monitoring data from monitoring-cluster. +4. Configure `http` exporter of your production clusters to export monitoring data to the monitoring-cluster. Set `xpack.monitoring.exporters..host:` field to the address of the monitoring-cluster. + +Now, your production clusters will send monitoring data to the monitoring-cluster and Kibana will retrieve these data from it. + +## Cleanup + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +$ kubectl patch -n demo es/es-mon-demo -p '{"spec":{"deletionPolicy":"WipeOut"}}' --type="merge" + +$ kubectl delete -n demo es/es-mon-demo + +$ kubectl delete -n demo configmap/es-custom-config + +$ kubectl delete -n demo configmap/kibana-config + +$ kubectl delete -n demo deployment/kibana + +$ kubectl delete ns demo +``` + +To uninstall KubeDB follow this [guide](/docs/setup/README.md). diff --git a/docs/guides/hazelcast/concepts/hazelcast-opsrequest.md b/docs/guides/hazelcast/concepts/hazelcast-opsrequest.md index 354cca0214..cdd2119591 100644 --- a/docs/guides/hazelcast/concepts/hazelcast-opsrequest.md +++ b/docs/guides/hazelcast/concepts/hazelcast-opsrequest.md @@ -127,8 +127,8 @@ metadata: spec: apply: IfReady configuration: - configSecret: - name: hazelcast-custom-config + configuration: + secretName: hazelcast-custom-config applyConfig: hazelcast.yaml: |- hazelcast: diff --git a/docs/guides/hazelcast/concepts/hazelcast-opsrequest.md.bak b/docs/guides/hazelcast/concepts/hazelcast-opsrequest.md.bak new file mode 100644 index 0000000000..354cca0214 --- /dev/null +++ b/docs/guides/hazelcast/concepts/hazelcast-opsrequest.md.bak @@ -0,0 +1,308 @@ +--- +title: HazelcastOpsRequest CRD +menu: + docs_{{ .version }}: + identifier: hz-opsrequest-concepts + name: HazelcastOpsRequest + parent: hz-concepts-hazelcast + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# HazelcastOpsRequest + +## What is HazelcastOpsRequest + +`HazelcastOpsRequest` is a Kubernetes `Custom Resource Definitions` (CRD). It provides declarative configuration for Hazelcast administrative operations like database version updating, horizontal scaling, vertical scaling, reconfigure TLS, restart, etc. in a Kubernetes native way. + +## HazelcastOpsRequest CRD Specifications + +Like any official Kubernetes resource, a `HazelcastOpsRequest` has `TypeMeta`, `ObjectMeta`, `Spec` and `Status` sections. + +Here, some sample `HazelcastOpsRequest` CRs for different administrative operations is given below. 
+ +Sample HazelcastOpsRequest for updating database version: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: HazelcastOpsRequest +metadata: + name: hzops-update-version + namespace: demo +spec: + type: UpdateVersion + databaseRef: + name: hz-prod + updateVersion: + targetVersion: 5.5.6 +``` + +Sample `HazelcastOpsRequest` for horizontal scaling: +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: HazelcastOpsRequest +metadata: + name: hazelcast-scale-up + namespace: demo +spec: + databaseRef: + name: hz-prod + type: HorizontalScaling + horizontalScaling: + hazelcast: 4 +``` + +Sample `HazelcastOpsRequest` for vertical scaling: +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: HazelcastOpsRequest +metadata: + name: hazelcast-vertical-scaling + namespace: demo +spec: + databaseRef: + name: hz-prod + type: VerticalScaling + verticalScaling: + hazelcast: + resources: + limits: + cpu: 1 + memory: 2.5Gi + requests: + cpu: 1 + memory: 2.5Gi +``` + +Sample `HazelcastOpsRequest` for reconfiguring TLS: +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: HazelcastOpsRequest +metadata: + name: hzops-add-tls + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: hz-prod + tls: + issuerRef: + name: hz-issuer + kind: Issuer + apiGroup: "cert-manager.io" + certificates: + - alias: client + subject: + organizations: + - hazelcast + organizationalUnits: + - client + timeout: 5m + apply: IfReady +``` + +Sample `HazelcastOpsRequest` for restart: +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: HazelcastOpsRequest +metadata: + name: hazelcast-restart + namespace: demo +spec: + apply: IfReady + databaseRef: + name: hz-prod + type: Restart +``` +Sample `HazelcastOpsRequest` for reconfigure: +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: HazelcastOpsRequest +metadata: + name: hz-reconfigure-custom-config + namespace: demo +spec: + apply: IfReady + configuration: + configSecret: + name: hazelcast-custom-config + applyConfig: + hazelcast.yaml: |- + hazelcast: + persistence: + enabled: true + validation-timeout-seconds: 2500 + data-load-timeout-seconds: 3000 + auto-remove-stale-data: false + hazelcast-client.yaml: |- + hazelcast-client: {} + databaseRef: + name: hz-prod + type: Reconfigure + +``` + +Here, we are going to describe the various sections of a `HazelcastOpsRequest` crd. + +## HazelcastOpsRequest `Spec` + +A `HazelcastOpsRequest` object has the following fields in the `spec` section. + +### spec.databaseRef + +`spec.databaseRef` is a required field that point to the [Hazelcast](/docs/guides/hazelcast/concepts/hazelcast.md) object for which the administrative operations will be performed. This field consists of the following sub-field: + +- **spec.databaseRef.name :** specifies the name of the [Hazelcast](/docs/guides/hazelcast/concepts/hazelcast.md) object. + +### spec.type + +`spec.type` specifies the kind of operation that will be applied to the database. Currently, the following types of operations are allowed in `HazelcastOpsRequest`. + +- `UpdateVersion` +- `HorizontalScaling` +- `VerticalScaling` +- `VolumeExpansion` +- `Reconfigure` +- `ReconfigureTLS` +- `Restart` + +> You can perform only one type of operation on a single `HazelcastOpsRequest` CR. For example, if you want to update your database and scale up its replica then you have to create two separate `HazelcastOpsRequest`. At first, you have to create a `HazelcastOpsRequest` for updating. Once it is completed, then you can create another `HazelcastOpsRequest` for scaling. 
+ +### spec.updateVersion + +If you want to update your Hazelcast version, you have to specify the `spec.updateVersion` section that specifies the desired version information. This field consists of the following sub-field: + +- `spec.updateVersion.targetVersion` refers to a [HazelcastVersion](/docs/guides/hazelcast/concepts/hazelcastversion.md) CR that contains the Hazelcast version information where you want to update. + +> You can only update between Hazelcast versions. KubeDB does not support downgrade for Hazelcast. + +### spec.horizontalScaling + +If you want to scale-up or scale-down your Hazelcast cluster, you have to specify `spec.horizontalScaling` section. This field consists of the following sub-field: + +- `spec.horizontalScaling.member` indicates the desired number of member nodes for Hazelcast cluster after scaling. For example, if your cluster currently has 3 member nodes and you want to add additional 2 member nodes then you have to specify 5 in `spec.horizontalScaling.member` field. Similarly, if you want to remove 1 node from the cluster, you have to specify 2 in `spec.horizontalScaling.member` field. + +### spec.verticalScaling + +`spec.verticalScaling` is used to specify the new resources requirements to vertical scale the database. This field consists of the following sub-fields: + +- `spec.verticalScaling.member` indicates the Hazelcast member resources. It has the below structure: + +```yaml +requests: + memory: "2Gi" + cpu: "1" +limits: + memory: "2Gi" + cpu: "1" +``` + +Here, when you specify the resource request for Hazelcast member, KubeDB will create a new [PetSet](https://github.com/kubeops/petset) and [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) with the new resource requirements and drop the old PetSet and StatefulSet. + +### spec.volumeExpansion + +To expand the storage of the Hazelcast cluster, you have to specify `spec.volumeExpansion` section. This field consists of the following sub-field: + +- `spec.volumeExpansion.member` indicates the desired size for the persistent volume claim of the member nodes. + +All the volumes of member nodes will be expanded when the ops request type is `VolumeExpansion`. + +### spec.reconfigure + +`spec.reconfigure` specifies the information of the custom configuration. This field consists of the following sub-field: + +- `spec.reconfigure.configSecret` points to a secret in the same namespace of a Hazelcast resource, which contains the new custom configurations. If there are any configSecret is already associated with the database, the new custom configuration will be merged and will be applied to the database. + +### spec.reconfigureTLS + +KubeDB supports reconfigure i.e. add, remove, update and rotation of TLS/SSL certificates for existing Hazelcast via a HazelcastOpsRequest. This field consists of the following sub-field: + +- `spec.reconfigureTLS.issuerRef` specifies the issuer name, api group and kind of the desired issuer. For example, + +```yaml +issuerRef: + apiGroup: cert-manager.io + kind: Issuer + name: hz-ca-issuer +``` + +- `spec.reconfigureTLS.certificates` specifies the certificates. For example, + +```yaml +certificates: +- alias: server + subject: + organizations: + - hazelcast + organizationalUnits: + - server +- alias: client + subject: + organizations: + - hazelcast + organizationalUnits: + - client +``` + +- `spec.reconfigureTLS.rotateCertificates` specifies that we want to rotate the certificate of this database. Set it to `true` to rotate certificates. 
+ +- `spec.reconfigureTLS.remove` specifies that we want to remove tls of this database. Set it to `true` to remove tls. + +### spec.timeout + +As we internally retry the ops request steps multiple times, This `timeout` field helps the users to specify the timeout for those steps of the ops request (in second). If a step doesn't finish within the specified timeout, the ops request will result in failure. + +### spec.apply + +This field controls the execution of obsRequest depending on the database state. It has two supported values: `Always` & `IfReady`. +Use `IfReady` if you want to process the opsRequest only when the database is Ready. And use `Always` if you want to process the execution of opsReq irrespective of the Database state. + +## HazelcastOpsRequest `Status` + +`.status` describes the current state and progress of the `HazelcastOpsRequest` operation. It has the following fields: + +### status.phase + +`status.phase` indicates the overall phase of the operation for this `HazelcastOpsRequest`. It can have the following three values: + +| Phase | Meaning | +| ---------- | ------------------------------------------------------------------------------------ | +| Successful | KubeDB has successfully performed the operation requested in the HazelcastOpsRequest | +| Failed | KubeDB has failed to perform the operation requested in the HazelcastOpsRequest | +| Progressing| KubeDB is performing the operation requested in the HazelcastOpsRequest | + +### status.observedGeneration + +`status.observedGeneration` shows the most recent generation observed by the `HazelcastOpsRequest` controller. + +### status.conditions + +`status.conditions` is an array that specifies the conditions of different steps of `HazelcastOpsRequest` processing. Each condition entry has the following fields: + +- `types` specifies the type of the condition. HazelcastOpsRequest has the following types of conditions: + +| Type | Meaning | +| ---------------------------- | ------------------------------------------------------------------------- | +| `Progressing` | Specifies that the operation is now in the progressing state | +| `Successful` | Specifies that the operation phase succeeded | +| `Failed` | Specifies that the operation phase failed | +| `UpdateVersion` | Specifies that the UpdateVersion operation succeeded | +| `HorizontalScaling` | Specifies that the HorizontalScaling operation succeeded | +| `VerticalScaling` | Specifies that the VerticalScaling operation succeeded | +| `VolumeExpansion` | Specifies that the VolumeExpansion operation succeeded | +| `Reconfigure` | Specifies that the Reconfigure operation succeeded | +| `ReconfigureTLS` | Specifies that the ReconfigureTLS operation succeeded | +| `Restart` | Specifies that the Restart operation succeeded | + +- `status` specifies the status of the condition. It can be `True`, `False` or `Unknown`. +- `lastTransitionTime` specifies the last time the condition transitioned from one status to another. +- `reason` specifies the reason for the last transition of the condition. +- `message` provides a human readable message indicating details about the last transition. + +## Next Steps + +- Learn about [Hazelcast CRD](/docs/guides/hazelcast/concepts/hazelcast.md). +- Deploy your first Hazelcast database with KubeDB by following the guide [here](/docs/guides/hazelcast/quickstart/overview/index.md). 
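+
+The `VolumeExpansion` operation type described above has no sample CR in this document. A minimal sketch is given below; it assumes the component key mirrors the `hazelcast` key used in the scaling samples and that the underlying StorageClass allows online expansion.
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: HazelcastOpsRequest
+metadata:
+  name: hazelcast-volume-expansion
+  namespace: demo
+spec:
+  apply: IfReady
+  databaseRef:
+    name: hz-prod
+  type: VolumeExpansion
+  volumeExpansion:
+    # assumed mode; requires a StorageClass with allowVolumeExpansion: true
+    mode: Online
+    # new size requested for the member PVCs
+    hazelcast: 4Gi
+```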
diff --git a/docs/guides/hazelcast/concepts/hazelcast.md b/docs/guides/hazelcast/concepts/hazelcast.md index 116f22551c..eaf5a91831 100644 --- a/docs/guides/hazelcast/concepts/hazelcast.md +++ b/docs/guides/hazelcast/concepts/hazelcast.md @@ -233,9 +233,9 @@ We can add java environment variables using this attribute. Hazelcast managed by KubeDB can be monitored with builtin-Prometheus and Prometheus operator out-of-the-box. -### spec.configSecret +### spec.configuration -`spec.configSecret` is an optional field that allows users to provide custom configuration for Hazelcast. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). So you can use any Kubernetes supported volume source such as `configMap`, `secret`, `azureDisk` etc. +`spec.configuration` is an optional field that allows users to provide custom configuration for Hazelcast. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). So you can use any Kubernetes supported volume source such as `configMap`, `secret`, `azureDisk` etc. ### spec.podTemplate diff --git a/docs/guides/hazelcast/concepts/hazelcast.md.bak b/docs/guides/hazelcast/concepts/hazelcast.md.bak new file mode 100644 index 0000000000..116f22551c --- /dev/null +++ b/docs/guides/hazelcast/concepts/hazelcast.md.bak @@ -0,0 +1,389 @@ +--- +title: Hazelcast CRD +menu: + docs_{{ .version }}: + identifier: hz-hazelcast-concepts + name: Hazelcast + parent: hz-concepts-hazelcast + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Hazelcast + +## What is Hazelcast + +`Hazelcast` is a Kubernetes `Custom Resource Definitions` (CRD). It provides declarative configuration for [Hazelcast](https://hazelcast.com/) in a Kubernetes native way. You only need to describe the desired database configuration in a Hazelcast object, and the KubeDB operator will create Kubernetes objects in the desired state for you. + +## Hazelcast Spec + +As with all other Kubernetes objects, a Hazelcast needs `apiVersion`, `kind`, and `metadata` fields. It also needs a `.spec` section. Below is an example Hazelcast object. 
+ +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Hazelcast +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"kubedb.com/v1alpha2","kind":"Hazelcast","metadata":{"annotations":{},"name":"hazelcast-sample","namespace":"demo"},"spec":{"deletionPolicy":"Halt","enableSSL":true,"licenseSecret":{"name":"hz-license-key"},"replicas":3,"storage":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"2Gi"}},"storageClassName":"longhorn"},"tls":{"certificates":[{"alias":"server","dnsNames":["localhost"],"ipAddresses":["127.0.0.1"],"subject":{"organizations":["kubedb"]}},{"alias":"client","dnsNames":["localhost"],"ipAddresses":["127.0.0.1"],"subject":{"organizations":["kubedb"]}}],"issuerRef":{"apiGroup":"cert-manager.io","kind":"ClusterIssuer","name":"self-signed-issuer"}},"version":"5.5.2"}} + creationTimestamp: "2025-06-11T07:35:38Z" + finalizers: + - kubedb.com + generation: 2 + name: hazelcast-sample + namespace: demo + resourceVersion: "1180125" + uid: c86fe3d3-276a-4124-a1cf-d7f5409ee61f +spec: + deletionPolicy: Halt + enableSSL: true + healthChecker: + failureThreshold: 3 + periodSeconds: 20 + timeoutSeconds: 10 + keystoreSecret: + name: hazelcast-sample-keystore-cred + licenseSecret: + name: hz-license-key + podTemplate: + controller: {} + metadata: {} + spec: + containers: + - livenessProbe: + failureThreshold: 10 + httpGet: + path: /hazelcast/health/node-state + port: 5701 + scheme: HTTPS + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + name: hazelcast + readinessProbe: + failureThreshold: 10 + httpGet: + path: /hazelcast/health/ready + port: 5701 + scheme: HTTPS + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1536Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsUser: 65534 + seccompProfile: + type: RuntimeDefault + initContainers: + - name: hazelcast-init + resources: + limits: + memory: 512Mi + requests: + cpu: 200m + memory: 256Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsUser: 65534 + seccompProfile: + type: RuntimeDefault + podPlacementPolicy: + name: default + securityContext: + fsGroup: 65534 + terminationGracePeriodSeconds: 600 + replicas: 3 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: longhorn + storageType: Durable + tls: + certificates: + - alias: server + dnsNames: + - localhost + ipAddresses: + - 127.0.0.1 + subject: + organizations: + - kubedb + - alias: client + dnsNames: + - localhost + ipAddresses: + - 127.0.0.1 + subject: + organizations: + - kubedb + issuerRef: + apiGroup: cert-manager.io + kind: ClusterIssuer + name: self-signed-issuer + version: 5.5.2 +``` + + +### spec.version + +`spec.version` is a required field specifying the name of the [HazelcastVersion](/docs/guides/hazelcast/concepts/hazelcastversion.md) crd where the docker images are specified. Currently, when you install KubeDB, it creates the following `HazelcastVersion` crds, + +- `5.5.2` + +### spec.disableSecurity + +`spec.disableSecurity` is an optional field that decides whether Hazelcast instance will be secured by auth or no. + +### spec.authSecret + +`spec.authSecret` is an optional field that points to a Secret used to hold credentials for `Hazelcast` superuser. 
If not set, KubeDB operator creates a new Secret `{Hazelcast-object-name}-auth` for storing the password for `admin` superuser. + +We can use this field in 3 mode. + +1. Using an external secret. In this case, You need to create an auth secret first with required fields, then specify the secret name when creating the Hazelcast object using `spec.authSecret.name` & set `spec.authSecret.externallyManaged` to true. +```yaml +authSecret: + name: + externallyManaged: true +``` +2. Specifying the secret name only. In this case, You need to specify the secret name when creating the Hazelcast object using `spec.authSecret.name`. `externallyManaged` is by default false. +```yaml +authSecret: + name: +``` + +3. Let KubeDB do everything for you. In this case, no work for you. + +AuthSecret contains a `username` key and a `password` key which contains the `username` and `password` respectively for `Hazelcast` superuser. + +Example: + +```bash +$ kubectl create secret generic hazelcast-sample-auth -n demo \ +--from-literal=username=admin \ +--from-literal=password=6q8u_2jMOW-OOZXk +secret "hazelcast-sample-auth" created +``` + +```yaml +apiVersion: v1 +data: + password: NnE4dV8yak1PVy1PT1pYaw== + username: amhvbi1kb2U= +kind: Secret +metadata: + name: hazelcast-sample-auth + namespace: demo +type: Opaque +``` + +Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator (version 0.13.0 and higher). + +> Clsuter Mode: all peers are equal in the cluster. + +### spec.replicas + +`spec.replicas` specifies the number of nodes (ie. pods) in the Hazelcast cluster. The default value of this field is `1`. + +```yaml +spec: + replicas: 3 +``` + +### spec.storage + +If you set `spec.storageType:` to `Durable`, then `spec.storage` is a required field that specifies the StorageClass of PVCs dynamically allocated to store data for the database. This storage spec will be passed to the Petset created by KubeDB operator to run database pods. You can specify any StorageClass available in your cluster with appropriate resource requests. + +- `spec.storage.storageClassName` is the name of the StorageClass used to provision PVCs. PVCs don’t necessarily have to request a class. A PVC with its storageClassName set equal to "" is always interpreted to be requesting a PV with no class, so it can only be bound to PVs with no class (no annotation or one set equal to ""). A PVC with no storageClassName is not quite the same and is treated differently by the cluster depending on whether the DefaultStorageClass admission plugin is turned on. +- `spec.storage.accessModes` uses the same conventions as Kubernetes PVCs when requesting storage with specific access modes. +- `spec.storage.resources` can be used to request specific quantities of storage. This follows the same resource model used by PVCs. + +To learn how to configure `spec.storage`, please visit the links below: + +- https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims + +### spec.javaOpts + +We can add java environment variables using this attribute. + +### spec.monitor + +Hazelcast managed by KubeDB can be monitored with builtin-Prometheus and Prometheus operator out-of-the-box. + + +### spec.configSecret + +`spec.configSecret` is an optional field that allows users to provide custom configuration for Hazelcast. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). 
So you can use any Kubernetes supported volume source such as `configMap`, `secret`, `azureDisk` etc. + +### spec.podTemplate + +KubeDB allows providing a template for database pod through `spec.podTemplate`. KubeDB operator will pass the information provided in `spec.podTemplate` to the Petset created for Hazelcast server. + +KubeDB accept following fields to set in `spec.podTemplate:` + +- metadata: + - annotations (pod's annotation) +- controller: + - annotations (petset's annotation) +- spec: + - resources + - initContainers + - containers + - imagePullSecrets + - nodeSelector + - serviceAccountName + - schedulerName + - tolerations + - priorityClassName + - priority + - securityContext + +You can check out the full list [here](https://github.com/kmodules/offshoot-api/blob/39bf8b2/api/v2/types.go#L44-L279). +Uses of some field of `spec.podTemplate` is described below, + +#### spec.podTemplate.spec.imagePullSecret + +`KubeDB` provides the flexibility of deploying Hazelcast server from a private Docker registry. +#### spec.podTemplate.spec.nodeSelector + +`spec.podTemplate.spec.nodeSelector` is an optional field that specifies a map of key-value pairs. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). To learn more, see [here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) . + +#### spec.podTemplate.spec.serviceAccountName + +`serviceAccountName` is an optional field supported by KubeDB Operator (version 0.13.0 and higher) that can be used to specify a custom service account to fine tune role based access control. + +If this field is left empty, the KubeDB operator will create a service account name matching Hazelcast crd name. Role and RoleBinding that provide necessary access permissions will also be generated automatically for this service account. + +If a service account name is given, but there's no existing service account by that name, the KubeDB operator will create one, and Role and RoleBinding that provide necessary access permissions will also be generated for this service account. + +If a service account name is given, and there's an existing service account by that name, the KubeDB operator will use that existing service account. Since this service account is not managed by KubeDB, users are responsible for providing necessary access permissions manually. + +#### spec.podTemplate.spec.resources + +`spec.podTemplate.spec.resources` is an optional field. This can be used to request compute resources required by the database pods. To learn more, visit [here](http://kubernetes.io/docs/user-guide/compute-resources/). + +### spec.serviceTemplates + +You can also provide a template for the services created by KubeDB operator for Hazelcast server through `spec.serviceTemplates`. This will allow you to set the type and other properties of the services. + +KubeDB allows following fields to set in `spec.serviceTemplates`: +- `alias` represents the identifier of the service. It has the following possible value: + - `primary` is used for the primary service identification. + - `standby` is used for the secondary service identification. + - `stats` is used for the exporter service identification. 
+ +- metadata: + - annotations +- spec: + - type + - ports + - clusterIP + - externalIPs + - loadBalancerIP + - loadBalancerSourceRanges + - externalTrafficPolicy + - healthCheckNodePort + - sessionAffinityConfig + +See [here](https://github.com/kmodules/offshoot-api/blob/kubernetes-1.16.3/api/v1/types.go#L163) to understand these fields in detail. + +### spec.tls + +> The ReconfigureTLS only works with the [Cert-Manager](https://cert-manager.io/docs/concepts/) managed certificates. [Installation guide](https://cert-manager.io/docs/installation/). + +`spec.tls` is an `optional` field, but it acts as a `required` field when the `spec.type` is set to `ReconfigureTLS`. It specifies the necessary information required to add or remove or update the TLS configuration of the Hazelcast cluster. It consists of the following sub-fields: + +- `tls.remove` ( `bool` | `false` ) - tells the operator to remove the TLS configuration for the HTTP layer. The transport layer is always secured with certificates, so the removal process does not affect the transport layer. +- `tls.rotateCertificates` ( `bool` | `false`) - tells the operator to renew all the certificates. +- `tls.issuerRef` - is an `optional` field that references to the `Issuer` or `ClusterIssuer` custom resource object of [cert-manager](https://cert-manager.io/docs/concepts/issuer/). It is used to generate the necessary certificate secrets for Hazelcast. If the `issuerRef` is not specified, the operator creates a self-signed CA and also creates necessary certificate (valid: 365 days) secrets using that CA. + - `apiGroup` - is the group name of the resource that is being referenced. Currently, the only supported value is `cert-manager.io`. + - `kind` - is the type of resource that is being referenced. The supported values are `Issuer` and `ClusterIssuer`. + - `name` - is the name of the resource ( `Issuer` or `ClusterIssuer` ) that is being referenced. + +- `tls.certificates` - is an `optional` field that specifies a list of certificate configurations used to configure the certificates. It has the following fields: + - `alias` - represents the identifier of the certificate. It has the following possible value: + - `server` - is used for the server certificate configuration. + - `client` - is used for the client certificate configuration. + + - `secretName` - ( `string` | `"-alias-cert"` ) - specifies the k8s secret name that holds the certificates. + + - `subject` - specifies an `X.509` distinguished name (DN). It has the following configurable fields: + - `organizations` ( `[]string` | `nil` ) - is a list of organization names. + - `organizationalUnits` ( `[]string` | `nil` ) - is a list of organization unit names. + - `countries` ( `[]string` | `nil` ) - is a list of country names (ie. Country Codes). + - `localities` ( `[]string` | `nil` ) - is a list of locality names. + - `provinces` ( `[]string` | `nil` ) - is a list of province names. + - `streetAddresses` ( `[]string` | `nil` ) - is a list of street addresses. + - `postalCodes` ( `[]string` | `nil` ) - is a list of postal codes. + - `serialNumber` ( `string` | `""` ) is a serial number. + + For more details, visit [here](https://golang.org/pkg/crypto/x509/pkix/#Name). + + - `duration` ( `string` | `""` ) - is the period during which the certificate is valid. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300m"`, `"1.5h"` or `"20h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". 
+ - `renewBefore` ( `string` | `""` ) - is a specifiable time before expiration duration. + - `dnsNames` ( `[]string` | `nil` ) - is a list of subject alt names. + - `ipAddresses` ( `[]string` | `nil` ) - is a list of IP addresses. + - `uris` ( `[]string` | `nil` ) - is a list of URI Subject Alternative Names. + - `emailAddresses` ( `[]string` | `nil` ) - is a list of email Subject Alternative Names. + +### spec.deletionPolicy + +`deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `Hazelcast` crd or which resources KubeDB should keep or delete when you delete `Hazelcast` crd. KubeDB provides following four deletion policies: + +- DoNotTerminate +- Halt +- Delete (`Default`) +- WipeOut + +When `deletionPolicy` is `DoNotTerminate`, KubeDB takes advantage of `ValidationWebhook` feature in Kubernetes 1.9.0 or later clusters to implement `DoNotTerminate` feature. If admission webhook is enabled, `DoNotTerminate` prevents users from deleting the database as long as the `spec.deletionPolicy` is set to `DoNotTerminate`. + +Following table show what KubeDB does when you delete Hazelcast crd for different deletion policies, + +| Behavior | DoNotTerminate | Halt | Delete | WipeOut | +|-------------------------------------|:--------------:|:--------:|:--------:|:--------:| +| 1. Block Delete operation | ✓ | ✗ | ✗ | ✗ | +| 2. Delete Petset | ✗ | ✓ | ✓ | ✓ | +| 3. Delete Services | ✗ | ✓ | ✓ | ✓ | +| 4. Delete PVCs | ✗ | ✗ | ✓ | ✓ | +| 5. Delete Secrets | ✗ | ✗ | ✗ | ✓ | +| 6. Delete Snapshots | ✗ | ✗ | ✗ | ✓ | +| 7. Delete Snapshot data from bucket | ✗ | ✗ | ✗ | ✓ | +If you don't specify `spec.deletionPolicy` KubeDB uses `Delete` deletion policy by default. + +### spec.halted +Indicates that the database is halted and all offshoot Kubernetes resources except PVCs are deleted. + +## spec.healthChecker +It defines the attributes for the health checker. +- `spec.healthChecker.periodSeconds` specifies how often to perform the health check. +- `spec.healthChecker.timeoutSeconds` specifies the number of seconds after which the probe times out. +- `spec.healthChecker.failureThreshold` specifies minimum consecutive failures for the healthChecker to be considered failed. + +Know details about KubeDB Health checking from this [blog post](https://appscode.com/blog/post/kubedb-health-checker/). + +## Next Steps + +- Learn how to use KubeDB to run a Hazelcast server [here](/docs/guides/hazelcast/README.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). 
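To tie the `spec.deletionPolicy` and `spec.healthChecker` fields together, here is a minimal sketch of how they might be set on a Hazelcast object. The object name, namespace, and the specific numbers are illustrative assumptions, not defaults mandated by KubeDB:

```yaml
apiVersion: kubedb.com/v1alpha2
kind: Hazelcast
metadata:
  name: hz-sample        # hypothetical name
  namespace: demo
spec:
  replicas: 3
  version: 5.5.2
  licenseSecret:
    name: hz-license-key
  storage:
    accessModes:
      - ReadWriteOnce
    resources:
      requests:
        storage: 1Gi
  # Halt deletes the PetSet and Services but keeps PVCs and Secrets; see the table above.
  deletionPolicy: Halt
  # Probe every 20s, time out after 10s, mark the check failed after 3 consecutive failures.
  healthChecker:
    periodSeconds: 20
    timeoutSeconds: 10
    failureThreshold: 3
```

With `Halt`, the PVCs and Secrets survive deletion so the data is retained, whereas `WipeOut` additionally removes Secrets, Snapshots, and snapshot data from the bucket, as listed in the table above.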
diff --git a/docs/guides/hazelcast/configuration/hazelcast-config.md b/docs/guides/hazelcast/configuration/hazelcast-config.md index 3f704218e5..191cffd4fc 100644 --- a/docs/guides/hazelcast/configuration/hazelcast-config.md +++ b/docs/guides/hazelcast/configuration/hazelcast-config.md @@ -105,8 +105,8 @@ spec: version: 5.5.2 licenseSecret: name: hz-license-key - configSecret: - name: hz + configuration: + secretName: hz storage: accessModes: - ReadWriteOnce diff --git a/docs/guides/hazelcast/configuration/hazelcast-config.md.bak b/docs/guides/hazelcast/configuration/hazelcast-config.md.bak new file mode 100644 index 0000000000..3f704218e5 --- /dev/null +++ b/docs/guides/hazelcast/configuration/hazelcast-config.md.bak @@ -0,0 +1,179 @@ +--- +title: Configuring Hazelcast Cluster +menu: + docs_{{ .version }}: + identifier: hz-configuration-cluster + name: Cluster Configuration + parent: hz-configuration + weight: 15 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Configure Hazelcast Cluster + +In Hazelcast cluster, nodes work together to provide distributed computing and caching capabilities. In this tutorial, we will see how to configure a Hazelcast cluster. + +## Before You Begin + +At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +Now, install the KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create namespace demo +namespace/demo created + +$ kubectl get namespace +NAME STATUS AGE +demo Active 9s +``` + +> Note: YAML files used in this tutorial are stored in [here](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/hazelcast/configuration/ +) in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Find Available StorageClass + +We will have to provide `StorageClass` in Hazelcast CR specification. Check available `StorageClass` in your cluster using the following command, + +```bash +$ kubectl get storageclass +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +standard (default) rancher.io/local-path Delete WaitForFirstConsumer false 1h +``` + +Here, we have `standard` StorageClass in our cluster from [Local Path Provisioner](https://github.com/rancher/local-path-provisioner). + +## Use Custom Configuration + +Say we want to enable persistence with custom validation and data load timeout settings. Let's create the `hazelcast.yaml` file with our desired configurations. 
+ +**hazelcast.yaml:** + +```yaml +hazelcast: + persistence: + enabled: true + validation-timeout-seconds: 2500 + data-load-timeout-seconds: 3000 + auto-remove-stale-data: false +``` + +Let's create a k8s secret containing the above configuration where the file name will be the key and the file-content as the value: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: hz + namespace: demo +stringData: + hazelcast.yaml: |- + hazelcast: + persistence: + enabled: true + validation-timeout-seconds: 2500 + data-load-timeout-seconds: 3000 + auto-remove-stale-data: false + hazelcast-client.yaml: |- +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/hazelcast/configuration/configsecret-combine.yaml +secret/hz created +``` +Before deploying hazelcast we need to create license secret since we are running enterprise version of hazelcast. + +```bash +kubectl create secret generic hz-license-key -n demo --from-literal=licenseKey='your hazelcast license key' +secret/hz-license-key created +``` +Now that the config secret is created, it needs to be mention in the [Hazelcast](/docs/guides/hazelcast/concepts/hazelcast.md) object's yaml: + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Hazelcast +metadata: + name: hazelcast-dev + namespace: demo +spec: + replicas: 2 + version: 5.5.2 + licenseSecret: + name: hz-license-key + configSecret: + name: hz + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: longhorn + storageType: Durable + deletionPolicy: WipeOut +``` + +Now, create the Hazelcast object by the following command: + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/hazelcast/configuration/hazelcast-config.yaml +hazelcast.kubedb.com/hazelcast-dev created +``` + +Now, wait for the Hazelcast to become ready: + +```bash +$ kubectl get hz -n demo -w +NAME TYPE VERSION STATUS AGE +hazelcast-dev kubedb.com/v1alpha2 5.5.2 Provisioning 0s +hazelcast-dev kubedb.com/v1alpha2 5.5.2 Provisioning 24s +. +. +hazelcast-dev kubedb.com/v1alpha2 5.5.2 Ready 92s +``` + +## Verify Configuration + +Let's exec into one of the hazelcast pod that we have created and check the configurations are applied or not: + +Exec into the Hazelcast pod: + +```bash +$ kubectl exec -it -n demo hazelcast-dev-0 -- bash +hazelcast@hazelcast-dev-0:~$ +``` + +Now, execute the following commands to see the configurations: +```bash +hazelcast@hazelcast-dev-0:~$ cat /data/hazelcast/hazelcast.yaml | grep persistence + persistence: +hazelcast@hazelcast-dev-0:~$ cat /data/hazelcast/hazelcast.yaml | grep enabled + enabled: true +hazelcast@hazelcast-dev-0:~$ cat /data/hazelcast/hazelcast.yaml | grep validation-timeout-seconds + validation-timeout-seconds: 2500 +hazelcast@hazelcast-dev-0:~$ cat /data/hazelcast/hazelcast.yaml | grep data-load-timeout-seconds + data-load-timeout-seconds: 3000 +``` +Here, we can see that our given persistence configuration is applied to the Hazelcast cluster for all nodes. + +## Cleanup + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +$ kubectl delete hz -n demo hazelcast-dev +$ kubectl delete secret -n demo hz +$ kubectl delete namespace demo +``` + +## Next Steps + +- Detail concepts of [Hazelcast object](/docs/guides/hazelcast/concepts/hazelcast.md). +- Monitor your Hazelcast database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/hazelcast/monitoring/prometheus-operator.md). 
+ +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/ignite/concepts/ignite.md b/docs/guides/ignite/concepts/ignite.md index 842b62a258..b0196cc24c 100644 --- a/docs/guides/ignite/concepts/ignite.md +++ b/docs/guides/ignite/concepts/ignite.md @@ -31,8 +31,8 @@ metadata: spec: replicas: 3 version: 2.17.0 - configSecret: - name: ignite-configuration + configuration: + secretName: ignite-configuration authSecret: kind: Secret name: ignite-quickstart-auth @@ -80,9 +80,9 @@ Ignite managed by KubeDB can be monitored with builtin-Prometheus and Prometheus - [Monitor Ignite with builtin Prometheus](/docs/guides/ignite/monitoring/using-builtin-prometheus.md) - [Monitor Ignite with Prometheus operator](/docs/guides/ignite/monitoring/using-prometheus-operator.md) -### spec.configSecret +### spec.configuration -`spec.configSecret` is an optional field that allows users to provide custom configuration for Ignite. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). So you can use any Kubernetes supported volume source such as `configMap`, `secret`, `azureDisk` etc. To learn more about how to use a custom configuration file see [here](/docs/guides/ignite/custom-configuration/using-config-file.md). +`spec.configuration` is an optional field that allows users to provide custom configuration for Ignite. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). So you can use any Kubernetes supported volume source such as `configMap`, `secret`, `azureDisk` etc. To learn more about how to use a custom configuration file see [here](/docs/guides/ignite/custom-configuration/using-config-file.md). ### spec.podTemplate diff --git a/docs/guides/ignite/concepts/ignite.md.bak b/docs/guides/ignite/concepts/ignite.md.bak new file mode 100644 index 0000000000..bde4f14a4c --- /dev/null +++ b/docs/guides/ignite/concepts/ignite.md.bak @@ -0,0 +1,246 @@ +--- +title: Ignite +menu: + docs_{{ .version }}: + identifier: ig-ignite-concepts + name: Ignite + parent: ig-concepts-ignite + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Ignite + +## What is Ignite + +`Ignite` is a Kubernetes `Custom Resource Definitions` (CRD). It provides declarative configuration for [Ignite](https://ignite.apache.org/) in a Kubernetes native way. You only need to describe the desired database configuration in a Ignite object, and the KubeDB operator will create Kubernetes objects in the desired state for you. + +## Ignite Spec + +As with all other Kubernetes objects, a Ignite needs `apiVersion`, `kind`, and `metadata` fields. It also needs a `.spec` section. Below is an example of a Ignite object. 
+ +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Ignite +metadata: + name: ignite-quickstart + namespace: demo +spec: + replicas: 3 + version: 2.17.0 + configuration: + secretName: ignite-configuration + authSecret: + kind: Secret + name: ignite-quickstart-auth + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + podTemplate: + spec: + containers: + - name: "ignite" + resources: + requests: + cpu: "500m" + limits: + cpu: "600m" + memory: "1.5Gi" + monitor: + agent: prometheus.io/operator + prometheus: + serviceMonitor: + labels: + release: prometheus + deletionPolicy: WipeOut +``` + +### spec.replicas + +`spec.replicas` is an optional field that specifies the number of desired Instances/Replicas of Ignite server. If you do not specify .spec.replicas, then it defaults to 1. + +KubeDB uses `PodDisruptionBudget` to ensure that majority of these replicas are available during [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions) so that quorum is maintained. + +### spec.version + +`spec.version` is a required field specifying the name of the [IgniteVersion](/docs/guides/ignite/concepts/ignite-version.md) crd where the docker images are specified. Currently, when you install KubeDB, it creates the following `IgniteVersion` crds, + +- `2.17.0` + +### spec.monitor + +Ignite managed by KubeDB can be monitored with builtin-Prometheus and Prometheus operator out-of-the-box. To learn more, + +- [Monitor Ignite with builtin Prometheus](/docs/guides/ignite/monitoring/using-builtin-prometheus.md) +- [Monitor Ignite with Prometheus operator](/docs/guides/ignite/monitoring/using-prometheus-operator.md) + +### spec.configSecret + +`spec.configSecret` is an optional field that allows users to provide custom configuration for Ignite. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). So you can use any Kubernetes supported volume source such as `configMap`, `secret`, `azureDisk` etc. To learn more about how to use a custom configuration file see [here](/docs/guides/ignite/custom-configuration/using-config-file.md). + +### spec.podTemplate + +KubeDB allows providing a template for database pod through `spec.podTemplate`. KubeDB operator will pass the information provided in `spec.podTemplate` to the Petset created for Ignite server. + +KubeDB accept following fields to set in `spec.podTemplate:` + +- metadata + - annotations (pod's annotation) +- controller + - annotations (petset's annotation) +- spec: + - containers + - volumes + - podPlacementPolicy + - initContainers + - containers + - podPlacementPolicy + - imagePullSecrets + - nodeSelector + - serviceAccountName + - schedulerName + - tolerations + - priorityClassName + - priority + - securityContext + +Uses of some field of `spec.podTemplate` is described below, + +You can check out the full list [here](https://github.com/kmodules/offshoot-api/blob/master/api/v2/types.go#L26C1-L279C1). +Uses of some field of `spec.podTemplate` is described below, + +#### spec.podTemplate.spec.tolerations + +The `spec.podTemplate.spec.tolerations` is an optional field. This can be used to specify the pod's tolerations. + +#### spec.podTemplate.spec.volumes + +The `spec.podTemplate.spec.volumes` is an optional field. This can be used to provide the list of volumes that can be mounted by containers belonging to the pod. 
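As a rough sketch of how the two fields above can be combined under `spec.podTemplate` (the toleration key/value and the volume name are made-up examples, not values required by KubeDB):

```yaml
spec:
  podTemplate:
    spec:
      # Tolerate a hypothetical taint so the pods can land on dedicated nodes.
      tolerations:
        - key: "workload"
          operator: "Equal"
          value: "ignite"
          effect: "NoSchedule"
      # An extra volume that containers in the pod may mount.
      volumes:
        - name: scratch
          emptyDir: {}
```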
+ +#### spec.podTemplate.spec.podPlacementPolicy + +`spec.podTemplate.spec.podPlacementPolicy` is an optional field. This can be used to provide the reference of the podPlacementPolicy. This will be used by our Petset controller to place the db pods throughout the region, zone & nodes according to the policy. It utilizes kubernetes affinity & podTopologySpreadContraints feature to do so. + +#### spec.podTemplate.spec.containers + +The `spec.podTemplate.spec.containers` can be used to provide the list containers and their configurations for to the database pod. some of the fields are described below, + +##### spec.podTemplate.spec.containers[].name +The `spec.podTemplate.spec.containers[].name` field used to specify the name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + +##### spec.podTemplate.spec.containers[].args +`spec.podTemplate.spec.containers[].args` is an optional field. This can be used to provide additional arguments to database installation. + +##### spec.podTemplate.spec.containers[].env + +`.env` is an optional field that specifies the environment variables to pass to the Ignite containers. + +Note that, KubeDB does not allow to update the environment variables. If you try to update environment variables, KubeDB operator will reject the request with following error, + +```ini +Error from server (BadRequest): error when applying patch: +... +for: "./ig.yaml": admission webhook "ignite.validators.kubedb.com" denied the request: precondition failed for: +... +At least one of the following was changed: + apiVersion + kind + name + namespace + spec.podTemplate.spec.nodeSelector + spec.podTemplate.spec.env +``` + +#### spec.podTemplate.spec.containers[].resources + +`spec.podTemplate.spec.containers[].resources` is an optional field. This can be used to request compute resources required by containers of the database pods. To learn more, visit [here](http://kubernetes.io/docs/user-guide/compute-resources/). + +#### spec.podTemplate.spec.imagePullSecrets + +`KubeDB` provides the flexibility of deploying Ignite server from a private Docker registry. To learn how to deploym Ignite from a private registry, please visit [here](/docs/guides/ignite/private-registry/using-private-registry.md). + +#### spec.podTemplate.spec.nodeSelector + +`spec.nodeSelector` is an optional field that specifies a map of key-value pairs. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). To learn more, see [here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) . + +#### spec.podTemplate.spec.serviceAccountName + +`serviceAccountName` is an optional field supported by KubeDB Operator (version 0.13.0 and higher) that can be used to specify a custom service account to fine tune role based access control. + +If this field is left empty, the KubeDB operator will create a service account name matching Ignite crd name. Role and RoleBinding that provide necessary access permissions will also be generated automatically for this service account. + +If a service account name is given, but there's no existing service account by that name, the KubeDB operator will create one, and Role and RoleBinding that provide necessary access permissions will also be generated for this service account. 
+ +If a service account name is given, and there's an existing service account by that name, the KubeDB operator will use that existing service account. Since this service account is not managed by KubeDB, users are responsible for providing necessary access permissions manually. Follow the guide [here](/docs/guides/ignite/custom-rbac/using-custom-rbac.md) to grant necessary permissions in this scenario. + +#### spec.podTemplate.spec.resources + +`spec.resources` is an optional field. This can be used to request compute resources required by the database pods. To learn more, visit [here](http://kubernetes.io/docs/user-guide/compute-resources/). + +### spec.serviceTemplates + +You can also provide a template for the services created by KubeDB operator for Ignite server through `spec.serviceTemplates`. This will allow you to set the type and other properties of the services. + +KubeDB allows following fields to set in `spec.serviceTemplates`: + +- `alias` represents the identifier of the service. It has the following possible value: + - `primary` is used for the primary service identification. + - `standby` is used for the secondary service identification. + - `stats` is used for the exporter service identification. + +- metadata: + - annotations +- spec: + - type + - ports + - clusterIP + - externalIPs + - loadBalancerIP + - loadBalancerSourceRanges + - externalTrafficPolicy + - healthCheckNodePort + - sessionAffinityConfig + +See [here](https://github.com/kmodules/offshoot-api/blob/kubernetes-1.16.3/api/v1/types.go#L163) to understand these fields in details. + + +### spec.deletionPolicy + +`deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `Ignite` crd or which resources KubeDB should keep or delete when you delete `Ignite` crd. KubeDB provides following four termination policies: + +- DoNotTerminate +- Delete (`Default`) +- WipeOut + +When `deletionPolicy` is `DoNotTerminate`, KubeDB takes advantage of `ValidationWebhook` feature in Kubernetes 1.9.0 or later clusters to implement `DoNotTerminate` feature. If admission webhook is enabled, `DoNotTerminate` prevents users from deleting the database as long as the `spec.deletionPolicy` is set to `DoNotTerminate`. + +Following table show what KubeDB does when you delete Ignite crd for different termination policies, + +| Behavior | DoNotTerminate | Delete | WipeOut | +| ---------------------------| :------------: | :------: | :------: | +| 1. Block Delete operation | ✓ | ✗ | ✗ | +| 2. Delete PetSet | ✗ | ✓ | ✓ | +| 3. Delete Services | ✗ | ✓ | ✓ | +| 4. Delete Secrets | ✗ | ✗ | ✓ | + +If you don't specify `spec.deletionPolicy` KubeDB uses `Delete` termination policy by default. + +## spec.helathChecker +It defines the attributes for the health checker. +- spec.healthChecker.periodSeconds specifies how often to perform the health check. +- spec.healthChecker.timeoutSeconds specifies the number of seconds after which the probe times out. +- spec.healthChecker.failureThreshold specifies minimum consecutive failures for the healthChecker to be considered failed. +- spec.healthChecker.disableWriteCheck specifies whether to disable the writeCheck or not. + +Know details about KubeDB Health checking from this blog post. + +## Next Steps + +- Learn how to use KubeDB to run a Ignite server [here](/docs/guides/ignite/README.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). 
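For orientation, a `spec.serviceTemplates` entry built from the fields listed above might look like the following sketch. The annotation key and the port are illustrative assumptions (10800 is the usual Ignite thin-client port, but use whatever your setup exposes):

```yaml
spec:
  serviceTemplates:
    - alias: primary
      metadata:
        annotations:
          example.com/team: data-platform   # hypothetical annotation
      spec:
        type: LoadBalancer
        ports:
          - name: thin-client
            port: 10800
```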
diff --git a/docs/guides/ignite/concepts/opsrequest.md b/docs/guides/ignite/concepts/opsrequest.md index 3b4d147a68..51f03bd5bf 100644 --- a/docs/guides/ignite/concepts/opsrequest.md +++ b/docs/guides/ignite/concepts/opsrequest.md @@ -79,8 +79,8 @@ spec: databaseRef: name: ig-cluster configuration: - configSecret: - name: new-custom-config + configuration: + secretName: new-custom-config timeout: 5m apply: IfReady ``` diff --git a/docs/guides/ignite/concepts/opsrequest.md.bak b/docs/guides/ignite/concepts/opsrequest.md.bak new file mode 100644 index 0000000000..3b4d147a68 --- /dev/null +++ b/docs/guides/ignite/concepts/opsrequest.md.bak @@ -0,0 +1,198 @@ +--- +title: IgniteOpsRequests CRD +menu: + docs_{{ .version }}: + identifier: ig-opsrequest-concepts + name: IgniteOpsRequest + parent: ig-concepts-ignite + weight: 15 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + + +> New to KubeDB? Please start [here](/docs/README.md). + +# IgniteOpsRequest + +## What is IgniteOpsRequest + +`IgniteOpsRequest` is a Kubernetes `Custom Resource Definitions` (CRD). It provides a declarative configuration for [Ignite](https://ignite.apache.org/) administrative operations like database version updating, horizontal scaling, vertical scaling etc. in a Kubernetes native way. + +## IgniteOpsRequest CRD Specifications + +Like any official Kubernetes resource, a `IgniteOpsRequest` has `TypeMeta`, `ObjectMeta`, `Spec` and `Status` sections. + +Here, some sample `IgniteOpsRequest` CRs for different administrative operations is given below: + +Sample `IgniteOpsRequest` for Horizontal Scaling of Database Cluster: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: IgniteOpsRequest +metadata: + name: ignite-horizontal-scale-up + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: ignite + horizontalScaling: + node: 3 +``` + +Sample `IgniteOpsRequest` for Vertical Scaling of Database: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: IgniteOpsRequest +metadata: + name: igops-vscale + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: ig + verticalScaling: + node: + resources: + requests: + memory: "2Gi" + cpu: "1" + limits: + memory: "2Gi" + cpu: "1" + timeout: 5m + apply: IfReady +``` + +Sample `IgniteOpsRequest` Objects for Reconfiguring Ignite database with config: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: IgniteOpsRequest +metadata: + name: reconfigure-ig-cluster + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: ig-cluster + configuration: + configSecret: + name: new-custom-config + timeout: 5m + apply: IfReady +``` + +Sample `IgniteOpsRequest` Objects for Volume Expansion of Ignite: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: IgniteOpsRequest +metadata: + name: igops-volume-exp-standalone + namespace: demo +spec: + type: VolumeExpansion + databaseRef: + name: ig-standalone + volumeExpansion: + node: 2Gi + mode: Online +``` + +Sample `IgniteOpsRequest` Objects for Reconfiguring TLS of the database: + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: IgniteOpsRequest +metadata: + name: igops-add-tls + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: ig + tls: + issuerRef: + name: ig-issuer + kind: Issuer + apiGroup: "cert-manager.io" + certificates: + - alias: client + subject: + organizations: + - ignite + organizationalUnits: + - client + timeout: 5m + apply: IfReady +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: IgniteOpsRequest +metadata: + name: igops-rotate + namespace: 
demo +spec: + type: ReconfigureTLS + databaseRef: + name: ig + tls: + rotateCertificates: true +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: IgniteOpsRequest +metadata: + name: ig-change-issuer + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: ig + tls: + issuerRef: + name: ig-new-issuer + kind: Issuer + apiGroup: "cert-manager.io" +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: IgniteOpsRequest +metadata: + name: ig-ops-remove + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: ig + tls: + remove: true +``` + +Here, we are going to describe the various sections of a `IgniteOpsRequest` crd. + +A `IgniteOpsRequest` object has the following fields in the `spec` section. + +### spec.databaseRef + +`spec.databaseRef` is a required field that point to the [Ignite](/docs/guides/ignite/concepts/ignite.md) object for which the administrative operations will be performed. This field consists of the following sub-field: + +- **spec.databaseRef.name :** specifies the name of the [Ignite](/docs/guides/ignite/concepts/ignite.md) object. + +### spec.type + +`spec.type` specifies the kind of operation that will be applied to the database. Currently, the following types of operations are allowed in `IgniteOpsRequest`. + +- `UpdateVersion` +- `HorizontalScaling` +- `VerticalScaling` +- `VolumeExpansion` +- `Reconfigure` +- `ReconfigureTLS` +- `Restart` \ No newline at end of file diff --git a/docs/guides/ignite/custom-configuration/using-config-file.md b/docs/guides/ignite/custom-configuration/using-config-file.md index 702e9eff1c..f24934a8cb 100644 --- a/docs/guides/ignite/custom-configuration/using-config-file.md +++ b/docs/guides/ignite/custom-configuration/using-config-file.md @@ -41,7 +41,7 @@ Ignite does not allow to configuration via any file. However, configuration para To know more about configuring Ignite server see [here](https://ignite.apache.org/docs/ignite3/latest/administrators-guide/config/node-config). -At first, you have to create a config file named `node-configuration.xml` with your desired configuration. Then you have to put this file into a [volume](https://kubernetes.io/docs/concepts/storage/volumes/). You have to specify this volume in `spec.configSecret` section while creating Ignite crd. KubeDB will mount this volume into `/usr/config` directory of the database pod. +At first, you have to create a config file named `node-configuration.xml` with your desired configuration. Then you have to put this file into a [volume](https://kubernetes.io/docs/concepts/storage/volumes/). You have to specify this volume in `spec.configuration` section while creating Ignite crd. KubeDB will mount this volume into `/usr/config` directory of the database pod. In this tutorial, we will enable Ignite's authentication via secret. @@ -94,7 +94,7 @@ metadata: type: Opaque ``` -Now, create Ignite crd specifying `spec.configSecret` field. +Now, create Ignite crd specifying `spec.configuration` field. 
```bash $ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ignite/configuration/custom-ignite.yaml @@ -112,8 +112,8 @@ metadata: spec: replicas: 3 version: 2.17.0 - configSecret: - name: ignite-configuration + configuration: + secretName: ignite-configuration storage: storageClassName: "standard" accessModes: diff --git a/docs/guides/ignite/custom-configuration/using-config-file.md.bak b/docs/guides/ignite/custom-configuration/using-config-file.md.bak new file mode 100644 index 0000000000..1abca6d4b6 --- /dev/null +++ b/docs/guides/ignite/custom-configuration/using-config-file.md.bak @@ -0,0 +1,176 @@ +--- +title: Run Ignite with Custom Configuration +menu: + docs_{{ .version }}: + identifier: ig-using-config-file-configuration + name: Customize Configurations + parent: ig-custom-configuration + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Using Custom Configuration File + +KubeDB supports providing custom configuration for Ignite. This tutorial will show you how to use KubeDB to run Ignite with custom configuration. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + + ```bash + $ kubectl create ns demo + namespace/demo created + + $ kubectl get ns demo + NAME STATUS AGE + demo Active 5s + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/ignite](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/ignite) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +Ignite does not allow to configuration via any file. However, configuration parameters can be set as arguments while starting the ignite docker image. To keep similarity with other KubeDB supported databases which support configuration through a config file, KubeDB has added an additional executable script on top of the official ignite docker image. This script parses the configuration file then set them as arguments of ignite binary. + +To know more about configuring Ignite server see [here](https://ignite.apache.org/docs/ignite3/latest/administrators-guide/config/node-config). + +At first, you have to create a config file named `node-configuration.xml` with your desired configuration. Then you have to put this file into a [volume](https://kubernetes.io/docs/concepts/storage/volumes/). You have to specify this volume in `spec.configSecret` section while creating Ignite crd. KubeDB will mount this volume into `/usr/config` directory of the database pod. + +In this tutorial, we will enable Ignite's authentication via secret. + +Create a secret with custom configuration file: +```yaml +apiVersion: v1 +stringData: + node-configuration.xml: | + + + + + + + + +kind: Secret +metadata: + name: ignite-configuration + namespace: demo + resourceVersion: "4505" +``` +Here, `authenticationEnabled's` default value is `false`. In this secret, we make the value `true`. 
+ +```bash + $ kubectl apply -f ignite-configuration.yaml +secret/ignite-configuration created +``` + +Let's get the ignite-configuration `secret` with custom configuration: + +```yaml +$ kubectl get secret -n demo ignite-configuration -o yaml +apiVersion: v1 +data: + node-configuration.xml: PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPGJlYW5zIHhtbG5zPSJodHRwOi8vd3d3LnNwcmluZ2ZyYW1ld29yay5vcmcvc2NoZW1hL2JlYW5zIgogICB4bWxuczp4c2k9Imh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hLWluc3RhbmNlIgogICB4c2k6c2NoZW1hTG9jYXRpb249Imh0dHA6Ly93d3cuc3ByaW5nZnJhbWV3b3JrLm9yZy9zY2hlbWEvYmVhbnMKICAgICAgICAgICAgICAgICAgICAgICBodHRwOi8vd3d3LnNwcmluZ2ZyYW1ld29yay5vcmcvc2NoZW1hL2JlYW5zL3NwcmluZy1iZWFucy0zLjAueHNkIj4KPCEtLSBZb3VyIElnbml0ZSBDb25maWd1cmF0aW9uIC0tPgo8YmVhbiBjbGFzcz0ib3JnLmFwYWNoZS5pZ25pdGUuY29uZmlndXJhdGlvbi5JZ25pdGVDb25maWd1cmF0aW9uIj4KCiAgICA8cHJvcGVydHkgbmFtZT0iYXV0aGVudGljYXRpb25FbmFibGVkIiB2YWx1ZT0idHJ1ZSIvPgoKPC9iZWFuPgo8L2JlYW5zPgo= +kind: Secret +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","kind":"Secret","metadata":{"annotations":{},"name":"ignite-configuration","namespace":"demo","resourceVersion":"4505"},"stringData":{"node-configuration.xml":"\u003c?xml version=\"1.0\" encoding=\"UTF-8\"?\u003e\n\u003cbeans xmlns=\"http://www.springframework.org/schema/beans\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.springframework.org/schema/beans\n http://www.springframework.org/schema/beans/spring-beans-3.0.xsd\"\u003e\n\u003c!-- Your Ignite Configuration --\u003e\n\u003cbean class=\"org.apache.ignite.configuration.IgniteConfiguration\"\u003e\n\n \u003cproperty name=\"authenticationEnabled\" value=\"true\"/\u003e\n\n\u003c/bean\u003e\n\u003c/beans\u003e\n"}} + creationTimestamp: "2025-06-02T09:37:05Z" + name: ignite-configuration + namespace: demo + resourceVersion: "1391127" + uid: 57f2a44c-d6b1-4571-bb91-fd68b3048306 +type: Opaque +``` + +Now, create Ignite crd specifying `spec.configSecret` field. + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ignite/configuration/custom-ignite.yaml +ignite.kubedb.com/custom-ignite created +``` + +Below is the YAML for the Ignite crd we just created. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Ignite +metadata: + name: custom-ignite + namespace: demo +spec: + replicas: 3 + version: 2.17.0 + configuration: + secretName: ignite-configuration + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + deletionPolicy: WipeOut +``` + +Now, wait a few minutes. KubeDB operator will create necessary petset, services etc. If everything goes well, we will see that a pod with the name `custom-ignite-0` has been created. + +Check if the database is ready + +```bash +$ kubectl get ig -n demo +NAME VERSION STATUS AGE +custom-ignite 2.17.0 Ready 17m +``` + +Now, we will check if the database has started with the custom configuration we have provided. +We will connect to `custom-ignite-0` pod: + +```bash +$ kubectl exec -it -n demo ignite-quickstart-0 -c ignite -- bash +[ignite@ignite-quickstart-0 config]$ cat /ignite/config/node-configuration.xml + + + + +... +... + + + +``` + +Here, we can see `authenticationEnabled's` value is `true`. 
+ +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl patch -n demo ig/custom-ignite -p '{"spec":{"deletionPolicy":"WipeOut"}}' --type="merge" +kubectl delete -n demo ig/custom-ignite + +kubectl delete -n demo secret ignite-configuration +kubectl delete ns demo +``` + +If you would like to uninstall KubeDB operator, please follow the steps [here](/docs/setup/README.md). + +## Next Steps + +- Learn how to use KubeDB to run a Ignite server [here](/docs/guides/ignite/README.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/ignite/reconfigure/reconfigure.md b/docs/guides/ignite/reconfigure/reconfigure.md index 2e1de5e549..4b9d71b9df 100644 --- a/docs/guides/ignite/reconfigure/reconfigure.md +++ b/docs/guides/ignite/reconfigure/reconfigure.md @@ -53,7 +53,7 @@ $ kubectl create secret generic -n demo ig-custom-config --from-file=./node-conf secret/ig-custom-config created ``` -In this section, we are going to create a Ignite object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `Ignite` CR that we are going to create, +In this section, we are going to create a Ignite object specifying `spec.configuration` field to apply this custom configuration. Below is the YAML of the `Ignite` CR that we are going to create, ```yaml apiVersion: kubedb.com/v1alpha2 @@ -71,8 +71,8 @@ spec: resources: requests: storage: 1Gi - configSecret: - name: ig-custom-config + configuration: + secretName: ig-custom-config ``` Let's create the `Ignite` CR we have shown above, @@ -125,8 +125,8 @@ spec: databaseRef: name: ig-cluster configuration: - configSecret: - name: new-custom-config + configuration: + secretName: new-custom-config timeout: 5m apply: IfReady ``` @@ -135,7 +135,7 @@ Here, - `spec.databaseRef.name` specifies that we are reconfiguring `igps-reconfigure` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configuration.configSecret.name` specifies the name of the new secret. +- `spec.configuration.configuration.secretName` specifies the name of the new secret. - Have a look [here](/docs/guides/ignite/concepts/opsrequest.md#specconfiguration) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. Let's create the `IgniteOpsRequest` CR we have shown above, diff --git a/docs/guides/ignite/reconfigure/reconfigure.md.bak b/docs/guides/ignite/reconfigure/reconfigure.md.bak new file mode 100644 index 0000000000..646dea7558 --- /dev/null +++ b/docs/guides/ignite/reconfigure/reconfigure.md.bak @@ -0,0 +1,300 @@ +--- +title: Reconfigure Ignite Cluster +menu: + docs_{{ .version }}: + identifier: ig-reconfigure-cluster + name: Reconfigure Configurations + parent: ig-reconfigure + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure Ignite Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a Ignite cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). 
+ +- You should be familiar with the following `KubeDB` concepts: + - [Ignite](/docs/guides/ignite/concepts/ignite.md) + - [IgniteOpsRequest](/docs/guides/ignite/concepts/opsrequest.md) + - [Reconfigure Overview](/docs/guides/ignite/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [examples](/docs/examples/ignite) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `Ignite` cluster using a supported version by `KubeDB` operator. Then we are going to apply `IgniteOpsRequest` to reconfigure its configuration. + +### Prepare Ignite Database + +Now, we are going to deploy a `Ignite` cluster with version `2.17.0`. + +### Deploy Ignite + +At first, we will create `ignite.conf` file containing required configuration settings. + +Now, we will create a secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo ig-custom-config --from-file=./node-configuration.xml +secret/ig-custom-config created +``` + +In this section, we are going to create a Ignite object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `Ignite` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: Ignite +metadata: + name: ig-cluster + namespace: demo +spec: + version: "2.17.0" + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + configuration: + secretName: ig-custom-config +``` + +Let's create the `Ignite` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ignite/cluster/ig-custom-config.yaml +ignite.kubedb.com/ig-cluster created +``` + +Now, wait until `ig-cluster` has status `Ready`. i.e, + +```bash +$ kubectl get ig -n demo +NAME TYPE VERSION STATUS AGE +ig-cluster kubedb.com/v1alpha2 2.17.0 Ready 79m +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +First we need to get the username and password to connect to a Ignite instance, +```bash +$ kubectl get secrets -n demo ig-cluster-admin-cred -o jsonpath='{.data.\username}' | base64 -d +admin + +$ kubectl get secrets -n demo ig-cluster-admin-cred -o jsonpath='{.data.\password}' | base64 -d +m6lXjZugrC4VEpB8 +``` + +### Reconfigure using new secret + +Now, we will create a new secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo new-custom-config --from-file=./ignite.conf +secret/new-custom-config created +``` + +#### Create IgniteOpsRequest + +Now, we will use this secret to replace the previous secret using a `IgniteOpsRequest` CR. The `IgniteOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: IgniteOpsRequest +metadata: + name: reconfigure-ig-cluster + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: ig-cluster + configuration: + configuration: + secretName: new-custom-config + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `igps-reconfigure` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.configuration.secretName` specifies the name of the new secret. 
+- Have a look [here](/docs/guides/ignite/concepts/opsrequest.md#specconfiguration) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. + +Let's create the `IgniteOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ignite/opsrequests/ig-reconfigure-with-secret.yaml +igniteopsrequest.ops.kubedb.com/reconfigure-ig-cluster created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `Ignite` object. + +Let's wait for `IgniteOpsRequest` to be `Successful`. Run the following command to watch `IgniteOpsRequest` CR, + +```bash +$ watch kubectl get igniteopsrequest -n demo +Every 2.0s: kubectl get igniteopsrequest -n demo +NAME TYPE STATUS AGE +reconfigure-ig-cluster Reconfigure Successful 3m +``` + +We can see from the above output that the `IgniteOpsRequest` has succeeded. If we describe the `IgniteOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe igniteopsrequest -n demo reconfigure-ig-cluster +Name: reconfigure-ig-cluster +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: IgniteOpsRequest +Metadata: + Creation Timestamp: 2024-09-10T11:09:16Z + Generation: 1 + Resource Version: 70651 + UID: 5c99031f-6604-48ac-b700-96f896c5d0b3 +Spec: + Apply: IfReady + Configuration: + Config Secret: + Name: new-custom-config + Database Ref: + Name: ig-cluster + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-09-10T11:09:16Z + Message: Ignite ops-request has started to reconfigure Ignite nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-09-10T11:09:24Z + Message: successfully reconciled the Ignite with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-09-10T11:09:29Z + Message: get pod; ConditionStatus:True; PodName:ig-cluster-0 + Observed Generation: 1 + Status: True + Type: GetPod--ig-cluster-0 + Last Transition Time: 2024-09-10T11:09:29Z + Message: evict pod; ConditionStatus:True; PodName:ig-cluster-0 + Observed Generation: 1 + Status: True + Type: EvictPod--ig-cluster-0 + Last Transition Time: 2024-09-10T11:09:34Z + Message: running pod; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: RunningPod + Last Transition Time: 2024-09-10T11:09:49Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-09-10T11:09:50Z + Message: Successfully completed reconfigure Ignite + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 6m13s KubeDB Ops-manager Operator Start processing for IgniteOpsRequest: demo/reconfigure-ig-cluster + Normal Starting 6m13s KubeDB Ops-manager Operator Pausing Ignite databse: demo/ig-cluster + Normal Successful 6m13s KubeDB Ops-manager Operator Successfully paused Ignite database: demo/ig-cluster for IgniteOpsRequest: reconfigure + Normal UpdatePetSets 6m5s KubeDB Ops-manager Operator successfully reconciled the Ignite with new configure + Warning get pod; ConditionStatus:True; PodName:ig-cluster-0 6m 
KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:ig-cluster-0 + Warning evict pod; ConditionStatus:True; PodName:ig-cluster-0 6m KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:ig-cluster-0 + Warning running pod; ConditionStatus:False 5m55s KubeDB Ops-manager Operator running pod; ConditionStatus:False + Normal RestartNodes 5m40s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 5m40s KubeDB Ops-manager Operator Resuming Ignite database: demo/ig-cluster + Normal Successful 5m39s KubeDB Ops-manager Operator Successfully resumed Ignite database: demo/ig-cluster for IgniteOpsRequest: reconfigure-ig-cluster +``` + +### Reconfigure using apply config + +Let's say you are in a rush or, don't want to create a secret for updating configuration. You can directly do that using the following manifest. + +#### Create IgniteOpsRequest + +Now, we will use the new configuration in the `configuration.applyConfig` field in the `IgniteOpsRequest` CR. The `IgniteOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: IgniteOpsRequest +metadata: + name: reconfigure-apply + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: ig-cluster + configuration: + applyConfig: + node-configuration.xml: | + + + + + + + + + + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `ig-cluster` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.applyConfig` specifies the new configuration that will be merged in the existing secret. + +Let's create the `IgniteOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/ignite/opsrequests/ignite-reconfigure-apply.yaml +igniteopsrequest.ops.kubedb.com/reconfigure-apply created +``` + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete ig -n demo ig-cluster +kubectl delete igniteopsrequest -n demo reconfigure-apply reconfigure-ig-cluster +``` \ No newline at end of file diff --git a/docs/guides/kafka/concepts/connectcluster.md b/docs/guides/kafka/concepts/connectcluster.md index 333dbadbc3..90eaa0ea21 100644 --- a/docs/guides/kafka/concepts/connectcluster.md +++ b/docs/guides/kafka/concepts/connectcluster.md @@ -52,8 +52,8 @@ spec: secretName: connectcluster-server-cert - alias: client secretName: connectcluster-client-cert - configSecret: - name: custom-connectcluster-config + configuration: + secretName: custom-connectcluster-config configuration: secretName: custom-connectcluster-config inline: @@ -138,15 +138,15 @@ kafkaRef: namespace: ``` -### spec.configSecret +### spec.configuration -`spec.configSecret` is an optional field that specifies the name of the secret containing the custom configuration for the ConnectCluster. The secret should contain a key `config.properties` which contains the custom configuration for the ConnectCluster. The default value of this field is `nil`. +`spec.configuration` is an optional field that specifies the name of the secret containing the custom configuration for the ConnectCluster. The secret should contain a key `config.properties` which contains the custom configuration for the ConnectCluster. The default value of this field is `nil`. 
```yaml -configSecret: - name: +configuration: + secretName: ``` -> **Note**: Use `.spec.configuration.secretName` to specify the name of the secret instead of `.spec.configSecret.name`. The field `.spec.configSecret` is deprecated and will be removed in future releases. If you still use `.spec.configSecret`, KubeDB will copy `.spec.configSecret.name` to `.spec.configuration.secretName` internally. +> **Note**: Use `.spec.configuration.secretName` to specify the name of the secret instead of `.spec.configSecret.name`. The field `.spec.configSecret` is deprecated and will be removed in future releases. If you still use `.spec.configSecret`, KubeDB will copy `.spec.configSecret.name` to `.spec.configuration.secretName` internally. ### spec.configuration `spec.configuration` is an optional field that specifies custom configuration for Kafka Connect Cluster. It has the following fields: diff --git a/docs/guides/kafka/concepts/connectcluster.md.bak b/docs/guides/kafka/concepts/connectcluster.md.bak new file mode 100644 index 0000000000..8cc9451dd2 --- /dev/null +++ b/docs/guides/kafka/concepts/connectcluster.md.bak @@ -0,0 +1,390 @@ +--- +title: ConnectCluster CRD +menu: + docs_{{ .version }}: + identifier: kf-connectcluster-concepts + name: ConnectCluster + parent: kf-concepts-kafka + weight: 25 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# ConnectCluster + +## What is ConnectCluster + +`ConnectCluster` is a Kubernetes `Custom Resource Definitions` (CRD). It provides declarative configuration for [ConnectCluster](https://kafka.apache.org/) in a Kubernetes native way. You only need to describe the desired configuration in a `ConnectCluster` object, and the KubeDB operator will create Kubernetes objects in the desired state for you. + +## ConnectCluster Spec + +As with all other Kubernetes objects, a ConnectCluster needs `apiVersion`, `kind`, and `metadata` fields. It also needs a `.spec` section. Below is an example ConnectCluster object.
+ +```yaml +apiVersion: kafka.kubedb.com/v1alpha1 +kind: ConnectCluster +metadata: + name: connectcluster + namespace: demo +spec: + version: 3.9.0 + healthChecker: + failureThreshold: 3 + periodSeconds: 20 + timeoutSeconds: 10 + disableSecurity: false + authSecret: + kind: Secret + name: connectcluster-auth + enableSSL: true + keystoreCredSecret: + kind: Secret + name: connectcluster-keystore-cred + tls: + issuerRef: + apiGroup: cert-manager.io + kind: Issuer + name: connectcluster-ca-issuer + certificates: + - alias: server + secretName: connectcluster-server-cert + - alias: client + secretName: connectcluster-client-cert + configuration: + secretName: custom-connectcluster-config + configuration: + secretName: custom-connectcluster-config + inline: + config.properties: | + key.converter=org.apache.kafka.connect.json.JsonConverter + value.converter=org.apache.kafka.connect.json.JsonConverter + key.converter.schemas.enable=true + value.converter.schemas.enable=true + offset.storage.topic=connect-cluster-offsets + config.storage.topic=connect-cluster-configs + status.storage.topic=connect-cluster-status + offset.flush.interval.ms=10000 + replicas: 3 + connectorPlugins: + - gcs-0.13.0 + - mongodb-1.14.1 + - mysql-3.0.5.final + - postgres-3.0.5.final + - s3-2.15.0 + - jdbc-3.0.5.final + kafkaRef: + name: kafka + namespace: demo + podTemplate: + metadata: + annotations: + passMe: ToDatabasePod + labels: + thisLabel: willGoToPod + controller: + annotations: + passMe: ToPetSet + labels: + thisLabel: willGoToPetSet + monitor: + agent: prometheus.io/operator + prometheus: + exporter: + port: 56790 + serviceMonitor: + labels: + release: prometheus + interval: 10s + deletionPolicy: WipeOut +``` + +### spec.version + +`spec.version` is a required field specifying the name of the [KafkaVersion](/docs/guides/kafka/concepts/kafkaversion.md) CR where the docker images are specified. Currently, when you install KubeDB, it creates the following `KafkaVersion` resources, + +- `3.5.2` +- `3.6.1` +- `3.7.2` +- `3.8.1` +- `3.9.0` + +### spec.replicas + +`spec.replicas` the number of worker nodes in ConnectCluster. + +KubeDB uses `PodDisruptionBudget` to ensure that majority of these replicas are available during [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions) so that quorum is maintained. + +### spec.disableSecurity + +`spec.disableSecurity` is an optional field that specifies whether to disable all kind of security features like basic authentication and tls. The default value of this field is `false`. + +### spec.connectorPlugins + +`spec.connectorPlugins` is an optional field that specifies the list of connector plugins to be installed in the ConnectCluster worker node. The field takes a list of strings where each string represents the name of the KafkaConnectorVersion CR. To learn more about KafkaConnectorVersion CR, visit [here](/docs/guides/kafka/concepts/kafkaconnectorversion.md). +```yaml +connectorPlugins: + - + - +``` + +### spec.kafkaRef + +`spec.kafkaRef` is a required field that specifies the name and namespace of the appbinding for `Kafka` object that the `ConnectCluster` object is associated with. +```yaml +kafkaRef: + name: + namespace: +``` + +### spec.configSecret + +`spec.configSecret` is an optional field that specifies the name of the secret containing the custom configuration for the ConnectCluster. 
The secret should contain a key `config.properties` which contains the custom configuration for the ConnectCluster. The default value of this field is `nil`. +```yaml +configuration: + secretName: +``` + +> **Note**: Use `.spec.configuration.secretName` to specify the name of the secret instead of `.spec.configuration.secretName`. The field `.spec.configSecret` is deprecated and will be removed in future releases. If you still use `.spec.configSecret`, KubeDB will copy `.spec.configuration.secretName` to `.spec.configuration.secretName` internally. + +### spec.configuration +`spec.configuration` is an optional field that specifies custom configuration for Kafka Connect Cluster. It has the following fields: +- `configuration.secretName` is a optional field that specifies the name of the secret that holds custom configuration files for Kafka Connect Cluster. +- `configuration.inline` is an optional field that allows you to provide custom configuration directly in the ConnectCluster object. It has the following possible keys: + - `config.properties` - is used to provide custom configuration for Kafka Connect Cluster. + +```yaml +spec: + configuration: + secretName: +``` +or +```yaml +spec: + configuration: + inline: + config.properties: | + key.converter=org.apache.kafka.connect.json.JsonConverter + value.converter=org.apache.kafka.connect.json.JsonConverter + ..... + .... +``` + +### spec.authSecret + +`spec.authSecret` is an optional field that points to a Secret used to hold credentials for `ConnectCluster` username and password. If not set, KubeDB operator creates a new Secret `{connectcluster-object-name}-connect-cred` for storing the username and password for each ConnectCluster object. + +We can use this field in 3 mode. + +1. Using an external secret. In this case, You need to create an auth secret first with required fields, then specify the secret name when creating the ConnectCluster object using `spec.authSecret.name` & set `spec.authSecret.externallyManaged` to true. +```yaml +authSecret: + name: + externallyManaged: true +``` + +2. Specifying the secret name only. In this case, You need to specify the secret name when creating the Kafka object using `spec.authSecret.name`. `externallyManaged` is by default false. +```yaml +authSecret: + name: +``` + +3. Let KubeDB do everything for you. In this case, no work for you. + +AuthSecret contains a `user` key and a `password` key which contains the `username` and `password` respectively for ConnectCluster user. + +Example: + +```bash +$ kubectl create secret generic kcc-auth -n demo \ +--from-literal=username=jhon-doe \ +--from-literal=password=6q8u_2jMOW-OOZXk +secret "kcc-auth" created +``` + +```yaml +apiVersion: v1 +data: + password: NnE4dV8yak1PVy1PT1pYaw== + username: amhvbi1kb2U= +kind: Secret +metadata: + name: kcc-auth + namespace: demo +type: Opaque +``` + +Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator (version 0.13.0 and higher). + +### spec.enableSSL + +`spec.enableSSL` is an `optional` field that specifies whether to enable TLS to HTTP layer. The default value of this field is `false`. + +```yaml +spec: + enableSSL: true +``` + +### spec.keystoreCredSecret + +`spec.keystoreCredSecret` is an `optional` field that specifies the name of the secret containing the keystore credentials for the ConnectCluster. The secret should contain three keys `ssl.keystore.password`, `ssl.key.password` and `ssl.keystore.password`. 
The default value of this field is `nil`. + +```yaml +spec: + keystoreCredSecret: + name: +``` + +### spec.tls + +`spec.tls` specifies the TLS/SSL configurations. The KubeDB operator supports TLS management by using the [cert-manager](https://cert-manager.io/). Currently, the operator only supports the `PKCS#8` encoded certificates. + +```yaml +spec: + tls: + issuerRef: + apiGroup: "cert-manager.io" + kind: Issuer + name: kcc-issuer + certificates: + - alias: server + privateKey: + encoding: PKCS8 + secretName: kcc-client-cert + subject: + organizations: + - kubedb + - alias: http + privateKey: + encoding: PKCS8 + secretName: kcc-server-cert + subject: + organizations: + - kubedb +``` + +The `spec.tls` contains the following fields: + +- `tls.issuerRef` - is an `optional` field that references to the `Issuer` or `ClusterIssuer` custom resource object of [cert-manager](https://cert-manager.io/docs/concepts/issuer/). It is used to generate the necessary certificate secrets for ConnectCluster. If the `issuerRef` is not specified, the operator creates a self-signed CA and also creates necessary certificate (valid: 365 days) secrets using that CA. + - `apiGroup` - is the group name of the resource that is being referenced. Currently, the only supported value is `cert-manager.io`. + - `kind` - is the type of resource that is being referenced. The supported values are `Issuer` and `ClusterIssuer`. + - `name` - is the name of the resource ( `Issuer` or `ClusterIssuer` ) that is being referenced. + +- `tls.certificates` - is an `optional` field that specifies a list of certificate configurations used to configure the certificates. It has the following fields: + - `alias` - represents the identifier of the certificate. It has the following possible value: + - `server` - is used for the server certificate configuration. + - `client` - is used for the client certificate configuration. + + - `secretName` - ( `string` | `"-alias-cert"` ) - specifies the k8s secret name that holds the certificates. + + - `subject` - specifies an `X.509` distinguished name (DN). It has the following configurable fields: + - `organizations` ( `[]string` | `nil` ) - is a list of organization names. + - `organizationalUnits` ( `[]string` | `nil` ) - is a list of organization unit names. + - `countries` ( `[]string` | `nil` ) - is a list of country names (ie. Country Codes). + - `localities` ( `[]string` | `nil` ) - is a list of locality names. + - `provinces` ( `[]string` | `nil` ) - is a list of province names. + - `streetAddresses` ( `[]string` | `nil` ) - is a list of street addresses. + - `postalCodes` ( `[]string` | `nil` ) - is a list of postal codes. + - `serialNumber` ( `string` | `""` ) is a serial number. + + For more details, visit [here](https://pkg.go.dev/crypto/x509/pkix#Name). + + - `duration` ( `string` | `""` ) - is the period during which the certificate is valid. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300m"`, `"1.5h"` or `"20h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + - `renewBefore` ( `string` | `""` ) - is a specifiable time before expiration duration. + - `dnsNames` ( `[]string` | `nil` ) - is a list of subject alt names. + - `ipAddresses` ( `[]string` | `nil` ) - is a list of IP addresses. + - `uris` ( `[]string` | `nil` ) - is a list of URI Subject Alternative Names. + - `emailAddresses` ( `[]string` | `nil` ) - is a list of email Subject Alternative Names. 
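+
+For reference, a CA `Issuer` that `spec.tls.issuerRef` can point to may be created with [cert-manager](https://cert-manager.io/). The snippet below is only an illustrative sketch: the secret `connectcluster-ca` (holding your CA certificate and key) is a placeholder, while the issuer name matches the example object above.
+
+```bash
+$ kubectl create secret tls connectcluster-ca --cert=ca.crt --key=ca.key -n demo
+```
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+  name: connectcluster-ca-issuer
+  namespace: demo
+spec:
+  ca:
+    # CA key pair stored in the secret created above
+    secretName: connectcluster-ca
+```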
+ + + +### spec.monitor + +ConnectCluster managed by KubeDB can be monitored with Prometheus operator out-of-the-box. To learn more, +- [Monitor Apache with Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md) + +### spec.podTemplate + +KubeDB allows providing a template for pod through `spec.podTemplate`. KubeDB operator will pass the information provided in `spec.podTemplate` to the PetSet created for ConnectCluster. + +KubeDB accept following fields to set in `spec.podTemplate:` + +- metadata: + - annotations (pod's annotation) + - labels (pod's labels) +- controller: + - annotations (petset's annotation) + - labels (petset's labels) +- spec: + - volumes + - initContainers + - containers + - imagePullSecrets + - nodeSelector + - serviceAccountName + - schedulerName + - tolerations + - priorityClassName + - priority + - securityContext + +You can check out the full list [here](https://github.com/kmodules/offshoot-api/blob/39bf8b2/api/v2/types.go#L44-L279). Uses of some field of `spec.podTemplate` is described below, + +#### spec.podTemplate.spec.nodeSelector + +`spec.podTemplate.spec.nodeSelector` is an optional field that specifies a map of key-value pairs. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). To learn more, see [here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) . + +#### spec.podTemplate.spec.resources + +`spec.podTemplate.spec.resources` is an optional field. This can be used to request compute resources required by the database pods. To learn more, visit [here](http://kubernetes.io/docs/user-guide/compute-resources/). + +### spec.serviceTemplates + +You can also provide template for the services created by KubeDB operator for Kafka cluster through `spec.serviceTemplates`. This will allow you to set the type and other properties of the services. + +KubeDB allows following fields to set in `spec.serviceTemplates`: +- `alias` represents the identifier of the service. It has the following possible value: + - `stats` is used for the exporter service identification. +- metadata: + - labels + - annotations +- spec: + - type + - ports + - clusterIP + - externalIPs + - loadBalancerIP + - loadBalancerSourceRanges + - externalTrafficPolicy + - healthCheckNodePort + - sessionAffinityConfig + +See [here](https://github.com/kmodules/offshoot-api/blob/kubernetes-1.21.1/api/v1/types.go#L237) to understand these fields in detail. + +### spec.deletionPolicy + +`spec.deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `ConnectCluster` crd or which resources KubeDB should keep or delete when you delete `ConnectCluster` crd. KubeDB provides following four deletion policies: + +- Delete +- DoNotTerminate +- WipeOut + +When `deletionPolicy` is `DoNotTerminate`, KubeDB takes advantage of `ValidationWebhook` feature in Kubernetes 1.9.0 or later clusters to implement `DoNotTerminate` feature. If admission webhook is enabled, `DoNotTerminate` prevents users from deleting the database as long as the `spec.deletionPolicy` is set to `DoNotTerminate`. + +## spec.healthChecker +It defines the attributes for the health checker. +- `spec.healthChecker.periodSeconds` specifies how often to perform the health check. +- `spec.healthChecker.timeoutSeconds` specifies the number of seconds after which the probe times out. 
+- `spec.healthChecker.failureThreshold` specifies the minimum consecutive failures for the healthChecker to be considered failed.
+- `spec.healthChecker.disableWriteCheck` specifies whether to disable the writeCheck or not.
+
+Know details about KubeDB Health checking from this [blog post](https://appscode.com/blog/post/kubedb-health-checker/).
+
+## Next Steps
+
+- Learn how to use KubeDB to run an Apache Kafka Connect cluster [here](/docs/guides/kafka/README.md).
+- Monitor your ConnectCluster with KubeDB using [`out-of-the-box` Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md).
+- Detail concepts of [KafkaConnectorVersion object](/docs/guides/kafka/concepts/kafkaconnectorversion.md).
+- Learn to use KubeDB managed Kafka objects using [CLIs](/docs/guides/kafka/cli/cli.md).
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/kafka/concepts/connector.md b/docs/guides/kafka/concepts/connector.md
index c8e6b3128f..0fc268217c 100644
--- a/docs/guides/kafka/concepts/connector.md
+++ b/docs/guides/kafka/concepts/connector.md
@@ -29,8 +29,8 @@ metadata:
   name: mongodb-source-connector
   namespace: demo
 spec:
-  configSecret:
-    name: mongodb-source-config
+  configSecret:
+    name: mongodb-source-config
   configuration:
     secretName: mongodb-source-config
     inline:
@@ -45,16 +45,16 @@ spec:
   deletionPolicy: WipeOut
 ```
 
-### spec.configSecret
+### spec.configSecret
 
-`spec.configSecret` is a required field that specifies the name of the secret containing the configuration for the Connector. The secret should contain a key `config.properties` which contains the configuration for the Connector.
+`spec.configSecret` is a required field that specifies the name of the secret containing the configuration for the Connector. The secret should contain a key `config.properties` which contains the configuration for the Connector.
 ```yaml
 spec:
   configuration:
     secretName:
 ```
 
-> **Note**: Use `.spec.configuration.secretName` to specify the name of the secret instead of `.spec.configSecret.name`. The field `.spec.configSecret` is deprecated and will be removed in future releases. If you still use `.spec.configSecret`, KubeDB will copy `.spec.configSecret.name` to `.spec.configuration.secretName` internally.
+> **Note**: Use `.spec.configuration.secretName` to specify the name of the secret instead of `.spec.configSecret.name`. The field `.spec.configSecret` is deprecated and will be removed in future releases. If you still use `.spec.configSecret`, KubeDB will copy `.spec.configSecret.name` to `.spec.configuration.secretName` internally.
 
 ### spec.configuration
 
diff --git a/docs/guides/kafka/concepts/connector.md.bak b/docs/guides/kafka/concepts/connector.md.bak
new file mode 100644
index 0000000000..2a8da59858
--- /dev/null
+++ b/docs/guides/kafka/concepts/connector.md.bak
@@ -0,0 +1,107 @@
+---
+title: Connector CRD
+menu:
+  docs_{{ .version }}:
+    identifier: kf-connector-concepts
+    name: Connector
+    parent: kf-concepts-kafka
+    weight: 30
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Connector
+
+## What is Connector
+
+`Connector` is a Kubernetes `Custom Resource Definition` (CRD). It provides declarative configuration for [Connector](https://kafka.apache.org/) in a Kubernetes native way.
You only need to describe the desired configuration in a `Connector` object, and the KubeDB operator will create Kubernetes objects in the desired state for you. + +## Connector Spec + +As with all other Kubernetes objects, a Connector needs `apiVersion`, `kind`, and `metadata` fields. It also needs a `.spec` section. Below is an example Connector object. + +```yaml +apiVersion: kafka.kubedb.com/v1alpha1 +kind: Connector +metadata: + name: mongodb-source-connector + namespace: demo +spec: + configuration: + secretName: mongodb-source-config + configuration: + secretName: mongodb-source-config + inline: + config.properties: | + connector.class=com.mongodb.* + tasks.max=1 + topic.prefix=mongodb- + connection.uri=mongodb://mongo-user: + connectClusterRef: + name: connectcluster-quickstart + namespace: demo + deletionPolicy: WipeOut +``` + +### spec.configSecret + +`spec.configSecret` is a required field that specifies the name of the secret containing the configuration for the Connector. The secret should contain a key `config.properties` which contains the configuration for the Connector. +```yaml +spec: + configuration: + secretName: +``` + +> **Note**: Use `.spec.configuration.secretName` to specify the name of the secret instead of `.spec.configuration.secretName`. The field `.spec.configSecret` is deprecated and will be removed in future releases. If you still use `.spec.configSecret`, KubeDB will copy `.spec.configuration.secretName` to `.spec.configuration.secretName` internally. + +### spec.configuration + +`spec.configuration` is a required field that specifies the configuration for the Connector. It can either be specified inline or as a reference to a secret. +```yaml +spec: + configuration: + secretName: +``` +or +```yaml +spec: + configuration: + inline: + config.properties: | + connector.class=com.mongodb.* + tasks.max=1 + topic.prefix=mongodb- + connection.uri=mongodb://mongo-user:mongo-password@mongo-host:27017 +``` + +### spec.connectClusterRef + +`spec.connectClusterRef` is a required field that specifies the name and namespace of the `ConnectCluster` object that the `Connector` object is associated with. This is an appbinding reference for `ConnectCluster` object. +```yaml +spec: + connectClusterRef: + name: + namespace: +``` + +### spec.deletionPolicy + +`spec.deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `Connector` CR or which resources KubeDB should keep or delete when you delete `Connector` CR. KubeDB provides following three deletion policies: + +- Delete +- DoNotTerminate +- WipeOut + +When `deletionPolicy` is `DoNotTerminate`, KubeDB takes advantage of `ValidationWebhook` feature in Kubernetes 1.9.0 or later clusters to implement `DoNotTerminate` feature. If admission webhook is enabled, `DoNotTerminate` prevents users from deleting the resource as long as the `spec.deletionPolicy` is set to `DoNotTerminate`. + +Deletion policy `WipeOut` will delete the connector from the ConnectCluster when the Connector CR is deleted and `Delete` keep the connector after deleting the Connector CR. + +## Next Steps + +- Learn how to use KubeDB to run Apache Kafka cluster [here](/docs/guides/kafka/quickstart/kafka/index.md). +- Learn how to use KubeDB to run Apache Kafka Connect cluster [here](/docs/guides/kafka/connectcluster/quickstart.md). +- Detail concepts of [KafkaConnectorVersion object](/docs/guides/kafka/concepts/kafkaconnectorversion.md). +- Learn to use KubeDB managed Kafka objects using [CLIs](/docs/guides/kafka/cli/cli.md). 
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/kafka/concepts/kafka.md b/docs/guides/kafka/concepts/kafka.md
index b5d29d96bf..e8ce518435 100644
--- a/docs/guides/kafka/concepts/kafka.md
+++ b/docs/guides/kafka/concepts/kafka.md
@@ -33,8 +33,8 @@ spec:
   authSecret:
     kind: Secret
     name: kafka-admin-cred
-  configSecret:
-    name: kafka-custom-config
+  configSecret:
+    name: kafka-custom-config
   configuration:
     secretName: kafka-custom-config
     inline:
@@ -190,11 +190,11 @@ type: Opaque
 
 Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator (version 0.13.0 and higher).
 
-### spec.configSecret
+### spec.configSecret
 
-`spec.configSecret` is an optional field that points to a Secret used to hold custom Kafka configuration. If not set, KubeDB operator will use default configuration for Kafka. This is currently not in use. Use `.spec.configuration` to provide custom configuration instead. If you still provide this field, KubeDB operator will update `spec.configuration.secretName` with the provided secret name.
+`spec.configSecret` is an optional field that points to a Secret used to hold custom Kafka configuration. If not set, KubeDB operator will use default configuration for Kafka. This is currently not in use. Use `.spec.configuration` to provide custom configuration instead. If you still provide this field, KubeDB operator will update `spec.configuration.secretName` with the provided secret name.
 
-> **Note**: Use `.spec.configuration.secretName` to specify the name of the secret instead of `.spec.configSecret.name`. The field `.spec.configSecret` is deprecated and will be removed in future releases. If you still use `.spec.configSecret`, KubeDB will copy `.spec.configSecret.name` to `.spec.configuration.secretName` internally.
+> **Note**: Use `.spec.configuration.secretName` to specify the name of the secret instead of `.spec.configSecret.name`. The field `.spec.configSecret` is deprecated and will be removed in future releases. If you still use `.spec.configSecret`, KubeDB will copy `.spec.configSecret.name` to `.spec.configuration.secretName` internally.
 
 ### spec.configuration
 `spec.configuration` is an optional field that specifies custom configuration for Kafka cluster. It has the following fields:
diff --git a/docs/guides/kafka/concepts/kafka.md.bak b/docs/guides/kafka/concepts/kafka.md.bak
new file mode 100644
index 0000000000..23bf5d6be7
--- /dev/null
+++ b/docs/guides/kafka/concepts/kafka.md.bak
@@ -0,0 +1,461 @@
+---
+title: Kafka CRD
+menu:
+  docs_{{ .version }}:
+    identifier: kf-kafka-concepts
+    name: Kafka
+    parent: kf-concepts-kafka
+    weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Kafka
+
+## What is Kafka
+
+`Kafka` is a Kubernetes `Custom Resource Definition` (CRD). It provides declarative configuration for [Kafka](https://kafka.apache.org/) in a Kubernetes native way. You only need to describe the desired database configuration in a `Kafka` object, and the KubeDB operator will create Kubernetes objects in the desired state for you.
+
+## Kafka Spec
+
+As with all other Kubernetes objects, a Kafka needs `apiVersion`, `kind`, and `metadata` fields. It also needs a `.spec` section. Below is an example Kafka object.
+ +```yaml +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka + namespace: demo +spec: + disableSecurity: false + authSecret: + kind: Secret + name: kafka-admin-cred + configuration: + secretName: kafka-custom-config + configuration: + secretName: kafka-custom-config + inline: + broker.properties: | + log.retention.hours=168 + log.segment.bytes=1073741824 + controller.properties: | + log.retention.hours=168 + log.segment.bytes=1073741824 + server.properties: | + log.retention.hours=168 + log.segment.bytes=1073741824 + enableSSL: true + healthChecker: + failureThreshold: 3 + periodSeconds: 20 + timeoutSeconds: 10 + keystoreCredSecret: + kind: Secret + name: kafka-keystore-cred + podTemplate: + metadata: + annotations: + passMe: ToDatabasePod + labels: + thisLabel: willGoToPod + controller: + annotations: + passMe: ToPetSet + labels: + thisLabel: willGoToSts + storageType: Durable + deletionPolicy: DoNotTerminate + tls: + certificates: + - alias: server + secretName: kafka-server-cert + - alias: client + secretName: kafka-client-cert + issuerRef: + apiGroup: cert-manager.io + kind: Issuer + name: kafka-ca-issuer + topology: + broker: + podTemplate: + spec: + containers: + - name: kafka + resources: + requests: + cpu: 500m + memory: 1024Mi + limits: + cpu: 700m + memory: 2Gi + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + storageClassName: standard + controller: + replicas: 1 + podTemplate: + spec: + containers: + - name: kafka + resources: + requests: + cpu: 500m + memory: 1024Mi + limits: + cpu: 700m + memory: 2Gi + monitor: + agent: prometheus.io/operator + prometheus: + exporter: + port: 56790 + serviceMonitor: + labels: + release: prometheus + interval: 10s + version: 3.9.0 +``` + +### spec.version + +`spec.version` is a required field specifying the name of the [KafkaVersion](/docs/guides/kafka/concepts/kafkaversion.md) crd where the docker images are specified. Currently, when you install KubeDB, it creates the following `Kafka` resources, + +- `3.5.2` +- `3.6.1` +- `3.7.2` +- `3.8.1` +- `3.9.0` + +### spec.replicas + +`spec.replicas` the number of members in Kafka replicaset. + +If `spec.topology` is set, then `spec.replicas` needs to be empty. Instead use `spec.topology.controller.replicas` and `spec.topology.broker.replicas`. You need to set both of them for topology clustering. + +KubeDB uses `PodDisruptionBudget` to ensure that majority of these replicas are available during [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions) so that quorum is maintained. + +### spec.disableSecurity + +`spec.disableSecurity` is an optional field that specifies whether to disable security for Kafka cluster which means no authentication and authorization will be enabled. All the Kafka Brokers and controllers will be set to spin up with `security.protocol=PLAINTEXT` configuration. The default value of this field is `false`. + +### spec.authSecret + +`spec.authSecret` is an optional field that points to a Secret used to hold credentials for `kafka` admin user. If not set, KubeDB operator creates a new Secret `{kafka-object-name}-auth` for storing the password for `admin` user for each Kafka object. + +We can use this field in 3 mode. +1. Using an external secret. In this case, You need to create an auth secret first with required fields, then specify the secret name when creating the Kafka object using `spec.authSecret.name` & set `spec.authSecret.externallyManaged` to true. 
+```yaml
+authSecret:
+  name:
+  externallyManaged: true
+```
+
+2. Specifying the secret name only. In this case, you need to specify the secret name when creating the Kafka object using `spec.authSecret.name`. `externallyManaged` is false by default.
+```yaml
+authSecret:
+  name:
+```
+
+3. Let KubeDB do everything for you. In this case, no work for you.
+
+AuthSecret contains a `username` key and a `password` key which contain the username and password respectively for the Kafka `admin` user.
+
+Example:
+
+```bash
+$ kubectl create secret generic kf-auth -n demo \
+--from-literal=username=jhon-doe \
+--from-literal=password=6q8u_2jMOW-OOZXk
+secret "kf-auth" created
+```
+
+```yaml
+apiVersion: v1
+data:
+  password: NnE4dV8yak1PVy1PT1pYaw==
+  username: amhvbi1kb2U=
+kind: Secret
+metadata:
+  name: kf-auth
+  namespace: demo
+type: Opaque
+```
+
+Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator (version 0.13.0 and higher).
+
+### spec.configSecret
+
+`spec.configSecret` is an optional field that points to a Secret used to hold custom Kafka configuration. If not set, KubeDB operator will use default configuration for Kafka. This is currently not in use. Use `.spec.configuration` to provide custom configuration instead. If you still provide this field, KubeDB operator will update `spec.configuration.secretName` with the provided secret name.
+
+> **Note**: Use `.spec.configuration.secretName` to specify the name of the secret instead of `.spec.configSecret.name`. The field `.spec.configSecret` is deprecated and will be removed in future releases. If you still use `.spec.configSecret`, KubeDB will copy `.spec.configSecret.name` to `.spec.configuration.secretName` internally.
+
+### spec.configuration
+`spec.configuration` is an optional field that specifies custom configuration for the Kafka cluster. It has the following fields:
+- `configuration.secretName` is an optional field that specifies the name of the secret that holds custom configuration files for the Kafka cluster.
+- `configuration.inline` is an optional field that allows you to provide custom configuration directly in the Kafka object. It has the following possible keys:
+  - `broker.properties` - is used to provide custom configuration for Kafka brokers.
+  - `controller.properties` - is used to provide custom configuration for Kafka controllers.
+  - `server.properties` - is used to provide custom configuration for both Kafka brokers and controllers.
+
+### spec.topology
+
+`spec.topology` represents the topology configuration for the Kafka cluster in KRaft mode.
+
+When `spec.topology` is set, the following fields need to be empty; otherwise, the validating webhook will throw an error.
+
+- `spec.replicas`
+- `spec.podTemplate`
+- `spec.storage`
+
+#### spec.topology.broker
+
+`broker` represents the configuration for the brokers of Kafka. In KRaft topology mode clustering, each pod can act as a single dedicated Kafka broker.
+
+Available configurable fields:
+
+- `topology.broker`:
+  - `replicas` (`: "1"`) - is an `optional` field to specify the number of nodes (i.e. pods) that act as the dedicated Kafka `broker` pods. Defaults to `1`.
+  - `suffix` (`: "broker"`) - is an `optional` field that is added as the suffix of the broker PetSet name. Defaults to `broker`.
+  - `storage` is a `required` field that specifies how much storage to claim for each of the `broker` pods.
+ - `resources` (`: "cpu: 500m, memory: 1Gi" `) - is an `optional` field that specifies how much computational resources to request or to limit for each of the `broker` pods. + +#### spec.topology.controller + +`controller` represents configuration for controllers of Kafka. In KRaft Topology mode clustering each pod can act as a single dedicated Kafka controller that preserves metadata for the whole cluster and participated in leader election. + +Available configurable fields: + +- `topology.controller`: + - `replicas` (`: "1"`) - is an `optional` field to specify the number of nodes (ie. pods ) that act as the dedicated Kafka `controller` pods. Defaults to `1`. + - `suffix` (`: "controller"`) - is an `optional` field that is added as the suffix of the controller PetSet name. Defaults to `controller`. + - `storage` is a `required` field that specifies how much storage to claim for each of the `controller` pods. + - `resources` (`: "cpu: 500m, memory: 1Gi" `) - is an `optional` field that specifies how much computational resources to request or to limit for each of the `controller` pods. + +### spec.enableSSL + +`spec.enableSSL` is an `optional` field that specifies whether to enable TLS to HTTP layer. The default value of this field is `false`. + +```yaml +spec: + enableSSL: true +``` + +### spec.tls + +`spec.tls` specifies the TLS/SSL configurations. The KubeDB operator supports TLS management by using the [cert-manager](https://cert-manager.io/). Currently, the operator only supports the `PKCS#8` encoded certificates. + +```yaml +spec: + tls: + issuerRef: + apiGroup: "cert-manager.io" + kind: Issuer + name: kf-issuer + certificates: + - alias: server + privateKey: + encoding: PKCS8 + secretName: kf-client-cert + subject: + organizations: + - kubedb + - alias: http + privateKey: + encoding: PKCS8 + secretName: kf-server-cert + subject: + organizations: + - kubedb +``` + +The `spec.tls` contains the following fields: + +- `tls.issuerRef` - is an `optional` field that references to the `Issuer` or `ClusterIssuer` custom resource object of [cert-manager](https://cert-manager.io/docs/concepts/issuer/). It is used to generate the necessary certificate secrets for Kafka. If the `issuerRef` is not specified, the operator creates a self-signed CA and also creates necessary certificate (valid: 365 days) secrets using that CA. + - `apiGroup` - is the group name of the resource that is being referenced. Currently, the only supported value is `cert-manager.io`. + - `kind` - is the type of resource that is being referenced. The supported values are `Issuer` and `ClusterIssuer`. + - `name` - is the name of the resource ( `Issuer` or `ClusterIssuer` ) that is being referenced. + +- `tls.certificates` - is an `optional` field that specifies a list of certificate configurations used to configure the certificates. It has the following fields: + - `alias` - represents the identifier of the certificate. It has the following possible value: + - `server` - is used for the server certificate configuration. + - `client` - is used for the client certificate configuration. + + - `secretName` - ( `string` | `"-alias-cert"` ) - specifies the k8s secret name that holds the certificates. + + - `subject` - specifies an `X.509` distinguished name (DN). It has the following configurable fields: + - `organizations` ( `[]string` | `nil` ) - is a list of organization names. + - `organizationalUnits` ( `[]string` | `nil` ) - is a list of organization unit names. 
+ - `countries` ( `[]string` | `nil` ) - is a list of country names (ie. Country Codes). + - `localities` ( `[]string` | `nil` ) - is a list of locality names. + - `provinces` ( `[]string` | `nil` ) - is a list of province names. + - `streetAddresses` ( `[]string` | `nil` ) - is a list of street addresses. + - `postalCodes` ( `[]string` | `nil` ) - is a list of postal codes. + - `serialNumber` ( `string` | `""` ) is a serial number. + + For more details, visit [here](https://golang.org/pkg/crypto/x509/pkix/#Name). + + - `duration` ( `string` | `""` ) - is the period during which the certificate is valid. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300m"`, `"1.5h"` or `"20h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + - `renewBefore` ( `string` | `""` ) - is a specifiable time before expiration duration. + - `dnsNames` ( `[]string` | `nil` ) - is a list of subject alt names. + - `ipAddresses` ( `[]string` | `nil` ) - is a list of IP addresses. + - `uris` ( `[]string` | `nil` ) - is a list of URI Subject Alternative Names. + - `emailAddresses` ( `[]string` | `nil` ) - is a list of email Subject Alternative Names. + + +### spec.storageType + +`spec.storageType` is an optional field that specifies the type of storage to use for database. It can be either `Durable` or `Ephemeral`. The default value of this field is `Durable`. If `Ephemeral` is used then KubeDB will create Kafka cluster using [emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) volume. + +### spec.storage + +If you set `spec.storageType:` to `Durable`, then `spec.storage` is a required field that specifies the StorageClass of PVCs dynamically allocated to store data for the database. This storage spec will be passed to the PetSet created by KubeDB operator to run database pods. You can specify any StorageClass available in your cluster with appropriate resource requests. + +- `spec.storage.storageClassName` is the name of the StorageClass used to provision PVCs. PVCs don’t necessarily have to request a class. A PVC with its storageClassName set equal to "" is always interpreted to be requesting a PV with no class, so it can only be bound to PVs with no class (no annotation or one set equal to ""). A PVC with no storageClassName is not quite the same and is treated differently by the cluster depending on whether the DefaultStorageClass admission plugin is turned on. +- `spec.storage.accessModes` uses the same conventions as Kubernetes PVCs when requesting storage with specific access modes. +- `spec.storage.resources` can be used to request specific quantities of storage. This follows the same resource model used by PVCs. + +To learn how to configure `spec.storage`, please visit the links below: + +- https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims + +NB. If `spec.topology` is set, then `spec.storage` needs to be empty. Instead use `spec.topology..storage` + +### spec.monitor + +Kafka managed by KubeDB can be monitored with Prometheus operator out-of-the-box. To learn more, +- [Monitor Apache Kafka with Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md) +- [Monitor Apache Kafka with Built-in Prometheus](/docs/guides/kafka/monitoring/using-builtin-prometheus.md) + +### spec.podTemplate + +KubeDB allows providing a template for database pod through `spec.podTemplate`. 
KubeDB operator will pass the information provided in `spec.podTemplate` to the PetSet created for the Kafka cluster.
+
+KubeDB accepts the following fields to set in `spec.podTemplate:`
+
+- metadata:
+  - annotations (pod's annotation)
+  - labels (pod's labels)
+- controller:
+  - annotations (petset's annotation)
+  - labels (petset's labels)
+- spec:
+  - containers
+  - volumes
+  - podPlacementPolicy
+  - initContainers
+  - imagePullSecrets
+  - nodeSelector
+  - serviceAccountName
+  - schedulerName
+  - tolerations
+  - priorityClassName
+  - priority
+  - securityContext
+
+You can check out the full list [here](https://github.com/kmodules/offshoot-api/blob/master/api/v2/types.go#L26C1-L279C1).
+Uses of some fields of `spec.podTemplate` are described below,
+
+NB. If `spec.topology` is set, then `spec.podTemplate` needs to be empty. Instead use `spec.topology..podTemplate`
+
+#### spec.podTemplate.spec.tolerations
+
+The `spec.podTemplate.spec.tolerations` is an optional field. This can be used to specify the pod's tolerations.
+
+#### spec.podTemplate.spec.volumes
+
+The `spec.podTemplate.spec.volumes` is an optional field. This can be used to provide the list of volumes that can be mounted by containers belonging to the pod.
+
+#### spec.podTemplate.spec.podPlacementPolicy
+
+`spec.podTemplate.spec.podPlacementPolicy` is an optional field. This can be used to provide the reference of the `podPlacementPolicy`. The `name` of the podPlacementPolicy is referred under this attribute. This will be used by our PetSet controller to place the db pods throughout the region, zone & nodes according to the policy. It utilizes the Kubernetes affinity & podTopologySpreadConstraints features to do so.
+```yaml
+spec:
+  podPlacementPolicy:
+    name: default
+```
+
+#### spec.podTemplate.spec.nodeSelector
+
+`spec.podTemplate.spec.nodeSelector` is an optional field that specifies a map of key-value pairs. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). To learn more, see [here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) .
+
+### spec.serviceTemplates
+
+You can also provide a template for the services created by KubeDB operator for the Kafka cluster through `spec.serviceTemplates`. This will allow you to set the type and other properties of the services.
+
+KubeDB allows the following fields to set in `spec.serviceTemplates`:
+- `alias` represents the identifier of the service. It has the following possible value:
+  - `stats` is used for the exporter service identification.
+- metadata:
+  - labels
+  - annotations
+- spec:
+  - type
+  - ports
+  - clusterIP
+  - externalIPs
+  - loadBalancerIP
+  - loadBalancerSourceRanges
+  - externalTrafficPolicy
+  - healthCheckNodePort
+  - sessionAffinityConfig
+
+See [here](https://github.com/kmodules/offshoot-api/blob/kubernetes-1.21.1/api/v1/types.go#L237) to understand these fields in detail.
+
+
+#### spec.podTemplate.spec.containers
+
+The `spec.podTemplate.spec.containers` can be used to provide the list of containers and their configurations for the database pod. Some of the fields are described below,
+
+##### spec.podTemplate.spec.containers[].name
+The `spec.podTemplate.spec.containers[].name` field is used to specify the name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
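+
+As an illustrative sketch only (the container name `kafka` matches the main container shown in the example object above; the env variable and resource values below are placeholders, not defaults), the container fields described in this and the following subsections can be combined like this:
+
+```yaml
+spec:
+  podTemplate:
+    spec:
+      containers:
+        - name: kafka
+          env:
+            # hypothetical variable, shown only to illustrate the field
+            - name: CUSTOM_ENV
+              value: "demo"
+          resources:
+            requests:
+              cpu: 500m
+              memory: 1Gi
+            limits:
+              cpu: 700m
+              memory: 2Gi
+```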
+
+##### spec.podTemplate.spec.containers[].args
+`spec.podTemplate.spec.containers[].args` is an optional field. This can be used to provide additional arguments to the database installation.
+
+##### spec.podTemplate.spec.containers[].env
+
+`spec.podTemplate.spec.containers[].env` is an optional field that specifies the environment variables to pass to the Kafka containers.
+
+##### spec.podTemplate.spec.containers[].resources
+
+`spec.podTemplate.spec.containers[].resources` is an optional field. This can be used to request compute resources required by containers of the database pods. To learn more, visit [here](http://kubernetes.io/docs/user-guide/compute-resources/).
+
+### spec.deletionPolicy
+
+`deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `Kafka` crd or which resources KubeDB should keep or delete when you delete the `Kafka` crd. KubeDB provides the following four deletion policies:
+
+- DoNotTerminate
+- WipeOut
+- Halt
+- Delete
+
+When `deletionPolicy` is `DoNotTerminate`, KubeDB takes advantage of the `ValidationWebhook` feature in Kubernetes 1.9.0 or later clusters to implement the `DoNotTerminate` feature. If admission webhook is enabled, `DoNotTerminate` prevents users from deleting the database as long as the `spec.deletionPolicy` is set to `DoNotTerminate`.
+
+> For more details you can visit [here](https://appscode.com/blog/post/deletion-policy/)
+
+## spec.healthChecker
+It defines the attributes for the health checker.
+- `spec.healthChecker.periodSeconds` specifies how often to perform the health check.
+- `spec.healthChecker.timeoutSeconds` specifies the number of seconds after which the probe times out.
+- `spec.healthChecker.failureThreshold` specifies the minimum consecutive failures for the healthChecker to be considered failed.
+- `spec.healthChecker.disableWriteCheck` specifies whether to disable the writeCheck or not.
+
+Know details about KubeDB Health checking from this [blog post](https://appscode.com/blog/post/kubedb-health-checker/).
+
+## Next Steps
+
+- Learn how to use KubeDB to run an Apache Kafka cluster [here](/docs/guides/kafka/README.md).
+- Deploy [dedicated topology cluster](/docs/guides/kafka/clustering/topology-cluster/index.md) for Apache Kafka
+- Deploy [combined cluster](/docs/guides/kafka/clustering/combined-cluster/index.md) for Apache Kafka
+- Monitor your Kafka cluster with KubeDB using [`out-of-the-box` Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md).
+- Detail concepts of [KafkaVersion object](/docs/guides/kafka/concepts/kafkaversion.md).
+- Learn to use KubeDB managed Kafka objects using [CLIs](/docs/guides/kafka/cli/cli.md).
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/kafka/concepts/kafkaopsrequest.md b/docs/guides/kafka/concepts/kafkaopsrequest.md index 21fd484c64..654007bc90 100644 --- a/docs/guides/kafka/concepts/kafkaopsrequest.md +++ b/docs/guides/kafka/concepts/kafkaopsrequest.md @@ -244,8 +244,8 @@ spec: databaseRef: name: kafka-dev configuration: - configSecret: - name: new-configsecret-combined + configuration: + secretName: new-configsecret-combined status: conditions: - lastTransitionTime: "2024-07-25T18:22:38Z" @@ -269,8 +269,8 @@ spec: databaseRef: name: kafka-prod configuration: - configSecret: - name: new-configsecret-topology + configuration: + secretName: new-configsecret-topology status: conditions: - lastTransitionTime: "2024-07-25T18:22:38Z" diff --git a/docs/guides/kafka/concepts/kafkaopsrequest.md.bak b/docs/guides/kafka/concepts/kafkaopsrequest.md.bak new file mode 100644 index 0000000000..21fd484c64 --- /dev/null +++ b/docs/guides/kafka/concepts/kafkaopsrequest.md.bak @@ -0,0 +1,622 @@ +--- +title: KafkaOpsRequests CRD +menu: + docs_{{ .version }}: + identifier: kf-opsrequest-concepts + name: KafkaOpsRequest + parent: kf-concepts-kafka + weight: 15 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + + +> New to KubeDB? Please start [here](/docs/README.md). + +# KafkaOpsRequest + +## What is KafkaOpsRequest + +`KafkaOpsRequest` is a Kubernetes `Custom Resource Definitions` (CRD). It provides a declarative configuration for [Kafka](https://kafka.apache.org/) administrative operations like database version updating, horizontal scaling, vertical scaling etc. in a Kubernetes native way. + +## KafkaOpsRequest CRD Specifications + +Like any official Kubernetes resource, a `KafkaOpsRequest` has `TypeMeta`, `ObjectMeta`, `Spec` and `Status` sections. + +Here, some sample `KafkaOpsRequest` CRs for different administrative operations is given below: + +**Sample `KafkaOpsRequest` for updating database:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: update-version + namespace: demo +spec: + type: UpdateVersion + databaseRef: + name: kafka-prod + updateVersion: + targetVersion: 3.9.0 +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +**Sample `KafkaOpsRequest` Objects for Horizontal Scaling of different component of the database:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-hscale-combined + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: kafka-dev + horizontalScaling: + node: 3 +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-hscale-down-topology + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: kafka-prod + horizontalScaling: + topology: + broker: 2 + controller: 2 +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +**Sample `KafkaOpsRequest` 
Objects for Vertical Scaling of different component of the database:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-vscale-combined + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: kafka-dev + verticalScaling: + node: + resources: + requests: + memory: "1.5Gi" + cpu: "0.7" + limits: + memory: "2Gi" + cpu: "1" +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-vscale-topology + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: kafka-prod + verticalScaling: + broker: + resources: + requests: + memory: "1.5Gi" + cpu: "0.7" + limits: + memory: "2Gi" + cpu: "1" + controller: + resources: + requests: + memory: "1.5Gi" + cpu: "0.7" + limits: + memory: "2Gi" + cpu: "1" +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +**Sample `KafkaOpsRequest` Objects for Reconfiguring different kafka mode:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfiugre-combined + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-dev + configuration: + applyConfig: + server.properties: | + log.retention.hours=100 + default.replication.factor=2 +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfiugre-topology + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-prod + configuration: + applyConfig: + broker.properties: | + log.retention.hours=100 + default.replication.factor=2 + controller.properties: | + metadata.log.dir=/var/log/kafka/metadata-custom +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfiugre-combined + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-dev + configuration: + configSecret: + name: new-configsecret-combined +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfiugre-topology + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-prod + configuration: + configSecret: + name: new-configsecret-topology +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 
+ reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +**Sample `KafkaOpsRequest` Objects for Volume Expansion of different database components:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-volume-exp-combined + namespace: demo +spec: + type: VolumeExpansion + databaseRef: + name: kafka-dev + volumeExpansion: + mode: "Online" + node: 2Gi +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-volume-exp-topology + namespace: demo +spec: + type: VolumeExpansion + databaseRef: + name: kafka-prod + volumeExpansion: + mode: "Online" + broker: 2Gi + controller: 2Gi +status: + conditions: + - lastTransitionTime: "2024-07-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +**Sample `KafkaOpsRequest` Objects for Reconfiguring TLS of the database:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-add-tls + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: kafka-prod + tls: + issuerRef: + name: kf-issuer + kind: Issuer + apiGroup: "cert-manager.io" + certificates: + - alias: client + emailAddresses: + - abc@appscode.com +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-rotate + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: kafka-dev + tls: + rotateCertificates: true +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-change-issuer + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: kafka-prod + tls: + issuerRef: + name: kf-new-issuer + kind: Issuer + apiGroup: "cert-manager.io" +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-remove + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: kafka-prod + tls: + remove: true +``` + +Here, we are going to describe the various sections of a `KafkaOpsRequest` crd. + +A `KafkaOpsRequest` object has the following fields in the `spec` section. + +### spec.databaseRef + +`spec.databaseRef` is a required field that point to the [Kafka](/docs/guides/kafka/concepts/kafka.md) object for which the administrative operations will be performed. This field consists of the following sub-field: + +- **spec.databaseRef.name :** specifies the name of the [Kafka](/docs/guides/kafka/concepts/kafka.md) object. + +### spec.type + +`spec.type` specifies the kind of operation that will be applied to the database. Currently, the following types of operations are allowed in `KafkaOpsRequest`. + +- `UpdateVersion` +- `HorizontalScaling` +- `VerticalScaling` +- `VolumeExpansion` +- `Reconfigure` +- `ReconfigureTLS` +- `Restart` + +> You can perform only one type of operation on a single `KafkaOpsRequest` CR. For example, if you want to update your database and scale up its replica then you have to create two separate `KafkaOpsRequest`. At first, you have to create a `KafkaOpsRequest` for updating. 
Once it is completed, then you can create another `KafkaOpsRequest` for scaling. + +### spec.updateVersion + +If you want to update you Kafka version, you have to specify the `spec.updateVersion` section that specifies the desired version information. This field consists of the following sub-field: + +- `spec.updateVersion.targetVersion` refers to a [KafkaVersion](/docs/guides/kafka/concepts/kafkaversion.md) CR that contains the Kafka version information where you want to update. + +> You can only update between Kafka versions. KubeDB does not support downgrade for Kafka. + +### spec.horizontalScaling + +If you want to scale-up or scale-down your Kafka cluster or different components of it, you have to specify `spec.horizontalScaling` section. This field consists of the following sub-field: + +- `spec.horizontalScaling.node` indicates the desired number of nodes for Kafka combined cluster after scaling. For example, if your cluster currently has 4 replica with combined node, and you want to add additional 2 nodes then you have to specify 6 in `spec.horizontalScaling.node` field. Similarly, if you want to remove one node from the cluster, you have to specify 3 in `spec.horizontalScaling.node` field. +- `spec.horizontalScaling.topology` indicates the configuration of topology nodes for Kafka topology cluster after scaling. This field consists of the following sub-field: + - `spec.horizontalScaling.topoloy.broker` indicates the desired number of broker nodes for Kafka topology cluster after scaling. + - `spec.horizontalScaling.topology.controller` indicates the desired number of controller nodes for Kafka topology cluster after scaling. + +> If the reference kafka object is combined cluster, then you can only specify `spec.horizontalScaling.node` field. If the reference kafka object is topology cluster, then you can only specify `spec.horizontalScaling.topology` field. You can not specify both fields at the same time. + +### spec.verticalScaling + +`spec.verticalScaling` is a required field specifying the information of `Kafka` resources like `cpu`, `memory` etc that will be scaled. This field consists of the following sub-fields: + +- `spec.verticalScaling.node` indicates the desired resources for combined Kafka cluster after scaling. +- `spec.verticalScaling.broker` indicates the desired resources for broker of Kafka topology cluster after scaling. +- `spec.verticalScaling.controller` indicates the desired resources for controller of Kafka topology cluster after scaling. + +> If the reference kafka object is combined cluster, then you can only specify `spec.verticalScaling.node` field. If the reference kafka object is topology cluster, then you can only specify `spec.verticalScaling.broker` or `spec.verticalScaling.controller` or both fields. You can not specify `spec.verticalScaling.node` field with any other fields at the same time, but you can specify `spec.verticalScaling.broker` and `spec.verticalScaling.controller` fields at the same time. + +All of them has the below structure: + +```yaml +requests: + memory: "200Mi" + cpu: "0.1" +limits: + memory: "300Mi" + cpu: "0.2" +``` + +Here, when you specify the resource request, the scheduler uses this information to decide which node to place the container of the Pod on and when you specify a resource limit for the container, the `kubelet` enforces those limits so that the running container is not allowed to use more of that resource than the limit you set. 
You can found more details from [here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + +### spec.volumeExpansion + +> To use the volume expansion feature the storage class must support volume expansion + +If you want to expand the volume of your Kafka cluster or different components of it, you have to specify `spec.volumeExpansion` section. This field consists of the following sub-field: + +- `spec.mode` specifies the volume expansion mode. Supported values are `Online` & `Offline`. The default is `Online`. +- `spec.volumeExpansion.node` indicates the desired size for the persistent volume of a combined Kafka cluster. +- `spec.volumeExpansion.broker` indicates the desired size for the persistent volume for broker of a Kafka topology cluster. +- `spec.volumeExpansion.controller` indicates the desired size for the persistent volume for controller of a Kafka topology cluster. + +> If the reference kafka object is combined cluster, then you can only specify `spec.volumeExpansion.node` field. If the reference kafka object is topology cluster, then you can only specify `spec.volumeExpansion.broker` or `spec.volumeExpansion.controller` or both fields. You can not specify `spec.volumeExpansion.node` field with any other fields at the same time, but you can specify `spec.volumeExpansion.broker` and `spec.volumeExpansion.controller` fields at the same time. + +All of them refer to [Quantity](https://v1-22.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#quantity-resource-core) types of Kubernetes. + +Example usage of this field is given below: + +```yaml +spec: + volumeExpansion: + node: "2Gi" +``` + +This will expand the volume size of all the combined nodes to 2 GB. + +### spec.configuration + +If you want to reconfigure your Running Kafka cluster or different components of it with new custom configuration, you have to specify `spec.configuration` section. This field consists of the following sub-field: + +- `spec.configuration.configSecret` points to a secret in the same namespace of a Kafka resource, which contains the new custom configurations. If there are any configSecret set before in the database, this secret will replace it. The value of the field `spec.stringData` of the secret like below: +```yaml +server.properties: | + default.replication.factor=3 + offsets.topic.replication.factor=3 + log.retention.hours=100 +broker.properties: | + default.replication.factor=3 + offsets.topic.replication.factor=3 + log.retention.hours=100 +controller.properties: | + default.replication.factor=3 + offsets.topic.replication.factor=3 + log.retention.hours=100 +``` +> If you want to reconfigure a combined Kafka cluster, then you can only specify `server.properties` field. If you want to reconfigure a topology Kafka cluster, then you can specify `broker.properties` or `controller.properties` or both fields. You can not specify `server.properties` field with any other fields at the same time, but you can specify `broker.properties` and `controller.properties` fields at the same time. + +- `applyConfig` contains the new custom config as a string which will be merged with the previous configuration. + +- `applyConfig` is a map where key supports 3 values, namely `server.properties`, `broker.properties`, `controller.properties`. And value represents the corresponding configurations. 
+ +```yaml + applyConfig: + server.properties: | + default.replication.factor=3 + offsets.topic.replication.factor=3 + log.retention.hours=100 + broker.properties: | + default.replication.factor=3 + offsets.topic.replication.factor=3 + log.retention.hours=100 + controller.properties: | + metadata.log.dir=/var/log/kafka/metadata-custom +``` + +- `removeCustomConfig` is a boolean field. Specify this field to true if you want to remove all the custom configuration from the deployed kafka cluster. + +### spec.tls + +If you want to reconfigure the TLS configuration of your Kafka i.e. add TLS, remove TLS, update issuer/cluster issuer or Certificates and rotate the certificates, you have to specify `spec.tls` section. This field consists of the following sub-field: + +- `spec.tls.issuerRef` specifies the issuer name, kind and api group. +- `spec.tls.certificates` specifies the certificates. You can learn more about this field from [here](/docs/guides/kafka/concepts/kafka.md#spectls). +- `spec.tls.rotateCertificates` specifies that we want to rotate the certificate of this kafka. +- `spec.tls.remove` specifies that we want to remove tls from this kafka. + +### spec.timeout +As we internally retry the ops request steps multiple times, This `timeout` field helps the users to specify the timeout for those steps of the ops request (in second). +If a step doesn't finish within the specified timeout, the ops request will result in failure. + +### spec.apply +This field controls the execution of obsRequest depending on the database state. It has two supported values: `Always` & `IfReady`. +Use IfReady, if you want to process the opsRequest only when the database is Ready. And use Always, if you want to process the execution of opsReq irrespective of the Database state. + +### KafkaOpsRequest `Status` + +`.status` describes the current state and progress of a `KafkaOpsRequest` operation. It has the following fields: + +### status.phase + +`status.phase` indicates the overall phase of the operation for this `KafkaOpsRequest`. It can have the following three values: + +| Phase | Meaning | +|-------------|----------------------------------------------------------------------------------| +| Successful | KubeDB has successfully performed the operation requested in the KafkaOpsRequest | +| Progressing | KubeDB has started the execution of the applied KafkaOpsRequest | +| Failed | KubeDB has failed the operation requested in the KafkaOpsRequest | +| Denied | KubeDB has denied the operation requested in the KafkaOpsRequest | +| Skipped | KubeDB has skipped the operation requested in the KafkaOpsRequest | + +Important: Ops-manager Operator can skip an opsRequest, only if its execution has not been started yet & there is a newer opsRequest applied in the cluster. `spec.type` has to be same as the skipped one, in this case. + +### status.observedGeneration + +`status.observedGeneration` shows the most recent generation observed by the `KafkaOpsRequest` controller. + +### status.conditions + +`status.conditions` is an array that specifies the conditions of different steps of `KafkaOpsRequest` processing. Each condition entry has the following fields: + +- `types` specifies the type of the condition. 
KafkaOpsRequest has the following types of conditions:
+
+| Type                          | Meaning                                                                     |
+|-------------------------------|-----------------------------------------------------------------------------|
+| `Progressing`                 | Specifies that the operation is now in the progressing state                |
+| `Successful`                  | Specifies such a state that the operation on the database was successful.   |
+| `HaltDatabase`                | Specifies such a state that the database is halted by the operator          |
+| `ResumeDatabase`              | Specifies such a state that the database is resumed by the operator         |
+| `Failed`                      | Specifies such a state that the operation on the database failed.           |
+| `StartingBalancer`            | Specifies such a state that the balancer has successfully started           |
+| `StoppingBalancer`            | Specifies such a state that the balancer has successfully stopped           |
+| `UpdateShardImage`            | Specifies such a state that the Shard images have been updated              |
+| `UpdateReplicaSetImage`       | Specifies such a state that the ReplicaSet image has been updated           |
+| `UpdateConfigServerImage`     | Specifies such a state that the ConfigServer image has been updated         |
+| `UpdateMongosImage`           | Specifies such a state that the Mongos image has been updated               |
+| `UpdatePetSetResources`       | Specifies such a state that the PetSet resources have been updated          |
+| `UpdateShardResources`        | Specifies such a state that the Shard resources have been updated           |
+| `UpdateReplicaSetResources`   | Specifies such a state that the ReplicaSet resources have been updated      |
+| `UpdateConfigServerResources` | Specifies such a state that the ConfigServer resources have been updated    |
+| `UpdateMongosResources`       | Specifies such a state that the Mongos resources have been updated          |
+| `ScaleDownReplicaSet`         | Specifies such a state that the scale down operation of the replicaset      |
+| `ScaleUpReplicaSet`           | Specifies such a state that the scale up operation of the replicaset        |
+| `ScaleUpShardReplicas`        | Specifies such a state that the scale up operation of shard replicas        |
+| `ScaleDownShardReplicas`      | Specifies such a state that the scale down operation of shard replicas      |
+| `ScaleDownConfigServer`       | Specifies such a state that the scale down operation of config server       |
+| `ScaleUpConfigServer`         | Specifies such a state that the scale up operation of config server         |
+| `ScaleMongos`                 | Specifies such a state that the scale operation of mongos                   |
+| `VolumeExpansion`             | Specifies such a state that the volume expansion operation of the database  |
+| `ReconfigureReplicaset`       | Specifies such a state that the reconfiguration of replicaset nodes         |
+| `ReconfigureMongos`           | Specifies such a state that the reconfiguration of mongos nodes             |
+| `ReconfigureShard`            | Specifies such a state that the reconfiguration of shard nodes              |
+| `ReconfigureConfigServer`     | Specifies such a state that the reconfiguration of config server nodes      |
+
+- The `status` field is a string, with possible values `True`, `False`, and `Unknown`.
+  - `status` will be `True` if the current transition succeeded.
+  - `status` will be `False` if the current transition failed.
+  - `status` will be `Unknown` if the current transition was denied.
+- The `message` field is a human-readable message indicating details about the condition.
+- The `reason` field is a unique, one-word, CamelCase reason for the condition's last transition.
+- The `lastTransitionTime` field provides a timestamp for when the operation last transitioned from one state to another.
+- The `observedGeneration` shows the most recent condition transition generation observed by the controller. diff --git a/docs/guides/kafka/reconfigure/kafka-combined.md b/docs/guides/kafka/reconfigure/kafka-combined.md index a7be8deffc..007c88f4eb 100644 --- a/docs/guides/kafka/reconfigure/kafka-combined.md +++ b/docs/guides/kafka/reconfigure/kafka-combined.md @@ -171,8 +171,8 @@ spec: databaseRef: name: kafka-dev configuration: - configSecret: - name: new-kf-combined-custom-config + configuration: + secretName: new-kf-combined-custom-config timeout: 5m apply: IfReady ``` @@ -181,7 +181,7 @@ Here, - `spec.databaseRef.name` specifies that we are reconfiguring `kafka-dev` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configuration.configSecret.name` specifies the name of the new secret. +- `spec.configuration.configuration.secretName` specifies the name of the new secret. Let's create the `KafkaOpsRequest` CR we have shown above, diff --git a/docs/guides/kafka/reconfigure/kafka-combined.md.bak b/docs/guides/kafka/reconfigure/kafka-combined.md.bak new file mode 100644 index 0000000000..8f587c671c --- /dev/null +++ b/docs/guides/kafka/reconfigure/kafka-combined.md.bak @@ -0,0 +1,506 @@ +--- +title: Reconfigure Kafka Combined +menu: + docs_{{ .version }}: + identifier: kf-reconfigure-combined + name: Combined + parent: kf-reconfigure + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure Kafka Combined Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a Kafka Combined cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Kafka](/docs/guides/kafka/concepts/kafka.md) + - [Combined](/docs/guides/kafka/clustering/combined-cluster/index.md) + - [KafkaOpsRequest](/docs/guides/kafka/concepts/kafkaopsrequest.md) + - [Reconfigure Overview](/docs/guides/kafka/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/kafka](/docs/examples/kafka) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `Kafka` Combined cluster using a supported version by `KubeDB` operator. Then we are going to apply `KafkaOpsRequest` to reconfigure its configuration. + +### Prepare Kafka Combined Cluster + +Now, we are going to deploy a `Kafka` combined cluster with version `3.9.0`. + +### Deploy Kafka + +At first, we will create a secret with the `server.properties` file containing required configuration settings. + +**server.properties:** + +```properties +log.retention.hours=100 +``` +Here, `log.retention.hours` is set to `100`, whereas the default value is `168`. 
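+
+As an aside, if you keep the configuration in a local `server.properties` file, an equivalent secret can be created directly from that file. This is only a convenience sketch (it assumes the file sits in your current working directory); the YAML manifest shown next works just as well:
+
+```bash
+# Create the secret from the local server.properties file.
+# kubectl uses the file name as the key inside the secret, which is the
+# key name this guide expects (server.properties).
+kubectl create secret generic kf-combined-custom-config -n demo \
+  --from-file=./server.properties
+```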
+ +Let's create a k8s secret containing the above configuration where the file name will be the key and the file-content as the value: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: kf-combined-custom-config + namespace: demo +stringData: + server.properties: |- + log.retention.hours=100 +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/kafka-combined-custom-config.yaml +secret/kf-combined-custom-config created +``` + +In this section, we are going to create a Kafka object specifying `spec.configuration` field to apply this custom configuration. Below is the YAML of the `Kafka` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-dev + namespace: demo +spec: + replicas: 2 + version: 3.9.0 + configuration: + secretName: kf-combined-custom-config + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut +``` + +Let's create the `Kafka` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/kafka-combined.yaml +kafka.kubedb.com/kafka-dev created +``` + +Now, wait until `kafka-dev` has status `Ready`. i.e, + +```bash +$ kubectl get kf -n demo -w +NAME TYPE VERSION STATUS AGE +kafka-dev kubedb.com/v1 3.9.0 Provisioning 0s +kafka-dev kubedb.com/v1 3.9.0 Provisioning 24s +. +. +kafka-dev kubedb.com/v1 3.9.0 Ready 92s +``` + +Now, we will check if the kafka has started with the custom configuration we have provided. + +Exec into the Kafka pod and execute the following commands to see the configurations: +```bash +$ kubectl exec -it -n demo kafka-dev-0 -- bash +kafka@kafka-dev-0:~$ kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep log.retention.hours + log.retention.hours=100 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=100, DEFAULT_CONFIG:log.retention.hours=168} + log.retention.hours=100 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=100, DEFAULT_CONFIG:log.retention.hours=168} +``` +Here, we can see that our given configuration is applied to the Kafka cluster for all brokers. `log.retention.hours` is set to `100` from the default value `168`. + +### Reconfigure using new config secret + +Now we will reconfigure this cluster to set `log.retention.hours` to `125`. + +Now, update our `server.properties` file with the new configuration. + +**server.properties:** + +```properties +log.retention.hours=125 +``` + +Then, we will create a new secret with this configuration file. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: new-kf-combined-custom-config + namespace: demo +stringData: + server.properties: |- + log.retention.hours=125 +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/new-kafka-combined-custom-config.yaml +secret/new-kf-combined-custom-config created +``` + +#### Create KafkaOpsRequest + +Now, we will use this secret to replace the previous secret using a `KafkaOpsRequest` CR. 
The `KafkaOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfigure-combined + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-dev + configuration: + configuration: + secretName: new-kf-combined-custom-config + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `kafka-dev` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.configSecret.name` specifies the name of the new secret. + +Let's create the `KafkaOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/kafka-reconfigure-update-combined.yaml +kafkaopsrequest.ops.kubedb.com/kfops-reconfigure-combined created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `.spec.configuration` of `Kafka` object. + +Let's wait for `KafkaOpsRequest` to be `Successful`. Run the following command to watch `KafkaOpsRequest` CR, + +```bash +$ kubectl get kafkaopsrequests -n demo +NAME TYPE STATUS AGE +kfops-reconfigure-combined Reconfigure Successful 4m55s +``` + +We can see from the above output that the `KafkaOpsRequest` has succeeded. If we describe the `KafkaOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe kafkaopsrequest -n demo kfops-reconfigure-combined +Name: kfops-reconfigure-combined +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: KafkaOpsRequest +Metadata: + Creation Timestamp: 2024-08-01T09:14:46Z + Generation: 1 + Resource Version: 258361 + UID: ac2147ba-51cf-4ebf-8328-76253379108c +Spec: + Apply: IfReady + Configuration: + Config Secret: + Name: new-kf-combined-custom-config + Database Ref: + Name: kafka-dev + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-08-01T09:14:46Z + Message: Kafka ops-request has started to reconfigure kafka nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-08-01T09:14:55Z + Message: successfully reconciled the Kafka with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-08-01T09:15:00Z + Message: get pod; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-dev-0 + Last Transition Time: 2024-08-01T09:15:00Z + Message: evict pod; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-dev-0 + Last Transition Time: 2024-08-01T09:16:15Z + Message: check pod running; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-dev-0 + Last Transition Time: 2024-08-01T09:16:20Z + Message: get pod; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-dev-1 + Last Transition Time: 2024-08-01T09:16:20Z + Message: evict pod; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-dev-1 + Last Transition Time: 2024-08-01T09:17:20Z + Message: check pod running; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-dev-1 + 
Last Transition Time: 2024-08-01T09:17:25Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-08-01T09:17:25Z + Message: Successfully completed reconfigure kafka + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 5m32s KubeDB Ops-manager Operator Start processing for KafkaOpsRequest: demo/kfops-reconfigure-combined + Normal Starting 5m32s KubeDB Ops-manager Operator Pausing Kafka databse: demo/kafka-dev + Normal Successful 5m32s KubeDB Ops-manager Operator Successfully paused Kafka database: demo/kafka-dev for KafkaOpsRequest: kfops-reconfigure-combined + Normal UpdatePetSets 5m23s KubeDB Ops-manager Operator successfully reconciled the Kafka with new configure + Warning get pod; ConditionStatus:True; PodName:kafka-dev-0 5m18s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-dev-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-dev-0 5m18s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-dev-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-dev-0 5m13s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-dev-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-dev-0 4m3s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-dev-0 + Warning get pod; ConditionStatus:True; PodName:kafka-dev-1 3m58s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-dev-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-dev-1 3m58s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-dev-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-dev-1 3m53s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-dev-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-dev-1 2m58s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-dev-1 + Normal RestartNodes 2m53s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 2m53s KubeDB Ops-manager Operator Resuming Kafka database: demo/kafka-dev + Normal Successful 2m53s KubeDB Ops-manager Operator Successfully resumed Kafka database: demo/kafka-dev for KafkaOpsRequest: kfops-reconfigure-combined +``` + +Now let's exec one of the instance and run a kafka-configs.sh command to check the new configuration we have provided. + +```bash +$ kubectl exec -it -n demo kafka-dev-0 -- kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep 'log.retention.hours' + log.retention.hours=125 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=125, DEFAULT_CONFIG:log.retention.hours=168} + log.retention.hours=125 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=125, DEFAULT_CONFIG:log.retention.hours=168} +``` + +As we can see from the configuration of ready kafka, the value of `log.retention.hours` has been changed from `100` to `125`. So the reconfiguration of the cluster is successful. + + +### Reconfigure using apply config + +Now we will reconfigure this cluster again to set `log.retention.hours` to `150`. This time we won't use a new secret. 
We will use the `applyConfig` field of the `KafkaOpsRequest`. This will merge the new config in the existing secret. + +#### Create KafkaOpsRequest + +Now, we will use the new configuration in the `applyConfig` field in the `KafkaOpsRequest` CR. The `KafkaOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfigure-apply-combined + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-dev + configuration: + applyConfig: + server.properties: |- + log.retention.hours=150 + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `kafka-dev` cluster. +- `spec.type` specifies that we are performing `Reconfigure` on kafka. +- `spec.configuration.applyConfig` specifies the new configuration that will be merged in the existing secret. + +Let's create the `KafkaOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/kafka-reconfigure-apply-combined.yaml +kafkaopsrequest.ops.kubedb.com/kfops-reconfigure-apply-combined created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. + +Let's wait for `KafkaOpsRequest` to be `Successful`. Run the following command to watch `KafkaOpsRequest` CR, + +```bash +$ kubectl get kafkaopsrequests -n demo kfops-reconfigure-apply-combined +NAME TYPE STATUS AGE +kfops-reconfigure-apply-combined Reconfigure Successful 55s +``` + +We can see from the above output that the `KafkaOpsRequest` has succeeded. If we describe the `KafkaOpsRequest` we will get an overview of the steps that were followed to reconfigure the cluster. 
+ +```bash +$ kubectl describe kafkaopsrequest -n demo kfops-reconfigure-apply-combined +Name: kfops-reconfigure-apply-combined +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: KafkaOpsRequest +Metadata: + Creation Timestamp: 2024-08-01T09:27:03Z + Generation: 1 + Resource Version: 259123 + UID: fdc46ef0-e2ae-490a-aab8-6a3380ec09d1 +Spec: + Apply: IfReady + Configuration: + Apply Config: + server.properties: log.retention.hours=150 + Database Ref: + Name: kafka-dev + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-08-01T09:27:03Z + Message: Kafka ops-request has started to reconfigure kafka nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-08-01T09:27:06Z + Message: Successfully prepared user provided custom config secret + Observed Generation: 1 + Reason: PrepareCustomConfig + Status: True + Type: PrepareCustomConfig + Last Transition Time: 2024-08-01T09:27:12Z + Message: successfully reconciled the Kafka with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-08-01T09:27:17Z + Message: get pod; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-dev-0 + Last Transition Time: 2024-08-01T09:27:17Z + Message: evict pod; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-dev-0 + Last Transition Time: 2024-08-01T09:27:27Z + Message: check pod running; ConditionStatus:True; PodName:kafka-dev-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-dev-0 + Last Transition Time: 2024-08-01T09:27:32Z + Message: get pod; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-dev-1 + Last Transition Time: 2024-08-01T09:27:32Z + Message: evict pod; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-dev-1 + Last Transition Time: 2024-08-01T09:27:52Z + Message: check pod running; ConditionStatus:True; PodName:kafka-dev-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-dev-1 + Last Transition Time: 2024-08-01T09:27:57Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-08-01T09:27:57Z + Message: Successfully completed reconfigure kafka + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2m7s KubeDB Ops-manager Operator Start processing for KafkaOpsRequest: demo/kfops-reconfigure-apply-combined + Normal Starting 2m7s KubeDB Ops-manager Operator Pausing Kafka databse: demo/kafka-dev + Normal Successful 2m7s KubeDB Ops-manager Operator Successfully paused Kafka database: demo/kafka-dev for KafkaOpsRequest: kfops-reconfigure-apply-combined + Normal UpdatePetSets 118s KubeDB Ops-manager Operator successfully reconciled the Kafka with new configure + Warning get pod; ConditionStatus:True; PodName:kafka-dev-0 113s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-dev-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-dev-0 113s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-dev-0 + Warning check pod running; 
ConditionStatus:False; PodName:kafka-dev-0 108s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-dev-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-dev-0 103s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-dev-0 + Warning get pod; ConditionStatus:True; PodName:kafka-dev-1 98s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-dev-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-dev-1 98s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-dev-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-dev-1 93s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-dev-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-dev-1 78s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-dev-1 + Normal RestartNodes 73s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 73s KubeDB Ops-manager Operator Resuming Kafka database: demo/kafka-dev + Normal Successful 73s KubeDB Ops-manager Operator Successfully resumed Kafka database: demo/kafka-dev for KafkaOpsRequest: kfops-reconfigure-apply-combined +``` + +Now let's exec into one of the instance and run a `kafka-configs.sh` command to check the new configuration we have provided. + +```bash +$ kubectl exec -it -n demo kafka-dev-0 -- kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep 'log.retention.hours' + log.retention.hours=150 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=150, DEFAULT_CONFIG:log.retention.hours=168} + log.retention.hours=150 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=150, DEFAULT_CONFIG:log.retention.hours=168} +``` + +As we can see from the configuration of ready kafka, the value of `log.retention.hours` has been changed from `125` to `150`. So the reconfiguration of the database using the `applyConfig` field is successful. + + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete kf -n demo kafka-dev +kubectl delete kafkaopsrequest -n demo kfops-reconfigure-apply-combined kfops-reconfigure-combined +kubectl delete secret -n demo kf-combined-custom-config new-kf-combined-custom-config +kubectl delete namespace demo +``` + +## Next Steps + +- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md). +- Different Kafka topology clustering modes [here](/docs/guides/kafka/clustering/_index.md). +- Monitor your Kafka database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md). + +[//]: # (- Monitor your Kafka database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/kafka/monitoring/using-builtin-prometheus.md).) +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). 
diff --git a/docs/guides/kafka/reconfigure/kafka-topology.md b/docs/guides/kafka/reconfigure/kafka-topology.md index 660c6d055f..8ebe426205 100644 --- a/docs/guides/kafka/reconfigure/kafka-topology.md +++ b/docs/guides/kafka/reconfigure/kafka-topology.md @@ -203,8 +203,8 @@ spec: databaseRef: name: kafka-prod configuration: - configSecret: - name: new-kf-topology-custom-config + configuration: + secretName: new-kf-topology-custom-config timeout: 5m apply: IfReady ``` @@ -213,7 +213,7 @@ Here, - `spec.databaseRef.name` specifies that we are reconfiguring `kafka-prod` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configuration.configSecret.name` specifies the name of the new secret. +- `spec.configuration.configuration.secretName` specifies the name of the new secret. Let's create the `KafkaOpsRequest` CR we have shown above, diff --git a/docs/guides/kafka/reconfigure/kafka-topology.md.bak b/docs/guides/kafka/reconfigure/kafka-topology.md.bak new file mode 100644 index 0000000000..0ce03a5210 --- /dev/null +++ b/docs/guides/kafka/reconfigure/kafka-topology.md.bak @@ -0,0 +1,625 @@ +--- +title: Reconfigure Kafka Topology +menu: + docs_{{ .version }}: + identifier: kf-reconfigure-topology + name: Topology + parent: kf-reconfigure + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure Kafka Topology Cluster + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a Kafka Topology cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [Kafka](/docs/guides/kafka/concepts/kafka.md) + - [Topology](/docs/guides/kafka/clustering/topology-cluster/index.md) + - [KafkaOpsRequest](/docs/guides/kafka/concepts/kafkaopsrequest.md) + - [Reconfigure Overview](/docs/guides/kafka/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/kafka](/docs/examples/kafka) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `Kafka` Topology cluster using a supported version by `KubeDB` operator. Then we are going to apply `KafkaOpsRequest` to reconfigure its configuration. + +### Prepare Kafka Topology Cluster + +Now, we are going to deploy a `Kafka` topology cluster with version `3.9.0`. + +### Deploy Kafka + +At first, we will create a secret with the `broker.properties` and `controller.properties` file containing required configuration settings. + +**broker.properties:** + +```properties +log.retention.hours=100 +``` + +**controller.properties:** + +```properties +controller.quorum.election.timeout.ms=2000 +``` + +Here, `log.retention.hours` is set to `100`, whereas the default value is `168` for broker and `controller.quorum.election.timeout.ms` is set to `2000` for controller. 
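+
+As an aside, since a topology cluster takes one properties file per node role, the same secret can be built straight from the two local files. This is only a convenience sketch (it assumes both files sit in your current working directory); the YAML manifest shown next works just as well:
+
+```bash
+# Build the secret from both local files; the keys become
+# broker.properties and controller.properties, matching the node roles.
+kubectl create secret generic kf-topology-custom-config -n demo \
+  --from-file=./broker.properties \
+  --from-file=./controller.properties
+```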
+ +Let's create a k8s secret containing the above configuration where the file name will be the key and the file-content as the value: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: kf-topology-custom-config + namespace: demo +stringData: + broker.properties: |- + log.retention.hours=100 + controller.properties: |- + controller.quorum.election.timeout.ms=2000 +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/kafka-topology-custom-config.yaml +secret/kf-topology-custom-config created +``` + +> **Note:** + +In this section, we are going to create a Kafka object specifying `spec.configuration.secretName` field to apply this custom configuration. Below is the YAML of the `Kafka` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: Kafka +metadata: + name: kafka-prod + namespace: demo +spec: + version: 3.9.0 + configuration: + secretName: kf-topology-custom-config + topology: + broker: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + controller: + replicas: 2 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut +``` + +Let's create the `Kafka` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/kafka-topology.yaml +kafka.kubedb.com/kafka-prod created +``` + +Now, wait until `kafka-prod` has status `Ready`. i.e, + +```bash +$ kubectl get kf -n demo -w +NAME TYPE VERSION STATUS AGE +kafka-prod kubedb.com/v1 3.9.0 Provisioning 0s +kafka-prod kubedb.com/v1 3.9.0 Provisioning 24s +. +. +kafka-prod kubedb.com/v1 3.9.0 Ready 92s +``` + +Now, we will check if the kafka has started with the custom configuration we have provided. + +Exec into the Kafka pod and execute the following commands to see the configurations: +```bash +$ kubectl exec -it -n demo kafka-prod-broker-0 -- bash +kafka@kafka-prod-broker-0:~$ kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep log.retention.hours + log.retention.hours=100 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=100, DEFAULT_CONFIG:log.retention.hours=168} + log.retention.hours=100 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=100, DEFAULT_CONFIG:log.retention.hours=168} +``` +Here, we can see that our given configuration is applied to the Kafka cluster for all brokers. `log.retention.hours` is set to `100` from the default value `168`. + +### Reconfigure using new config secret + +Now we will reconfigure this cluster to set `log.retention.hours` to `125`. + +Now, update our `broker.properties` and `controller.properties` file with the new configuration. + +**broker.properties:** + +```properties +log.retention.hours=125 +``` + +**controller.properties:** + +```properties +controller.quorum.election.timeout.ms=3000 +controller.quorum.fetch.timeout.ms=4000 +``` + +Then, we will create a new secret with this configuration file. 
+ +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: new-kf-topology-custom-config + namespace: demo +stringData: + broker.properties: |- + log.retention.hours=125 + controller.properties: |- + controller.quorum.election.timeout.ms=3000 + controller.quorum.fetch.timeout.ms=4000 +``` + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/new-kafka-topology-custom-config.yaml +secret/new-kf-topology-custom-config created +``` + +#### Create KafkaOpsRequest + +Now, we will use this secret to replace the previous secret using a `KafkaOpsRequest` CR. The `KafkaOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfigure-topology + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-prod + configuration: + configuration: + secretName: new-kf-topology-custom-config + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `kafka-prod` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.configSecret.name` specifies the name of the new secret. + +Let's create the `KafkaOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/kafka-reconfigure-update-topology.yaml +kafkaopsrequest.ops.kubedb.com/kfops-reconfigure-topology created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `configuration` of `Kafka` object. + +Let's wait for `KafkaOpsRequest` to be `Successful`. Run the following command to watch `KafkaOpsRequest` CR, + +```bash +$ kubectl get kafkaopsrequests -n demo +NAME TYPE STATUS AGE +kfops-reconfigure-topology Reconfigure Successful 4m55s +``` + +We can see from the above output that the `KafkaOpsRequest` has succeeded. If we describe the `KafkaOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. 
+ +```bash +$ kubectl describe kafkaopsrequest -n demo kfops-reconfigure-topology +Name: kfops-reconfigure-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: KafkaOpsRequest +Metadata: + Creation Timestamp: 2024-08-02T05:08:37Z + Generation: 1 + Resource Version: 332491 + UID: b6e8cb1b-d29f-445e-bb01-60d29012c7eb +Spec: + Apply: IfReady + Configuration: + Config Secret: + Name: new-kf-topology-custom-config + Database Ref: + Name: kafka-prod + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-08-02T05:08:37Z + Message: Kafka ops-request has started to reconfigure kafka nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-08-02T05:08:45Z + Message: check reconcile; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: CheckReconcile + Last Transition Time: 2024-08-02T05:09:42Z + Message: successfully reconciled the Kafka with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-08-02T05:09:47Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T05:09:47Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T05:10:02Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T05:10:07Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T05:10:07Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T05:10:22Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T05:10:27Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T05:10:27Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T05:11:12Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T05:11:17Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T05:11:17Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T05:11:32Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-broker-1 + Last Transition Time: 
2024-08-02T05:11:37Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-08-02T05:11:39Z + Message: Successfully completed reconfigure kafka + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 3m7s KubeDB Ops-manager Operator Start processing for KafkaOpsRequest: demo/kfops-reconfigure-topology + Normal Starting 3m7s KubeDB Ops-manager Operator Pausing Kafka databse: demo/kafka-prod + Normal Successful 3m7s KubeDB Ops-manager Operator Successfully paused Kafka database: demo/kafka-prod for KafkaOpsRequest: kfops-reconfigure-topology + Warning check reconcile; ConditionStatus:False 2m59s KubeDB Ops-manager Operator check reconcile; ConditionStatus:False + Normal UpdatePetSets 2m2s KubeDB Ops-manager Operator successfully reconciled the Kafka with new configure + Warning get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 117s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 117s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-controller-0 112s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-controller-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 102s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 97s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 97s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-controller-1 92s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-controller-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 82s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 77s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 77s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-broker-0 72s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-broker-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 32s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 27s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 27s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Warning check pod running; ConditionStatus:False; 
PodName:kafka-prod-broker-1 22s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-broker-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 12s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 + Normal RestartNodes 7s KubeDB Ops-manager Operator Successfully restarted all nodes + Normal Starting 5s KubeDB Ops-manager Operator Resuming Kafka database: demo/kafka-prod + Normal Successful 5s KubeDB Ops-manager Operator Successfully resumed Kafka database: demo/kafka-prod for KafkaOpsRequest: kfops-reconfigure-topology +``` + +Now let's exec one of the instance and run a kafka-configs.sh command to check the new configuration we have provided. + +```bash +$ kubectl exec -it -n demo kafka-prod-broker-0 -- kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep 'log.retention.hours' + log.retention.hours=125 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=125, DEFAULT_CONFIG:log.retention.hours=168} + log.retention.hours=125 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=125, DEFAULT_CONFIG:log.retention.hours=168} +``` + +As we can see from the configuration of ready kafka, the value of `log.retention.hours` has been changed from `100` to `125`. So the reconfiguration of the cluster is successful. + + +### Reconfigure using apply config + +Now we will reconfigure this cluster again to set `log.retention.hours` to `150`. This time we won't use a new secret. We will use the `applyConfig` field of the `KafkaOpsRequest`. This will merge the new config in the existing secret. + +#### Create KafkaOpsRequest + +Now, we will use the new configuration in the `applyConfig` field in the `KafkaOpsRequest` CR. The `KafkaOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: KafkaOpsRequest +metadata: + name: kfops-reconfigure-apply-topology + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: kafka-prod + configuration: + applyConfig: + broker.properties: |- + log.retention.hours=150 + controller.properties: |- + controller.quorum.election.timeout.ms=4000 + controller.quorum.fetch.timeout.ms=5000 + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `kafka-prod` cluster. +- `spec.type` specifies that we are performing `Reconfigure` on kafka. +- `spec.configuration.applyConfig` specifies the new configuration that will be merged in the existing secret. + +Let's create the `KafkaOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/kafka/reconfigure/kafka-reconfigure-apply-topology.yaml +kafkaopsrequest.ops.kubedb.com/kfops-reconfigure-apply-topology created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. + +Let's wait for `KafkaOpsRequest` to be `Successful`. Run the following command to watch `KafkaOpsRequest` CR, + +```bash +$ kubectl get kafkaopsrequests -n demo kfops-reconfigure-apply-topology +NAME TYPE STATUS AGE +kfops-reconfigure-apply-topology Reconfigure Successful 55s +``` + +We can see from the above output that the `KafkaOpsRequest` has succeeded. 
If we describe the `KafkaOpsRequest` we will get an overview of the steps that were followed to reconfigure the cluster. + +```bash +$ kubectl describe kafkaopsrequest -n demo kfops-reconfigure-apply-topology +Name: kfops-reconfigure-apply-topology +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: KafkaOpsRequest +Metadata: + Creation Timestamp: 2024-08-02T05:14:42Z + Generation: 1 + Resource Version: 332996 + UID: 551d2c92-9431-47a7-a699-8f8115131b49 +Spec: + Apply: IfReady + Configuration: + Apply Config: + broker.properties: log.retention.hours=150 + controller.properties: controller.quorum.election.timeout.ms=4000 +controller.quorum.fetch.timeout.ms=5000 + Database Ref: + Name: kafka-prod + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-08-02T05:14:42Z + Message: Kafka ops-request has started to reconfigure kafka nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-08-02T05:14:45Z + Message: Successfully prepared user provided custom config secret + Observed Generation: 1 + Reason: PrepareCustomConfig + Status: True + Type: PrepareCustomConfig + Last Transition Time: 2024-08-02T05:14:52Z + Message: successfully reconciled the Kafka with new configure + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-08-02T05:14:57Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T05:14:57Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T05:15:07Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-controller-0 + Last Transition Time: 2024-08-02T05:15:12Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T05:15:12Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T05:15:27Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-controller-1 + Last Transition Time: 2024-08-02T05:15:32Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T05:15:32Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: EvictPod--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T05:16:07Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-broker-0 + Last Transition Time: 2024-08-02T05:16:12Z + Message: get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: GetPod--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T05:16:12Z + Message: evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed 
Generation: 1 + Status: True + Type: EvictPod--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T05:16:27Z + Message: check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--kafka-prod-broker-1 + Last Transition Time: 2024-08-02T05:16:32Z + Message: Successfully restarted all nodes + Observed Generation: 1 + Reason: RestartNodes + Status: True + Type: RestartNodes + Last Transition Time: 2024-08-02T05:16:35Z + Message: Successfully completed reconfigure kafka + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2m6s KubeDB Ops-manager Operator Start processing for KafkaOpsRequest: demo/kfops-reconfigure-apply-topology + Normal Starting 2m6s KubeDB Ops-manager Operator Pausing Kafka databse: demo/kafka-prod + Normal Successful 2m6s KubeDB Ops-manager Operator Successfully paused Kafka database: demo/kafka-prod for KafkaOpsRequest: kfops-reconfigure-apply-topology + Normal UpdatePetSets 116s KubeDB Ops-manager Operator successfully reconciled the Kafka with new configure + Warning get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 111s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 111s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-controller-0 106s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-controller-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 101s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-controller-0 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 96s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 96s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-controller-1 91s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-controller-1 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 81s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-controller-1 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 76s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 76s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning check pod running; ConditionStatus:False; PodName:kafka-prod-broker-0 71s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:kafka-prod-broker-0 + Warning check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 41s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:kafka-prod-broker-0 + Warning get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 36s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:kafka-prod-broker-1 + Warning evict pod; 
ConditionStatus:True; PodName:kafka-prod-broker-1   36s   KubeDB Ops-manager Operator  evict pod; ConditionStatus:True; PodName:kafka-prod-broker-1
+  Warning  check pod running; ConditionStatus:False; PodName:kafka-prod-broker-1   31s   KubeDB Ops-manager Operator  check pod running; ConditionStatus:False; PodName:kafka-prod-broker-1
+  Warning  check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1    21s   KubeDB Ops-manager Operator  check pod running; ConditionStatus:True; PodName:kafka-prod-broker-1
+  Normal   RestartNodes                                                             15s   KubeDB Ops-manager Operator  Successfully restarted all nodes
+  Normal   Starting                                                                 14s   KubeDB Ops-manager Operator  Resuming Kafka database: demo/kafka-prod
+  Normal   Successful                                                               14s   KubeDB Ops-manager Operator  Successfully resumed Kafka database: demo/kafka-prod for KafkaOpsRequest: kfops-reconfigure-apply-topology
+```
+
+Now let's exec into one of the instances and run a `kafka-configs.sh` command to check the new configuration we have provided.
+
+```bash
+$ kubectl exec -it -n demo kafka-prod-broker-0 -- kafka-configs.sh --bootstrap-server localhost:9092 --command-config /opt/kafka/config/clientauth.properties --describe --entity-type brokers --all | grep 'log.retention.hours'
+  log.retention.hours=150 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=150, DEFAULT_CONFIG:log.retention.hours=168}
+  log.retention.hours=150 sensitive=false synonyms={STATIC_BROKER_CONFIG:log.retention.hours=150, DEFAULT_CONFIG:log.retention.hours=168}
+```
+
+As we can see from the configuration of the ready Kafka cluster, the value of `log.retention.hours` has been changed from `125` to `150`. So the reconfiguration of the database using the `applyConfig` field is successful.
+
+
+## Cleaning Up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete kf -n demo kafka-prod
+kubectl delete kafkaopsrequest -n demo kfops-reconfigure-apply-topology kfops-reconfigure-topology
+kubectl delete secret -n demo kf-topology-custom-config new-kf-topology-custom-config
+kubectl delete ns demo
+```
+
+## Next Steps
+
+- Detail concepts of [Kafka object](/docs/guides/kafka/concepts/kafka.md).
+- Different Kafka topology clustering modes [here](/docs/guides/kafka/clustering/_index.md).
+- Monitor your Kafka database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/kafka/monitoring/using-prometheus-operator.md).
+
+[//]: # (- Monitor your Kafka database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/kafka/monitoring/using-builtin-prometheus.md).)
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/mariadb/concepts/mariadb/index.md b/docs/guides/mariadb/concepts/mariadb/index.md
index bfb44f7e74..a095c20fdd 100644
--- a/docs/guides/mariadb/concepts/mariadb/index.md
+++ b/docs/guides/mariadb/concepts/mariadb/index.md
@@ -217,9 +217,9 @@ The following fields are configurable in the `spec.tls` section:
 - `uriSANs` (optional) is a list of URI Subject Alternative Names to be set in the Certificate.
 - `emailSANs` (optional) is a list of email Subject Alternative Names to be set in the Certificate.
 
-### spec.configSecret
+### spec.configuration
 
-`spec.configSecret` is an optional field that allows users to provide custom configuration for MariaDB. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47).
+`spec.configuration` is an optional field that allows users to provide custom configuration for MariaDB.
This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). ### spec.podTemplate diff --git a/docs/guides/mariadb/concepts/mariadb/index.md.bak b/docs/guides/mariadb/concepts/mariadb/index.md.bak new file mode 100644 index 0000000000..bfb44f7e74 --- /dev/null +++ b/docs/guides/mariadb/concepts/mariadb/index.md.bak @@ -0,0 +1,396 @@ +--- +title: MariaDB CRD +menu: + docs_{{ .version }}: + identifier: guides-mariadb-concepts-mariadb + name: MariaDB + parent: guides-mariadb-concepts + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# MariaDB + +## What is MariaDB + +`MariaDB` is a Kubernetes `Custom Resource Definitions` (CRD). It provides declarative configuration for [MariaDB](https://www.mariadb.com/) in a Kubernetes native way. You only need to describe the desired database configuration in a MariaDB object, and the KubeDB operator will create Kubernetes objects in the desired state for you. + +## MariaDB Spec + +As with all other Kubernetes objects, a MariaDB needs `apiVersion`, `kind`, and `metadata` fields. It also needs a `.spec` section. Below is an example MariaDB object. + +```yaml +apiVersion: kubedb.com/v1 +kind: MariaDB +metadata: + name: sample-mariadb + namespace: demo +spec: + authSecret: + kind: Secret + name: sample-mariadb-auth + monitor: + agent: prometheus.io + prometheus: + exporter: + port: 56790 + resources: {} + serviceMonitor: + interval: 10s + labels: + release: prometheus + podTemplate: + controller: {} + metadata: {} + spec: + containers: + - name: mariadb + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 500m + memory: 1Gi + serviceAccountName: sample-mariadb + replicas: 3 + requireSSL: true + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + deletionPolicy: WipeOut + tls: + certificates: + - alias: server + dnsNames: + - localhost + ipAddresses: + - 127.0.0.1 + secretName: sample-mariadb-server-cert + subject: + organizations: + - kubedb:server + - alias: archiver + secretName: sample-mariadb-archiver-cert + - alias: metrics-exporter + secretName: sample-mariadb-metrics-exporter-cert + issuerRef: + apiGroup: cert-manager.io + kind: Issuer + name: md-issuer + version: 10.5.23 +``` + +### spec.version + +`spec.version` is a required field specifying the name of the [MariaDBVersion](/docs/guides/mariadb/concepts/mariadb-version) crd where the docker images are specified. Currently, when you install KubeDB, it creates the following `MariaDBVersion` resources, + +- `10.5.23`, `10.4.32` + +### spec.authSecret + +`spec.authSecret` is an optional field that points to a Secret used to hold credentials for `mariadb` root user. If not set, the KubeDB operator creates a new Secret `{mariadb-object-name}-auth` for storing the password for `mariadb` root user for each MariaDB object. If you want to use an existing secret please specify that when creating the MariaDB object using `spec.authSecret.name`. + +This secret contains a `user` key and a `password` key which contains the `username` and `password` respectively for `mariadb` root user. Here, the value of `user` key is fixed to be `root`. + +Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator (version 0.13.0 and higher). 
+ +Example: + +```bash +kubectl create secret generic mariadb-auth -n demo \ + --from-literal=user=root \ + --from-literal=password=6q8u_2jMOW-OOZXk +secret/mariadb-auth created +``` + +```yaml +apiVersion: v1 +data: + password: NnE4dV8yak1PVy1PT1pYaw== + user: cm9vdA== +kind: Secret +metadata: + name: mariadb-auth + namespace: demo +type: Opaque +``` + +### spec.storageType + +`spec.storageType` is an optional field that specifies the type of storage to use for the database. It can be either `Durable` or `Ephemeral`. The default value of this field is `Durable`. If `Ephemeral` is used then KubeDB will create MariaDB database using [emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) volume. In this case, you don't have to specify `spec.storage` field. + +### spec.storage + +If you set `spec.storageType:` to `Durable`, then `spec.storage` is a required field that specifies the StorageClass of PVCs dynamically allocated to store data for the database. This storage spec will be passed to the PetSet created by KubeDB operator to run database pods. You can specify any StorageClass available in your cluster with appropriate resource requests. + +- `spec.storage.storageClassName` is the name of the StorageClass used to provision PVCs. PVCs don’t necessarily have to request a class. A PVC with its storageClassName set equal to "" is always interpreted to be requesting a PV with no class, so it can only be bound to PVs with no class (no annotation or one set equal to ""). A PVC with no storageClassName is not quite the same and is treated differently by the cluster depending on whether the DefaultStorageClass admission plugin is turned on. +- `spec.storage.accessModes` uses the same conventions as Kubernetes PVCs when requesting storage with specific access modes. +- `spec.storage.resources` can be used to request specific quantities of storage. This follows the same resource model used by PVCs. + +To learn how to configure `spec.storage`, please visit the links below: + +- https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims + +### spec.init + +`spec.init` is an optional section that can be used to initialize a newly created MariaDB database. MariaDB databases can be initialized in one of two ways: + +- Initialize from Script +- Initialize from Stash Restore + +#### Initialize via Script + +To initialize a MariaDB database using a script (shell script, sql script, etc.), set the `spec.init.script` section when creating a MariaDB object. It will execute files alphabetically with extensions `.sh` , `.sql` and `.sql.gz` that is found in the repository. The scripts inside child folders will be skipped. script must have the following information: + +- [VolumeSource](https://kubernetes.io/docs/concepts/storage/volumes/#types-of-volumes): Where your script is loaded from. + +Below is an example showing how a script from a configMap can be used to initialize a MariaDB database. + +```yaml +apiVersion: kubedb.com/v1 +kind: MariaDB +metadata: + name: sample-mariadb + namespace: demo +spec: + version: 10.5.23 + init: + script: + configMap: + name: md-init-script +``` + +In the above example, KubeDB operator will launch a Job to execute all js script of `md-init-script` in alphabetical order once PetSet pods are running. + +### spec.monitor + +MariaDB managed by KubeDB can be monitored with builtin-Prometheus and Prometheus operator out-of-the-box. + +### spec.requireSSL + +`spec.requireSSL` specifies whether the client connections require SSL. 
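It is a plain boolean on the spec, for example (sketch):

```yaml
spec:
  requireSSL: true
```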
If `spec.requireSSL` is `true` then the server permits only TCP/IP connections that use SSL, or connections that use a socket file (on Unix) or shared memory (on Windows). The server rejects any non-secure connection attempt. For more details, please visit [here](https://mariadb.com/kb/en/securing-connections-for-client-and-server/#requiring-tls-for-specific-user-accounts) + +### spec.tls + +`spec.tls` specifies the TLS/SSL configurations for the MariaDB. + +The following fields are configurable in the `spec.tls` section: + +- `issuerRef` is a reference to the `Issuer` or `ClusterIssuer` CR of [cert-manager](https://cert-manager.io/docs/concepts/issuer/) that will be used by `KubeDB` to generate necessary certificates. + + - `apiGroup` is the group name of the resource being referenced. The value for `Issuer` or `ClusterIssuer` is "cert-manager.io" (cert-manager v0.12.0 and later). + - `kind` is the type of resource being referenced. KubeDB supports both `Issuer` and `ClusterIssuer` as values for this field. + - `name` is the name of the resource (`Issuer` or `ClusterIssuer`) being referenced. + +- `certificates` (optional) are a list of certificates used to configure the server and/or client certificate. It has the following fields: + + - `alias` represents the identifier of the certificate. It has the following possible value: + - `server` is used for server certificate identification. + - `client` is used for client certificate identification. + - `metrics-exporter` is used for metrics exporter certificate identification. + - `secretName` (optional) specifies the k8s secret name that holds the certificates. + This field is optional. If the user does not specify this field, the default secret name will be created in the following format: `--cert`. + - `subject` (optional) specifies an `X.509` distinguished name. It has the following possible field, + - `organizations` (optional) are the list of different organization names to be used on the Certificate. + - `organizationalUnits` (optional) are the list of different organization unit name to be used on the Certificate. + - `countries` (optional) are the list of country names to be used on the Certificate. + - `localities` (optional) are the list of locality names to be used on the Certificate. + - `provinces` (optional) are the list of province names to be used on the Certificate. + - `streetAddresses` (optional) are the list of a street address to be used on the Certificate. + - `postalCodes` (optional) are the list of postal code to be used on the Certificate. + - `serialNumber` (optional) is a serial number to be used on the Certificate. + You can found more details from [Here](https://golang.org/pkg/crypto/x509/pkix/#Name) + + - `duration` (optional) is the period during which the certificate is valid. + - `renewBefore` (optional) is a specifiable time before expiration duration. + - `dnsNames` (optional) is a list of subject alt names to be used in the Certificate. + - `ipAddresses` (optional) is a list of IP addresses to be used in the Certificate. + - `uriSANs` (optional) is a list of URI Subject Alternative Names to be set in the Certificate. + - `emailSANs` (optional) is a list of email Subject Alternative Names to be set in the Certificate. + +### spec.configSecret + +`spec.configSecret` is an optional field that allows users to provide custom configuration for MariaDB. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). 
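For instance, this field can reference a Secret that carries one or more `*.cnf` files; a minimal sketch (the Secret name is illustrative):

```yaml
spec:
  configSecret:
    # illustrative: a Secret containing a custom *.cnf file
    name: md-configuration
```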
+ +### spec.podTemplate + +KubeDB allows providing a template for database pod through `spec.podTemplate`. KubeDB operator will pass the information provided in `spec.podTemplate` to the PetSet created for the MariaDB database. + +KubeDB accepts the following fields to set in `spec.podTemplate:` + +- metadata: + - annotations (pod's annotation) +- controller: + - annotations (petset's annotation) +- spec: + - containers + - volumes + - podPlacementPolicy + - initContainers + - imagePullSecrets + - nodeSelector + - serviceAccountName + - schedulerName + - tolerations + - priorityClassName + - priority + - securityContext + +Uses of some field of `spec.podTemplate` is described below, + +You can check out the full list [here](https://github.com/kmodules/offshoot-api/blob/master/api/v2/types.go#L26C1-L279C1). +Uses of some field of `spec.podTemplate` is described below, + +#### spec.podTemplate.spec.tolerations + +The `spec.podTemplate.spec.tolerations` is an optional field. This can be used to specify the pod's tolerations. + +#### spec.podTemplate.spec.volumes + +The `spec.podTemplate.spec.volumes` is an optional field. This can be used to provide the list of volumes that can be mounted by containers belonging to the pod. + +#### spec.podTemplate.spec.podPlacementPolicy + +`spec.podTemplate.spec.podPlacementPolicy` is an optional field. This can be used to provide the reference of the `podPlacementPolicy`. `name` of the podPlacementPolicy is referred under this attribute. This will be used by our Petset controller to place the db pods throughout the region, zone & nodes according to the policy. It utilizes kubernetes affinity & podTopologySpreadContraints feature to do so. +```yaml +spec: + podPlacementPolicy: + name: default +``` + +#### spec.podTemplate.spec.containers + +The `spec.podTemplate.spec.containers` can be used to provide the list containers and their configurations for to the database pod. some of the fields are described below, + +##### spec.podTemplate.spec.containers[].name +The `spec.podTemplate.spec.containers[].name` field used to specify the name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + +##### spec.podTemplate.spec.containers[].args +`spec.podTemplate.spec.containers[].args` is an optional field. This can be used to provide additional arguments to database installation. + +##### spec.podTemplate.spec.containers[].resources + +`spec.podTemplate.spec.containers[].resources` is an optional field. This can be used to request compute resources required by containers of the database pods. To learn more, visit [here](http://kubernetes.io/docs/user-guide/compute-resources/). + +##### spec.podTemplate.spec.containers[].env + +`spec.podTemplate.spec.env` is an optional field that specifies the environment variables to pass to the MariaDB docker image. To know about supported environment variables, please visit [here](https://hub.docker.com/_/mariadb/). + +Note that, KubeDB does not allow `MYSQL_ROOT_PASSWORD`, `MYSQL_ALLOW_EMPTY_PASSWORD`, `MYSQL_RANDOM_ROOT_PASSWORD`, and `MYSQL_ONETIME_PASSWORD` environment variables to set in `spec.env`. If you want to set the root password, please use `spec.authSecret` instead described earlier. + +If you try to set any of the forbidden environment variables i.e. 
`MYSQL_ROOT_PASSWORD` in MariaDB crd, Kubed operator will reject the request with the following error, + +```bash +Error from server (Forbidden): error when creating "./mariadb.yaml": admission webhook "mariadb.validators.kubedb.com" denied the request: environment variable MYSQL_ROOT_PASSWORD is forbidden to use in MariaDB spec +``` + +Also, note that KubeDB does not allow to update the environment variables as updating them does not have any effect once the database is created. If you try to update environment variables, KubeDB operator will reject the request with the following error, + +```bash +Error from server (BadRequest): error when applying patch: +... +for: "./mariadb.yaml": admission webhook "mariadb.validators.kubedb.com" denied the request: precondition failed for: +...At least one of the following was changed: + apiVersion + kind + name + namespace + spec.authSecret + spec.init + spec.storageType + spec.storage + spec.podTemplate.spec.nodeSelector + spec.podTemplate.spec.env +``` + +#### spec.podTemplate.spec.imagePullSecrets + +`KubeDB` provides the flexibility of deploying MariaDB database from a private Docker registry. `spec.podTemplate.spec.imagePullSecrets` is an optional field that points to secrets to be used for pulling docker image if you are using a private docker registry. + +#### spec.podTemplate.spec.nodeSelector + +`spec.podTemplate.spec.nodeSelector` is an optional field that specifies a map of key-value pairs. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). To learn more, see [here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) . + +#### spec.podTemplate.spec.serviceAccountName + + `serviceAccountName` is an optional field supported by KubeDB Operator (version 0.13.0 and higher) that can be used to specify a custom service account to fine-tune role-based access control. + + If this field is left empty, the KubeDB operator will create a service account name matching MariaDB crd name. Role and RoleBinding that provide necessary access permissions will also be generated automatically for this service account. + + If a service account name is given, but there's no existing service account by that name, the KubeDB operator will create one, and Role and RoleBinding that provide necessary access permissions will also be generated for this service account. + + If a service account name is given, and there's an existing service account by that name, the KubeDB operator will use that existing service account. Since this service account is not managed by KubeDB, users are responsible for providing necessary access permissions manually. + +### spec.serviceTemplates + +You can also provide a template for the services created by KubeDB operator for MariaDB database through `spec.serviceTemplates`. This will allow you to set the type and other properties of the services. 
+ +KubeDB allows following fields to set in `spec.serviceTemplates`: + +- metadata: + - annotations +- spec: + - type + - ports + - clusterIP + - externalIPs + - loadBalancerIP + - loadBalancerSourceRanges + - externalTrafficPolicy + - healthCheckNodePort + - sessionAffinityConfig + +```bash +spec: + version: "10.6.16" + serviceTemplates: + - alias: primary + metadata: + annotations: + kubedb.com/test-annotation: "test" + spec: + type: LoadBalancer + ports: + - port: 3306 +``` + +See [here](https://github.com/kmodules/offshoot-api/blob/kubernetes-1.16.3/api/v1/types.go#L163) to understand these fields in detail. + +### spec.halted + +`spec.halted` is an optional field. Suppose you want to delete the `MariaDB` resources(`PetSet`, `Service` etc.) except `MariaDB` object, `PVCs` and `Secret` then you need to set `spec.halted` to `true`. If you set `spec.halted` to `true` then the `deletionPolicy` in `MariaDB` object will be set `Halt` by-default. + +### spec.deletionPolicy + +`deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `MariaDB` crd or which resources KubeDB should keep or delete when you delete `MariaDB` crd. KubeDB provides the following four termination policies: + +- DoNotTerminate +- Halt +- Delete (`Default`) +- WipeOut + +When `deletionPolicy` is `DoNotTerminate`, KubeDB takes advantage of `ValidationWebhook` feature in Kubernetes 1.9.0 or later clusters to implement `DoNotTerminate` feature. If admission webhook is enabled, `DoNotTerminate` prevents users from deleting the database as long as the `spec.deletionPolicy` is set to `DoNotTerminate`. + +Following table show what KubeDB does when you delete MariaDB crd for different termination policies, + +| Behavior | DoNotTerminate | Halt | Delete | WipeOut | +| ----------------------------------- | :------------: | :------: | :------: | :------: | +| 1. Block Delete operation | ✓ | ✗ | ✗ | ✗ | +| 2. Delete PetSet | ✗ | ✓ | ✓ | ✓ | +| 3. Delete Services | ✗ | ✓ | ✓ | ✓ | +| 4. Delete PVCs | ✗ | ✗ | ✓ | ✓ | +| 5. Delete Secrets | ✗ | ✗ | ✗ | ✓ | +| 6. Delete Snapshots | ✗ | ✗ | ✗ | ✓ | + +If you don't specify `spec.deletionPolicy` KubeDB uses `Delete` termination policy by default. 
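For example, to block any delete operation on the `MariaDB` object until the policy is changed, set it explicitly (sketch):

```yaml
spec:
  deletionPolicy: DoNotTerminate
```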
+ +> For more details you can visit [here](https://appscode.com/blog/post/deletion-policy/) diff --git a/docs/guides/mariadb/configuration/using-config-file/examples/md-custom.yaml b/docs/guides/mariadb/configuration/using-config-file/examples/md-custom.yaml index fa1809ad46..25afd7e47a 100644 --- a/docs/guides/mariadb/configuration/using-config-file/examples/md-custom.yaml +++ b/docs/guides/mariadb/configuration/using-config-file/examples/md-custom.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: "10.5.23" - configSecret: - name: md-configuration + configuration: + secretName: md-configuration storageType: Durable storage: storageClassName: "standard" diff --git a/docs/guides/mariadb/configuration/using-config-file/examples/md-custom.yaml.bak b/docs/guides/mariadb/configuration/using-config-file/examples/md-custom.yaml.bak new file mode 100644 index 0000000000..fa1809ad46 --- /dev/null +++ b/docs/guides/mariadb/configuration/using-config-file/examples/md-custom.yaml.bak @@ -0,0 +1,18 @@ +apiVersion: kubedb.com/v1 +kind: MariaDB +metadata: + name: sample-mariadb + namespace: demo +spec: + version: "10.5.23" + configSecret: + name: md-configuration + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + deletionPolicy: WipeOut diff --git a/docs/guides/mariadb/configuration/using-config-file/index.md b/docs/guides/mariadb/configuration/using-config-file/index.md index c808f04266..c9ed48f4f4 100644 --- a/docs/guides/mariadb/configuration/using-config-file/index.md +++ b/docs/guides/mariadb/configuration/using-config-file/index.md @@ -39,7 +39,7 @@ KubeDB supports providing custom configuration for MariaDB. This tutorial will s MariaDB allows to configure database via configuration file. The default configuration for MariaDB can be found in `/etc/mysql/my.cnf` file. When MariaDB starts, it will look for custom configuration file in `/etc/mysql/conf.d` directory. If configuration file exist, MariaDB instance will use combined startup setting from both `/etc/mysql/my.cnf` and `*.cnf` files in `/etc/mysql/conf.d` directory. This custom configuration will overwrite the existing default one. To know more about configuring MariaDB see [here](https://mariadb.com/kb/en/configuring-mariadb-with-option-files/). -At first, you have to create a config file with `.cnf` extension with your desired configuration. Then you have to put this file into a [volume](https://kubernetes.io/docs/concepts/storage/volumes/). You have to specify this volume in `spec.configSecret` section while creating MariaDB crd. KubeDB will mount this volume into `/etc/mysql/conf.d` directory of the database pod. +At first, you have to create a config file with `.cnf` extension with your desired configuration. Then you have to put this file into a [volume](https://kubernetes.io/docs/concepts/storage/volumes/). You have to specify this volume in `spec.configuration` section while creating MariaDB crd. KubeDB will mount this volume into `/etc/mysql/conf.d` directory of the database pod. In this tutorial, we will configure [max_connections](https://mariadb.com/docs/server/ha-and-performance/optimization-and-tuning/system-variables/server-system-variables#max_connections) and [read_buffer_size](https://mariadb.com/docs/server/ha-and-performance/optimization-and-tuning/system-variables/server-system-variables#read_buffer_size/) via a custom config file. We will use Secret as volume source. @@ -86,7 +86,7 @@ metadata: ... 
``` -Now, create MariaDB crd specifying `spec.configSecret` field. +Now, create MariaDB crd specifying `spec.configuration` field. ```bash $ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/mariadb/configuration/using-config-file/examples/md-custom.yaml @@ -103,8 +103,8 @@ metadata: namespace: demo spec: version: "10.5.23" - configSecret: - name: md-configuration + configuration: + secretName: md-configuration storageType: Durable storage: storageClassName: "standard" diff --git a/docs/guides/mariadb/configuration/using-config-file/index.md.bak b/docs/guides/mariadb/configuration/using-config-file/index.md.bak new file mode 100644 index 0000000000..a8664dcd66 --- /dev/null +++ b/docs/guides/mariadb/configuration/using-config-file/index.md.bak @@ -0,0 +1,183 @@ +--- +title: Run MariaDB with Custom Configuration +menu: + docs_{{ .version }}: + identifier: guides-mariadb-configuration-usingconfigfile + name: Config File + parent: guides-mariadb-configuration + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Using Custom Configuration File + +KubeDB supports providing custom configuration for MariaDB. This tutorial will show you how to use KubeDB to run a MariaDB database with custom configuration. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + + ```bash + $ kubectl create ns demo + namespace/demo created + + $ kubectl get ns demo + NAME STATUS AGE + demo Active 5s + ``` + +> Note: YAML files used in this tutorial are stored in [here](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/guides/mariadb/configuration/using-config-file/examples) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +MariaDB allows to configure database via configuration file. The default configuration for MariaDB can be found in `/etc/mysql/my.cnf` file. When MariaDB starts, it will look for custom configuration file in `/etc/mysql/conf.d` directory. If configuration file exist, MariaDB instance will use combined startup setting from both `/etc/mysql/my.cnf` and `*.cnf` files in `/etc/mysql/conf.d` directory. This custom configuration will overwrite the existing default one. To know more about configuring MariaDB see [here](https://mariadb.com/kb/en/configuring-mariadb-with-option-files/). + +At first, you have to create a config file with `.cnf` extension with your desired configuration. Then you have to put this file into a [volume](https://kubernetes.io/docs/concepts/storage/volumes/). You have to specify this volume in `spec.configSecret` section while creating MariaDB crd. KubeDB will mount this volume into `/etc/mysql/conf.d` directory of the database pod. 
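The Secret backing this volume is simply a set of `*.cnf` entries. A sketch of its shape is given below; the tutorial that follows creates an equivalent Secret with `kubectl create secret generic`:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: md-configuration   # created below from md-config.cnf
  namespace: demo
stringData:
  md-config.cnf: |
    [mysqld]
    max_connections = 200
    read_buffer_size = 1048576
```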
+ +In this tutorial, we will configure [max_connections](https://mariadb.com/docs/server/ha-and-performance/optimization-and-tuning/system-variables/server-system-variables#max_connections) and [read_buffer_size](https://mariadb.com/docs/server/ha-and-performance/optimization-and-tuning/system-variables/server-system-variables#read_buffer_size/) via a custom config file. We will use Secret as volume source. + +## Custom Configuration + +At first, let's create `md-config.cnf` file setting `max_connections` and `read_buffer_size` parameters. + +```bash +cat < md-config.cnf +[mysqld] +max_connections = 200 +read_buffer_size = 1048576 +EOF + +$ cat md-config.cnf +[mysqld] +max_connections = 200 +read_buffer_size = 1048576 +``` + +Here, `read_buffer_size` is set to 1MB in bytes. + +Now, create a Secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo md-configuration --from-file=./md-config.cnf +secret/md-configuration created +``` + +Verify the Secret has the configuration file. + +```yaml +$ kubectl get secret -n demo md-configuration -o yaml +apiVersion: v1 +stringData: + md-config.cnf: | + [mysqld] + max_connections = 200 + read_buffer_size = 1048576 +kind: Secret +metadata: + name: md-configuration + namespace: demo + ... +``` + +Now, create MariaDB crd specifying `spec.configSecret` field. + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/mariadb/configuration/using-config-file/examples/md-custom.yaml +mysql.kubedb.com/custom-mysql created +``` + +Below is the YAML for the MariaDB crd we just created. + +```yaml +apiVersion: kubedb.com/v1 +kind: MariaDB +metadata: + name: sample-mariadb + namespace: demo +spec: + version: "10.5.23" + configuration: + secretName: md-configuration + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + deletionPolicy: WipeOut + +``` + +Now, wait a few minutes. KubeDB operator will create necessary PVC, petset, services, secret etc. If everything goes well, we will see that a pod with the name `sample-mariadb-0` has been created. + +Check that the petset's pod is running + +```bash + $ kubectl get pod -n demo +NAME READY STATUS RESTARTS AGE +sample-mariadb-0 1/1 Running 0 21s + +$ kubectl get mariadb -n demo +NAME VERSION STATUS AGE +sample-mariadb 10.5.23 Ready 71s +``` + +We can see the database is in ready phase so it can accept conncetion. + +Now, we will check if the database has started with the custom configuration we have provided. + +> Read the comment written for the following commands. They contain the instructions and explanations of the commands. + +```bash +# Connceting to the database + $ kubectl exec -it -n demo sample-mariadb-0 -- bash +root@sample-mariadb-0:/ mysql -u${MYSQL_ROOT_USERNAME} -p${MYSQL_ROOT_PASSWORD} +Welcome to the MariaDB monitor. Commands end with ; or \g. +Your MariaDB connection id is 23 +Server version: 10.5.23-MariaDB-1:10.5.23+maria~focal mariadb.org binary distribution + +Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others. + +Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. 
+ +# value of `max_conncetions` is same as provided +MariaDB [(none)]> show variables like 'max_connections'; ++-----------------+-------+ +| Variable_name | Value | ++-----------------+-------+ +| max_connections | 200 | ++-----------------+-------+ +1 row in set (0.001 sec) + +# value of `read_buffer_size` is same as provided +MariaDB [(none)]> show variables like 'read_buffer_size'; ++------------------+---------+ +| Variable_name | Value | ++------------------+---------+ +| read_buffer_size | 1048576 | ++------------------+---------+ +1 row in set (0.001 sec) + +MariaDB [(none)]> exit +Bye +``` + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +$ kubectl delete mariadb -n demo sample-mariadb +mariadb.kubedb.com "sample-mariadb" deleted +$ kubectl delete ns demo +namespace "demo" deleted +``` diff --git a/docs/guides/mariadb/reconfigure/cluster/examples/reconfigure-using-secret.yaml b/docs/guides/mariadb/reconfigure/cluster/examples/reconfigure-using-secret.yaml index 64d28c813e..479f4cac49 100644 --- a/docs/guides/mariadb/reconfigure/cluster/examples/reconfigure-using-secret.yaml +++ b/docs/guides/mariadb/reconfigure/cluster/examples/reconfigure-using-secret.yaml @@ -8,5 +8,5 @@ spec: databaseRef: name: sample-mariadb configuration: - configSecret: - name: new-md-configuration + configuration: + secretName: new-md-configuration diff --git a/docs/guides/mariadb/reconfigure/cluster/examples/reconfigure-using-secret.yaml.bak b/docs/guides/mariadb/reconfigure/cluster/examples/reconfigure-using-secret.yaml.bak new file mode 100644 index 0000000000..64d28c813e --- /dev/null +++ b/docs/guides/mariadb/reconfigure/cluster/examples/reconfigure-using-secret.yaml.bak @@ -0,0 +1,12 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: MariaDBOpsRequest +metadata: + name: mdops-reconfigure-config + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: sample-mariadb + configuration: + configSecret: + name: new-md-configuration diff --git a/docs/guides/mariadb/reconfigure/cluster/examples/sample-mariadb-config.yaml b/docs/guides/mariadb/reconfigure/cluster/examples/sample-mariadb-config.yaml index 882f722362..8ee616b3dd 100644 --- a/docs/guides/mariadb/reconfigure/cluster/examples/sample-mariadb-config.yaml +++ b/docs/guides/mariadb/reconfigure/cluster/examples/sample-mariadb-config.yaml @@ -6,8 +6,8 @@ metadata: spec: version: "10.6.16" replicas: 3 - configSecret: - name: md-configuration + configuration: + secretName: md-configuration storageType: Durable storage: storageClassName: "standard" diff --git a/docs/guides/mariadb/reconfigure/cluster/examples/sample-mariadb-config.yaml.bak b/docs/guides/mariadb/reconfigure/cluster/examples/sample-mariadb-config.yaml.bak new file mode 100644 index 0000000000..882f722362 --- /dev/null +++ b/docs/guides/mariadb/reconfigure/cluster/examples/sample-mariadb-config.yaml.bak @@ -0,0 +1,19 @@ +apiVersion: kubedb.com/v1 +kind: MariaDB +metadata: + name: sample-mariadb + namespace: demo +spec: + version: "10.6.16" + replicas: 3 + configSecret: + name: md-configuration + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + deletionPolicy: WipeOut diff --git a/docs/guides/mariadb/reconfigure/cluster/index.md b/docs/guides/mariadb/reconfigure/cluster/index.md index 09c7339120..7b95fa4044 100644 --- a/docs/guides/mariadb/reconfigure/cluster/index.md +++ b/docs/guides/mariadb/reconfigure/cluster/index.md @@ -61,7 +61,7 @@ $ kubectl 
create secret generic -n demo md-configuration --from-file=./md-config secret/md-configuration created ``` -In this section, we are going to create a MariaDB object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `MariaDB` CR that we are going to create, +In this section, we are going to create a MariaDB object specifying `spec.configuration` field to apply this custom configuration. Below is the YAML of the `MariaDB` CR that we are going to create, ```yaml apiVersion: kubedb.com/v1 @@ -72,8 +72,8 @@ metadata: spec: version: "10.6.16" replicas: 3 - configSecret: - name: md-configuration + configuration: + secretName: md-configuration storageType: Durable storage: storageClassName: "standard" @@ -184,15 +184,15 @@ spec: databaseRef: name: sample-mariadb configuration: - configSecret: - name: new-md-configuration + configuration: + secretName: new-md-configuration ``` Here, - `spec.databaseRef.name` specifies that we are reconfiguring `sample-mariadb` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configuration.configSecret.name` specifies the name of the new secret. +- `spec.configuration.configuration.secretName` specifies the name of the new secret. Let's create the `MariaDBOpsRequest` CR we have shown above, diff --git a/docs/guides/mariadb/reconfigure/cluster/index.md.bak b/docs/guides/mariadb/reconfigure/cluster/index.md.bak new file mode 100644 index 0000000000..0383cfba59 --- /dev/null +++ b/docs/guides/mariadb/reconfigure/cluster/index.md.bak @@ -0,0 +1,596 @@ +--- +title: Reconfigure MariaDB Cluster +menu: + docs_{{ .version }}: + identifier: guides-mariadb-reconfigure-cluster + name: Cluster + parent: guides-mariadb-reconfigure + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure MariaDB Cluster Database + +This guide will show you how to use `KubeDB` Enterprise operator to reconfigure a MariaDB Cluster. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Community and Enterprise operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [MariaDB](/docs/guides/mariadb/concepts/mariadb) + - [MariaDB Cluster](/docs/guides/mariadb/clustering/galera-cluster) + - [MariaDBOpsRequest](/docs/guides/mariadb/concepts/opsrequest) + - [Reconfigure Overview](/docs/guides/mariadb/reconfigure/overview) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +Now, we are going to deploy a `MariaDB` Cluster using a supported version by `KubeDB` operator. Then we are going to apply `MariaDBOpsRequest` to reconfigure its configuration. + +### Prepare MariaDB Cluster + +Now, we are going to deploy a `MariaDB` Cluster database with version `10.6.16`. + +### Deploy MariaDB + +At first, we will create `md-config.cnf` file containing required configuration settings. + +```ini +$ cat md-config.cnf +[mysqld] +max_connections = 200 +read_buffer_size = 1048576 +``` + +Here, `max_connections` is set to `200`, whereas the default value is `151`. Likewise, `read_buffer_size` has the deafult value `131072`. + +Now, we will create a secret with this configuration file. 
+ +```bash +$ kubectl create secret generic -n demo md-configuration --from-file=./md-config.cnf +secret/md-configuration created +``` + +In this section, we are going to create a MariaDB object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `MariaDB` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: MariaDB +metadata: + name: sample-mariadb + namespace: demo +spec: + version: "10.6.16" + replicas: 3 + configuration: + secretName: md-configuration + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + deletionPolicy: WipeOut +``` + +Let's create the `MariaDB` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/mariadb/reconfigure/cluster/examples/sample-mariadb-config.yaml +mariadb.kubedb.com/sample-mariadb created +``` + +Now, wait until `sample-mariadb` has status `Ready`. i.e, + +```bash +$ kubectl get mariadb -n demo +NAME VERSION STATUS AGE +sample-mariadb 10.6.16 Ready 71s +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +First we need to get the username and password to connect to a mariadb instance, + +```bash +$ kubectl get secrets -n demo sample-mariadb-auth -o jsonpath='{.data.\username}' | base64 -d +root + +$ kubectl get secrets -n demo sample-mariadb-auth -o jsonpath='{.data.\password}' | base64 -d +nrKuxni0wDSMrgwy +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +```bash +$ kubectl exec -it -n demo sample-mariadb-0 -- bash +root@sample-mariadb-0:/ mysql -u${MYSQL_ROOT_USERNAME} -p${MYSQL_ROOT_PASSWORD} +Welcome to the MariaDB monitor. Commands end with ; or \g. +Your MariaDB connection id is 23 +Server version: 10.6.16-MariaDB-1:10.6.16+maria~focal mariadb.org binary distribution + +Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others. + +Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. + +# value of `max_conncetions` is same as provided +MariaDB [(none)]> show variables like 'max_connections'; ++-----------------+-------+ +| Variable_name | Value | ++-----------------+-------+ +| max_connections | 200 | ++-----------------+-------+ +1 row in set (0.001 sec) + +# value of `read_buffer_size` is same as provided +MariaDB [(none)]> show variables like 'read_buffer_size'; ++------------------+---------+ +| Variable_name | Value | ++------------------+---------+ +| read_buffer_size | 1048576 | ++------------------+---------+ +1 row in set (0.001 sec) + +MariaDB [(none)]> exit +Bye +``` + +As we can see from the configuration of ready mariadb, the value of `max_connections` has been set to `200` and `read_buffer_size` has been set to `1048576`. + +### Reconfigure using new config secret + +Now we will reconfigure this database to set `max_connections` to `250` and `read_buffer_size` to `122880`. + +Now, we will create new file `new-md-config.cnf` containing required configuration settings. + +```ini +$ cat new-md-config.cnf +[mysqld] +max_connections = 250 +read_buffer_size = 122880 +``` + +Then, we will create a new secret with this configuration file. 
+ +```bash +$ kubectl create secret generic -n demo new-md-configuration --from-file=./new-md-config.cnf +secret/new-md-configuration created +``` + +#### Create MariaDBOpsRequest + +Now, we will use this secret to replace the previous secret using a `MariaDBOpsRequest` CR. The `MariaDBOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MariaDBOpsRequest +metadata: + name: mdops-reconfigure-config + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: sample-mariadb + configuration: + configuration: + secretName: new-md-configuration +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `sample-mariadb` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.configuration.secretName` specifies the name of the new secret. + +Let's create the `MariaDBOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/mariadb/reconfigure/cluster/examples/reconfigure-using-secret.yaml +mariadbopsrequest.ops.kubedb.com/mdops-reconfigure-config created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Enterprise operator will update the `configSecret` of `MariaDB` object. + +Let's wait for `MariaDBOpsRequest` to be `Successful`. Run the following command to watch `MariaDBOpsRequest` CR, + +```bash +$ kubectl get mariadbopsrequest --all-namespaces +NAMESPACE NAME TYPE STATUS AGE +demo mdops-reconfigure-config Reconfigure Successful 3m8s +``` + +We can see from the above output that the `MariaDBOpsRequest` has succeeded. If we describe the `MariaDBOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe mariadbopsrequest -n demo mdops-reconfigure-config +Name: mdops-reconfigure-config +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: MariaDBOpsRequest +Metadata: + Creation Timestamp: 2022-06-10T04:43:50Z + Generation: 1 + Resource Version: 1123451 + UID: 27a73fc6-1d25-4019-8975-f7d4daf782b7 +Spec: + Configuration: + Config Secret: + Name: new-md-configuration + Database Ref: + Name: sample-mariadb + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2022-06-10T04:43:50Z + Message: Controller has started to Progress the MariaDBOpsRequest: demo/mdops-reconfigure-config + Observed Generation: 1 + Reason: OpsRequestProgressingStarted + Status: True + Type: Progressing + Last Transition Time: 2022-06-10T04:47:25Z + Message: Successfully restarted MariaDB pods for MariaDBOpsRequest: demo/mdops-reconfigure-config + Observed Generation: 1 + Reason: SuccessfullyRestatedPetSet + Status: True + Type: RestartPetSetPods + Last Transition Time: 2022-06-10T04:47:30Z + Message: Successfully reconfigured MariaDB for MariaDBOpsRequest: demo/mdops-reconfigure-config + Observed Generation: 1 + Reason: SuccessfullyDBReconfigured + Status: True + Type: DBReady + Last Transition Time: 2022-06-10T04:47:30Z + Message: Controller has successfully reconfigure the MariaDB demo/mdops-reconfigure-config + Observed Generation: 1 + Reason: OpsRequestProcessedSuccessfully + Status: True + Type: Successful + Observed Generation: 3 + Phase: Successful + +``` + +Now let's connect to a mariadb instance and run a mariadb internal command to check the new configuration we have provided. 
+ +```bash +$ kubectl exec -it -n demo sample-mariadb-0 -- bash +root@sample-mariadb-0:/ mysql -u${MYSQL_ROOT_USERNAME} -p${MYSQL_ROOT_PASSWORD} +Welcome to the MariaDB monitor. Commands end with ; or \g. +Your MariaDB connection id is 23 +Server version: 10.6.16-MariaDB-1:10.6.16+maria~focal mariadb.org binary distribution + +Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others. + +Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. + +# value of `max_conncetions` is same as provided +MariaDB [(none)]> show variables like 'max_connections'; ++-----------------+-------+ +| Variable_name | Value | ++-----------------+-------+ +| max_connections | 250 | ++-----------------+-------+ +1 row in set (0.001 sec) + +# value of `read_buffer_size` is same as provided +MariaDB [(none)]> show variables like 'read_buffer_size'; ++------------------+---------+ +| Variable_name | Value | ++------------------+---------+ +| read_buffer_size | 122880 | ++------------------+---------+ +1 row in set (0.001 sec) + +MariaDB [(none)]> exit +Bye +``` + +As we can see from the configuration has changed, the value of `max_connections` has been changed from `200` to `250` and and the `read_buffer_size` has been changed `1048576` to `122880`. So the reconfiguration of the database is successful. + + +### Reconfigure Existing Config Secret + +Now, we will create a new `MariaDBOpsRequest` to reconfigure our existing secret `new-md-configuration` by modifying our `new-md-config.cnf` file using `applyConfig`. The `MariaDBOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MariaDBOpsRequest +metadata: + name: mdops-reconfigure-apply-config + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: sample-mariadb + configuration: + applyConfig: + new-md-config.cnf: | + [mysqld] + max_connections = 230 + read_buffer_size = 1064960 + innodb-config.cnf: | + [mysqld] + innodb_log_buffer_size = 17408000 +``` +> Note: You can modify multiple fields of your current configuration using `applyConfig`. If you don't have any secrets then `applyConfig` will create a secret for you. Here, we modified value of our two existing fields which are `max_connections` and `read_buffer_size` also, we modified a new field `innodb_log_buffer_size` of our configuration. + +Here, +- `spec.databaseRef.name` specifies that we are reconfiguring `sample-mariadb` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.applyConfig` contains the configuration of existing or newly created secret. + +Before applying this yaml we are going to check the existing value of our new field, + +```bash +$ kubectl exec -it sample-mariadb-0 -n demo -c mariadb -- bash +root@sample-mariadb-0:/# mysql -u${MYSQL_ROOT_USERNAME} -p${MYSQL_ROOT_PASSWORD} +Welcome to the MariaDB monitor. Commands end with ; or \g. +Your MariaDB connection id is 23 +Server version: 10.6.16-MariaDB-1:10.6.16+maria~focal mariadb.org binary distribution + +Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others. + +Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. 
+ +MariaDB [(none)]> show variables like 'innodb_log_buffer_size'; ++------------------------+----------+ +| Variable_name | Value | ++------------------------+----------+ +| innodb_log_buffer_size | 16777216 | ++------------------------+----------+ +1 row in set (0.001 sec) + +MariaDB [(none)]> exit +Bye +``` +Here, we can see the default value for `innodb_log_buffer_size` is `16777216`. + +Let's create the `MariaDBOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/mariadb/reconfigure/cluster/examples/mdops-reconfigure-apply-config.yaml +mariadbopsrequest.ops.kubedb.com/mdops-reconfigure-apply-config created +``` + + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Enterprise operator will update the `configSecret` of `MariaDB` object. + +Let's wait for `MariaDBOpsRequest` to be `Successful`. Run the following command to watch `MariaDBOpsRequest` CR, + +```bash +$ kubectl get mariadbopsrequest mdops-reconfigure-apply-config -n demo +NAME TYPE STATUS AGE +mdops-reconfigure-apply-config Reconfigure Successful 4m59s +``` + +We can see from the above output that the `MariaDBOpsRequest` has succeeded. If we describe the `MariaDBOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe mariadbopsrequest -n demo mdops-reconfigure-apply-config +Name: mdops-reconfigure-apply-config +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: MariaDBOpsRequest +Metadata: + Creation Timestamp: 2022-06-10T09:13:49Z + Generation: 1 + Resource Version: 14120 + UID: eb8d5df5-a0ce-4011-890c-c18c0200b5ac +Spec: + Configuration: + Apply Config: + innodb-config.cnf: [mysqld] +innodb_log_buffer_size = 17408000 + + new-md-config.cnf: [mysqld] +max_connections = 230 +read_buffer_size = 1064960 + + Database Ref: + Name: sample-mariadb + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2022-06-10T09:13:49Z + Message: Controller has started to Progress the MariaDBOpsRequest: demo/mdops-reconfigure-apply-config + Observed Generation: 1 + Reason: OpsRequestProgressingStarted + Status: True + Type: Progressing + Last Transition Time: 2022-06-10T09:13:49Z + Message: Successfully prepared user provided custom config secret + Observed Generation: 1 + Reason: PrepareSecureCustomConfig + Status: True + Type: PrepareCustomConfig + Last Transition Time: 2022-06-10T09:17:24Z + Message: Successfully restarted MariaDB pods for MariaDBOpsRequest: demo/mdops-reconfigure-apply-config + Observed Generation: 1 + Reason: SuccessfullyRestatedPetSet + Status: True + Type: RestartPetSetPods + Last Transition Time: 2022-06-10T09:17:29Z + Message: Successfully reconfigured MariaDB for MariaDBOpsRequest: demo/mdops-reconfigure-apply-config + Observed Generation: 1 + Reason: SuccessfullyDBReconfigured + Status: True + Type: DBReady + Last Transition Time: 2022-06-10T09:17:29Z + Message: Controller has successfully reconfigure the MariaDB demo/mdops-reconfigure-apply-config + Observed Generation: 1 + Reason: OpsRequestProcessedSuccessfully + Status: True + Type: Successful + Observed Generation: 3 + Phase: Successful +``` + +Now let's connect to a mariadb instance and run a mariadb internal command to check the new configuration we have provided. 
+ +```bash +$ kubectl exec -it -n demo sample-mariadb-0 -- bash +root@sample-mariadb-0:/ mysql -u${MYSQL_ROOT_USERNAME} -p${MYSQL_ROOT_PASSWORD} +Welcome to the MariaDB monitor. Commands end with ; or \g. +Your MariaDB connection id is 23 +Server version: 10.6.16-MariaDB-1:10.6.16+maria~focal mariadb.org binary distribution + +Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others. + +Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. + +# value of `max_conncetions` is same as provided +MariaDB [(none)]> show variables like 'max_connections'; ++-----------------+-------+ +| Variable_name | Value | ++-----------------+-------+ +| max_connections | 230 | ++-----------------+-------+ +1 row in set (0.001 sec) + +# value of `read_buffer_size` is same as provided +MariaDB [(none)]> show variables like 'read_buffer_size'; ++------------------+---------+ +| Variable_name | Value | ++------------------+---------+ +| read_buffer_size | 1064960 | ++------------------+---------+ +1 row in set (0.001 sec) + +# value of `innodb_log_buffer_size` is same as provided +MariaDB [(none)]> show variables like 'innodb_log_buffer_size'; ++------------------------+----------+ +| Variable_name | Value | ++------------------------+----------+ +| innodb_log_buffer_size | 17408000 | ++------------------------+----------+ +1 row in set (0.001 sec) + +MariaDB [(none)]> exit +Bye +``` + +As we can see from above the configuration has been changed, the value of `max_connections` has been changed from `250` to `230` and the `read_buffer_size` has been changed `122880` to `1064960` also, `innodb_log_buffer_size` has been changed from `16777216` to `17408000`. So the reconfiguration of the `sample-mariadb` database is successful. + + +### Remove Custom Configuration + +We can also remove exisiting custom config using `MariaDBOpsRequest`. Provide `true` to field `spec.configuration.removeCustomConfig` and make an Ops Request to remove existing custom configuration. + +#### Create MariaDBOpsRequest + +Lets create an `MariaDBOpsRequest` having `spec.configuration.removeCustomConfig` is equal `true`, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MariaDBOpsRequest +metadata: + name: mdops-reconfigure-remove + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: sample-mariadb + configuration: + removeCustomConfig: true +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `mdops-reconfigure-remove` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.removeCustomConfig` is a bool field that should be `true` when you want to remove existing custom configuration. + +Let's create the `MariaDBOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/mariadb/reconfigure/cluster/examples/reconfigure-remove.yaml +mariadbopsrequest.ops.kubedb.com/mdops-reconfigure-remove created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Enterprise operator will update the `configSecret` of `MariaDB` object. + +Let's wait for `MariaDBOpsRequest` to be `Successful`. 
Run the following command to watch `MariaDBOpsRequest` CR, + +```bash +$ kubectl get mariadbopsrequest --all-namespaces +NAMESPACE NAME TYPE STATUS AGE +demo mdops-reconfigure-remove Reconfigure Successful 2m1s +``` + +Now let's connect to a mariadb instance and run a mariadb internal command to check the new configuration we have provided. + +```bash +$ kubectl exec -it -n demo sample-mariadb-0 -- bash +root@sample-mariadb-0:/ mysql -u${MYSQL_ROOT_USERNAME} -p${MYSQL_ROOT_PASSWORD} +Welcome to the MariaDB monitor. Commands end with ; or \g. +Your MariaDB connection id is 23 +Server version: 10.6.16-MariaDB-1:10.6.16+maria~focal mariadb.org binary distribution + +Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others. + +Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. + +# value of `max_conncetions` is default +MariaDB [(none)]> show variables like 'max_connections'; ++-----------------+-------+ +| Variable_name | Value | ++-----------------+-------+ +| max_connections | 151 | ++-----------------+-------+ +1 row in set (0.001 sec) + +# value of `read_buffer_size` is default +MariaDB [(none)]> show variables like 'read_buffer_size'; ++------------------+---------+ +| Variable_name | Value | ++------------------+---------+ +| read_buffer_size | 131072 | ++------------------+---------+ +1 row in set (0.001 sec) + +# value of `innodb_log_buffer_size` is default +MariaDB [(none)]> show variables like 'innodb_log_buffer_size'; ++------------------------+----------+ +| Variable_name | Value | ++------------------------+----------+ +| innodb_log_buffer_size | 16777216 | ++------------------------+----------+ +1 row in set (0.001 sec) + +MariaDB [(none)]> exit +Bye +``` + +As we can see from the configuration has changed to its default value. So removal of existing custom configuration using `MariaDBOpsRequest` is successful. 
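To recap, the three reconfiguration styles shown in this guide differ only in the `spec.configuration` block of the `MariaDBOpsRequest`. A condensed sketch, with exactly one of the three options to be used per request:

```yaml
spec:
  type: Reconfigure
  databaseRef:
    name: sample-mariadb
  configuration:
    # option 1: replace the custom config with a new secret
    configuration:
      secretName: new-md-configuration
    # option 2: or patch the current config in place
    # applyConfig:
    #   new-md-config.cnf: |
    #     [mysqld]
    #     max_connections = 230
    # option 3: or drop the existing custom config
    # removeCustomConfig: true
```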
+ +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +$ kubectl delete mariadb -n demo sample-mariadb +$ kubectl delete mariadbopsrequest -n demo mdops-reconfigure-config mdops-reconfigure-apply-config mdops-reconfigure-remove +$ kubectl delete ns demo +``` diff --git a/docs/guides/mariadb/reconfigure/standalone/examples/reconfigure-using-secret.yaml b/docs/guides/mariadb/reconfigure/standalone/examples/reconfigure-using-secret.yaml index 64d28c813e..479f4cac49 100644 --- a/docs/guides/mariadb/reconfigure/standalone/examples/reconfigure-using-secret.yaml +++ b/docs/guides/mariadb/reconfigure/standalone/examples/reconfigure-using-secret.yaml @@ -8,5 +8,5 @@ spec: databaseRef: name: sample-mariadb configuration: - configSecret: - name: new-md-configuration + configuration: + secretName: new-md-configuration diff --git a/docs/guides/mariadb/reconfigure/standalone/examples/reconfigure-using-secret.yaml.bak b/docs/guides/mariadb/reconfigure/standalone/examples/reconfigure-using-secret.yaml.bak new file mode 100644 index 0000000000..64d28c813e --- /dev/null +++ b/docs/guides/mariadb/reconfigure/standalone/examples/reconfigure-using-secret.yaml.bak @@ -0,0 +1,12 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: MariaDBOpsRequest +metadata: + name: mdops-reconfigure-config + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: sample-mariadb + configuration: + configSecret: + name: new-md-configuration diff --git a/docs/guides/mariadb/reconfigure/standalone/examples/sample-mariadb-config.yaml b/docs/guides/mariadb/reconfigure/standalone/examples/sample-mariadb-config.yaml index 6eca7205ba..96db96b0bc 100644 --- a/docs/guides/mariadb/reconfigure/standalone/examples/sample-mariadb-config.yaml +++ b/docs/guides/mariadb/reconfigure/standalone/examples/sample-mariadb-config.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: "10.6.16" - configSecret: - name: md-configuration + configuration: + secretName: md-configuration storageType: Durable storage: storageClassName: "standard" diff --git a/docs/guides/mariadb/reconfigure/standalone/examples/sample-mariadb-config.yaml.bak b/docs/guides/mariadb/reconfigure/standalone/examples/sample-mariadb-config.yaml.bak new file mode 100644 index 0000000000..6eca7205ba --- /dev/null +++ b/docs/guides/mariadb/reconfigure/standalone/examples/sample-mariadb-config.yaml.bak @@ -0,0 +1,19 @@ +apiVersion: kubedb.com/v1 +kind: MariaDB +metadata: + name: sample-mariadb + namespace: demo +spec: + version: "10.6.16" + configSecret: + name: md-configuration + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + deletionPolicy: WipeOut + diff --git a/docs/guides/mariadb/reconfigure/standalone/index.md b/docs/guides/mariadb/reconfigure/standalone/index.md index 50417386e7..576a158e52 100644 --- a/docs/guides/mariadb/reconfigure/standalone/index.md +++ b/docs/guides/mariadb/reconfigure/standalone/index.md @@ -60,7 +60,7 @@ $ kubectl create secret generic -n demo md-configuration --from-file=./md-config secret/md-configuration created ``` -In this section, we are going to create a MariaDB object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `MariaDB` CR that we are going to create, +In this section, we are going to create a MariaDB object specifying `spec.configuration` field to apply this custom configuration. 
Below is the YAML of the `MariaDB` CR that we are going to create, ```yaml apiVersion: kubedb.com/v1 @@ -70,8 +70,8 @@ metadata: namespace: demo spec: version: "10.6.16" - configSecret: - name: md-configuration + configuration: + secretName: md-configuration storageType: Durable storage: storageClassName: "standard" @@ -180,15 +180,15 @@ spec: databaseRef: name: sample-mariadb configuration: - configSecret: - name: new-md-configuration + configuration: + secretName: new-md-configuration ``` Here, - `spec.databaseRef.name` specifies that we are reconfiguring `mdops-reconfigure-config` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configuration.configSecret.name` specifies the name of the new secret. +- `spec.configuration.configuration.secretName` specifies the name of the new secret. Let's create the `MariaDBOpsRequest` CR we have shown above, diff --git a/docs/guides/mariadb/reconfigure/standalone/index.md.bak b/docs/guides/mariadb/reconfigure/standalone/index.md.bak new file mode 100644 index 0000000000..eb0de3aadc --- /dev/null +++ b/docs/guides/mariadb/reconfigure/standalone/index.md.bak @@ -0,0 +1,587 @@ +--- +title: Reconfigure MariaDB Standalone +menu: + docs_{{ .version }}: + identifier: guides-mariadb-reconfigure-standalone + name: Standalone + parent: guides-mariadb-reconfigure + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure MariaDB Standalone Database + +This guide will show you how to use `KubeDB` Enterprise operator to reconfigure a MariaDB Standalone. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Community and Enterprise operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [MariaDB](/docs/guides/mariadb/concepts/mariadb) + - [MariaDBOpsRequest](/docs/guides/mariadb/concepts/opsrequest) + - [Reconfigure Overview](/docs/guides/mariadb/reconfigure/overview) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +Now, we are going to deploy a `MariaDB` Standalone using a supported version by `KubeDB` operator. Then we are going to apply `MariaDBOpsRequest` to reconfigure its configuration. + +### Prepare MariaDB Standalone + +Now, we are going to deploy a `MariaDB` Standalone database with version `10.6.16`. + +### Deploy MariaDB + +At first, we will create `md-config.cnf` file containing required configuration settings. + +```ini +$ cat md-config.cnf +[mysqld] +max_connections = 200 +read_buffer_size = 1048576 +``` + +Here, `max_connections` is set to `200`, whereas the default value is `151`. Likewise, `read_buffer_size` has the deafult value `131072`. + +Now, we will create a secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo md-configuration --from-file=./md-config.cnf +secret/md-configuration created +``` + +In this section, we are going to create a MariaDB object specifying `spec.configSecret` field to apply this custom configuration. 
Below is the YAML of the `MariaDB` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: MariaDB +metadata: + name: sample-mariadb + namespace: demo +spec: + version: "10.6.16" + configuration: + secretName: md-configuration + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + deletionPolicy: WipeOut +``` + +Let's create the `MariaDB` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/mariadb/reconfigure/standalone/examples/sample-mariadb-config.yaml +mariadb.kubedb.com/sample-mariadb created +``` + +Now, wait until `sample-mariadb` has status `Ready`. i.e, + +```bash +$ kubectl get mariadb -n demo +NAME VERSION STATUS AGE +sample-mariadb 10.6.16 Ready 61s +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +First we need to get the username and password to connect to a mariadb instance, + +```bash +$ kubectl get secrets -n demo sample-mariadb-auth -o jsonpath='{.data.\username}' | base64 -d +root + +$ kubectl get secrets -n demo sample-mariadb-auth -o jsonpath='{.data.\password}' | base64 -d +PlWA6JNLkNFudl4I +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +```bash +$ kubectl exec -it -n demo sample-mariadb-0 -c mariadb -- bash +root@sample-mariadb-0:/# mysql -u${MYSQL_ROOT_USERNAME} -p${MYSQL_ROOT_PASSWORD} +Welcome to the MariaDB monitor. Commands end with ; or \g. +Your MariaDB connection id is 11 +Server version: 10.6.16-MariaDB-1:10.6.16+maria~focal mariadb.org binary distribution + +Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others. + +Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. + +MariaDB [(none)]> show variables like 'max_connections'; ++-----------------+-------+ +| Variable_name | Value | ++-----------------+-------+ +| max_connections | 200 | ++-----------------+-------+ +1 row in set (0.001 sec) + +MariaDB [(none)]> show variables like 'read_buffer_size'; ++------------------+---------+ +| Variable_name | Value | ++------------------+---------+ +| read_buffer_size | 1048576 | ++------------------+---------+ +1 row in set (0.001 sec) + +MariaDB [(none)]> exit +Bye +``` + +As we can see from the configuration of ready mariadb, the value of `max_connections` has been set to `200` and `read_buffer_size` has been set to `1048576`. + +### Reconfigure using new config secret + +Now we will reconfigure this database to set `max_connections` to `250` and `read_buffer_size` to `122880`. + +Now, we will create new file `new-md-config.cnf` containing required configuration settings. + +```ini +$ cat new-md-config.cnf +[mysqld] +max_connections = 250 +read_buffer_size = 122880 +``` + +Then, we will create a new secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo new-md-configuration --from-file=./new-md-config.cnf +secret/new-md-configuration created +``` + +#### Create MariaDBOpsRequest + +Now, we will use this secret to replace the previous secret using a `MariaDBOpsRequest` CR. 
The `MariaDBOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MariaDBOpsRequest +metadata: + name: mdops-reconfigure-config + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: sample-mariadb + configuration: + configuration: + secretName: new-md-configuration +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `mdops-reconfigure-config` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.configuration.secretName` specifies the name of the new secret. + +Let's create the `MariaDBOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/mariadb/reconfigure/standalone/examples/reconfigure-using-secret.yaml +mariadbopsrequest.ops.kubedb.com/mdops-reconfigure-config created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Enterprise operator will update the `configSecret` of `MariaDB` object. + +Let's wait for `MariaDBOpsRequest` to be `Successful`. Run the following command to watch `MariaDBOpsRequest` CR, + +```bash +$ kubectl get mariadbopsrequest --all-namespaces +NAMESPACE NAME TYPE STATUS AGE +demo mdops-reconfigure-config Reconfigure Successful 2m8s +``` + +We can see from the above output that the `MariaDBOpsRequest` has succeeded. If we describe the `MariaDBOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe mariadbopsrequest -n demo mdops-reconfigure-config +Name: mdops-reconfigure-config +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: MariaDBOpsRequest +Metadata: + Creation Timestamp: 2022-06-14T10:56:01Z + Generation: 1 + Resource Version: 21589 + UID: 43997fe8-fa12-4d38-a29f-d101889d4e72 +Spec: + Configuration: + Config Secret: + Name: new-md-configuration + Database Ref: + Name: sample-mariadb + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2022-06-14T10:56:01Z + Message: Controller has started to Progress the MariaDBOpsRequest: demo/mdops-reconfigure-config + Observed Generation: 1 + Reason: OpsRequestProgressingStarted + Status: True + Type: Progressing + Last Transition Time: 2022-06-14T10:56:11Z + Message: Successfully restarted MariaDB pods for MariaDBOpsRequest: demo/mdops-reconfigure-config + Observed Generation: 1 + Reason: SuccessfullyRestatedPetSet + Status: True + Type: RestartPetSetPods + Last Transition Time: 2022-06-14T10:56:16Z + Message: Successfully reconfigured MariaDB for MariaDBOpsRequest: demo/mdops-reconfigure-config + Observed Generation: 1 + Reason: SuccessfullyDBReconfigured + Status: True + Type: DBReady + Last Transition Time: 2022-06-14T10:56:16Z + Message: Controller has successfully reconfigure the MariaDB demo/mdops-reconfigure-config + Observed Generation: 1 + Reason: OpsRequestProcessedSuccessfully + Status: True + Type: Successful + Observed Generation: 3 + Phase: Successful +``` + +Now let's connect to a mariadb instance and run a mariadb internal command to check the new configuration we have provided. + +```bash +$ $ kubectl exec -it -n demo sample-mariadb-0 -c mariadb -- bash +root@sample-mariadb-0:/# mysql -u${MYSQL_ROOT_USERNAME} -p${MYSQL_ROOT_PASSWORD} +Welcome to the MariaDB monitor. Commands end with ; or \g. 
+Your MariaDB connection id is 21 +Server version: 10.6.16-MariaDB-1:10.6.16+maria~focal mariadb.org binary distribution + +Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others. + +Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. + +MariaDB [(none)]> show variables like 'max_connections'; ++-----------------+-------+ +| Variable_name | Value | ++-----------------+-------+ +| max_connections | 250 | ++-----------------+-------+ +1 row in set (0.001 sec) + +MariaDB [(none)]> show variables like 'read_buffer_size'; ++------------------+--------+ +| Variable_name | Value | ++------------------+--------+ +| read_buffer_size | 122880 | ++------------------+--------+ +1 row in set (0.001 sec) + +MariaDB [(none)]> exit +Bye +``` + +As we can see from the configuration has changed, the value of `max_connections` has been changed from `200` to `250` and and the `read_buffer_size` has been changed `1048576` to `122880`. So the reconfiguration of the database is successful. + + +### Reconfigure Existing Config Secret + +Now, we will create a new `MariaDBOpsRequest` to reconfigure our existing secret `new-md-configuration` by modifying our `new-md-config.cnf` file using `applyConfig`. The `MariaDBOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MariaDBOpsRequest +metadata: + name: mdops-reconfigure-apply-config + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: sample-mariadb + configuration: + applyConfig: + new-md-config.cnf: | + [mysqld] + max_connections = 230 + read_buffer_size = 1064960 + innodb-config.cnf: | + [mysqld] + innodb_log_buffer_size = 17408000 +``` +> Note: You can modify multiple fields of your current configuration using `applyConfig`. If you don't have any secrets then `applyConfig` will create a secret for you. Here, we modified value of our two existing fields which are `max_connections` and `read_buffer_size` also, we modified a new field `innodb_log_buffer_size` of our configuration. + +Here, +- `spec.databaseRef.name` specifies that we are reconfiguring `sample-mariadb` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.applyConfig` contains the configuration of existing or newly created secret. + +Before applying this yaml we are going to check the existing value of our new field, + +```bash +$ kubectl exec -it sample-mariadb-0 -n demo -c mariadb -- bash +root@sample-mariadb-0:/# mysql -u${MYSQL_ROOT_USERNAME} -p${MYSQL_ROOT_PASSWORD} +Welcome to the MariaDB monitor. Commands end with ; or \g. +Your MariaDB connection id is 21 +Server version: 10.6.16-MariaDB-1:10.6.16+maria~focal mariadb.org binary distribution + +Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others. + +Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. + +MariaDB [(none)]> show variables like 'innodb_log_buffer_size'; ++------------------------+----------+ +| Variable_name | Value | ++------------------------+----------+ +| innodb_log_buffer_size | 16777216 | ++------------------------+----------+ +1 row in set (0.001 sec) + +MariaDB [(none)]> exit +Bye +``` +Here, we can see the default value for `innodb_log_buffer_size` is `16777216`. 
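+
+If you want to double-check what `applyConfig` will be merged into, you can also inspect the current contents of the custom config secret first. A quick sanity check (assuming the secret was created from `new-md-config.cnf` exactly as shown earlier in this guide):
+
+```bash
+$ kubectl get secret -n demo new-md-configuration -o jsonpath='{.data.new-md-config\.cnf}' | base64 -d
+[mysqld]
+max_connections = 250
+read_buffer_size = 122880
+```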
+ +Let's create the `MariaDBOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/mariadb/reconfigure/standalone/examples/mdops-reconfigure-apply-config.yaml +mariadbopsrequest.ops.kubedb.com/mdops-reconfigure-apply-config created +``` + + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Enterprise operator will update the `configSecret` of `MariaDB` object. + +Let's wait for `MariaDBOpsRequest` to be `Successful`. Run the following command to watch `MariaDBOpsRequest` CR, + +```bash +$ kubectl get mariadbopsrequest mdops-reconfigure-apply-config -n demo +NAME TYPE STATUS AGE +mdops-reconfigure-apply-config Reconfigure Successful 3m11s +``` + +We can see from the above output that the `MariaDBOpsRequest` has succeeded. If we describe the `MariaDBOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe mariadbopsrequest -n demo mdops-reconfigure-apply-config +Name: mdops-reconfigure-apply-config +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: MariaDBOpsRequest +Metadata: + Creation Timestamp: 2022-06-14T09:13:49Z + Generation: 1 + Resource Version: 14120 + UID: eb8d5df5-a0ce-4011-890c-c18c0200b5ac +Spec: + Configuration: + Apply Config: + innodb-config.cnf: [mysqld] +innodb_log_buffer_size = 17408000 + + new-md-config.cnf: [mysqld] +max_connections = 230 +read_buffer_size = 1064960 + + Database Ref: + Name: sample-mariadb + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2022-06-14T09:13:49Z + Message: Controller has started to Progress the MariaDBOpsRequest: demo/mdops-reconfigure-apply-config + Observed Generation: 1 + Reason: OpsRequestProgressingStarted + Status: True + Type: Progressing + Last Transition Time: 2022-06-14T09:13:49Z + Message: Successfully prepared user provided custom config secret + Observed Generation: 1 + Reason: PrepareSecureCustomConfig + Status: True + Type: PrepareCustomConfig + Last Transition Time: 2022-06-14T09:17:24Z + Message: Successfully restarted MariaDB pods for MariaDBOpsRequest: demo/mdops-reconfigure-apply-config + Observed Generation: 1 + Reason: SuccessfullyRestatedPetSet + Status: True + Type: RestartPetSetPods + Last Transition Time: 2022-06-14T09:17:29Z + Message: Successfully reconfigured MariaDB for MariaDBOpsRequest: demo/mdops-reconfigure-apply-config + Observed Generation: 1 + Reason: SuccessfullyDBReconfigured + Status: True + Type: DBReady + Last Transition Time: 2022-06-14T09:17:29Z + Message: Controller has successfully reconfigure the MariaDB demo/mdops-reconfigure-apply-config + Observed Generation: 1 + Reason: OpsRequestProcessedSuccessfully + Status: True + Type: Successful + Observed Generation: 3 + Phase: Successful +``` + +Now let's connect to a mariadb instance and run a mariadb internal command to check the new configuration we have provided. + +```bash +$ kubectl exec -it -n demo sample-mariadb-0 -c mariadb -- bash +root@sample-mariadb-0:/# mysql -u${MYSQL_ROOT_USERNAME} -p${MYSQL_ROOT_PASSWORD} +Welcome to the MariaDB monitor. Commands end with ; or \g. +Your MariaDB connection id is 24 +Server version: 10.6.16-MariaDB-1:10.6.16+maria~focal mariadb.org binary distribution + +Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others. + +Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. 
+ +MariaDB [(none)]> show variables like 'max_connections'; ++-----------------+-------+ +| Variable_name | Value | ++-----------------+-------+ +| max_connections | 230 | ++-----------------+-------+ +1 row in set (0.001 sec) + +MariaDB [(none)]> show variables like 'read_buffer_size'; ++------------------+---------+ +| Variable_name | Value | ++------------------+---------+ +| read_buffer_size | 1064960 | ++------------------+---------+ +1 row in set (0.002 sec) + +MariaDB [(none)]> show variables like 'innodb_log_buffer_size'; ++------------------------+----------+ +| Variable_name | Value | ++------------------------+----------+ +| innodb_log_buffer_size | 17408000 | ++------------------------+----------+ +1 row in set (0.001 sec) + +MariaDB [(none)]> exit +Bye +``` + +As we can see from above the configuration has been changed, the value of `max_connections` has been changed from `250` to `230` and the `read_buffer_size` has been changed `122880` to `1064960` also, `innodb_log_buffer_size` has been changed from `16777216` to `17408000`. So the reconfiguration of the `sample-mariadb` database is successful. + + + +### Remove Custom Configuration + +We can also remove exisiting custom config using `MariaDBOpsRequest`. Provide `true` to field `spec.configuration.removeCustomConfig` and make an Ops Request to remove existing custom configuration. + +#### Create MariaDBOpsRequest + +Lets create an `MariaDBOpsRequest` having `spec.configuration.removeCustomConfig` is equal `true`, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MariaDBOpsRequest +metadata: + name: mdops-reconfigure-remove + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: sample-mariadb + configuration: + removeCustomConfig: true +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `mdops-reconfigure-remove` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.removeCustomConfig` is a bool field that should be `true` when you want to remove existing custom configuration. + +Let's create the `MariaDBOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/mariadb/reconfigure/standalone/examples/reconfigure-remove.yaml +mariadbopsrequest.ops.kubedb.com/mdops-reconfigure-remove created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Enterprise operator will update the `configSecret` of `MariaDB` object. + +Let's wait for `MariaDBOpsRequest` to be `Successful`. Run the following command to watch `MariaDBOpsRequest` CR, + +```bash +$ kubectl get mariadbopsrequest --all-namespaces +NAMESPACE NAME TYPE STATUS AGE +demo mdops-reconfigure-remove Reconfigure Successful 2m5s +``` + +Now let's connect to a mariadb instance and run a mariadb internal command to check the new configuration we have provided. + +```bash +$ kubectl exec -it -n demo sample-mariadb-0 -- bash +root@sample-mariadb-0:/ mysql -u${MYSQL_ROOT_USERNAME} -p${MYSQL_ROOT_PASSWORD} +Welcome to the MariaDB monitor. Commands end with ; or \g. +Your MariaDB connection id is 8 +Server version: 10.6.16-MariaDB-1:10.6.16+maria~focal mariadb.org binary distribution + +Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others. + +Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. 
+ +# value of `max_conncetions` is default +MariaDB [(none)]> show variables like 'max_connections'; ++-----------------+-------+ +| Variable_name | Value | ++-----------------+-------+ +| max_connections | 151 | ++-----------------+-------+ +1 row in set (0.001 sec) + +# value of `read_buffer_size` is default +MariaDB [(none)]> show variables like 'read_buffer_size'; ++------------------+---------+ +| Variable_name | Value | ++------------------+---------+ +| read_buffer_size | 131072 | ++------------------+---------+ +1 row in set (0.001 sec) + +# value of `innodb_log_buffer_size` is default +MariaDB [(none)]> show variables like 'innodb_log_buffer_size'; ++------------------------+----------+ +| Variable_name | Value | ++------------------------+----------+ +| innodb_log_buffer_size | 16777216 | ++------------------------+----------+ +1 row in set (0.001 sec) + +MariaDB [(none)]> exit +Bye +``` + +As we can see from the configuration has changed to its default value. So removal of existing custom configuration using `MariaDBOpsRequest` is successful. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +$ kubectl delete mariadb -n demo sample-mariadb +$ kubectl delete mariadbopsrequest -n demo mdops-reconfigure-config mdops-reconfigure-apply-config mdops-reconfigure-remove +$ kubectl delete ns demo +``` \ No newline at end of file diff --git a/docs/guides/memcached/concepts/memcached.md b/docs/guides/memcached/concepts/memcached.md index fce448f28f..ed89935f34 100644 --- a/docs/guides/memcached/concepts/memcached.md +++ b/docs/guides/memcached/concepts/memcached.md @@ -38,8 +38,8 @@ spec: labels: app: kubedb interval: 10s - configSecret: - name: mc-custom-config + configuration: + secretName: mc-custom-config podTemplate: metadata: annotations: @@ -102,9 +102,9 @@ Memcached managed by KubeDB can be monitored with builtin-Prometheus and Prometh - [Monitor Memcached with builtin Prometheus](/docs/guides/memcached/monitoring/using-builtin-prometheus.md) - [Monitor Memcached with Prometheus operator](/docs/guides/memcached/monitoring/using-prometheus-operator.md) -### spec.configSecret +### spec.configuration -`spec.configSecret` is an optional field that allows users to provide custom configuration for Memcached. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). So you can use any Kubernetes supported volume source such as `configMap`, `secret`, `azureDisk` etc. To learn more about how to use a custom configuration file see [here](/docs/guides/memcached/custom-configuration/using-config-file.md). +`spec.configuration` is an optional field that allows users to provide custom configuration for Memcached. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). So you can use any Kubernetes supported volume source such as `configMap`, `secret`, `azureDisk` etc. To learn more about how to use a custom configuration file see [here](/docs/guides/memcached/custom-configuration/using-config-file.md). 
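+
+For example, a minimal sketch of pointing a Memcached object at a custom config secret (the secret name `mc-custom-config` is only an illustration, matching the sample spec at the top of this page):
+
+```yaml
+apiVersion: kubedb.com/v1
+kind: Memcached
+metadata:
+  name: mc1
+  namespace: demo
+spec:
+  replicas: 1
+  version: 1.6.22
+  configuration:
+    secretName: mc-custom-config
+  deletionPolicy: Delete
+```
+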
### spec.podTemplate diff --git a/docs/guides/memcached/concepts/memcached.md.bak b/docs/guides/memcached/concepts/memcached.md.bak new file mode 100644 index 0000000000..451e4a35e3 --- /dev/null +++ b/docs/guides/memcached/concepts/memcached.md.bak @@ -0,0 +1,275 @@ +--- +title: Memcached +menu: + docs_{{ .version }}: + identifier: mc-memcached-concepts + name: Memcached + parent: mc-concepts-memcached + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Memcached + +## What is Memcached + +`Memcached` is a Kubernetes `Custom Resource Definitions` (CRD). It provides declarative configuration for [Memcached](https://memcached.org/) in a Kubernetes native way. You only need to describe the desired database configuration in a Memcached object, and the KubeDB operator will create Kubernetes objects in the desired state for you. + +## Memcached Spec + +As with all other Kubernetes objects, a Memcached needs `apiVersion`, `kind`, and `metadata` fields. It also needs a `.spec` section. Below is an example of a Memcached object. + +```yaml +apiVersion: kubedb.com/v1 +kind: Memcached +metadata: + name: mc1 + namespace: demo +spec: + replicas: 1 + version: 1.6.22 + monitor: + agent: prometheus.io/operator + prometheus: + serviceMonitor: + labels: + app: kubedb + interval: 10s + configuration: + secretName: mc-custom-config + podTemplate: + metadata: + annotations: + passMe: ToDatabasePod + controller: + annotations: + passMe: ToDeployment + spec: + serviceAccountName: my-service-account + schedulerName: my-scheduler + nodeSelector: + disktype: ssd + imagePullSecrets: + - name: myregistrykey + containers: + - name: memcached + args: + - "-u memcache" + env: + - name: TEST_ENV + value: "value" + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + serviceTemplates: + - alias: primary + metadata: + annotations: + passMe: ToService + spec: + type: NodePort + ports: + - name: http + port: 9200 + deletionPolicy: Delete +``` + +### spec.replicas + +`spec.replicas` is an optional field that specifies the number of desired Instances/Replicas of Memcached server. If you do not specify .spec.replicas, then it defaults to 1. + +KubeDB uses `PodDisruptionBudget` to ensure that majority of these replicas are available during [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions) so that quorum is maintained. + +### spec.version + +`spec.version` is a required field specifying the name of the [MemcachedVersion](/docs/guides/memcached/concepts/memcached-version.md) crd where the docker images are specified. Currently, when you install KubeDB, it creates the following `MemcachedVersion` crds, + +- `1.5.22` +- `1.6.22` +- `1.6.29` + +### spec.monitor + +Memcached managed by KubeDB can be monitored with builtin-Prometheus and Prometheus operator out-of-the-box. To learn more, + +- [Monitor Memcached with builtin Prometheus](/docs/guides/memcached/monitoring/using-builtin-prometheus.md) +- [Monitor Memcached with Prometheus operator](/docs/guides/memcached/monitoring/using-prometheus-operator.md) + +### spec.configSecret + +`spec.configSecret` is an optional field that allows users to provide custom configuration for Memcached. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). 
So you can use any Kubernetes supported volume source such as `configMap`, `secret`, `azureDisk` etc. To learn more about how to use a custom configuration file see [here](/docs/guides/memcached/custom-configuration/using-config-file.md). + +### spec.podTemplate + +KubeDB allows providing a template for database pod through `spec.podTemplate`. KubeDB operator will pass the information provided in `spec.podTemplate` to the Petset created for Memcached server. + +KubeDB accept following fields to set in `spec.podTemplate:` + +- metadata + - annotations (pod's annotation) +- controller + - annotations (petset's annotation) +- spec: + - containers + - volumes + - podPlacementPolicy + - initContainers + - containers + - podPlacementPolicy + - imagePullSecrets + - nodeSelector + - serviceAccountName + - schedulerName + - tolerations + - priorityClassName + - priority + - securityContext + +Uses of some field of `spec.podTemplate` is described below, + +You can check out the full list [here](https://github.com/kmodules/offshoot-api/blob/master/api/v2/types.go#L26C1-L279C1). +Uses of some field of `spec.podTemplate` is described below, + +#### spec.podTemplate.spec.tolerations + +The `spec.podTemplate.spec.tolerations` is an optional field. This can be used to specify the pod's tolerations. + +#### spec.podTemplate.spec.volumes + +The `spec.podTemplate.spec.volumes` is an optional field. This can be used to provide the list of volumes that can be mounted by containers belonging to the pod. + +#### spec.podTemplate.spec.podPlacementPolicy + +`spec.podTemplate.spec.podPlacementPolicy` is an optional field. This can be used to provide the reference of the `podPlacementPolicy`. `name` of the podPlacementPolicy is referred under this attribute. This will be used by our Petset controller to place the db pods throughout the region, zone & nodes according to the policy. It utilizes kubernetes affinity & podTopologySpreadContraints feature to do so. +```yaml +spec: + podPlacementPolicy: + name: default +``` + +#### spec.podTemplate.spec.containers + +The `spec.podTemplate.spec.containers` can be used to provide the list containers and their configurations for to the database pod. some of the fields are described below, + +##### spec.podTemplate.spec.containers[].name +The `spec.podTemplate.spec.containers[].name` field used to specify the name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + +##### spec.podTemplate.spec.containers[].args +`spec.podTemplate.spec.containers[].args` is an optional field. This can be used to provide additional arguments to database installation. + +##### spec.podTemplate.spec.containers[].env + +`.env` is an optional field that specifies the environment variables to pass to the Memcached containers. + +Note that, KubeDB does not allow to update the environment variables. If you try to update environment variables, KubeDB operator will reject the request with following error, + +```ini +Error from server (BadRequest): error when applying patch: +... +for: "./mc.yaml": admission webhook "memcached.validators.kubedb.com" denied the request: precondition failed for: +... +At least one of the following was changed: + apiVersion + kind + name + namespace + spec.podTemplate.spec.nodeSelector + spec.podTemplate.spec.env +``` + +##### spec.podTemplate.spec.containers[].resources + +`spec.podTemplate.spec.containers[].resources` is an optional field. 
This can be used to request compute resources required by containers of the database pods. To learn more, visit [here](http://kubernetes.io/docs/user-guide/compute-resources/). + +#### spec.podTemplate.spec.imagePullSecrets + +`KubeDB` provides the flexibility of deploying Memcached server from a private Docker registry. To learn how to deploym Memcached from a private registry, please visit [here](/docs/guides/memcached/private-registry/using-private-registry.md). + +#### spec.podTemplate.spec.nodeSelector + +`spec.nodeSelector` is an optional field that specifies a map of key-value pairs. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). To learn more, see [here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) . + +#### spec.podTemplate.spec.serviceAccountName + +`serviceAccountName` is an optional field supported by KubeDB Operator (version 0.13.0 and higher) that can be used to specify a custom service account to fine tune role based access control. + +If this field is left empty, the KubeDB operator will create a service account name matching Memcached crd name. Role and RoleBinding that provide necessary access permissions will also be generated automatically for this service account. + +If a service account name is given, but there's no existing service account by that name, the KubeDB operator will create one, and Role and RoleBinding that provide necessary access permissions will also be generated for this service account. + +If a service account name is given, and there's an existing service account by that name, the KubeDB operator will use that existing service account. Since this service account is not managed by KubeDB, users are responsible for providing necessary access permissions manually. Follow the guide [here](/docs/guides/memcached/custom-rbac/using-custom-rbac.md) to grant necessary permissions in this scenario. + +#### spec.podTemplate.spec.resources + +`spec.resources` is an optional field. This can be used to request compute resources required by the database pods. To learn more, visit [here](http://kubernetes.io/docs/user-guide/compute-resources/). + +### spec.serviceTemplates + +You can also provide a template for the services created by KubeDB operator for Memcached server through `spec.serviceTemplates`. This will allow you to set the type and other properties of the services. + +KubeDB allows following fields to set in `spec.serviceTemplates`: + +- `alias` represents the identifier of the service. It has the following possible value: + - `primary` is used for the primary service identification. + - `standby` is used for the secondary service identification. + - `stats` is used for the exporter service identification. + +- metadata: + - annotations +- spec: + - type + - ports + - clusterIP + - externalIPs + - loadBalancerIP + - loadBalancerSourceRanges + - externalTrafficPolicy + - healthCheckNodePort + - sessionAffinityConfig + +See [here](https://github.com/kmodules/offshoot-api/blob/kubernetes-1.16.3/api/v1/types.go#L163) to understand these fields in details. + + +### spec.deletionPolicy + +`deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `Memcached` crd or which resources KubeDB should keep or delete when you delete `Memcached` crd. 
KubeDB provides following four termination policies: + +- DoNotTerminate +- Delete (`Default`) +- WipeOut + +When `deletionPolicy` is `DoNotTerminate`, KubeDB takes advantage of `ValidationWebhook` feature in Kubernetes 1.9.0 or later clusters to implement `DoNotTerminate` feature. If admission webhook is enabled, `DoNotTerminate` prevents users from deleting the database as long as the `spec.deletionPolicy` is set to `DoNotTerminate`. + +Following table show what KubeDB does when you delete Memcached crd for different termination policies, + +| Behavior | DoNotTerminate | Delete | WipeOut | +| ---------------------------| :------------: | :------: | :------: | +| 1. Block Delete operation | ✓ | ✗ | ✗ | +| 2. Delete PetSet | ✗ | ✓ | ✓ | +| 3. Delete Services | ✗ | ✓ | ✓ | +| 4. Delete Secrets | ✗ | ✗ | ✓ | + +If you don't specify `spec.deletionPolicy` KubeDB uses `Delete` termination policy by default. + +> For more details you can visit [here](https://appscode.com/blog/post/deletion-policy/) + +## spec.helathChecker +It defines the attributes for the health checker. +- spec.healthChecker.periodSeconds specifies how often to perform the health check. +- spec.healthChecker.timeoutSeconds specifies the number of seconds after which the probe times out. +- spec.healthChecker.failureThreshold specifies minimum consecutive failures for the healthChecker to be considered failed. +- spec.healthChecker.disableWriteCheck specifies whether to disable the writeCheck or not. + +Know details about KubeDB Health checking from this blog post. + +## Next Steps + +- Learn how to use KubeDB to run a Memcached server [here](/docs/guides/memcached/README.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/memcached/custom-configuration/using-config-file.md b/docs/guides/memcached/custom-configuration/using-config-file.md index 514118a85b..753dd3a2d1 100644 --- a/docs/guides/memcached/custom-configuration/using-config-file.md +++ b/docs/guides/memcached/custom-configuration/using-config-file.md @@ -41,7 +41,7 @@ Memcached does not allows to configuration via any file. However, configuration To know more about configuring Memcached server see [here](https://github.com/memcached/memcached/wiki/ConfiguringServer). -At first, you have to create a config file named `memcached.conf` with your desired configuration. Then you have to put this file into a [volume](https://kubernetes.io/docs/concepts/storage/volumes/). You have to specify this volume in `spec.configSecret` section while creating Memcached crd. KubeDB will mount this volume into `/usr/config` directory of the database pod. +At first, you have to create a config file named `memcached.conf` with your desired configuration. Then you have to put this file into a [volume](https://kubernetes.io/docs/concepts/storage/volumes/). You have to specify this volume in `spec.configuration` section while creating Memcached crd. KubeDB will mount this volume into `/usr/config` directory of the database pod. In this tutorial, we will configure [max_connections](https://github.com/memcached/memcached/blob/ee171109b3afe1f30ff053166d205768ce635342/doc/protocol.txt#L672) and [limit_maxbytes](https://github.com/memcached/memcached/blob/ee171109b3afe1f30ff053166d205768ce635342/doc/protocol.txt#L720) via secret. @@ -86,7 +86,7 @@ metadata: type: Opaque ``` -Now, create Memcached crd specifying `spec.configSecret` field. +Now, create Memcached crd specifying `spec.configuration` field. 
```bash $ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/memcached/configuration/mc-custom.yaml @@ -104,8 +104,8 @@ metadata: spec: replicas: 1 version: "1.6.22" - configSecret: - name: mc-configuration + configuration: + secretName: mc-configuration podTemplate: spec: containers: diff --git a/docs/guides/memcached/custom-configuration/using-config-file.md.bak b/docs/guides/memcached/custom-configuration/using-config-file.md.bak new file mode 100644 index 0000000000..729f053fd9 --- /dev/null +++ b/docs/guides/memcached/custom-configuration/using-config-file.md.bak @@ -0,0 +1,182 @@ +--- +title: Run Memcached with Custom Configuration +menu: + docs_{{ .version }}: + identifier: mc-using-config-file-configuration + name: Customize Configurations + parent: custom-configuration + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Using Custom Configuration File + +KubeDB supports providing custom configuration for Memcached. This tutorial will show you how to use KubeDB to run Memcached with custom configuration. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + + ```bash + $ kubectl create ns demo + namespace/demo created + + $ kubectl get ns demo + NAME STATUS AGE + demo Active 5s + ``` + +> Note: YAML files used in this tutorial are stored in [docs/examples/memcached](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/memcached) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +Memcached does not allows to configuration via any file. However, configuration parameters can be set as arguments while starting the memcached docker image. To keep similarity with other KubeDB supported databases which support configuration through a config file, KubeDB has added an additional executable script on top of the official memcached docker image. This script parses the configuration file then set them as arguments of memcached binary. + +To know more about configuring Memcached server see [here](https://github.com/memcached/memcached/wiki/ConfiguringServer). + +At first, you have to create a config file named `memcached.conf` with your desired configuration. Then you have to put this file into a [volume](https://kubernetes.io/docs/concepts/storage/volumes/). You have to specify this volume in `spec.configSecret` section while creating Memcached crd. KubeDB will mount this volume into `/usr/config` directory of the database pod. + +In this tutorial, we will configure [max_connections](https://github.com/memcached/memcached/blob/ee171109b3afe1f30ff053166d205768ce635342/doc/protocol.txt#L672) and [limit_maxbytes](https://github.com/memcached/memcached/blob/ee171109b3afe1f30ff053166d205768ce635342/doc/protocol.txt#L720) via secret. 
+ +Create a secret with custom configuration file: +```yaml +apiVersion: v1 +stringData: + memcached.conf: | + --conn-limit=500 + --memory-limit=128 +kind: Secret +metadata: + name: mc-configuration + namespace: demo + resourceVersion: "4505" +``` +Here, --con-limit means max simultaneous connections which is default value is 1024. +and --memory-limit means item memory in megabytes which default value is 64. + +```bash + $ kubectl apply -f mc-configuration.yaml +secret/mc-configuration created +``` + +Let's get the mc-configuration `secret` with custom configuration: + +```yaml +$ kubectl get secret -n demo mc-configuration -o yaml +apiVersion: v1 +data: + memcached.conf: LS1jb25uLWxpbWl0PTUwMAotLW1lbW9yeS1saW1pdD01MTIK +kind: Secret +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","kind":"Secret","metadata":{"annotations":{},"name":"mc-configuration","namespace":"demo","resourceVersion":"4505"},"stringData":{"memcached.conf":"--conn-limit=500\n--memory-limit=512\n"}} + creationTimestamp: "2024-08-26T12:19:54Z" + name: mc-configuration + namespace: demo + resourceVersion: "4580860" + uid: 02d41fc0-590e-44d1-ae95-2ee8f9632d36 +type: Opaque +``` + +Now, create Memcached crd specifying `spec.configSecret` field. + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/memcached/configuration/mc-custom.yaml +memcached.kubedb.com/custom-memcached created +``` + +Below is the YAML for the Memcached crd we just created. + +```yaml +apiVersion: kubedb.com/v1 +kind: Memcached +metadata: + name: custom-memcached + namespace: demo +spec: + replicas: 1 + version: "1.6.22" + configuration: + secretName: mc-configuration + podTemplate: + spec: + containers: + - name: memcached + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 250m + memory: 64Mi + deletionPolicy: WipeOut +``` + +Now, wait a few minutes. KubeDB operator will create necessary petset, services etc. If everything goes well, we will see that a pod with the name `custom-memcached-0` has been created. + +Check if the database is ready + +```bash +$ kubectl get mc -n demo +NAME VERSION STATUS AGE +custom-memcached 1.6.22 Ready 17m +``` + +Now, we will check if the database has started with the custom configuration we have provided. We will use [stats](https://github.com/memcached/memcached/wiki/ConfiguringServer#inspecting-running-configuration) command to check the configuration. + +We will connect to `custom-memcached-0` pod from local-machine using port-frowarding. + +```bash +$ kubectl port-forward -n demo custom-memcached-0 11211 +Forwarding from 127.0.0.1:11211 -> 11211 +Forwarding from [::1]:11211 -> 11211 +``` + +Now, connect to the memcached server from a different terminal through `telnet`. + +```bash +$ telnet 127.0.0.1 11211 +Trying 127.0.0.1... +Connected to 127.0.0.1. +Escape character is '^]'. +stats +... +STAT max_connections 500 +... +STAT limit_maxbytes 134217728 +... +END +``` + +Here, `limit_maxbytes` is represented in bytes. 
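+
+The numbers line up with the configuration supplied above: `--conn-limit=500` appears directly as `max_connections 500`, and `--memory-limit=128` is interpreted in megabytes, so the reported `limit_maxbytes` of `134217728` is simply 128 × 1024 × 1024 bytes.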
+ +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl patch -n demo mc/custom-memcached -p '{"spec":{"deletionPolicy":"WipeOut"}}' --type="merge" +kubectl delete -n demo mc/custom-memcached + +kubectl patch -n demo drmn/custom-memcached -p '{"spec":{"wipeOut":true}}' --type="merge" +kubectl delete -n demo drmn/custom-memcached + +kubectl delete -n demo secret mc-configuration + +kubectl delete ns demo +``` + +If you would like to uninstall KubeDB operator, please follow the steps [here](/docs/setup/README.md). + +## Next Steps + +- Learn how to use KubeDB to run a Memcached server [here](/docs/guides/memcached/README.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/memcached/reconfigure/reconfigure.md b/docs/guides/memcached/reconfigure/reconfigure.md index c08f926739..518c34c7fa 100644 --- a/docs/guides/memcached/reconfigure/reconfigure.md +++ b/docs/guides/memcached/reconfigure/reconfigure.md @@ -65,7 +65,7 @@ $ kubectl create -f mc-configuration secret/mc-configuration created ``` -In this section, we are going to create a Memcached object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `Memcahced` CR that we are going to create, +In this section, we are going to create a Memcached object specifying `spec.configuration` field to apply this custom configuration. Below is the YAML of the `Memcahced` CR that we are going to create, ```yaml apiVersion: kubedb.com/v1 @@ -76,8 +76,8 @@ metadata: spec: replicas: 1 version: "1.6.22" - configSecret: - name: mc-configuration + configuration: + secretName: mc-configuration deletionPolicy: WipeOut ``` @@ -162,15 +162,15 @@ spec: databaseRef: name: memcd-quickstart configuration: - configSecret: - name: new-configuration + configuration: + secretName: new-configuration ``` Here, - `spec.databaseRef.name` specifies that we are reconfiguring `memcd-quickstart` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configuration.configSecret.name` specifies the name of the new secret. +- `spec.configuration.configuration.secretName` specifies the name of the new secret. Let's create the `MemcachedOpsRequest` CR we have shown above, diff --git a/docs/guides/memcached/reconfigure/reconfigure.md.bak b/docs/guides/memcached/reconfigure/reconfigure.md.bak new file mode 100644 index 0000000000..98b5776484 --- /dev/null +++ b/docs/guides/memcached/reconfigure/reconfigure.md.bak @@ -0,0 +1,472 @@ +--- +title: Reconfigure Memcached Database +menu: + docs_{{ .version }}: + identifier: mc-reconfigure + name: Reconfigure + parent: reconfigure + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure Memcached Database + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a Memcached database. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). 
+ +- You should be familiar with the following `KubeDB` concepts: + - [Memcached](/docs/guides/memcached/concepts/memcached.md) + - [MemcachedOpsRequest](/docs/guides/memcached/concepts/memcached-opsrequest.md) + - [Reconfigure Overview](/docs/guides/memcached/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/memcached](/docs/examples/memcached) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `Memcached` database using a supported version by `KubeDB` operator. Then we are going to apply `MemcachedOpsRequest` to reconfigure its configuration. + +### Prepare Memcached Database + +Now, we are going to deploy a `Memcached` database with version `1.6.22`. + +### Deploy Memcached + +At first, we will create `secret` named mc-configuration containing required configuration settings. + +```yaml +apiVersion: v1 +stringData: + memcached.conf: | + --conn-limit=500 +kind: Secret +metadata: + name: mc-configuration + namespace: demo + resourceVersion: "4505" +``` +Here, `maxclients` is set to `500`, whereas the default value is `1024`. + +Now, we will apply the secret with custom configuration. +```bash +$ kubectl create -f mc-configuration +secret/mc-configuration created +``` + +In this section, we are going to create a Memcached object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `Memcahced` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: Memcached +metadata: + name: memcd-quickstart + namespace: demo +spec: + replicas: 1 + version: "1.6.22" + configuration: + secretName: mc-configuration + deletionPolicy: WipeOut +``` + +Let's create the `Memcached` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/memcached/reconfigure/sample-memcached-config.yaml +memcached.kubedb.com/memcd-quickstart created +``` + +Now, wait until `memcd-quickstart` has status `Ready`. i.e, + +```bash +$ kubectl get mc -n demo +NAME VERSION STATUS AGE +memcd-quickstart 1.6.22 Ready 23s +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +We will connect to `memcd-quickstart-0` pod from local-machine using port-frowarding. + +```bash +$ kubectl port-forward -n demo memcd-quickstart-0 11211 +Forwarding from 127.0.0.1:11211 -> 11211 +Forwarding from [::1]:11211 -> 11211 +``` + +Now, connect to the memcached server from a different terminal through `telnet`. + +```bash +$ telnet 127.0.0.1 11211 +Trying 127.0.0.1... +Connected to 127.0.0.1. +Escape character is '^]'. +stats +... +STAT max_connections 500 +... +END +``` + +As we can see from the configuration of running memcached, the value of `maxclients` has been set to `500`. + +### Reconfigure using new secret + +Now we will reconfigure this database to set `maxclients` to `2000`. + +At first, we will create `secret` named new-configuration containing required configuration settings. + +```yaml +apiVersion: v1 +stringData: + memcached.conf: | + --conn-limit=2000 +kind: Secret +metadata: + name: new-configuration + namespace: demo + resourceVersion: "4505" +``` +Here, `maxclients` is set to `2000`. + +Now, we will apply the secret with custom configuration. 
+```bash +$ kubectl create -f new-configuration +secret/new-configuration created +``` + +#### Create MemcachedOpsRequest + +Now, we will use this secret to replace the previous secret using a `MemcachedOpsRequest` CR. The `MemcachedOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MemcachedOpsRequest +metadata: + name: memcd-reconfig + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: memcd-quickstart + configuration: + configuration: + secretName: new-configuration +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `memcd-quickstart` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.configuration.secretName` specifies the name of the new secret. + +Let's create the `MemcachedOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/memcached/reconfigure/ops-request-reconfigure.yaml +memcachedopsrequest.ops.kubedb.com/memcd-reconfig created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `Memcached` object. + +Let's wait for `MemcachedOpsRequest` to be `Successful`. Run the following command to watch `MemcahcedOpsRequest` CR, + +```bash +$ watch kubectl get memcahcedopsrequest -n demo +Every 2.0s: kubectl get memcachedopsrequest -n demo +NAME TYPE STATUS AGE +memcd-reconfig Reconfigure Successful 1m +``` + +We can see from the above output that the `MemcachedOpsRequest` has succeeded. If we describe the `MemcachedOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe memcachedopsrequest -n demo memcd-reconfig +Name: memcd-reconfig +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: MemcachedOpsRequest +Metadata: + Creation Timestamp: 2024-09-02T11:59:59Z + Generation: 1 + Resource Version: 166566 + UID: bb4a1057-ccfa-49c9-8d07-e03cb631a0c9 +Spec: + Apply: IfReady + Configuration: + Config Secret: + Name: new-configuration + Database Ref: + Name: memcd-quickstart + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-09-02T11:59:59Z + Message: Memcached ops request is reconfiguring the cluster + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-09-02T12:00:02Z + Message: reconfiguring memcached + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-09-02T12:00:07Z + Message: evict pod; ConditionStatus:True; PodName:memcd-quickstart-0 + Observed Generation: 1 + Status: True + Type: EvictPod--memcd-quickstart-0 + Last Transition Time: 2024-09-02T12:00:07Z + Message: is pod ready; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: IsPodReady + Last Transition Time: 2024-09-02T12:00:12Z + Message: is pod ready; ConditionStatus:True; PodName:memcd-quickstart-0 + Observed Generation: 1 + Status: True + Type: IsPodReady--memcd-quickstart-0 + Last Transition Time: 2024-09-02T12:00:12Z + Message: Restarted pods after reconfiguration + Observed Generation: 1 + Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2024-09-02T12:00:13Z + Message: Successfully completed the modification process. 
+ Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 51s KubeDB Ops-manager Operator Pausing Memcached demo/memcd-quickstart + Normal RestartPods 38s KubeDB Ops-manager Operator Restarted pods after reconfiguration + Normal ResumeDatabase 38s KubeDB Ops-manager Operator Resuming Memcached demo/memcd-quickstart + Normal ResumeDatabase 38s KubeDB Ops-manager Operator Successfully resumed Memcached demo/memcd-quickstart + Normal Successful 38s KubeDB Ops-manager Operator Successfully Reconfigured Database + +``` + +Now need to check the new configuration we have provided. + +Now, wait until `memcd-quickstart` has status `Ready`. i.e, + +```bash +$ kubectl get mc -n demo +NAME VERSION STATUS AGE +memcd-quickstart 1.6.22 Ready 20s +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +We will connect to `memcd-quickstart-0` pod from local-machine using port-frowarding. + +```bash +$ kubectl port-forward -n demo memcd-quickstart-0 11211 +Forwarding from 127.0.0.1:11211 -> 11211 +Forwarding from [::1]:11211 -> 11211 +``` + +Now, connect to the memcached server from a different terminal through `telnet`. + +```bash +$ telnet 127.0.0.1 11211 +Trying 127.0.0.1... +Connected to 127.0.0.1. +Escape character is '^]'. +stats +... +STAT max_connections 2000 +... +END +``` + +As we can see from the configuration of running memcached, the value of `maxclients` has been updated to `2000`. + +As we can see from the configuration of running memcached, the value of `maxclients` has been changed from `500` to `2000`. So, the reconfiguration of the database is successful. + + +### Reconfigure using apply config + +Now we will reconfigure this database again to set `maxclients` to `3000`. This time we won't use a new secret. We will use the `applyConfig` field of the `MemcachedOpsRequest`. This will merge the new config in the existing secret. + +#### Create MemcachedOpsRequest + +Now, we will use the new configuration in the `data` field in the `MemcahcedOpsRequest` CR. The `MemcachedOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MemcachedOpsRequest +metadata: + name: memcd-reconfig + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: memcd-quickstart + configuration: + applyConfig: + memcached.conf: | + --conn-limit=3000 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `memcd-quickstart` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.applyConfig` specifies the new configuration that will be merged in the existing secret. + +Let's create the `MemcachedOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/memcached/reconfigure/ops-request-reconfigure.yaml +memcachedopsrequest.ops.kubedb.com/memcd-reconfig created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. + +Let's wait for `MemcachedOpsRequest` to be `Successful`. 
Run the following command to watch `MemcachedOpsRequest` CR, + +```bash +$ watch kubectl get memcachedopsrequest -n demo +Every 2.0s: kubectl get memcachedopsrequest -n demo +NAME TYPE STATUS AGE +memcd-apply-reconfig Reconfigure Successful 38s +``` + +We can see from the above output that the `MemcachedOpsRequest` has succeeded. If we describe the `MemcahcedOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe memcachedopsrequest -n demo memcd-apply-reconfig +Name: memcd-apply-reconfig +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: MemcachedOpsRequest +Metadata: + Creation Timestamp: 2024-09-03T06:02:41Z + Generation: 1 + Resource Version: 178039 + UID: d1f90151-abeb-4035-87f4-e2bc89c35b89 +Spec: + Apply: IfReady + Configuration: + Apply Config: + memcached.conf: --conn-limit=3000 + + Database Ref: + Name: memcd-quickstart + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-09-03T06:02:41Z + Message: Memcached ops request is reconfiguring the cluster + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-09-03T06:02:44Z + Message: reconfiguring memcached + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-09-03T06:02:49Z + Message: evict pod; ConditionStatus:True; PodName:memcd-quickstart-0 + Observed Generation: 1 + Status: True + Type: EvictPod--memcd-quickstart-0 + Last Transition Time: 2024-09-03T06:02:49Z + Message: is pod ready; ConditionStatus:False + Observed Generation: 1 + Status: False + Type: IsPodReady + Last Transition Time: 2024-09-03T06:02:54Z + Message: is pod ready; ConditionStatus:True; PodName:memcd-quickstart-0 + Observed Generation: 1 + Status: True + Type: IsPodReady--memcd-quickstart-0 + Last Transition Time: 2024-09-03T06:02:54Z + Message: Restarted pods after reconfiguration + Observed Generation: 1 + Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2024-09-03T06:02:54Z + Message: Successfully completed the modification process. + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 26s KubeDB Ops-manager Operator Pausing Memcached demo/memcd-quickstart + Normal RestartPods 13s KubeDB Ops-manager Operator Restarted pods after reconfiguration + Normal ResumeDatabase 13s KubeDB Ops-manager Operator Resuming Memcached demo/memcd-quickstart + Normal ResumeDatabase 13s KubeDB Ops-manager Operator Successfully resumed Memcached demo/memcd-quickstart + Normal Successful 13s KubeDB Ops-manager Operator Successfully Reconfigured Database + +``` + +Now let's check the new configuration we have provided. + +We will connect to `memcd-quickstart-0` pod from local-machine using port-frowarding. + +```bash +$ kubectl port-forward -n demo memcd-quickstart-0 11211 +Forwarding from 127.0.0.1:11211 -> 11211 +Forwarding from [::1]:11211 -> 11211 +``` + +Now, connect to the memcached server from a different terminal through `telnet`. + +```bash +$ telnet 127.0.0.1 11211 +Trying 127.0.0.1... +Connected to 127.0.0.1. +Escape character is '^]'. +stats +... +STAT max_connections 3000 +... +END +``` + +As we can see from the configuration of running memcached, the value of `maxclients` has been changed from `2000` to `3000`. 
So, the reconfiguration of the database using the `applyConfig` field is successful. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete mc -n demo memcd-quickstart +kubectl delete memcachedopsrequest -n demo memcd-reconfig memcd-apply-reconfig +``` \ No newline at end of file diff --git a/docs/guides/mongodb/concepts/mongodb.md b/docs/guides/mongodb/concepts/mongodb.md index 25b1092efa..af32b0de4c 100644 --- a/docs/guides/mongodb/concepts/mongodb.md +++ b/docs/guides/mongodb/concepts/mongodb.md @@ -109,8 +109,8 @@ spec: labels: app: kubedb interval: 10s - configSecret: - name: mg-custom-config + configuration: + secretName: mg-custom-config podTemplate: metadata: annotations: @@ -167,8 +167,8 @@ spec: requests: cpu: "200m" memory: "200Mi" - configSecret: - name: another-config + configuration: + secretName: another-config allowedSchemas: namespaces: from: Selector @@ -285,7 +285,7 @@ When `spec.shardTopology` is set, the following fields needs to be empty, otherw - `spec.replicas` - `spec.podTemplate` -- `spec.configSecret` +- `spec.configuration` - `spec.storage` - `spec.ephemeralStorage` @@ -300,7 +300,7 @@ Available configurable fields: - `shards` represents number of shards for a mongodb deployment. Each shard is deployed as a [replicaset](/docs/guides/mongodb/clustering/replication_concept.md). - `replicas` represents number of replicas of each shard replicaset. - `prefix` represents the prefix of each shard node. -- `configSecret` is an optional field to provide custom configuration file for shards (i.e. mongod.cnf). If specified, this file will be used as configuration file otherwise a default configuration file will be used. See below to know about [spec.configSecret](/docs/guides/mongodb/concepts/mongodb.md#specconfigsecret) in details. +- `configSecret` is an optional field to provide custom configuration file for shards (i.e. mongod.cnf). If specified, this file will be used as configuration file otherwise a default configuration file will be used. See below to know about [spec.configuration](/docs/guides/mongodb/concepts/mongodb.md#specconfigsecret) in details. - `podTemplate` is an optional configuration for pods. See below to know about [spec.podTemplate](/docs/guides/mongodb/concepts/mongodb.md#specpodtemplate) in details. - `storage` to specify pvc spec for each node of sharding. You can specify any StorageClass available in your cluster with appropriate resource requests. See below to know about [spec.storage](/docs/guides/mongodb/concepts/mongodb.md#specstorage) in details. - `ephemeralStorage` to specify the configuration of ephemeral storage type, If you want to use volatile temporary storage attached to your instances which is only present during the running lifetime of the instance. @@ -313,7 +313,7 @@ Available configurable fields: - `replicas` represents number of replicas for configServer replicaset. Here, configServer is deployed as a replicaset of mongodb. - `prefix` represents the prefix of configServer nodes. -- `configSecret` is an optional field to provide custom configuration file for config server (i.e mongod.cnf). If specified, this file will be used as configuration file otherwise a default configuration file will be used. See below to know about [spec.configSecret](/docs/guides/mongodb/concepts/mongodb.md#specconfigsecret) in details. +- `configSecret` is an optional field to provide custom configuration file for config server (i.e mongod.cnf). 
If specified, this file will be used as configuration file otherwise a default configuration file will be used. See below to know about [spec.configuration](/docs/guides/mongodb/concepts/mongodb.md#specconfigsecret) in details. - `podTemplate` is an optional configuration for pods. See below to know about [spec.podTemplate](/docs/guides/mongodb/concepts/mongodb.md#specpodtemplate) in details. - `storage` to specify pvc spec for each node of configServer. You can specify any StorageClass available in your cluster with appropriate resource requests. See below to know about [spec.storage](/docs/guides/mongodb/concepts/mongodb.md#specstorage) in details. - `ephemeralStorage` to specify the configuration of ephemeral storage type, If you want to use volatile temporary storage attached to your instances which is only present during the running lifetime of the instance. @@ -326,7 +326,7 @@ Available configurable fields: - `replicas` represents number of replicas of `Mongos` instance. Here, Mongos is deployed as stateless (deployment) instance. - `prefix` represents the prefix of mongos nodes. -- `configSecret` is an optional field to provide custom configuration file for mongos (i.e. mongod.cnf). If specified, this file will be used as configuration file otherwise a default configuration file will be used. See below to know about [spec.configSecret](/docs/guides/mongodb/concepts/mongodb.md#specconfigsecret) in details. +- `configSecret` is an optional field to provide custom configuration file for mongos (i.e. mongod.cnf). If specified, this file will be used as configuration file otherwise a default configuration file will be used. See below to know about [spec.configuration](/docs/guides/mongodb/concepts/mongodb.md#specconfigsecret) in details. - `podTemplate` is an optional configuration for pods. See below to know about [spec.podTemplate](/docs/guides/mongodb/concepts/mongodb.md#specpodtemplate) in details. ### spec.sslMode @@ -463,15 +463,15 @@ MongoDB managed by KubeDB can be monitored with builtin-Prometheus and Prometheu - [Monitor MongoDB with builtin Prometheus](/docs/guides/mongodb/monitoring/using-builtin-prometheus.md) - [Monitor MongoDB with Prometheus operator](/docs/guides/mongodb/monitoring/using-prometheus-operator.md) -### spec.configSecret +### spec.configuration -`spec.configSecret` is an optional field that allows users to provide custom configuration for MongoDB. You can provide the custom configuration in a secret, then you can specify the secret name `spec.configSecret.name`. +`spec.configuration` is an optional field that allows users to provide custom configuration for MongoDB. You can provide the custom configuration in a secret, then you can specify the secret name `spec.configuration.secretName`. > Please note that, the secret key needs to be `mongod.conf`. To learn more about how to use a custom configuration file see [here](/docs/guides/mongodb/configuration/using-config-file.md). -NB. If `spec.shardTopology` is set, then `spec.configSecret` needs to be empty. Instead use `spec.shardTopology..configSecret` +NB. If `spec.shardTopology` is set, then `spec.configuration` needs to be empty. Instead use `spec.shardTopology..configSecret` ### spec.podTemplate @@ -645,7 +645,7 @@ Indicates that the database is halted and all offshoot Kubernetes resources exce ### spec.arbiter If `spec.arbiter` is not null, there will be one arbiter pod on each of the replicaset structure, including shards. It has two fields. - `spec.arbiter.podTemplate` defines the arbiter-pod's template. 
See [spec.podTemplate](/docs/guides/mongodb/configuration/using-config-file.md) part for more details of this. -- `spec.arbiter.configSecret` is an optional field that allows users to provide custom configurations for MongoDB arbiter. You just need to refer the configuration secret in `spec.arbiter.configSecret.name` field. +- `spec.arbiter.configSecret` is an optional field that allows users to provide custom configurations for MongoDB arbiter. You just need to refer the configuration secret in `spec.arbiter.configuration.secretName` field. > Please note that, the secret key needs to be `mongod.conf`. N.B. If `spec.replicaset` & `spec.shardTopology` both is empty, `spec.arbiter` has to be empty too. diff --git a/docs/guides/mongodb/concepts/mongodb.md.bak b/docs/guides/mongodb/concepts/mongodb.md.bak new file mode 100644 index 0000000000..3a048b5c39 --- /dev/null +++ b/docs/guides/mongodb/concepts/mongodb.md.bak @@ -0,0 +1,676 @@ +--- +title: MongoDB CRD +menu: + docs_{{ .version }}: + identifier: mg-mongodb-concepts + name: MongoDB + parent: mg-concepts-mongodb + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# MongoDB + +## What is MongoDB + +`MongoDB` is a Kubernetes `Custom Resource Definitions` (CRD). It provides declarative configuration for [MongoDB](https://www.mongodb.com/) in a Kubernetes native way. You only need to describe the desired database configuration in a MongoDB object, and the KubeDB operator will create Kubernetes objects in the desired state for you. + +## MongoDB Spec + +As with all other Kubernetes objects, a MongoDB needs `apiVersion`, `kind`, and `metadata` fields. It also needs a `.spec` section. Below is an example MongoDB object. + +```yaml +apiVersion: kubedb.com/v1 +kind: MongoDB +metadata: + name: mgo1 + namespace: demo +spec: + autoOps: + disabled: true + version: "4.4.26" + replicas: 3 + authSecret: + kind: Secret + name: mgo1-auth + externallyManaged: false + replicaSet: + name: rs0 + horizons: + dns: kubedb.cloud + pods: + - mongo-0.kubedb.cloud + - mongo-1.kubedb.cloud + - mongo-2.kubedb.cloud + shardTopology: + configServer: + podTemplate: {} + replicas: 3 + storage: + resources: + requests: + storage: 1Gi + storageClassName: standard + mongos: + podTemplate: {} + replicas: 2 + shard: + podTemplate: {} + replicas: 3 + shards: 3 + storage: + resources: + requests: + storage: 1Gi + storageClassName: standard + sslMode: requireSSL + tls: + issuerRef: + name: mongo-ca-issuer + kind: Issuer + apiGroup: "cert-manager.io" + certificates: + - alias: client + subject: + organizations: + - kubedb + emailAddresses: + - abc@appscode.com + - alias: server + subject: + organizations: + - kubedb + emailAddresses: + - abc@appscode.com + clusterAuthMode: x509 + storageType: "Durable" + storageEngine: wiredTiger + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + ephemeralStorage: + medium: "Memory" + sizeLimit: 500Mi + init: + script: + configMap: + name: mg-init-script + monitor: + agent: prometheus.io/operator + prometheus: + serviceMonitor: + labels: + app: kubedb + interval: 10s + configuration: + secretName: mg-custom-config + podTemplate: + metadata: + annotations: + passMe: ToDatabasePod + labels: + thisLabel: willGoToPod + controller: + annotations: + passMe: ToPetSet + labels: + thisLabel: willGoToSts + spec: + serviceAccountName: my-service-account + schedulerName: my-scheduler + nodeSelector: + disktype: 
ssd + imagePullSecrets: + - name: myregistrykey + containers: + - name: mongo + args: + - --maxConns=100 + env: + - name: MONGO_INITDB_DATABASE + value: myDB + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: replication-mode-detector + resources: + requests: + cpu: "300m" + memory: 500Mi + securityContext: + runAsUser: 1001 + serviceTemplates: + - alias: primary + spec: + type: NodePort + ports: + - name: primary + port: 27017 + nodePort: 300006 + deletionPolicy: Halt + halted: false + arbiter: + podTemplate: + spec: + resources: + requests: + cpu: "200m" + memory: "200Mi" + configuration: + secretName: another-config + allowedSchemas: + namespaces: + from: Selector + selector: + matchExpressions: + - {key: kubernetes.io/metadata.name, operator: In, values: [dev]} + selector: + matchLabels: + "schema.kubedb.com": "mongo" + healthChecker: + periodSeconds: 15 + timeoutSeconds: 10 + failureThreshold: 2 + disableWriteCheck: false +``` + +### spec.autoOps +AutoOps is an optional field to control the generation of versionUpdate & TLS-related recommendations. + +### spec.version + +`spec.version` is a required field specifying the name of the [MongoDBVersion](/docs/guides/mongodb/concepts/catalog.md) crd where the docker images are specified. Currently, when you install KubeDB, it creates the following `MongoDBVersion` resources, + +- `3.4.17-v1`, `3.4.22-v1` +- `3.6.13-v1`, `4.4.26`, +- `4.0.3-v1`, `4.4.26`, `4.0.11-v1`, +- `4.1.4-v1`, `4.1.7-v3`, `4.4.26` +- `4.4.26`, `4.4.26` +- `5.0.2`, `5.0.3` +- `percona-3.6.18` +- `percona-4.0.10`, `percona-4.2.7`, `percona-4.4.10` + +### spec.replicas + +`spec.replicas` the number of members(primary & secondary) in mongodb replicaset. + +If `spec.shardTopology` is set, then `spec.replicas` needs to be empty. Instead use `spec.shardTopology..replicas` + +If both `spec.replicaset` and `spec.shardTopology` is not set, then `spec.replicas` can be value `1`. + +KubeDB uses `PodDisruptionBudget` to ensure that majority of these replicas are available during [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions) so that quorum is maintained. + +### spec.authSecret + +`spec.authSecret` is an optional field that points to a Secret used to hold credentials for `mongodb` superuser. If not set, KubeDB operator creates a new Secret `{mongodb-object-name}-auth` for storing the password for `mongodb` superuser for each MongoDB object. + +We can use this field in 3 mode. +1. Using an external secret. In this case, You need to create an auth secret first with required fields, then specify the secret name when creating the MongoDB object using `spec.authSecret.name` & set `spec.authSecret.externallyManaged` to true. +```yaml +authSecret: + name: + externallyManaged: true +``` + +2. Specifying the secret name only. In this case, You need to specify the secret name when creating the MongoDB object using `spec.authSecret.name`. `externallyManaged` is by default false. +```yaml +authSecret: + name: +``` + +3. Let KubeDB do everything for you. In this case, no work for you. + +AuthSecret contains a `user` key and a `password` key which contains the `username` and `password` respectively for `mongodb` superuser. 
+ +Example: + +```bash +$ kubectl create secret generic mgo1-auth -n demo \ +--from-literal=username=jhon-doe \ +--from-literal=password=6q8u_2jMOW-OOZXk +secret "mgo1-auth" created +``` + +```yaml +apiVersion: v1 +data: + password: NnE4dV8yak1PVy1PT1pYaw== + username: amhvbi1kb2U= +kind: Secret +metadata: + name: mgo1-auth + namespace: demo +type: Opaque +``` + +Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator (version 0.13.0 and higher). + +### spec.replicaSet + +`spec.replicaSet` represents the configuration for replicaset. When `spec.replicaSet` is set, KubeDB will deploy a mongodb replicaset where number of replicaset member is `spec.replicas`. + +`.spec.replicaSet.horizons` configures the MongoDB replica set for external connections, specifying the primary DNS (`dns`) and pod DNS names (`pods`) for SRV records used in `mongodb+srv` connection strings. + - `dns` denotes the primary dns name for `srv` of the external mongodb cluster. + - `pods` denotes the list of pods DNS names of external mongodb cluster replicaset members. + +- `name` denotes the name of mongodb replicaset. +NB. If `spec.shardTopology` is set, then `spec.replicaset` needs to be empty. + +### spec.keyFileSecret +`keyFileSecret.name` denotes the name of the secret that contains the `key.txt`, which provides the security between replicaset members using internal authentication. See [Keyfile Authentication](https://docs.mongodb.com/manual/tutorial/enforce-keyfile-access-control-in-existing-replica-set/) for more information. +It will make impact only if the ClusterAuthMode is `keyFile` or `sendKeyFile`. + +### spec.shardTopology + +`spec.shardTopology` represents the topology configuration for sharding. + +Available configurable fields: + +- shard +- configServer +- mongos + +When `spec.shardTopology` is set, the following fields needs to be empty, otherwise validating webhook will throw error. + +- `spec.replicas` +- `spec.podTemplate` +- `spec.configSecret` +- `spec.storage` +- `spec.ephemeralStorage` + +KubeDB uses `PodDisruptionBudget` to ensure that majority of the replicas of these shard components are available during [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions) so that quorum and data integrity is maintained. + +#### spec.shardTopology.shard + +`shard` represents configuration for Shard component of mongodb. + +Available configurable fields: + +- `shards` represents number of shards for a mongodb deployment. Each shard is deployed as a [replicaset](/docs/guides/mongodb/clustering/replication_concept.md). +- `replicas` represents number of replicas of each shard replicaset. +- `prefix` represents the prefix of each shard node. +- `configSecret` is an optional field to provide custom configuration file for shards (i.e. mongod.cnf). If specified, this file will be used as configuration file otherwise a default configuration file will be used. See below to know about [spec.configSecret](/docs/guides/mongodb/concepts/mongodb.md#specconfigsecret) in details. +- `podTemplate` is an optional configuration for pods. See below to know about [spec.podTemplate](/docs/guides/mongodb/concepts/mongodb.md#specpodtemplate) in details. +- `storage` to specify pvc spec for each node of sharding. You can specify any StorageClass available in your cluster with appropriate resource requests. 
See below to know about [spec.storage](/docs/guides/mongodb/concepts/mongodb.md#specstorage) in details. +- `ephemeralStorage` to specify the configuration of ephemeral storage type, If you want to use volatile temporary storage attached to your instances which is only present during the running lifetime of the instance. + +#### spec.shardTopology.configServer + +`configServer` represents configuration for ConfigServer component of mongodb. + +Available configurable fields: + +- `replicas` represents number of replicas for configServer replicaset. Here, configServer is deployed as a replicaset of mongodb. +- `prefix` represents the prefix of configServer nodes. +- `configSecret` is an optional field to provide custom configuration file for config server (i.e mongod.cnf). If specified, this file will be used as configuration file otherwise a default configuration file will be used. See below to know about [spec.configSecret](/docs/guides/mongodb/concepts/mongodb.md#specconfigsecret) in details. +- `podTemplate` is an optional configuration for pods. See below to know about [spec.podTemplate](/docs/guides/mongodb/concepts/mongodb.md#specpodtemplate) in details. +- `storage` to specify pvc spec for each node of configServer. You can specify any StorageClass available in your cluster with appropriate resource requests. See below to know about [spec.storage](/docs/guides/mongodb/concepts/mongodb.md#specstorage) in details. +- `ephemeralStorage` to specify the configuration of ephemeral storage type, If you want to use volatile temporary storage attached to your instances which is only present during the running lifetime of the instance. + +#### spec.shardTopology.mongos + +`mongos` represents configuration for Mongos component of mongodb. + +Available configurable fields: + +- `replicas` represents number of replicas of `Mongos` instance. Here, Mongos is deployed as stateless (deployment) instance. +- `prefix` represents the prefix of mongos nodes. +- `configSecret` is an optional field to provide custom configuration file for mongos (i.e. mongod.cnf). If specified, this file will be used as configuration file otherwise a default configuration file will be used. See below to know about [spec.configSecret](/docs/guides/mongodb/concepts/mongodb.md#specconfigsecret) in details. +- `podTemplate` is an optional configuration for pods. See below to know about [spec.podTemplate](/docs/guides/mongodb/concepts/mongodb.md#specpodtemplate) in details. + +### spec.sslMode + +Enables TLS/SSL or mixed TLS/SSL used for all network connections. The value of [`sslMode`](https://docs.mongodb.com/manual/reference/program/mongod/#cmdoption-mongod-sslmode) field can be one of the following: + +| Value | Description | +| :----------: | :----------------------------------------------------------------------------------------------------------------------------- | +| `disabled` | The server does not use TLS/SSL. | +| `allowSSL` | Connections between servers do not use TLS/SSL. For incoming connections, the server accepts both TLS/SSL and non-TLS/non-SSL. | +| `preferSSL` | Connections between servers use TLS/SSL. For incoming connections, the server accepts both TLS/SSL and non-TLS/non-SSL. | +| `requireSSL` | The server uses and accepts only TLS/SSL encrypted connections. | + +### spec.tls + +`spec.tls` specifies the TLS/SSL configurations for the MongoDB. KubeDB uses [cert-manager](https://cert-manager.io/) v1 api to provision and manage TLS certificates. 
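+
+For quick orientation, a minimal `spec.tls` block may look like the sketch below (the issuer name and certificate subjects are the placeholder values from the example object above); the individual fields are described next.
+
+```yaml
+spec:
+  tls:
+    issuerRef:
+      apiGroup: cert-manager.io
+      kind: Issuer
+      name: mongo-ca-issuer
+    certificates:
+      - alias: server
+        subject:
+          organizations:
+            - kubedb
+      - alias: client
+        subject:
+          organizations:
+            - kubedb
+```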
+ +The following fields are configurable in the `spec.tls` section: + +- `issuerRef` is a reference to the `Issuer` or `ClusterIssuer` CR of [cert-manager](https://cert-manager.io/docs/concepts/issuer/) that will be used by `KubeDB` to generate necessary certificates. + + - `apiGroup` is the group name of the resource that is being referenced. Currently, the only supported value is `cert-manager.io`. + - `kind` is the type of resource that is being referenced. KubeDB supports both `Issuer` and `ClusterIssuer` as values for this field. + - `name` is the name of the resource (`Issuer` or `ClusterIssuer`) being referenced. + +- `certificates` (optional) are a list of certificates used to configure the server and/or client certificate. It has the following fields: + - `alias` represents the identifier of the certificate. It has the following possible value: + - `server` is used for server certificate identification. + - `client` is used for client certificate identification. + - `metrics-exporter` is used for metrics exporter certificate identification. + - `secretName` (optional) specifies the k8s secret name that holds the certificates. + > This field is optional. If the user does not specify this field, the default secret name will be created in the following format: `--cert`. + + - `subject` (optional) specifies an `X.509` distinguished name. It has the following possible field, + - `organizations` (optional) are the list of different organization names to be used on the Certificate. + - `organizationalUnits` (optional) are the list of different organization unit name to be used on the Certificate. + - `countries` (optional) are the list of country names to be used on the Certificate. + - `localities` (optional) are the list of locality names to be used on the Certificate. + - `provinces` (optional) are the list of province names to be used on the Certificate. + - `streetAddresses` (optional) are the list of a street address to be used on the Certificate. + - `postalCodes` (optional) are the list of postal code to be used on the Certificate. + - `serialNumber` (optional) is a serial number to be used on the Certificate. + You can find more details from [Here](https://golang.org/pkg/crypto/x509/pkix/#Name) + - `duration` (optional) is the period during which the certificate is valid. + - `renewBefore` (optional) is a specifiable time before expiration duration. + - `dnsNames` (optional) is a list of subject alt names to be used in the Certificate. + - `ipAddresses` (optional) is a list of IP addresses to be used in the Certificate. + - `uris` (optional) is a list of URI Subject Alternative Names to be set in the Certificate. + - `emailAddresses` (optional) is a list of email Subject Alternative Names to be set in the Certificate. + - `privateKey` (optional) specifies options to control private keys used for the Certificate. + - `encoding` (optional) is the private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and PKCS#8, respectively. It defaults to PKCS#1 if not specified. + +### spec.clusterAuthMode + +The authentication mode used for cluster authentication. This option can have one of the following values: + +| Value | Description | +| :-----------: | :------------------------------------------------------------------------------------------------------------------------------- | +| `keyFile` | Use a keyfile for authentication. Accept only keyfiles. 
| +| `sendKeyFile` | For rolling update purposes. Send a keyfile for authentication but can accept both keyfiles and x.509 certificates. | +| `sendX509` | For rolling update purposes. Send the x.509 certificate for authentication but can accept both keyfiles and x.509 certificates. | +| `x509` | Recommended. Send the x.509 certificate for authentication and accept only x.509 certificates. | + +### spec.storageType + +`spec.storageType` is an optional field that specifies the type of storage to use for database. It can be either `Durable` or `Ephemeral`. The default value of this field is `Durable`. If `Ephemeral` is used then KubeDB will create MongoDB database using [emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) volume. +In this case, you don't have to specify `spec.storage` field. Specify `spec.ephemeralStorage` spec instead. + +### spec.storageEngine + +`spec.storageEngine` is an optional field that specifies the type of storage engine is going to be used by mongodb. There are two types of storage engine, `wiredTiger` and `inMemory`. Default value of storage engine is `wiredTiger`. `inMemory` storage engine is only supported by the percona variant of mongodb, i.e. the version that has the `percona-` prefix in the mongodb-version name. + +### spec.storage + +Since 0.9.0-rc.0, If you set `spec.storageType:` to `Durable`, then `spec.storage` is a required field that specifies the StorageClass of PVCs dynamically allocated to store data for the database. This storage spec will be passed to the PetSet created by KubeDB operator to run database pods. You can specify any StorageClass available in your cluster with appropriate resource requests. + +- `spec.storage.storageClassName` is the name of the StorageClass used to provision PVCs. PVCs don’t necessarily have to request a class. A PVC with its storageClassName set equal to "" is always interpreted to be requesting a PV with no class, so it can only be bound to PVs with no class (no annotation or one set equal to ""). A PVC with no storageClassName is not quite the same and is treated differently by the cluster depending on whether the DefaultStorageClass admission plugin is turned on. +- `spec.storage.accessModes` uses the same conventions as Kubernetes PVCs when requesting storage with specific access modes. +- `spec.storage.resources` can be used to request specific quantities of storage. This follows the same resource model used by PVCs. + +To learn how to configure `spec.storage`, please visit the links below: + +- https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims + +NB. If `spec.shardTopology` is set, then `spec.storage` needs to be empty. Instead use `spec.shardTopology..storage` + +### spec.ephemeralStorage +Use this field to specify the configuration of ephemeral storage type, If you want to use volatile temporary storage attached to your instances which is only present during the running lifetime of the instance. +- `spec.ephemeralStorage.medium` refers to the name of the storage medium. +- `spec.ephemeralStorage.sizeLimit` to specify the sizeLimit of the emptyDir volume. + +For more details of these two fields, see [EmptyDir struct](https://github.com/kubernetes/api/blob/ed22bb34e3bbae9e2fafba51d66ee3f68ee304b2/core/v1/types.go#L700-L715) + +### spec.init + +`spec.init` is an optional section that can be used to initialize a newly created MongoDB database. MongoDB databases can be initialized by Script. + +`Initialize from Snapshot` is still not supported. 
+ +#### Initialize via Script + +To initialize a MongoDB database using a script (shell script, js script), set the `spec.init.script` section when creating a MongoDB object. It will execute files alphabetically with extensions `.sh` and `.js` that are found in the repository. script must have the following information: + +- [VolumeSource](https://kubernetes.io/docs/concepts/storage/volumes/#types-of-volumes): Where your script is loaded from. + +Below is an example showing how a script from a configMap can be used to initialize a MongoDB database. + +```yaml +apiVersion: kubedb.com/v1 +kind: MongoDB +metadata: + name: mgo1 + namespace: demo +spec: + version: 4.4.26 + init: + script: + configMap: + name: mongodb-init-script +``` + +In the above example, KubeDB operator will launch a Job to execute all js script of `mongodb-init-script` in alphabetical order once PetSet pods are running. For more details tutorial on how to initialize from script, please visit [here](/docs/guides/mongodb/initialization/using-script.md). + +These are the fields of `spec.init` which you can make use of : +- `spec.init.initialized` indicating that this database has been initialized or not. `false` by default. +- `spec.init.script.scriptPath` to specify where all the init scripts should be mounted. +- `spec.init.script.` as described in the above example. To see all the volumeSource options go to [VolumeSource](https://github.com/kubernetes/api/blob/ed22bb34e3bbae9e2fafba51d66ee3f68ee304b2/core/v1/types.go#L49). +- `spec.init.waitForInitialRestore` to tell the operator if it should wait for the initial restore process or not. + +### spec.monitor + +MongoDB managed by KubeDB can be monitored with builtin-Prometheus and Prometheus operator out-of-the-box. To learn more, + +- [Monitor MongoDB with builtin Prometheus](/docs/guides/mongodb/monitoring/using-builtin-prometheus.md) +- [Monitor MongoDB with Prometheus operator](/docs/guides/mongodb/monitoring/using-prometheus-operator.md) + +### spec.configSecret + +`spec.configSecret` is an optional field that allows users to provide custom configuration for MongoDB. You can provide the custom configuration in a secret, then you can specify the secret name `spec.configuration.secretName`. + +> Please note that, the secret key needs to be `mongod.conf`. + +To learn more about how to use a custom configuration file see [here](/docs/guides/mongodb/configuration/using-config-file.md). + +NB. If `spec.shardTopology` is set, then `spec.configSecret` needs to be empty. Instead use `spec.shardTopology..configSecret` + +### spec.podTemplate + +KubeDB allows providing a template for database pod through `spec.podTemplate`. KubeDB operator will pass the information provided in `spec.podTemplate` to the PetSet created for MongoDB database. + +KubeDB accept following fields to set in `spec.podTemplate:` + +- metadata: + - annotations (pod's annotation) + - labels (pod's labels) +- controller: + - annotations (petset's annotation) + - labels (petset's labels) +- spec: + - containers + - volumes + - podPlacementPolicy + - initContainers + - imagePullSecrets + - nodeSelector + - serviceAccountName + - schedulerName + - tolerations + - priorityClassName + - priority + - securityContext + +You can check out the full list [here](https://github.com/kmodules/offshoot-api/blob/master/api/v2/types.go#L26C1-L279C1). +Uses of some field of `spec.podTemplate` is described below, + +NB. If `spec.shardTopology` is set, then `spec.podTemplate` needs to be empty. 
Instead use `spec.shardTopology..podTemplate` + +#### spec.podTemplate.spec.tolerations + +The `spec.podTemplate.spec.tolerations` is an optional field. This can be used to specify the pod's tolerations. + +#### spec.podTemplate.spec.volumes + +The `spec.podTemplate.spec.volumes` is an optional field. This can be used to provide the list of volumes that can be mounted by containers belonging to the pod. + +#### spec.podTemplate.spec.podPlacementPolicy + +`spec.podTemplate.spec.podPlacementPolicy` is an optional field. This can be used to provide the reference of the `podPlacementPolicy`. `name` of the podPlacementPolicy is referred under this attribute. This will be used by our Petset controller to place the db pods throughout the region, zone & nodes according to the policy. It utilizes kubernetes affinity & podTopologySpreadContraints feature to do so. +```yaml +spec: + podPlacementPolicy: + name: default +``` + + + +#### spec.podTemplate.spec.containers + +The `spec.podTemplate.spec.containers` can be used to provide the list containers and their configurations for to the database pod. some of the fields are described below, + +##### spec.podTemplate.spec.containers[].name +The `spec.podTemplate.spec.containers[].name` field used to specify the name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + +##### spec.podTemplate.spec.containers[].args +`spec.podTemplate.spec.containers[].args` is an optional field. This can be used to provide additional arguments to database installation. + +##### spec.podTemplate.spec.containers[].env + +`spec.podTemplate.spec.containers[].env` is an optional field that specifies the environment variables to pass to the MongoDB docker image. To know about supported environment variables, please visit [here](https://hub.docker.com/r/_/mongo/). + +Note that, KubeDB does not allow `MONGO_INITDB_ROOT_USERNAME` and `MONGO_INITDB_ROOT_PASSWORD` environment variables to set in `spec.podTemplate.spec.env`. If you want to use custom superuser and password, please use `spec.authSecret` instead described earlier. + +If you try to set `MONGO_INITDB_ROOT_USERNAME` or `MONGO_INITDB_ROOT_PASSWORD` environment variable in MongoDB crd, KubeDB operator will reject the request with following error, + +```ini +Error from server (Forbidden): error when creating "./mongodb.yaml": admission webhook "mongodb.validators.kubedb.com" denied the request: environment variable MONGO_INITDB_ROOT_USERNAME is forbidden to use in MongoDB spec +``` + +Also, note that KubeDB does not allow updating the environment variables as updating them does not have any effect once the database is created. If you try to update environment variables, KubeDB operator will reject the request with following error, + +```ini +Error from server (BadRequest): error when applying patch: +... +for: "./mongodb.yaml": admission webhook "mongodb.validators.kubedb.com" denied the request: precondition failed for: +...At least one of the following was changed: + apiVersion + kind + name + namespace + spec.ReplicaSet + spec.authSecret + spec.init + spec.storageType + spec.storage + spec.podTemplate.spec.nodeSelector + spec.podTemplate.spec.env +``` + +##### spec.podTemplate.spec.containers[].resources + +`spec.podTemplate.spec.containers[].resources` is an optional field. This can be used to request compute resources required by containers of the database pods. To learn more, visit [here](http://kubernetes.io/docs/user-guide/compute-resources/). 
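+
+Putting the container-level fields together, a trimmed-down `spec.podTemplate` sketch (values borrowed from the larger example object above) may look like this:
+
+```yaml
+spec:
+  podTemplate:
+    spec:
+      containers:
+        - name: mongo
+          args:
+            - --maxConns=100        # extra argument passed to the database container
+          resources:
+            requests:
+              cpu: "250m"
+              memory: "64Mi"
+            limits:
+              cpu: "500m"
+              memory: "128Mi"
+```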
+ +#### spec.podTemplate.spec.imagePullSecret + +`KubeDB` provides the flexibility of deploying MongoDB database from a private Docker registry. `spec.podTemplate.spec.imagePullSecrets` is an optional field that points to secrets to be used for pulling docker image if you are using a private docker registry. To learn how to deploy MongoDB from a private registry, please visit [here](/docs/guides/mongodb/private-registry/using-private-registry.md). + +#### spec.podTemplate.spec.nodeSelector + +`spec.podTemplate.spec.nodeSelector` is an optional field that specifies a map of key-value pairs. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). To learn more, see [here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) . + +#### spec.podTemplate.spec.serviceAccountName + +`serviceAccountName` is an optional field supported by KubeDB Operator (version 0.13.0 and higher) that can be used to specify a custom service account to fine tune role based access control. + +If this field is left empty, the KubeDB operator will create a service account name matching MongoDB crd name. Role and RoleBinding that provide necessary access permissions will also be generated automatically for this service account. + +If a service account name is given, but there's no existing service account by that name, the KubeDB operator will create one, and Role and RoleBinding that provide necessary access permissions will also be generated for this service account. + +If a service account name is given, and there's an existing service account by that name, the KubeDB operator will use that existing service account. Since this service account is not managed by KubeDB, users are responsible for providing necessary access permissions manually. Follow the guide [here](/docs/guides/mongodb/custom-rbac/using-custom-rbac.md) to grant necessary permissions in this scenario. + +### spec.serviceTemplates + +You can also provide template for the services created by KubeDB operator for MongoDB database through `spec.serviceTemplates`. This will allow you to set the type and other properties of the services. + +KubeDB allows following fields to set in `spec.serviceTemplates`: +- `alias` represents the identifier of the service. It has the following possible value: + - `primary` is used for the primary service identification. + - `standby` is used for the secondary service identification. + - `stats` is used for the exporter service identification. +- metadata: + - labels + - annotations +- spec: + - type + - ports + - clusterIP + - externalIPs + - loadBalancerIP + - loadBalancerSourceRanges + - externalTrafficPolicy + - healthCheckNodePort + - sessionAffinityConfig + +See [here](https://github.com/kmodules/offshoot-api/blob/kubernetes-1.21.1/api/v1/types.go#L237) to understand these fields in detail. + +### spec.deletionPolicy + +`deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `MongoDB` crd or which resources KubeDB should keep or delete when you delete `MongoDB` crd. KubeDB provides following four termination policies: + +- DoNotTerminate +- Halt +- Delete (`Default`) +- WipeOut + +When `deletionPolicy` is `DoNotTerminate`, KubeDB takes advantage of `ValidationWebhook` feature in Kubernetes 1.9.0 or later clusters to implement `DoNotTerminate` feature. 
If admission webhook is enabled, `DoNotTerminate` prevents users from deleting the database as long as the `spec.deletionPolicy` is set to `DoNotTerminate`. + +Following table show what KubeDB does when you delete MongoDB crd for different termination policies, + +| Behavior | DoNotTerminate | Halt | Delete | WipeOut | +| ----------------------------------- | :------------: | :------: | :------: | :------: | +| 1. Block Delete operation | ✓ | ✗ | ✗ | ✗ | +| 2. Delete PetSet | ✗ | ✓ | ✓ | ✓ | +| 3. Delete Services | ✗ | ✓ | ✓ | ✓ | +| 4. Delete PVCs | ✗ | ✗ | ✓ | ✓ | +| 5. Delete Secrets | ✗ | ✗ | ✗ | ✓ | +| 6. Delete Snapshots | ✗ | ✗ | ✗ | ✓ | +| 7. Delete Snapshot data from bucket | ✗ | ✗ | ✗ | ✓ | + +If you don't specify `spec.deletionPolicy` KubeDB uses `Delete` termination policy by default. + +> For more details you can visit [here](https://appscode.com/blog/post/deletion-policy/) + +### spec.halted +Indicates that the database is halted and all offshoot Kubernetes resources except PVCs are deleted. + +### spec.arbiter +If `spec.arbiter` is not null, there will be one arbiter pod on each of the replicaset structure, including shards. It has two fields. +- `spec.arbiter.podTemplate` defines the arbiter-pod's template. See [spec.podTemplate](/docs/guides/mongodb/configuration/using-config-file.md) part for more details of this. +- `spec.arbiter.configSecret` is an optional field that allows users to provide custom configurations for MongoDB arbiter. You just need to refer the configuration secret in `spec.arbiter.configuration.secretName` field. +> Please note that, the secret key needs to be `mongod.conf`. + +N.B. If `spec.replicaset` & `spec.shardTopology` both is empty, `spec.arbiter` has to be empty too. + +### spec.allowedSchemas +It defines which consumers may refer to a database instance. We implemented double-optIn feature between database instance and schema-manager using this field. +- `spec.allowedSchemas.namespace.from` indicates how you want to filter the namespaces, from which a schema-manager will be able to communicate with this db instance. +Possible values are : i) `All` to allow all namespaces, ii) `Same` to allow only if schema-manager & MongoDB is deployed in same namespace & iii) `Selector` to select some namespaces through labels. +- `spec.allowedSchemas.namespace.selector`. You need to set this field only if `spec.allowedSchemas.namespace.from` is set to `selector`. Here you will give the labels of the namespaces to allow. +- `spec.allowedSchemas.selctor` denotes the labels of the schema-manager instances, which you want to give allowance to use this database. + +### spec.coordinator +We use a dedicated container, named `replication-mode-detector`, to continuously select primary pod and add label as primary. By specifying `spec.coordinator.resources` & `spec.coordinator.securityContext`, you can set the resources and securityContext of that mode-detector container. + + +## spec.healthChecker +It defines the attributes for the health checker. +- `spec.healthChecker.periodSeconds` specifies how often to perform the health check. +- `spec.healthChecker.timeoutSeconds` specifies the number of seconds after which the probe times out. +- `spec.healthChecker.failureThreshold` specifies minimum consecutive failures for the healthChecker to be considered failed. +- `spec.healthChecker.disableWriteCheck` specifies whether to disable the writeCheck or not. 
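+
+For example, the values used in the sample object above translate into the following block:
+
+```yaml
+spec:
+  healthChecker:
+    periodSeconds: 15        # run the health check every 15 seconds
+    timeoutSeconds: 10       # fail the probe if it takes longer than 10 seconds
+    failureThreshold: 2      # consider the checker failed after 2 consecutive failures
+    disableWriteCheck: false # keep the write check enabled
+```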
+ +Know details about KubeDB Health checking from this [blog post](https://appscode.com/blog/post/kubedb-health-checker/). + +## Next Steps + +- Learn how to use KubeDB to run a MongoDB database [here](/docs/guides/mongodb/README.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/mongodb/concepts/opsrequest.md b/docs/guides/mongodb/concepts/opsrequest.md index 06a9e4843b..ed8dbb4422 100644 --- a/docs/guides/mongodb/concepts/opsrequest.md +++ b/docs/guides/mongodb/concepts/opsrequest.md @@ -325,8 +325,8 @@ spec: name: mg-replicaset configuration: replicaSet: - configSecret: - name: new-custom-config + configuration: + secretName: new-custom-config status: conditions: - lastTransitionTime: "2020-08-25T18:22:38Z" @@ -351,14 +351,14 @@ spec: name: mg-sharding configuration: shard: - configSecret: - name: new-custom-config + configuration: + secretName: new-custom-config configServer: - configSecret: - name: new-custom-config + configuration: + secretName: new-custom-config mongos: - configSecret: - name: new-custom-config + configuration: + secretName: new-custom-config status: conditions: - lastTransitionTime: "2020-08-25T18:22:38Z" @@ -383,8 +383,8 @@ spec: name: mg-standalone configuration: standalone: - configSecret: - name: new-custom-config + configuration: + secretName: new-custom-config status: conditions: - lastTransitionTime: "2020-08-25T18:22:38Z" diff --git a/docs/guides/mongodb/concepts/opsrequest.md.bak b/docs/guides/mongodb/concepts/opsrequest.md.bak new file mode 100644 index 0000000000..06a9e4843b --- /dev/null +++ b/docs/guides/mongodb/concepts/opsrequest.md.bak @@ -0,0 +1,783 @@ +--- +title: MongoDBOpsRequests CRD +menu: + docs_{{ .version }}: + identifier: mg-opsrequest-concepts + name: MongoDBOpsRequest + parent: mg-concepts-mongodb + weight: 25 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# MongoDBOpsRequest + +## What is MongoDBOpsRequest + +`MongoDBOpsRequest` is a Kubernetes `Custom Resource Definitions` (CRD). It provides a declarative configuration for [MongoDB](https://www.mongodb.com/) administrative operations like database version updating, horizontal scaling, vertical scaling etc. in a Kubernetes native way. + +## MongoDBOpsRequest CRD Specifications + +Like any official Kubernetes resource, a `MongoDBOpsRequest` has `TypeMeta`, `ObjectMeta`, `Spec` and `Status` sections. 
+ +Here, some sample `MongoDBOpsRequest` CRs for different administrative operations is given below: + +**Sample `MongoDBOpsRequest` for updating database:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-update + namespace: demo +spec: + type: UpdateVersion + databaseRef: + name: mg-standalone + updateVersion: + targetVersion: 4.4.26 +status: + conditions: + - lastTransitionTime: "2020-08-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +**Sample `MongoDBOpsRequest` Objects for Horizontal Scaling of different component of the database:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-hscale-configserver + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: mg-sharding + horizontalScaling: + shard: + shards: 3 + replicas: 3 + configServer: + replicas: 3 + mongos: + replicas: 2 +status: + conditions: + - lastTransitionTime: "2020-08-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-hscale-down-replicaset + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: mg-replicaset + horizontalScaling: + replicas: 3 +status: + conditions: + - lastTransitionTime: "2020-08-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +**Sample `MongoDBOpsRequest` Objects for Vertical Scaling of different component of the database:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-vscale-configserver + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: mg-sharding + verticalScaling: + configServer: + resources: + requests: + memory: "150Mi" + cpu: "0.1" + limits: + memory: "250Mi" + cpu: "0.2" + mongos: + resources: + requests: + memory: "150Mi" + cpu: "0.1" + limits: + memory: "250Mi" + cpu: "0.2" + shard: + resources: + requests: + memory: "150Mi" + cpu: "0.1" + limits: + memory: "250Mi" + cpu: "0.2" +status: + conditions: + - lastTransitionTime: "2020-08-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-vscale-standalone + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: mg-standalone + verticalScaling: + standalone: + resources: + requests: + memory: "150Mi" + cpu: "0.1" + limits: + memory: "250Mi" + cpu: "0.2" +status: + conditions: + - lastTransitionTime: "2020-08-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-vscale-replicaset + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: mg-replicaset + verticalScaling: + 
replicaSet: + resources: + requests: + memory: "150Mi" + cpu: "0.1" + limits: + memory: "250Mi" + cpu: "0.2" +status: + conditions: + - lastTransitionTime: "2020-08-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +**Sample `MongoDBOpsRequest` Objects for Reconfiguring different database components:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-reconfiugre-data-replicaset + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: mg-replicaset + configuration: + replicaSet: + applyConfig: + mongod.conf: |- + net: + maxIncomingConnections: 30000 +status: + conditions: + - lastTransitionTime: "2020-08-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-reconfiugre-data-shard + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: mg-sharding + configuration: + shard: + applyConfig: + mongod.conf: |- + net: + maxIncomingConnections: 30000 + configServer: + applyConfig: + mongod.conf: |- + net: + maxIncomingConnections: 30000 + mongos: + applyConfig: + mongod.conf: |- + net: + maxIncomingConnections: 30000 +status: + conditions: + - lastTransitionTime: "2020-08-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-reconfiugre-data-standalone + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: mg-standalone + configuration: + standalone: + applyConfig: + mongod.conf: |- + net: + maxIncomingConnections: 30000 +status: + conditions: + - lastTransitionTime: "2020-08-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-reconfiugre-replicaset + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: mg-replicaset + configuration: + replicaSet: + configSecret: + name: new-custom-config +status: + conditions: + - lastTransitionTime: "2020-08-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-reconfiugre-shard + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: mg-sharding + configuration: + shard: + configSecret: + name: new-custom-config + configServer: + configSecret: + name: new-custom-config + mongos: + configSecret: + name: new-custom-config +status: + conditions: + - lastTransitionTime: "2020-08-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: 
MongoDBOpsRequest +metadata: + name: mops-reconfiugre-standalone + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: mg-standalone + configuration: + standalone: + configSecret: + name: new-custom-config +status: + conditions: + - lastTransitionTime: "2020-08-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +**Sample `MongoDBOpsRequest` Objects for Volume Expansion of different database components:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-volume-exp-replicaset + namespace: demo +spec: + type: VolumeExpansion + databaseRef: + name: mg-replicaset + volumeExpansion: + mode: "Online" + replicaSet: 2Gi +status: + conditions: + - lastTransitionTime: "2020-08-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-volume-exp-shard + namespace: demo +spec: + type: VolumeExpansion + databaseRef: + name: mg-sharding + volumeExpansion: + mode: "Online" + shard: 2Gi + configServer: 2Gi +status: + conditions: + - lastTransitionTime: "2020-08-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-volume-exp-standalone + namespace: demo +spec: + type: VolumeExpansion + databaseRef: + name: mg-standalone + volumeExpansion: + mode: "Online" + standalone: 2Gi +status: + conditions: + - lastTransitionTime: "2020-08-25T18:22:38Z" + message: Successfully completed the modification process + observedGeneration: 1 + reason: Successful + status: "True" + type: Successful + observedGeneration: 1 + phase: Successful +``` + +**Sample `MongoDBOpsRequest` Objects for Reconfiguring TLS of the database:** + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-add-tls + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: mg-rs + tls: + issuerRef: + name: mg-issuer + kind: Issuer + apiGroup: "cert-manager.io" + certificates: + - alias: client + emailAddresses: + - abc@appscode.com +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-rotate + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: mg-rs + tls: + rotateCertificates: true +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-change-issuer + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: mg-rs + tls: + issuerRef: + name: mg-new-issuer + kind: Issuer + apiGroup: "cert-manager.io" +``` + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-remove + namespace: demo +spec: + type: ReconfigureTLS + databaseRef: + name: mg-rs + tls: + remove: true +``` + +Here, we are going to describe the various sections of a `MongoDBOpsRequest` crd. + +A `MongoDBOpsRequest` object has the following fields in the `spec` section. 
+ +### spec.databaseRef + +`spec.databaseRef` is a required field that point to the [MongoDB](/docs/guides/mongodb/concepts/mongodb.md) object for which the administrative operations will be performed. This field consists of the following sub-field: + +- **spec.databaseRef.name :** specifies the name of the [MongoDB](/docs/guides/mongodb/concepts/mongodb.md) object. + +### spec.type + +`spec.type` specifies the kind of operation that will be applied to the database. Currently, the following types of operations are allowed in `MongoDBOpsRequest`. + +- `Upgrade` / `UpdateVersion` +- `HorizontalScaling` +- `VerticalScaling` +- `VolumeExpansion` +- `Reconfigure` +- `ReconfigureTLS` +- `Restart` + +> You can perform only one type of operation on a single `MongoDBOpsRequest` CR. For example, if you want to update your database and scale up its replica then you have to create two separate `MongoDBOpsRequest`. At first, you have to create a `MongoDBOpsRequest` for updating. Once it is completed, then you can create another `MongoDBOpsRequest` for scaling. + +> Note: There is an exception to the above statement. It is possible to specify both `spec.configuration` & `spec.verticalScaling` in a OpsRequest of type `VerticalScaling`. + +### spec.updateVersion + +If you want to update you MongoDB version, you have to specify the `spec.updateVersion` section that specifies the desired version information. This field consists of the following sub-field: + +- `spec.updateVersion.targetVersion` refers to a [MongoDBVersion](/docs/guides/mongodb/concepts/catalog.md) CR that contains the MongoDB version information where you want to update. + +Have a look on the [`updateConstraints`](/docs/guides/mongodb/concepts/catalog.md#specupdateconstraints) of the mongodbVersion spec to know which versions are supported for updating from the current version. +```yaml +kubectl get mgversion -o=jsonpath='{.spec.updateConstraints}' | jq +``` + +> You can only update between MongoDB versions. KubeDB does not support downgrade for MongoDB. + +### spec.horizontalScaling + +If you want to scale-up or scale-down your MongoDB cluster or different components of it, you have to specify `spec.horizontalScaling` section. This field consists of the following sub-field: + +- `spec.horizontalScaling.replicas` indicates the desired number of nodes for MongoDB replicaset cluster after scaling. For example, if your cluster currently has 4 replicaset nodes, and you want to add additional 2 nodes then you have to specify 6 in `spec.horizontalScaling.replicas` field. Similarly, if you want to remove one node from the cluster, you have to specify 3 in `spec.horizontalScaling.replicas` field. +- `spec.horizontalScaling.configServer.replicas` indicates the desired number of ConfigServer nodes for Sharded MongoDB cluster after scaling. +- `spec.horizontalScaling.mongos.replicas` indicates the desired number of Mongos nodes for Sharded MongoDB cluster after scaling. +- `spec.horizontalScaling.shard` indicates the configuration of shard nodes for Sharded MongoDB cluster after scaling. This field consists of the following sub-field: + - `spec.horizontalScaling.shard.replicas` indicates the number of replicas each shard will have after scaling. + - `spec.horizontalScaling.shard.shards` indicates the number of shards after scaling + +### spec.verticalScaling + +`spec.verticalScaling` is a required field specifying the information of `MongoDB` resources like `cpu`, `memory` etc that will be scaled. 
This field consists of the following sub-fields: + +- `spec.verticalScaling.standalone` indicates the desired resources for standalone MongoDB database after scaling. +- `spec.verticalScaling.replicaSet` indicates the desired resources for replicaSet of MongoDB database after scaling. +- `spec.verticalScaling.mongos` indicates the desired resources for Mongos nodes of Sharded MongoDB database after scaling. +- `spec.verticalScaling.configServer` indicates the desired resources for ConfigServer nodes of Sharded MongoDB database after scaling. +- `spec.verticalScaling.shard` indicates the desired resources for Shard nodes of Sharded MongoDB database after scaling. +- `spec.verticalScaling.exporter` indicates the desired resources for the `exporter` container. +- `spec.verticalScaling.arbiter` indicates the desired resources for arbiter node of MongoDB database after scaling. +- `spec.verticalScaling.coordinator` indicates the desired resources for the coordinator container. + +All of them has the below structure: + +```yaml +requests: + memory: "200Mi" + cpu: "0.1" +limits: + memory: "300Mi" + cpu: "0.2" +``` + +Here, when you specify the resource request, the scheduler uses this information to decide which node to place the container of the Pod on and when you specify a resource limit for the container, the `kubelet` enforces those limits so that the running container is not allowed to use more of that resource than the limit you set. You can found more details from [here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + +### spec.volumeExpansion + +> To use the volume expansion feature the storage class must support volume expansion + +If you want to expand the volume of your MongoDB cluster or different components of it, you have to specify `spec.volumeExpansion` section. This field consists of the following sub-field: + +- `spec.mode` specifies the volume expansion mode. Supported values are `Online` & `Offline`. The default is `Online`. +- `spec.volumeExpansion.standalone` indicates the desired size for the persistent volume of a standalone MongoDB database. +- `spec.volumeExpansion.replicaSet` indicates the desired size for the persistent volume of replicaSets of a MongoDB database. +- `spec.volumeExpansion.configServer` indicates the desired size for the persistent volume of the config server of a sharded MongoDB database. +- `spec.volumeExpansion.shard` indicates the desired size for the persistent volume of shards of a sharded MongoDB database. + +All of them refer to [Quantity](https://v1-22.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#quantity-resource-core) types of Kubernetes. + +Example usage of this field is given below: + +```yaml +spec: + volumeExpansion: + shard: "2Gi" +``` + +This will expand the volume size of all the shard nodes to 2 GB. + +### spec.configuration + +If you want to reconfigure your Running MongoDB cluster or different components of it with new custom configuration, you have to specify `spec.configuration` section. This field consists of the following sub-field: + +- `spec.configuration.standalone` indicates the desired new custom configuration for a standalone MongoDB database. +- `spec.configuration.replicaSet` indicates the desired new custom configuration for replicaSet of a MongoDB database. +- `spec.configuration.configServer` indicates the desired new custom configuration for config servers of a sharded MongoDB database. 
+- `spec.configuration.mongos` indicates the desired new custom configuration for the mongos nodes of a sharded MongoDB database.
+- `spec.configuration.shard` indicates the desired new custom configuration for the shard nodes of a sharded MongoDB database.
+- `spec.configuration.arbiter` indicates the desired new custom configuration for the arbiter node of a MongoDB database.
+
+All of them have the following sub-fields:
+
+- `configSecret` points to a secret in the same namespace of a MongoDB resource, which contains the new custom configurations. If any configSecret was set before in the database, this secret will replace it.
+- `applyConfig` contains the new custom config as a string which will be merged with the previous configuration.
+
+- `applyConfig` is a map where the key supports 3 values, namely `mongod.conf`, `replicaset.json`, `configuration.js`, and the value represents the corresponding configurations.
+For your information, `replicaset.json` is used to modify replica set configurations, which we see in the output of `rs.config()`, and `configuration.js` is used to apply a js script to configure MongoDB at runtime.
+The KubeDB provisioner operator applies these two directly while reconciling.
+
+```yaml
+  applyConfig:
+    configuration.js: |
+      print("hello world!!!!")
+    replicaset.json: |
+      {
+        "settings" : {
+          "electionTimeoutMillis" : 4000
+        }
+      }
+    mongod.conf: |
+      net:
+        maxIncomingConnections: 30000
+```
+
+- `removeCustomConfig` is a boolean field. Set this field to `true` if you want to remove all the custom configuration from the deployed MongoDB server.
+
+### spec.tls
+
+If you want to reconfigure the TLS configuration of your database, i.e. add TLS, remove TLS, update the issuer/cluster issuer or certificates, or rotate the certificates, you have to specify the `spec.tls` section. This field consists of the following sub-fields:
+
+- `spec.tls.issuerRef` specifies the issuer name, kind and api group.
+- `spec.tls.certificates` specifies the certificates. You can learn more about this field from [here](/docs/guides/mongodb/concepts/mongodb.md#spectls).
+- `spec.tls.rotateCertificates` specifies that we want to rotate the certificate of this database.
+- `spec.tls.remove` specifies that we want to remove tls from this database.
+
+### spec.readinessCriteria
+
+`spec.readinessCriteria` is the criteria for checking readiness of a MongoDB pod after restarting it. It has two fields.
+- `spec.readinessCriteria.oplogMaxLagSeconds` defines the maximum allowed lag time between the primary & secondary.
+- `spec.readinessCriteria.objectsCountDiffPercentage` denotes the maximum allowed object-count difference between the primary & secondary.
+
+```yaml
+...
+spec:
+  readinessCriteria:
+    oplogMaxLagSeconds: 20
+    objectsCountDiffPercentage: 10
+...
+```
+Exceeding these thresholds results in opsRequest failure. Note that the readinessCriteria field takes effect only if the opsRequest type involves restarting pods.
+
+### spec.timeout
+Since the ops request steps are retried multiple times internally, this `timeout` field lets users specify a timeout (in seconds) for those steps of the ops request.
+If a step doesn't finish within the specified timeout, the ops request will result in failure.
+
+### spec.apply
+This field controls the execution of the opsRequest depending on the database state. It has two supported values: `Always` & `IfReady`.
+Use `IfReady` if you want to process the opsRequest only when the database is Ready, and use `Always` if you want to execute the opsRequest irrespective of the database state.
+
+
+### MongoDBOpsRequest `Status`
+
+`.status` describes the current state and progress of a `MongoDBOpsRequest` operation. It has the following fields:
+
+### status.phase
+
+`status.phase` indicates the overall phase of the operation for this `MongoDBOpsRequest`. It can have the following values:
+
+| Phase       | Meaning                                                                             |
+|-------------|-------------------------------------------------------------------------------------|
+| Successful  | KubeDB has successfully performed the operation requested in the MongoDBOpsRequest |
+| Progressing | KubeDB has started the execution of the applied MongoDBOpsRequest                  |
+| Failed      | KubeDB has failed the operation requested in the MongoDBOpsRequest                 |
+| Denied      | KubeDB has denied the operation requested in the MongoDBOpsRequest                 |
+| Skipped     | KubeDB has skipped the operation requested in the MongoDBOpsRequest                |
+
+Important: The Ops-manager operator can skip an opsRequest only if its execution has not started yet and there is a newer opsRequest of the same `spec.type` applied in the cluster.
+
+### status.observedGeneration
+
+`status.observedGeneration` shows the most recent generation observed by the `MongoDBOpsRequest` controller.
+
+### status.conditions
+
+`status.conditions` is an array that specifies the conditions of different steps of `MongoDBOpsRequest` processing. Each condition entry has the following fields:
+
+- `type` specifies the type of the condition. MongoDBOpsRequest has the following types of conditions:
+
+| Type                          | Meaning                                                                     |
+| ----------------------------- | ------------------------------------------------------------------------- |
+| `Progressing`                 | Specifies that the operation is now in the progressing state               |
+| `Successful`                  | Specifies such a state that the operation on the database was successful.  |
+| `HaltDatabase`                | Specifies such a state that the database is halted by the operator         |
+| `ResumeDatabase`              | Specifies such a state that the database is resumed by the operator        |
+| `Failed`                      | Specifies such a state that the operation on the database failed.
| +| `StartingBalancer` | Specifies such a state that the balancer has successfully started | +| `StoppingBalancer` | Specifies such a state that the balancer has successfully stopped | +| `UpdateShardImage` | Specifies such a state that the Shard Images has been updated | +| `UpdateReplicaSetImage` | Specifies such a state that the Replicaset Image has been updated | +| `UpdateConfigServerImage` | Specifies such a state that the ConfigServer Image has been updated | +| `UpdateMongosImage` | Specifies such a state that the Mongos Image has been updated | +| `UpdatePetSetResources` | Specifies such a state that the Petset resources has been updated | +| `UpdateShardResources` | Specifies such a state that the Shard resources has been updated | +| `UpdateReplicaSetResources` | Specifies such a state that the Replicaset resources has been updated | +| `UpdateConfigServerResources` | Specifies such a state that the ConfigServer resources has been updated | +| `UpdateMongosResources` | Specifies such a state that the Mongos resources has been updated | +| `ScaleDownReplicaSet` | Specifies such a state that the scale down operation of replicaset | +| `ScaleUpReplicaSet` | Specifies such a state that the scale up operation of replicaset | +| `ScaleUpShardReplicas` | Specifies such a state that the scale up operation of shard replicas | +| `ScaleDownShardReplicas` | Specifies such a state that the scale down operation of shard replicas | +| `ScaleDownConfigServer` | Specifies such a state that the scale down operation of config server | +| `ScaleUpConfigServer` | Specifies such a state that the scale up operation of config server | +| `ScaleMongos` | Specifies such a state that the scale down operation of replicaset | +| `VolumeExpansion` | Specifies such a state that the volume expansion operaton of the database | +| `ReconfigureReplicaset` | Specifies such a state that the reconfiguration of replicaset nodes | +| `ReconfigureMongos` | Specifies such a state that the reconfiguration of mongos nodes | +| `ReconfigureShard` | Specifies such a state that the reconfiguration of shard nodes | +| `ReconfigureConfigServer` | Specifies such a state that the reconfiguration of config server nodes | + +- The `status` field is a string, with possible values `True`, `False`, and `Unknown`. + - `status` will be `True` if the current transition succeeded. + - `status` will be `False` if the current transition failed. + - `status` will be `Unknown` if the current transition was denied. +- The `message` field is a human-readable message indicating details about the condition. +- The `reason` field is a unique, one-word, CamelCase reason for the condition's last transition. +- The `lastTransitionTime` field provides a timestamp for when the operation last transitioned from one state to another. +- The `observedGeneration` shows the most recent condition transition generation observed by the controller. diff --git a/docs/guides/mongodb/configuration/using-config-file.md b/docs/guides/mongodb/configuration/using-config-file.md index 487f9a8810..98b023cd66 100644 --- a/docs/guides/mongodb/configuration/using-config-file.md +++ b/docs/guides/mongodb/configuration/using-config-file.md @@ -37,11 +37,11 @@ MongoDB allows configuring database via configuration file. The default configur > To learn available configuration option of MongoDB see [Configuration File Options](https://docs.mongodb.com/manual/reference/configuration-options/). 
-At first, you have to create a secret with your configuration file contents as the value of this key `mongod.conf`. Then, you have to specify the name of this secret in `spec.configSecret.name` section while creating MongoDB crd. KubeDB will mount this secret into `/configdb-readonly/` directory of the database pod. +At first, you have to create a secret with your configuration file contents as the value of this key `mongod.conf`. Then, you have to specify the name of this secret in `spec.configuration.secretName` section while creating MongoDB crd. KubeDB will mount this secret into `/configdb-readonly/` directory of the database pod. -Here one important thing to note that, `spec.configSecret.name` will be used for standard replicaset members & standalone mongodb only. If you want to configure a specific type of mongo nodes, you have to set the name in respective fields. -For example, to configure shard topology node, set `spec.shardTopology..configSecret.name` field. -Similarly, To configure arbiter node, set `spec.arbiter.configSecret.name` field. +Here one important thing to note that, `spec.configuration.secretName` will be used for standard replicaset members & standalone mongodb only. If you want to configure a specific type of mongo nodes, you have to set the name in respective fields. +For example, to configure shard topology node, set `spec.shardTopology..configuration.secretName` field. +Similarly, To configure arbiter node, set `spec.arbiter.configuration.secretName` field. In this tutorial, we will configure [net.maxIncomingConnections](https://docs.mongodb.com/manual/reference/configuration-options/#net.maxIncomingConnections) (default value: 65536) via a custom config file. @@ -85,7 +85,7 @@ net: maxIncomingConnections: 100000 ``` -Now, create MongoDB crd specifying `spec.configSecret` field. +Now, create MongoDB crd specifying `spec.configuration` field. ```yaml apiVersion: kubedb.com/v1 diff --git a/docs/guides/mongodb/configuration/using-config-file.md.bak b/docs/guides/mongodb/configuration/using-config-file.md.bak new file mode 100644 index 0000000000..5469d43efd --- /dev/null +++ b/docs/guides/mongodb/configuration/using-config-file.md.bak @@ -0,0 +1,205 @@ +--- +title: Run MongoDB with Custom Configuration +menu: + docs_{{ .version }}: + identifier: mg-using-config-file-configuration + name: Config File + parent: mg-configuration + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Using Custom Configuration File + +KubeDB supports providing custom configuration for MongoDB. This tutorial will show you how to use KubeDB to run a MongoDB database with custom configuration. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. 
Run the following command to prepare your cluster for this tutorial: + + ```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: The yaml files used in this tutorial are stored in [docs/examples/mongodb](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/mongodb) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +MongoDB allows configuring database via configuration file. The default configuration file for MongoDB deployed by `KubeDB` can be found in `/data/configdb/mongod.conf`. When MongoDB starts, it will look for custom configuration file in `/configdb-readonly/mongod.conf`. If configuration file exist, this custom configuration will overwrite the existing default one. + +> To learn available configuration option of MongoDB see [Configuration File Options](https://docs.mongodb.com/manual/reference/configuration-options/). + +At first, you have to create a secret with your configuration file contents as the value of this key `mongod.conf`. Then, you have to specify the name of this secret in `spec.configuration.secretName` section while creating MongoDB crd. KubeDB will mount this secret into `/configdb-readonly/` directory of the database pod. + +Here one important thing to note that, `spec.configuration.secretName` will be used for standard replicaset members & standalone mongodb only. If you want to configure a specific type of mongo nodes, you have to set the name in respective fields. +For example, to configure shard topology node, set `spec.shardTopology..configuration.secretName` field. +Similarly, To configure arbiter node, set `spec.arbiter.configuration.secretName` field. + +In this tutorial, we will configure [net.maxIncomingConnections](https://docs.mongodb.com/manual/reference/configuration-options/#net.maxIncomingConnections) (default value: 65536) via a custom config file. + +## Custom Configuration + +At first, create `mongod.conf` file containing required configuration settings. + +```ini +$ cat mongod.conf +net: + maxIncomingConnections: 10000 +``` + +Here, `maxIncomingConnections` is set to `10000`, whereas the default value is 65536. + +Now, create the secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo mg-configuration --from-file=./mongod.conf +secret/mg-configuration created +``` + +Verify the secret has the configuration file. + +```yaml +$ kubectl get secret -n demo mg-configuration -o yaml +apiVersion: v1 +data: + mongod.conf: bmV0OgogIG1heEluY29taW5nQ29ubmVjdGlvbnM6IDEwMDAwMA== +kind: Secret +metadata: + creationTimestamp: "2021-02-09T12:59:50Z" + name: mg-configuration + namespace: demo + resourceVersion: "52495" + uid: 92ca4191-eb97-4274-980c-9430ab7cc5d1 +type: Opaque + +$ echo bmV0OgogIG1heEluY29taW5nQ29ubmVjdGlvbnM6IDEwMDAwMA== | base64 -d +net: + maxIncomingConnections: 100000 +``` + +Now, create MongoDB crd specifying `spec.configSecret` field. + +```yaml +apiVersion: kubedb.com/v1 +kind: MongoDB +metadata: + name: mgo-custom-config + namespace: demo +spec: + version: "4.4.26" + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + configuration: + secretName: mg-configuration +``` + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mongodb/configuration/replicaset.yaml +mongodb.kubedb.com/mgo-custom-config created +``` + +Now, wait a few minutes. 
KubeDB operator will create necessary PVC, petset, services, secret etc. If everything goes well, we will see that a pod with the name `mgo-custom-config-0` has been created. + +Check that the petset's pod is running + +```bash +$ kubectl get pod -n demo mgo-custom-config-0 +NAME READY STATUS RESTARTS AGE +mgo-custom-config-0 1/1 Running 0 1m +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +Now, you can connect to this database through [mongo-shell](https://docs.mongodb.com/v4.2/mongo/). In this tutorial, we are connecting to the MongoDB server from inside the pod. + +```bash +$ kubectl get secrets -n demo mgo-custom-config-auth -o jsonpath='{.data.\username}' | base64 -d +root + +$ kubectl get secrets -n demo mgo-custom-config-auth -o jsonpath='{.data.\password}' | base64 -d +ErialNojWParBFoP + +$ kubectl exec -it mgo-custom-config-0 -n demo sh + +> mongo admin + +> db.auth("root","ErialNojWParBFoP") +1 + +> db._adminCommand( {getCmdLineOpts: 1}) +{ + "argv" : [ + "mongod", + "--dbpath=/data/db", + "--auth", + "--ipv6", + "--bind_ip_all", + "--port=27017", + "--tlsMode=disabled", + "--config=/data/configdb/mongod.conf" + ], + "parsed" : { + "config" : "/data/configdb/mongod.conf", + "net" : { + "bindIp" : "*", + "ipv6" : true, + "maxIncomingConnections" : 10000, + "port" : 27017, + "tls" : { + "mode" : "disabled" + } + }, + "security" : { + "authorization" : "enabled" + }, + "storage" : { + "dbPath" : "/data/db" + } + }, + "ok" : 1 +} + +> exit +bye +``` + +As we can see from the configuration of running mongodb, the value of `maxIncomingConnections` has been set to 10000 successfully. + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl patch -n demo mg/mgo-custom-config -p '{"spec":{"deletionPolicy":"WipeOut"}}' --type="merge" +kubectl delete -n demo mg/mgo-custom-config + +kubectl delete -n demo secret mg-configuration + +kubectl delete ns demo +``` + +## Next Steps + +- [Backup and Restore](/docs/guides/mongodb/backup/stash/overview/index.md) MongoDB databases using Stash. +- Initialize [MongoDB with Script](/docs/guides/mongodb/initialization/using-script.md). +- Monitor your MongoDB database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/mongodb/monitoring/using-prometheus-operator.md). +- Monitor your MongoDB database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/mongodb/monitoring/using-builtin-prometheus.md). +- Use [private Docker registry](/docs/guides/mongodb/private-registry/using-private-registry.md) to deploy MongoDB with KubeDB. +- Use [kubedb cli](/docs/guides/mongodb/cli/cli.md) to manage databases like kubectl for Kubernetes. +- Detail concepts of [MongoDB object](/docs/guides/mongodb/concepts/mongodb.md). +- Detail concepts of [MongoDBVersion object](/docs/guides/mongodb/concepts/catalog.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). 
diff --git a/docs/guides/mongodb/monitoring/overview.md b/docs/guides/mongodb/monitoring/overview.md index 461fbb54a5..887ad06920 100644 --- a/docs/guides/mongodb/monitoring/overview.md +++ b/docs/guides/mongodb/monitoring/overview.md @@ -55,8 +55,8 @@ metadata: spec: version: "4.4.26" deletionPolicy: WipeOut - configSecret: - name: config + configuration: + secretName: config storageType: Durable storage: storageClassName: default diff --git a/docs/guides/mongodb/monitoring/overview.md.bak b/docs/guides/mongodb/monitoring/overview.md.bak new file mode 100644 index 0000000000..461fbb54a5 --- /dev/null +++ b/docs/guides/mongodb/monitoring/overview.md.bak @@ -0,0 +1,105 @@ +--- +title: MongoDB Monitoring Overview +description: MongoDB Monitoring Overview +menu: + docs_{{ .version }}: + identifier: mg-monitoring-overview + name: Overview + parent: mg-monitoring-mongodb + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Monitoring MongoDB with KubeDB + +KubeDB has native support for monitoring via [Prometheus](https://prometheus.io/). You can use builtin [Prometheus](https://github.com/prometheus/prometheus) scraper or [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) to monitor KubeDB managed databases. This tutorial will show you how database monitoring works with KubeDB and how to configure Database crd to enable monitoring. + +## Overview + +KubeDB uses Prometheus [exporter](https://prometheus.io/docs/instrumenting/exporters/#databases) images to export Prometheus metrics for respective databases. Following diagram shows the logical flow of database monitoring with KubeDB. + +

+  [Figure: Database Monitoring Flow]
+ +When a user creates a database crd with `spec.monitor` section configured, KubeDB operator provisions the respective database and injects an exporter image as sidecar to the database pod. It also creates a dedicated stats service with name `{database-crd-name}-stats` for monitoring. Prometheus server can scrape metrics using this stats service. + +## Configure Monitoring + +In order to enable monitoring for a database, you have to configure `spec.monitor` section. KubeDB provides following options to configure `spec.monitor` section: + +| Field | Type | Uses | +| -------------------------------------------------- | ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | +| `spec.monitor.agent` | `Required` | Type of the monitoring agent that will be used to monitor this database. It can be `prometheus.io/builtin` or `prometheus.io/operator`. | +| `spec.monitor.prometheus.exporter.port` | `Optional` | Port number where the exporter side car will serve metrics. | +| `spec.monitor.prometheus.exporter.args` | `Optional` | Arguments to pass to the exporter sidecar. | +| `spec.monitor.prometheus.exporter.env` | `Optional` | List of environment variables to set in the exporter sidecar container. | +| `spec.monitor.prometheus.exporter.resources` | `Optional` | Resources required by exporter sidecar container. | +| `spec.monitor.prometheus.exporter.securityContext` | `Optional` | Security options the exporter should run with. | +| `spec.monitor.prometheus.serviceMonitor.labels` | `Optional` | Labels for `ServiceMonitor` crd. | +| `spec.monitor.prometheus.serviceMonitor.interval` | `Optional` | Interval at which metrics should be scraped. | + +## Sample Configuration + +A sample YAML for MongoDB crd with `spec.monitor` section configured to enable monitoring with [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) is shown below. + +```yaml +apiVersion: kubedb.com/v1 +kind: MongoDB +metadata: + name: sample-mongo + namespace: databases +spec: + version: "4.4.26" + deletionPolicy: WipeOut + configSecret: + name: config + storageType: Durable + storage: + storageClassName: default + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + monitor: + agent: prometheus.io/operator + prometheus: + serviceMonitor: + labels: + release: prometheus + exporter: + args: + - --collect.database + env: + - name: ENV_VARIABLE + valueFrom: + secretKeyRef: + name: env_name + key: env_value + resources: + requests: + memory: 512Mi + cpu: 200m + limits: + memory: 512Mi + cpu: 250m + securityContext: + runAsUser: 2000 + allowPrivilegeEscalation: false +``` + +Here, we have specified that we are going to monitor this server using Prometheus operator through `spec.monitor.agent: prometheus.io/operator`. KubeDB will create a `ServiceMonitor` crd in databases namespace and this `ServiceMonitor` will have `release: prometheus` label. + +One thing to note that, we internally use `--collect-all` args, if the mongodb exporter version >= v0.31.0 . You can check the exporter version by getting the mgversion object, like this, +`kubectl get mgversion -o=jsonpath='{.spec.exporter.image}' 4.4.26` +In that case, specifying args to collect something (as we used `--collect.database` above) will not have any effect. 
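+
+To illustrate how that label is consumed, the following is a minimal, illustrative sketch of a Prometheus (operator) object that would select such a `ServiceMonitor`; the resource name, namespace, and service account are assumptions, not objects created by KubeDB:
+
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: Prometheus
+metadata:
+  name: prometheus                        # assumed name of the Prometheus server
+  namespace: monitoring                   # assumed namespace of the Prometheus operator setup
+spec:
+  serviceAccountName: prometheus          # assumed service account with scrape permissions
+  serviceMonitorNamespaceSelector: {}     # watch ServiceMonitors in all namespaces
+  serviceMonitorSelector:
+    matchLabels:
+      release: prometheus                 # must match spec.monitor.prometheus.serviceMonitor.labels
+```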
+ +## Next Steps + +- Learn how to monitor MongoDB database with KubeDB using [builtin-Prometheus](/docs/guides/mongodb/monitoring/using-builtin-prometheus.md) +- Learn how to monitor MongoDB database with KubeDB using [Prometheus operator](/docs/guides/mongodb/monitoring/using-prometheus-operator.md). + diff --git a/docs/guides/mongodb/reconfigure/replicaset.md b/docs/guides/mongodb/reconfigure/replicaset.md index 543fc789ba..ea65c5d97c 100644 --- a/docs/guides/mongodb/reconfigure/replicaset.md +++ b/docs/guides/mongodb/reconfigure/replicaset.md @@ -61,7 +61,7 @@ $ kubectl create secret generic -n demo mg-custom-config --from-file=./mongod.co secret/mg-custom-config created ``` -In this section, we are going to create a MongoDB object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `MongoDB` CR that we are going to create, +In this section, we are going to create a MongoDB object specifying `spec.configuration` field to apply this custom configuration. Below is the YAML of the `MongoDB` CR that we are going to create, ```yaml apiVersion: kubedb.com/v1 @@ -82,8 +82,8 @@ spec: resources: requests: storage: 1Gi - configSecret: - name: mg-custom-config + configuration: + secretName: mg-custom-config ``` Let's create the `MongoDB` CR we have shown above, @@ -202,8 +202,8 @@ spec: name: mg-replicaset configuration: replicaSet: - configSecret: - name: new-custom-config + configuration: + secretName: new-custom-config readinessCriteria: oplogMaxLagSeconds: 20 objectsCountDiffPercentage: 10 @@ -215,8 +215,8 @@ Here, - `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-replicaset` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.customConfig.replicaSet.configSecret.name` specifies the name of the new secret. -- `spec.customConfig.arbiter.configSecret.name` could also be specified with a config-secret. +- `spec.customConfig.replicaSet.configuration.secretName` specifies the name of the new secret. +- `spec.customConfig.arbiter.configuration.secretName` could also be specified with a config-secret. - Have a look [here](/docs/guides/mongodb/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. Let's create the `MongoDBOpsRequest` CR we have shown above, @@ -444,7 +444,7 @@ Here, - `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-apply-replicaset` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. - `spec.configuration.replicaSet.applyConfig` specifies the new configuration that will be merged in the existing secret. -- `spec.customConfig.arbiter.configSecret.name` could also be specified with a config-secret. +- `spec.customConfig.arbiter.configuration.secretName` could also be specified with a config-secret. - Have a look [here](/docs/guides/mongodb/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. 
Let's create the `MongoDBOpsRequest` CR we have shown above, diff --git a/docs/guides/mongodb/reconfigure/replicaset.md.bak b/docs/guides/mongodb/reconfigure/replicaset.md.bak new file mode 100644 index 0000000000..3bcaf7b872 --- /dev/null +++ b/docs/guides/mongodb/reconfigure/replicaset.md.bak @@ -0,0 +1,645 @@ +--- +title: Reconfigure MongoDB Replicaset +menu: + docs_{{ .version }}: + identifier: mg-reconfigure-replicaset + name: Replicaset + parent: mg-reconfigure + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure MongoDB Replicaset Database + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a MongoDB Replicaset. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [MongoDB](/docs/guides/mongodb/concepts/mongodb.md) + - [ReplicaSet](/docs/guides/mongodb/clustering/replicaset.md) + - [MongoDBOpsRequest](/docs/guides/mongodb/concepts/opsrequest.md) + - [Reconfigure Overview](/docs/guides/mongodb/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/mongodb](/docs/examples/mongodb) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `MongoDB` Replicaset using a supported version by `KubeDB` operator. Then we are going to apply `MongoDBOpsRequest` to reconfigure its configuration. + +### Prepare MongoDB Replicaset + +Now, we are going to deploy a `MongoDB` Replicaset database with version `4.4.26`. + +### Deploy MongoDB + +At first, we will create `mongod.conf` file containing required configuration settings. + +```ini +$ cat mongod.conf +net: + maxIncomingConnections: 10000 +``` +Here, `maxIncomingConnections` is set to `10000`, whereas the default value is `65536`. + +Now, we will create a secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo mg-custom-config --from-file=./mongod.conf +secret/mg-custom-config created +``` + +In this section, we are going to create a MongoDB object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `MongoDB` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: MongoDB +metadata: + name: mg-replicaset + namespace: demo +spec: + version: "4.4.26" + replicas: 3 + replicaSet: + name: rs0 + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + configuration: + secretName: mg-custom-config +``` + +Let's create the `MongoDB` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mongodb/reconfigure/mg-replicaset-config.yaml +mongodb.kubedb.com/mg-replicaset created +``` + +Now, wait until `mg-replicaset` has status `Ready`. 
i.e, + +```bash +$ kubectl get mg -n demo +NAME VERSION STATUS AGE +mg-replicaset 4.4.26 Ready 19m +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +First we need to get the username and password to connect to a mongodb instance, +```bash +$ kubectl get secrets -n demo mg-replicaset-auth -o jsonpath='{.data.\username}' | base64 -d +root + +$ kubectl get secrets -n demo mg-replicaset-auth -o jsonpath='{.data.\password}' | base64 -d +nrKuxni0wDSMrgwy +``` + +Now let's connect to a mongodb instance and run a mongodb internal command to check the configuration we have provided. + +```bash +$ kubectl exec -n demo mg-replicaset-0 -- mongo admin -u root -p nrKuxni0wDSMrgwy --eval "db._adminCommand( {getCmdLineOpts: 1})" --quiet +{ + "argv" : [ + "mongod", + "--dbpath=/data/db", + "--auth", + "--ipv6", + "--bind_ip_all", + "--port=27017", + "--tlsMode=disabled", + "--replSet=rs0", + "--keyFile=/data/configdb/key.txt", + "--clusterAuthMode=keyFile", + "--config=/data/configdb/mongod.conf" + ], + "parsed" : { + "config" : "/data/configdb/mongod.conf", + "net" : { + "bindIp" : "*", + "ipv6" : true, + "maxIncomingConnections" : 10000, + "port" : 27017, + "tls" : { + "mode" : "disabled" + } + }, + "replication" : { + "replSet" : "rs0" + }, + "security" : { + "authorization" : "enabled", + "clusterAuthMode" : "keyFile", + "keyFile" : "/data/configdb/key.txt" + }, + "storage" : { + "dbPath" : "/data/db" + } + }, + "ok" : 1, + "$clusterTime" : { + "clusterTime" : Timestamp(1614668500, 1), + "signature" : { + "hash" : BinData(0,"7sh886HhsNYajGxYGp5Jxi52IzA="), + "keyId" : NumberLong("6934943333319966722") + } + }, + "operationTime" : Timestamp(1614668500, 1) +} +``` + +As we can see from the configuration of ready mongodb, the value of `maxIncomingConnections` has been set to `10000`. + +### Reconfigure using new config secret + +Now we will reconfigure this database to set `maxIncomingConnections` to `20000`. + +Now, we will edit the `mongod.conf` file containing required configuration settings. + +```ini +$ cat mongod.conf +net: + maxIncomingConnections: 20000 +``` + +Then, we will create a new secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo new-custom-config --from-file=./mongod.conf +secret/new-custom-config created +``` + +#### Create MongoDBOpsRequest + +Now, we will use this secret to replace the previous secret using a `MongoDBOpsRequest` CR. The `MongoDBOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-reconfigure-replicaset + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: mg-replicaset + configuration: + replicaSet: + configuration: + secretName: new-custom-config + readinessCriteria: + oplogMaxLagSeconds: 20 + objectsCountDiffPercentage: 10 + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-replicaset` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.customConfig.replicaSet.configuration.secretName` specifies the name of the new secret. +- `spec.customConfig.arbiter.configuration.secretName` could also be specified with a config-secret. +- Have a look [here](/docs/guides/mongodb/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. 
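+
+For reference, if the replica set also ran an arbiter, its config-secret could be supplied with a stanza like the sketch below; the secret name `arbiter-custom-config` is hypothetical, and the field shape is assumed to mirror the `replicaSet` stanza shown above:
+
+```yaml
+spec:
+  configuration:
+    arbiter:
+      configuration:
+        secretName: arbiter-custom-config   # hypothetical secret holding the arbiter's mongod.conf
+```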
+ +Let's create the `MongoDBOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mongodb/reconfigure/mops-reconfigure-replicaset.yaml +mongodbopsrequest.ops.kubedb.com/mops-reconfigure-replicaset created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `MongoDB` object. + +Let's wait for `MongoDBOpsRequest` to be `Successful`. Run the following command to watch `MongoDBOpsRequest` CR, + +```bash +$ watch kubectl get mongodbopsrequest -n demo +Every 2.0s: kubectl get mongodbopsrequest -n demo +NAME TYPE STATUS AGE +mops-reconfigure-replicaset Reconfigure Successful 113s +``` + +We can see from the above output that the `MongoDBOpsRequest` has succeeded. If we describe the `MongoDBOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe mongodbopsrequest -n demo mops-reconfigure-replicaset +Name: mops-reconfigure-replicaset +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: MongoDBOpsRequest +Metadata: + Creation Timestamp: 2021-03-02T07:04:31Z + Generation: 1 + Managed Fields: + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:apply: + f:configuration: + .: + f:replicaSet: + .: + f:configSecret: + .: + f:name: + f:databaseRef: + .: + f:name: + f:readinessCriteria: + .: + f:objectsCountDiffPercentage: + f:oplogMaxLagSeconds: + f:timeout: + f:type: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2021-03-02T07:04:31Z + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:spec: + f:configuration: + f:replicaSet: + f:podTemplate: + .: + f:controller: + f:metadata: + f:spec: + .: + f:resources: + f:status: + .: + f:conditions: + f:observedGeneration: + f:phase: + Manager: kubedb-enterprise + Operation: Update + Time: 2021-03-02T07:04:31Z + Resource Version: 29869 + Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/mongodbopsrequests/mops-reconfigure-replicaset + UID: 064733d6-19db-4153-82f7-bc0580116ee6 +Spec: + Apply: IfReady + Configuration: + Replica Set: + Config Secret: + Name: new-custom-config + Database Ref: + Name: mg-replicaset + Readiness Criteria: + Objects Count Diff Percentage: 10 + Oplog Max Lag Seconds: 20 + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2021-03-02T07:04:31Z + Message: MongoDB ops request is reconfiguring database + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2021-03-02T07:06:21Z + Message: Successfully Reconfigured MongoDB + Observed Generation: 1 + Reason: ReconfigureReplicaset + Status: True + Type: ReconfigureReplicaset + Last Transition Time: 2021-03-02T07:06:21Z + Message: Successfully completed the modification process. 
+ Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 2m55s KubeDB Ops-manager operator Pausing MongoDB demo/mg-replicaset + Normal PauseDatabase 2m55s KubeDB Ops-manager operator Successfully paused MongoDB demo/mg-replicaset + Normal ReconfigureReplicaset 65s KubeDB Ops-manager operator Successfully Reconfigured MongoDB + Normal ResumeDatabase 65s KubeDB Ops-manager operator Resuming MongoDB demo/mg-replicaset + Normal ResumeDatabase 65s KubeDB Ops-manager operator Successfully resumed MongoDB demo/mg-replicaset + Normal Successful 65s KubeDB Ops-manager operator Successfully Reconfigured Database +``` + +Now let's connect to a mongodb instance and run a mongodb internal command to check the new configuration we have provided. + +```bash +$ kubectl exec -n demo mg-replicaset-0 -- mongo admin -u root -p nrKuxni0wDSMrgwy --eval "db._adminCommand( {getCmdLineOpts: 1})" --quiet +{ + "argv" : [ + "mongod", + "--dbpath=/data/db", + "--auth", + "--ipv6", + "--bind_ip_all", + "--port=27017", + "--tlsMode=disabled", + "--replSet=rs0", + "--keyFile=/data/configdb/key.txt", + "--clusterAuthMode=keyFile", + "--config=/data/configdb/mongod.conf" + ], + "parsed" : { + "config" : "/data/configdb/mongod.conf", + "net" : { + "bindIp" : "*", + "ipv6" : true, + "maxIncomingConnections" : 20000, + "port" : 27017, + "tls" : { + "mode" : "disabled" + } + }, + "replication" : { + "replSet" : "rs0" + }, + "security" : { + "authorization" : "enabled", + "clusterAuthMode" : "keyFile", + "keyFile" : "/data/configdb/key.txt" + }, + "storage" : { + "dbPath" : "/data/db" + } + }, + "ok" : 1, + "$clusterTime" : { + "clusterTime" : Timestamp(1614668887, 1), + "signature" : { + "hash" : BinData(0,"5q35Y51+YpbVHFKoaU7lUWi38oY="), + "keyId" : NumberLong("6934943333319966722") + } + }, + "operationTime" : Timestamp(1614668887, 1) +} +``` + +As we can see from the configuration of ready mongodb, the value of `maxIncomingConnections` has been changed from `10000` to `20000`. So the reconfiguration of the database is successful. + + +### Reconfigure using apply config + +Now we will reconfigure this database again to set `maxIncomingConnections` to `30000`. This time we won't use a new secret. We will use the `applyConfig` field of the `MongoDBOpsRequest`. This will merge the new config in the existing secret. + +#### Create MongoDBOpsRequest + +Now, we will use the new configuration in the `applyConfig` field in the `MongoDBOpsRequest` CR. The `MongoDBOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-reconfigure-apply-replicaset + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: mg-replicaset + configuration: + replicaSet: + applyConfig: + mongod.conf: |- + net: + maxIncomingConnections: 30000 + readinessCriteria: + oplogMaxLagSeconds: 20 + objectsCountDiffPercentage: 10 + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-apply-replicaset` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.replicaSet.applyConfig` specifies the new configuration that will be merged in the existing secret. +- `spec.customConfig.arbiter.configuration.secretName` could also be specified with a config-secret. 
+- Have a look [here](/docs/guides/mongodb/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. + +Let's create the `MongoDBOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mongodb/reconfigure/mops-reconfigure-apply-replicaset.yaml +mongodbopsrequest.ops.kubedb.com/mops-reconfigure-apply-replicaset created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. + +Let's wait for `MongoDBOpsRequest` to be `Successful`. Run the following command to watch `MongoDBOpsRequest` CR, + +```bash +$ watch kubectl get mongodbopsrequest -n demo +Every 2.0s: kubectl get mongodbopsrequest -n demo +NAME TYPE STATUS AGE +mops-reconfigure-apply-replicaset Reconfigure Successful 109s +``` + +We can see from the above output that the `MongoDBOpsRequest` has succeeded. If we describe the `MongoDBOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe mongodbopsrequest -n demo mops-reconfigure-apply-replicaset +Name: mops-reconfigure-apply-replicaset +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: MongoDBOpsRequest +Metadata: + Creation Timestamp: 2021-03-02T07:09:39Z + Generation: 1 + Managed Fields: + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:apply: + f:configuration: + .: + f:replicaSet: + .: + f:applyConfig: + f:databaseRef: + .: + f:name: + f:readinessCriteria: + .: + f:objectsCountDiffPercentage: + f:oplogMaxLagSeconds: + f:timeout: + f:type: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2021-03-02T07:09:39Z + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:spec: + f:configuration: + f:replicaSet: + f:podTemplate: + .: + f:controller: + f:metadata: + f:spec: + .: + f:resources: + f:status: + .: + f:conditions: + f:observedGeneration: + f:phase: + Manager: kubedb-enterprise + Operation: Update + Time: 2021-03-02T07:09:39Z + Resource Version: 31005 + Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/mongodbopsrequests/mops-reconfigure-apply-replicaset + UID: 0137442b-1b04-43ed-8de7-ecd913b44065 +Spec: + Apply: IfReady + Configuration: + Replica Set: + Apply Config: net: + maxIncomingConnections: 30000 + + Database Ref: + Name: mg-replicaset + Readiness Criteria: + Objects Count Diff Percentage: 10 + Oplog Max Lag Seconds: 20 + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2021-03-02T07:09:39Z + Message: MongoDB ops request is reconfiguring database + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2021-03-02T07:11:14Z + Message: Successfully Reconfigured MongoDB + Observed Generation: 1 + Reason: ReconfigureReplicaset + Status: True + Type: ReconfigureReplicaset + Last Transition Time: 2021-03-02T07:11:14Z + Message: Successfully completed the modification process. 
+ Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 9m20s KubeDB Ops-manager operator Pausing MongoDB demo/mg-replicaset + Normal PauseDatabase 9m20s KubeDB Ops-manager operator Successfully paused MongoDB demo/mg-replicaset + Normal ReconfigureReplicaset 7m45s KubeDB Ops-manager operator Successfully Reconfigured MongoDB + Normal ResumeDatabase 7m45s KubeDB Ops-manager operator Resuming MongoDB demo/mg-replicaset + Normal ResumeDatabase 7m45s KubeDB Ops-manager operator Successfully resumed MongoDB demo/mg-replicaset + Normal Successful 7m45s KubeDB Ops-manager operator Successfully Reconfigured Database +``` + +Now let's connect to a mongodb instance and run a mongodb internal command to check the new configuration we have provided. + +```bash +$ kubectl exec -n demo mg-replicaset-0 -- mongo admin -u root -p nrKuxni0wDSMrgwy --eval "db._adminCommand( {getCmdLineOpts: 1})" --quiet +{ + "argv" : [ + "mongod", + "--dbpath=/data/db", + "--auth", + "--ipv6", + "--bind_ip_all", + "--port=27017", + "--tlsMode=disabled", + "--replSet=rs0", + "--keyFile=/data/configdb/key.txt", + "--clusterAuthMode=keyFile", + "--config=/data/configdb/mongod.conf" + ], + "parsed" : { + "config" : "/data/configdb/mongod.conf", + "net" : { + "bindIp" : "*", + "ipv6" : true, + "maxIncomingConnections" : 30000, + "port" : 27017, + "tls" : { + "mode" : "disabled" + } + }, + "replication" : { + "replSet" : "rs0" + }, + "security" : { + "authorization" : "enabled", + "clusterAuthMode" : "keyFile", + "keyFile" : "/data/configdb/key.txt" + }, + "storage" : { + "dbPath" : "/data/db" + } + }, + "ok" : 1, + "$clusterTime" : { + "clusterTime" : Timestamp(1614669580, 1), + "signature" : { + "hash" : BinData(0,"u/xTAa4aW/8bsRvBYPffwQCeTF0="), + "keyId" : NumberLong("6934943333319966722") + } + }, + "operationTime" : Timestamp(1614669580, 1) +} +``` + +As we can see from the configuration of ready mongodb, the value of `maxIncomingConnections` has been changed from `20000` to `30000`. So the reconfiguration of the database using the `applyConfig` field is successful. + + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete mg -n demo mg-replicaset +kubectl delete mongodbopsrequest -n demo mops-reconfigure-replicaset mops-reconfigure-apply-replicaset +``` \ No newline at end of file diff --git a/docs/guides/mongodb/reconfigure/sharding.md b/docs/guides/mongodb/reconfigure/sharding.md index cc6210a731..835ec46361 100644 --- a/docs/guides/mongodb/reconfigure/sharding.md +++ b/docs/guides/mongodb/reconfigure/sharding.md @@ -61,7 +61,7 @@ $ kubectl create secret generic -n demo mg-custom-config --from-file=./mongod.co secret/mg-custom-config created ``` -In this section, we are going to create a MongoDB object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `MongoDB` CR that we are going to create, +In this section, we are going to create a MongoDB object specifying `spec.configuration` field to apply this custom configuration. 
Below is the YAML of the `MongoDB` CR that we are going to create, ```yaml apiVersion: kubedb.com/v1 @@ -74,8 +74,8 @@ spec: shardTopology: configServer: replicas: 3 - configSecret: - name: mg-custom-config + configuration: + secretName: mg-custom-config storage: resources: requests: @@ -83,13 +83,13 @@ spec: storageClassName: standard mongos: replicas: 2 - configSecret: - name: mg-custom-config + configuration: + secretName: mg-custom-config shard: replicas: 3 shards: 2 - configSecret: - name: mg-custom-config + configuration: + secretName: mg-custom-config storage: resources: requests: @@ -197,14 +197,14 @@ spec: name: mg-sharding configuration: shard: - configSecret: - name: new-custom-config + configuration: + secretName: new-custom-config configServer: - configSecret: - name: new-custom-config + configuration: + secretName: new-custom-config mongos: - configSecret: - name: new-custom-config + configuration: + secretName: new-custom-config readinessCriteria: oplogMaxLagSeconds: 20 objectsCountDiffPercentage: 10 @@ -216,10 +216,10 @@ Here, - `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-shard` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configuration.shard.configSecret.name` specifies the name of the new secret for shard nodes. -- `spec.configuration.configServer.configSecret.name` specifies the name of the new secret for configServer nodes. -- `spec.configuration.mongos.configSecret.name` specifies the name of the new secret for mongos nodes. -- `spec.customConfig.arbiter.configSecret.name` could also be specified with a config-secret. +- `spec.configuration.shard.configuration.secretName` specifies the name of the new secret for shard nodes. +- `spec.configuration.configServer.configuration.secretName` specifies the name of the new secret for configServer nodes. +- `spec.configuration.mongos.configuration.secretName` specifies the name of the new secret for mongos nodes. +- `spec.customConfig.arbiter.configuration.secretName` could also be specified with a config-secret. - Have a look [here](/docs/guides/mongodb/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. > **Note:** If you don't want to reconfigure all the components together, you can only specify the components (shard, configServer and mongos) that you want to reconfigure. @@ -335,7 +335,7 @@ Here, - `spec.configuration.shard.applyConfig` specifies the new configuration that will be merged in the existing secret for shard nodes. - `spec.configuration.configServer.applyConfig` specifies the new configuration that will be merged in the existing secret for configServer nodes. - `spec.configuration.mongos.applyConfig` specifies the new configuration that will be merged in the existing secret for mongos nodes. -- `spec.customConfig.arbiter.configSecret.name` could also be specified with a config-secret. +- `spec.customConfig.arbiter.configuration.secretName` could also be specified with a config-secret. - Have a look [here](/docs/guides/mongodb/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. > **Note:** If you don't want to reconfigure all the components together, you can only specify the components (shard, configServer and mongos) that you want to reconfigure. 
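+
+For example, a sketch of a `MongoDBOpsRequest` that reconfigures only the `mongos` component (reusing the `mg-sharding` object and the `applyConfig` approach from this guide; the metadata name is a placeholder) could look like this:
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: MongoDBOpsRequest
+metadata:
+  name: mops-reconfigure-mongos-only   # placeholder name
+  namespace: demo
+spec:
+  type: Reconfigure
+  databaseRef:
+    name: mg-sharding
+  configuration:
+    mongos:
+      applyConfig:
+        mongod.conf: |-
+          net:
+            maxIncomingConnections: 30000
+```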
diff --git a/docs/guides/mongodb/reconfigure/sharding.md.bak b/docs/guides/mongodb/reconfigure/sharding.md.bak new file mode 100644 index 0000000000..03bd2eabe8 --- /dev/null +++ b/docs/guides/mongodb/reconfigure/sharding.md.bak @@ -0,0 +1,571 @@ +--- +title: Reconfigure MongoDB Sharded Cluster +menu: + docs_{{ .version }}: + identifier: mg-reconfigure-shard + name: Sharding + parent: mg-reconfigure + weight: 40 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure MongoDB Shard + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a MongoDB shard. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [MongoDB](/docs/guides/mongodb/concepts/mongodb.md) + - [Sharding](/docs/guides/mongodb/clustering/sharding.md) + - [MongoDBOpsRequest](/docs/guides/mongodb/concepts/opsrequest.md) + - [Reconfigure Overview](/docs/guides/mongodb/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/mongodb](/docs/examples/mongodb) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `MongoDB` sharded database using a supported version by `KubeDB` operator. Then we are going to apply `MongoDBOpsRequest` to reconfigure its configuration. + +### Prepare MongoDB Shard + +Now, we are going to deploy a `MongoDB` sharded database with version `4.4.26`. + +### Deploy MongoDB database + +At first, we will create `mongod.conf` file containing required configuration settings. + +```ini +$ cat mongod.conf +net: + maxIncomingConnections: 10000 +``` +Here, `maxIncomingConnections` is set to `10000`, whereas the default value is `65536`. + +Now, we will create a secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo mg-custom-config --from-file=./mongod.conf +secret/mg-custom-config created +``` + +In this section, we are going to create a MongoDB object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `MongoDB` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: MongoDB +metadata: + name: mg-sharding + namespace: demo +spec: + version: 4.4.26 + shardTopology: + configServer: + replicas: 3 + configuration: + secretName: mg-custom-config + storage: + resources: + requests: + storage: 1Gi + storageClassName: standard + mongos: + replicas: 2 + configuration: + secretName: mg-custom-config + shard: + replicas: 3 + shards: 2 + configuration: + secretName: mg-custom-config + storage: + resources: + requests: + storage: 1Gi + storageClassName: standard +``` + +Let's create the `MongoDB` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mongodb/reconfigure/mg-shard-config.yaml +mongodb.kubedb.com/mg-sharding created +``` + +Now, wait until `mg-sharding` has status `Ready`. 
i.e, + +```bash +$ kubectl get mg -n demo +NAME VERSION STATUS AGE +mg-sharding 4.4.26 Ready 3m23s +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +First we need to get the username and password to connect to a mongodb instance, +```bash +$ kubectl get secrets -n demo mg-sharding-auth -o jsonpath='{.data.\username}' | base64 -d +root + +$ kubectl get secrets -n demo mg-sharding-auth -o jsonpath='{.data.\password}' | base64 -d +Dv8F55zVNiEkhHM6 +``` + +Now let's connect to a mongodb instance from each type of nodes and run a mongodb internal command to check the configuration we have provided. + +```bash +$ kubectl exec -n demo mg-sharding-mongos-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet +{ + "bindIp" : "*", + "ipv6" : true, + "maxIncomingConnections" : 10000, + "port" : 27017, + "tls" : { + "mode" : "disabled" + } +} + +$ kubectl exec -n demo mg-sharding-configsvr-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet +{ + "bindIp" : "*", + "ipv6" : true, + "maxIncomingConnections" : 10000, + "port" : 27017, + "tls" : { + "mode" : "disabled" + } +} + +$ kubectl exec -n demo mg-sharding-shard0-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet +{ + "bindIp" : "*", + "ipv6" : true, + "maxIncomingConnections" : 10000, + "port" : 27017, + "tls" : { + "mode" : "disabled" + } +} +``` + +As we can see from the configuration of ready mongodb, the value of `maxIncomingConnections` has been set to `10000` in all nodes. + +### Reconfigure using new secret + +Now we will reconfigure this database to set `maxIncomingConnections` to `20000`. + +Now, we will edit the `mongod.conf` file containing required configuration settings. + +```ini +$ cat mongod.conf +net: + maxIncomingConnections: 20000 +``` + +Then, we will create a new secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo new-custom-config --from-file=./mongod.conf +secret/new-custom-config created +``` + +#### Create MongoDBOpsRequest + +Now, we will use this secret to replace the previous secret using a `MongoDBOpsRequest` CR. The `MongoDBOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-reconfigure-shard + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: mg-sharding + configuration: + shard: + configuration: + secretName: new-custom-config + configServer: + configuration: + secretName: new-custom-config + mongos: + configuration: + secretName: new-custom-config + readinessCriteria: + oplogMaxLagSeconds: 20 + objectsCountDiffPercentage: 10 + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-shard` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.shard.configuration.secretName` specifies the name of the new secret for shard nodes. +- `spec.configuration.configServer.configuration.secretName` specifies the name of the new secret for configServer nodes. +- `spec.configuration.mongos.configuration.secretName` specifies the name of the new secret for mongos nodes. +- `spec.customConfig.arbiter.configuration.secretName` could also be specified with a config-secret. 
+- Have a look [here](/docs/guides/mongodb/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. + +> **Note:** If you don't want to reconfigure all the components together, you can only specify the components (shard, configServer and mongos) that you want to reconfigure. + +Let's create the `MongoDBOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mongodb/reconfigure/mops-reconfigure-shard.yaml +mongodbopsrequest.ops.kubedb.com/mops-reconfigure-shard created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `MongoDB` object. + +Let's wait for `MongoDBOpsRequest` to be `Successful`. Run the following command to watch `MongoDBOpsRequest` CR, + +```bash +$ watch kubectl get mongodbopsrequest -n demo +Every 2.0s: kubectl get mongodbopsrequest -n demo +NAME TYPE STATUS AGE +mops-reconfigure-shard Reconfigure Successful 3m8s +``` + +We can see from the above output that the `MongoDBOpsRequest` has succeeded. If we describe the `MongoDBOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe mongodbopsrequest -n demo mops-reconfigure-shard + +``` + +Now let's connect to a mongodb instance from each type of nodes and run a mongodb internal command to check the new configuration we have provided. + +```bash +$ kubectl exec -n demo mg-sharding-mongos-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet + { + "bindIp" : "0.0.0.0", + "maxIncomingConnections" : 20000, + "port" : 27017, + "ssl" : { + "mode" : "disabled" + } + } + +$ kubectl exec -n demo mg-sharding-configsvr-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet + { + "bindIp" : "0.0.0.0", + "maxIncomingConnections" : 20000, + "port" : 27017, + "ssl" : { + "mode" : "disabled" + } + } + +$ kubectl exec -n demo mg-sharding-shard0-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet + { + "bindIp" : "0.0.0.0", + "maxIncomingConnections" : 20000, + "port" : 27017, + "ssl" : { + "mode" : "disabled" + } + } +``` + +As we can see from the configuration of ready mongodb, the value of `maxIncomingConnections` has been changed from `10000` to `20000` in all type of nodes. So the reconfiguration of the database is successful. + +### Reconfigure using apply config + +Now we will reconfigure this database again to set `maxIncomingConnections` to `30000`. This time we won't use a new secret. We will use the `applyConfig` field of the `MongoDBOpsRequest`. This will merge the new config in the existing secret. + +#### Create MongoDBOpsRequest + +Now, we will use the new configuration in the `data` field in the `MongoDBOpsRequest` CR. 
The `MongoDBOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-reconfigure-apply-shard + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: mg-sharding + configuration: + shard: + applyConfig: + mongod.conf: |- + net: + maxIncomingConnections: 30000 + configServer: + applyConfig: + mongod.conf: |- + net: + maxIncomingConnections: 30000 + mongos: + applyConfig: + mongod.conf: |- + net: + maxIncomingConnections: 30000 + readinessCriteria: + oplogMaxLagSeconds: 20 + objectsCountDiffPercentage: 10 + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-apply-shard` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.shard.applyConfig` specifies the new configuration that will be merged in the existing secret for shard nodes. +- `spec.configuration.configServer.applyConfig` specifies the new configuration that will be merged in the existing secret for configServer nodes. +- `spec.configuration.mongos.applyConfig` specifies the new configuration that will be merged in the existing secret for mongos nodes. +- `spec.customConfig.arbiter.configuration.secretName` could also be specified with a config-secret. +- Have a look [here](/docs/guides/mongodb/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. + +> **Note:** If you don't want to reconfigure all the components together, you can only specify the components (shard, configServer and mongos) that you want to reconfigure. + +Let's create the `MongoDBOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mongodb/reconfigure/mops-reconfigure-apply-shard.yaml +mongodbopsrequest.ops.kubedb.com/mops-reconfigure-apply-shard created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. + +Let's wait for `MongoDBOpsRequest` to be `Successful`. Run the following command to watch `MongoDBOpsRequest` CR, + +```bash +$ watch kubectl get mongodbopsrequest -n demo +Every 2.0s: kubectl get mongodbopsrequest -n demo +NAME TYPE STATUS AGE +mops-reconfigure-apply-shard Reconfigure Successful 3m24s +``` + +We can see from the above output that the `MongoDBOpsRequest` has succeeded. If we describe the `MongoDBOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. 
+ +```bash +$ kubectl describe mongodbopsrequest -n demo mops-reconfigure-apply-shard +Name: mops-reconfigure-apply-shard +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: MongoDBOpsRequest +Metadata: + Creation Timestamp: 2021-03-02T13:08:25Z + Generation: 1 + Managed Fields: + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:apply: + f:configuration: + .: + f:configServer: + .: + f:configSecret: + .: + f:name: + f:mongos: + .: + f:configSecret: + .: + f:name: + f:shard: + .: + f:configSecret: + .: + f:name: + f:databaseRef: + .: + f:name: + f:readinessCriteria: + .: + f:objectsCountDiffPercentage: + f:oplogMaxLagSeconds: + f:timeout: + f:type: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2021-03-02T13:08:25Z + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:spec: + f:configuration: + f:configServer: + f:podTemplate: + .: + f:controller: + f:metadata: + f:spec: + .: + f:resources: + f:mongos: + f:podTemplate: + .: + f:controller: + f:metadata: + f:spec: + .: + f:resources: + f:shard: + f:podTemplate: + .: + f:controller: + f:metadata: + f:spec: + .: + f:resources: + f:status: + .: + f:conditions: + f:observedGeneration: + f:phase: + Manager: kubedb-enterprise + Operation: Update + Time: 2021-03-02T13:08:25Z + Resource Version: 103635 + Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/mongodbopsrequests/mops-reconfigure-apply-shard + UID: ab454bcb-164c-4fa2-9eaa-dd47c60fe874 +Spec: + Apply: IfReady + Configuration: + Config Server: + Apply Config: net: + maxIncomingConnections: 30000 + + Mongos: + Apply Config: net: + maxIncomingConnections: 30000 + + Shard: + Apply Config: net: + maxIncomingConnections: 30000 + + Database Ref: + Name: mg-sharding + Readiness Criteria: + Objects Count Diff Percentage: 10 + Oplog Max Lag Seconds: 20 + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2021-03-02T13:08:25Z + Message: MongoDB ops request is reconfiguring database + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2021-03-02T13:10:10Z + Message: Successfully Reconfigured MongoDB + Observed Generation: 1 + Reason: ReconfigureConfigServer + Status: True + Type: ReconfigureConfigServer + Last Transition Time: 2021-03-02T13:13:15Z + Message: Successfully Reconfigured MongoDB + Observed Generation: 1 + Reason: ReconfigureShard + Status: True + Type: ReconfigureShard + Last Transition Time: 2021-03-02T13:14:10Z + Message: Successfully Reconfigured MongoDB + Observed Generation: 1 + Reason: ReconfigureMongos + Status: True + Type: ReconfigureMongos + Last Transition Time: 2021-03-02T13:14:10Z + Message: Successfully completed the modification process. 
+ Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 13m KubeDB Ops-manager operator Pausing MongoDB demo/mg-sharding + Normal PauseDatabase 13m KubeDB Ops-manager operator Successfully paused MongoDB demo/mg-sharding + Normal ReconfigureConfigServer 12m KubeDB Ops-manager operator Successfully Reconfigured MongoDB + Normal ReconfigureShard 9m7s KubeDB Ops-manager operator Successfully Reconfigured MongoDB + Normal ReconfigureMongos 8m12s KubeDB Ops-manager operator Successfully Reconfigured MongoDB + Normal ResumeDatabase 8m12s KubeDB Ops-manager operator Resuming MongoDB demo/mg-sharding + Normal ResumeDatabase 8m12s KubeDB Ops-manager operator Successfully resumed MongoDB demo/mg-sharding + Normal Successful 8m12s KubeDB Ops-manager operator Successfully Reconfigured Database +``` + +Now let's connect to a mongodb instance from each type of nodes and run a mongodb internal command to check the new configuration we have provided. + +```bash +$ kubectl exec -n demo mg-sharding-mongos-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet +{ + "bindIp" : "*", + "ipv6" : true, + "maxIncomingConnections" : 20000, + "port" : 27017, + "tls" : { + "mode" : "disabled" + } +} + +$ kubectl exec -n demo mg-sharding-configsvr-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet +{ + "bindIp" : "*", + "ipv6" : true, + "maxIncomingConnections" : 20000, + "port" : 27017, + "tls" : { + "mode" : "disabled" + } +} + +$ kubectl exec -n demo mg-sharding-shard0-0 -- mongo admin -u root -p Dv8F55zVNiEkhHM6 --eval "db._adminCommand( {getCmdLineOpts: 1}).parsed.net" --quiet +{ + "bindIp" : "*", + "ipv6" : true, + "maxIncomingConnections" : 20000, + "port" : 27017, + "tls" : { + "mode" : "disabled" + } +} +``` + +As we can see from the configuration of ready mongodb, the value of `maxIncomingConnections` has been changed from `20000` to `30000` in all nodes. So the reconfiguration of the database using the data field is successful. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete mg -n demo mg-sharding +kubectl delete mongodbopsrequest -n demo mops-reconfigure-shard mops-reconfigure-apply-shard +``` \ No newline at end of file diff --git a/docs/guides/mongodb/reconfigure/standalone.md b/docs/guides/mongodb/reconfigure/standalone.md index 36ca7f654c..131b2c2acc 100644 --- a/docs/guides/mongodb/reconfigure/standalone.md +++ b/docs/guides/mongodb/reconfigure/standalone.md @@ -60,7 +60,7 @@ $ kubectl create secret generic -n demo mg-custom-config --from-file=./mongod.co secret/mg-custom-config created ``` -In this section, we are going to create a MongoDB object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `MongoDB` CR that we are going to create, +In this section, we are going to create a MongoDB object specifying `spec.configuration` field to apply this custom configuration. 
Below is the YAML of the `MongoDB` CR that we are going to create, ```yaml apiVersion: kubedb.com/v1 @@ -78,8 +78,8 @@ spec: resources: requests: storage: 1Gi - configSecret: - name: mg-custom-config + configuration: + secretName: mg-custom-config ``` Let's create the `MongoDB` CR we have shown above, @@ -182,8 +182,8 @@ spec: name: mg-standalone configuration: standalone: - configSecret: - name: new-custom-config + configuration: + secretName: new-custom-config readinessCriteria: oplogMaxLagSeconds: 20 objectsCountDiffPercentage: 10 @@ -195,7 +195,7 @@ Here, - `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-standalone` database. - `spec.type` specifies that we are performing `Reconfigure` on our database. -- `spec.configuration.standalone.configSecret.name` specifies the name of the new secret. +- `spec.configuration.standalone.configuration.secretName` specifies the name of the new secret. - Have a look [here](/docs/guides/mongodb/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. Let's create the `MongoDBOpsRequest` CR we have shown above, diff --git a/docs/guides/mongodb/reconfigure/standalone.md.bak b/docs/guides/mongodb/reconfigure/standalone.md.bak new file mode 100644 index 0000000000..0d8a1dfe2a --- /dev/null +++ b/docs/guides/mongodb/reconfigure/standalone.md.bak @@ -0,0 +1,590 @@ +--- +title: Reconfigure Standalone MongoDB Database +menu: + docs_{{ .version }}: + identifier: mg-reconfigure-standalone + name: Standalone + parent: mg-reconfigure + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure MongoDB Standalone Database + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a MongoDB standalone database. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [MongoDB](/docs/guides/mongodb/concepts/mongodb.md) + - [MongoDBOpsRequest](/docs/guides/mongodb/concepts/opsrequest.md) + - [Reconfigure Overview](/docs/guides/mongodb/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/mongodb](/docs/examples/mongodb) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `MongoDB` standalone using a supported version by `KubeDB` operator. Then we are going to apply `MongoDBOpsRequest` to reconfigure its configuration. + +### Prepare MongoDB Standalone Database + +Now, we are going to deploy a `MongoDB` standalone database with version `4.4.26`. + +### Deploy MongoDB standalone + +At first, we will create `mongod.conf` file containing required configuration settings. + +```ini +$ cat mongod.conf +net: + maxIncomingConnections: 10000 +``` +Here, `maxIncomingConnections` is set to `10000`, whereas the default value is `65536`. + +Now, we will create a secret with this configuration file. 
+ +```bash +$ kubectl create secret generic -n demo mg-custom-config --from-file=./mongod.conf +secret/mg-custom-config created +``` + +In this section, we are going to create a MongoDB object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `MongoDB` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: MongoDB +metadata: + name: mg-standalone + namespace: demo +spec: + version: "4.4.26" + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + configuration: + secretName: mg-custom-config +``` + +Let's create the `MongoDB` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mongodb/reconfigure/mg-standalone-config.yaml +mongodb.kubedb.com/mg-standalone created +``` + +Now, wait until `mg-standalone` has status `Ready`. i.e, + +```bash +$ kubectl get mg -n demo +NAME VERSION STATUS AGE +mg-standalone 4.4.26 Ready 23s +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +First we need to get the username and password to connect to a mongodb instance, +```bash +$ kubectl get secrets -n demo mg-standalone-auth -o jsonpath='{.data.\username}' | base64 -d +root + +$ kubectl get secrets -n demo mg-standalone-auth -o jsonpath='{.data.\password}' | base64 -d +m6lXjZugrC4VEpB8 +``` + +Now let's connect to a mongodb instance and run a mongodb internal command to check the configuration we have provided. + +```bash +$ kubectl exec -n demo mg-standalone-0 -- mongo admin -u root -p m6lXjZugrC4VEpB8 --eval "db._adminCommand( {getCmdLineOpts: 1})" --quiet +{ + "argv" : [ + "mongod", + "--dbpath=/data/db", + "--auth", + "--ipv6", + "--bind_ip_all", + "--port=27017", + "--tlsMode=disabled", + "--config=/data/configdb/mongod.conf" + ], + "parsed" : { + "config" : "/data/configdb/mongod.conf", + "net" : { + "bindIp" : "*", + "ipv6" : true, + "maxIncomingConnections" : 10000, + "port" : 27017, + "tls" : { + "mode" : "disabled" + } + }, + "security" : { + "authorization" : "enabled" + }, + "storage" : { + "dbPath" : "/data/db" + } + }, + "ok" : 1 +} +``` + +As we can see from the configuration of running mongodb, the value of `maxIncomingConnections` has been set to `10000`. + +### Reconfigure using new secret + +Now we will reconfigure this database to set `maxIncomingConnections` to `20000`. + +Now, we will edit the `mongod.conf` file containing required configuration settings. + +```ini +$ cat mongod.conf +net: + maxIncomingConnections: 20000 +``` + +Then, we will create a new secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo new-custom-config --from-file=./mongod.conf +secret/new-custom-config created +``` + +#### Create MongoDBOpsRequest + +Now, we will use this secret to replace the previous secret using a `MongoDBOpsRequest` CR. The `MongoDBOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-reconfigure-standalone + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: mg-standalone + configuration: + standalone: + configuration: + secretName: new-custom-config + readinessCriteria: + oplogMaxLagSeconds: 20 + objectsCountDiffPercentage: 10 + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-standalone` database. 
+- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.standalone.configuration.secretName` specifies the name of the new secret. +- Have a look [here](/docs/guides/mongodb/concepts/opsrequest.md#specreadinesscriteria) on the respective sections to understand the `readinessCriteria`, `timeout` & `apply` fields. + +Let's create the `MongoDBOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mongodb/reconfigure/mops-reconfigure-standalone.yaml +mongodbopsrequest.ops.kubedb.com/mops-reconfigure-standalone created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `MongoDB` object. + +Let's wait for `MongoDBOpsRequest` to be `Successful`. Run the following command to watch `MongoDBOpsRequest` CR, + +```bash +$ watch kubectl get mongodbopsrequest -n demo +Every 2.0s: kubectl get mongodbopsrequest -n demo +NAME TYPE STATUS AGE +mops-reconfigure-standalone Reconfigure Successful 10m +``` + +We can see from the above output that the `MongoDBOpsRequest` has succeeded. If we describe the `MongoDBOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe mongodbopsrequest -n demo mops-reconfigure-standalone +Name: mops-reconfigure-standalone +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: MongoDBOpsRequest +Metadata: + Creation Timestamp: 2021-03-02T15:04:45Z + Generation: 1 + Managed Fields: + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:apply: + f:configuration: + .: + f:standalone: + .: + f:configSecret: + .: + f:name: + f:databaseRef: + .: + f:name: + f:readinessCriteria: + .: + f:objectsCountDiffPercentage: + f:oplogMaxLagSeconds: + f:timeout: + f:type: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2021-03-02T15:04:45Z + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:spec: + f:configuration: + f:standalone: + f:podTemplate: + .: + f:controller: + f:metadata: + f:spec: + .: + f:resources: + f:status: + .: + f:conditions: + f:observedGeneration: + f:phase: + Manager: kubedb-enterprise + Operation: Update + Time: 2021-03-02T15:04:45Z + Resource Version: 125826 + Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/mongodbopsrequests/mops-reconfigure-standalone + UID: f63bb606-9df5-4516-9901-97dfe5b46b15 +Spec: + Apply: IfReady + Configuration: + Standalone: + Config Secret: + Name: new-custom-config + Database Ref: + Name: mg-standalone + Readiness Criteria: + Objects Count Diff Percentage: 10 + Oplog Max Lag Seconds: 20 + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2021-03-02T15:04:45Z + Message: MongoDB ops request is reconfiguring database + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2021-03-02T15:05:10Z + Message: Successfully Reconfigured MongoDB + Observed Generation: 1 + Reason: ReconfigureStandalone + Status: True + Type: ReconfigureStandalone + Last Transition Time: 2021-03-02T15:05:10Z + Message: Successfully completed the modification process. 
+ Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 60s KubeDB Ops-manager operator Pausing MongoDB demo/mg-standalone + Normal PauseDatabase 60s KubeDB Ops-manager operator Successfully paused MongoDB demo/mg-standalone + Normal ReconfigureStandalone 35s KubeDB Ops-manager operator Successfully Reconfigured MongoDB + Normal ResumeDatabase 35s KubeDB Ops-manager operator Resuming MongoDB demo/mg-standalone + Normal ResumeDatabase 35s KubeDB Ops-manager operator Successfully resumed MongoDB demo/mg-standalone + Normal Successful 35s KubeDB Ops-manager operator Successfully Reconfigured Database +``` + +Now let's connect to a mongodb instance and run a mongodb internal command to check the new configuration we have provided. + +```bash +$ kubectl exec -n demo mg-standalone-0 -- mongo admin -u root -p m6lXjZugrC4VEpB8 --eval "db._adminCommand( {getCmdLineOpts: 1})" --quiet +{ + "argv" : [ + "mongod", + "--dbpath=/data/db", + "--auth", + "--ipv6", + "--bind_ip_all", + "--port=27017", + "--tlsMode=disabled", + "--config=/data/configdb/mongod.conf" + ], + "parsed" : { + "config" : "/data/configdb/mongod.conf", + "net" : { + "bindIp" : "*", + "ipv6" : true, + "maxIncomingConnections" : 20000, + "port" : 27017, + "tls" : { + "mode" : "disabled" + } + }, + "security" : { + "authorization" : "enabled" + }, + "storage" : { + "dbPath" : "/data/db" + } + }, + "ok" : 1 +} +``` + +As we can see from the configuration of running mongodb, the value of `maxIncomingConnections` has been changed from `10000` to `20000`. So the reconfiguration of the database is successful. + + +### Reconfigure using apply config + +Now we will reconfigure this database again to set `maxIncomingConnections` to `30000`. This time we won't use a new secret. We will use the `applyConfig` field of the `MongoDBOpsRequest`. This will merge the new config in the existing secret. + +#### Create MongoDBOpsRequest + +Now, we will use the new configuration in the `data` field in the `MongoDBOpsRequest` CR. The `MongoDBOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MongoDBOpsRequest +metadata: + name: mops-reconfigure-apply-standalone + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: mg-standalone + configuration: + standalone: + applyConfig: + mongod.conf: |- + net: + maxIncomingConnections: 30000 + readinessCriteria: + oplogMaxLagSeconds: 20 + objectsCountDiffPercentage: 10 + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `mops-reconfigure-apply-standalone` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.standalone.applyConfig` specifies the new configuration that will be merged in the existing secret. + +Let's create the `MongoDBOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mongodb/reconfigure/mops-reconfigure-apply-standalone.yaml +mongodbopsrequest.ops.kubedb.com/mops-reconfigure-apply-standalone created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. + +Let's wait for `MongoDBOpsRequest` to be `Successful`. 
Run the following command to watch `MongoDBOpsRequest` CR, + +```bash +$ watch kubectl get mongodbopsrequest -n demo +Every 2.0s: kubectl get mongodbopsrequest -n demo +NAME TYPE STATUS AGE +mops-reconfigure-apply-standalone Reconfigure Successful 38s +``` + +We can see from the above output that the `MongoDBOpsRequest` has succeeded. If we describe the `MongoDBOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe mongodbopsrequest -n demo mops-reconfigure-apply-standalone +Name: mops-reconfigure-apply-standalone +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: MongoDBOpsRequest +Metadata: + Creation Timestamp: 2021-03-02T15:09:12Z + Generation: 1 + Managed Fields: + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:apply: + f:configuration: + .: + f:standalone: + .: + f:applyConfig: + f:databaseRef: + .: + f:name: + f:readinessCriteria: + .: + f:objectsCountDiffPercentage: + f:oplogMaxLagSeconds: + f:timeout: + f:type: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2021-03-02T15:09:12Z + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:spec: + f:configuration: + f:standalone: + f:podTemplate: + .: + f:controller: + f:metadata: + f:spec: + .: + f:resources: + f:status: + .: + f:conditions: + f:observedGeneration: + f:phase: + Manager: kubedb-enterprise + Operation: Update + Time: 2021-03-02T15:09:13Z + Resource Version: 126782 + Self Link: /apis/ops.kubedb.com/v1alpha1/namespaces/demo/mongodbopsrequests/mops-reconfigure-apply-standalone + UID: 33eea32f-e2af-4e36-b612-c528549e3d65 +Spec: + Apply: IfReady + Configuration: + Standalone: + Apply Config: net: + maxIncomingConnections: 30000 + + Database Ref: + Name: mg-standalone + Readiness Criteria: + Objects Count Diff Percentage: 10 + Oplog Max Lag Seconds: 20 + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2021-03-02T15:09:13Z + Message: MongoDB ops request is reconfiguring database + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2021-03-02T15:09:38Z + Message: Successfully Reconfigured MongoDB + Observed Generation: 1 + Reason: ReconfigureStandalone + Status: True + Type: ReconfigureStandalone + Last Transition Time: 2021-03-02T15:09:38Z + Message: Successfully completed the modification process. + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal PauseDatabase 118s KubeDB Ops-manager operator Pausing MongoDB demo/mg-standalone + Normal PauseDatabase 118s KubeDB Ops-manager operator Successfully paused MongoDB demo/mg-standalone + Normal ReconfigureStandalone 93s KubeDB Ops-manager operator Successfully Reconfigured MongoDB + Normal ResumeDatabase 93s KubeDB Ops-manager operator Resuming MongoDB demo/mg-standalone + Normal ResumeDatabase 93s KubeDB Ops-manager operator Successfully resumed MongoDB demo/mg-standalone + Normal Successful 93s KubeDB Ops-manager operator Successfully Reconfigured Database +``` + +Now let's connect to a mongodb instance and run a mongodb internal command to check the new configuration we have provided. 
+ +```bash +$ kubectl exec -n demo mg-standalone-0 -- mongo admin -u root -p m6lXjZugrC4VEpB8 --eval "db._adminCommand( {getCmdLineOpts: 1})" --quiet +{ + "argv" : [ + "mongod", + "--dbpath=/data/db", + "--auth", + "--ipv6", + "--bind_ip_all", + "--port=27017", + "--tlsMode=disabled", + "--config=/data/configdb/mongod.conf" + ], + "parsed" : { + "config" : "/data/configdb/mongod.conf", + "net" : { + "bindIp" : "*", + "ipv6" : true, + "maxIncomingConnections" : 30000, + "port" : 27017, + "tls" : { + "mode" : "disabled" + } + }, + "security" : { + "authorization" : "enabled" + }, + "storage" : { + "dbPath" : "/data/db" + } + }, + "ok" : 1 +} +``` + +As we can see from the configuration of running mongodb, the value of `maxIncomingConnections` has been changed from `20000` to `30000`. So the reconfiguration of the database using the `applyConfig` field is successful. + + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete mg -n demo mg-standalone +kubectl delete mongodbopsrequest -n demo mops-reconfigure-standalone mops-reconfigure-apply-standalone +``` \ No newline at end of file diff --git a/docs/guides/mongodb/vault-integration/kmip-encryption/examples/mg.yaml b/docs/guides/mongodb/vault-integration/kmip-encryption/examples/mg.yaml index 110b046aca..ebfbbe66e1 100644 --- a/docs/guides/mongodb/vault-integration/kmip-encryption/examples/mg.yaml +++ b/docs/guides/mongodb/vault-integration/kmip-encryption/examples/mg.yaml @@ -24,5 +24,5 @@ spec: storageType: Durable deletionPolicy: WipeOut version: "percona-5.0.23" - configSecret: - name: mg-configuration \ No newline at end of file + configuration: + secretName: mg-configuration \ No newline at end of file diff --git a/docs/guides/mongodb/vault-integration/kmip-encryption/examples/mg.yaml.bak b/docs/guides/mongodb/vault-integration/kmip-encryption/examples/mg.yaml.bak new file mode 100644 index 0000000000..110b046aca --- /dev/null +++ b/docs/guides/mongodb/vault-integration/kmip-encryption/examples/mg.yaml.bak @@ -0,0 +1,28 @@ +apiVersion: kubedb.com/v1 +kind: MongoDB +metadata: + name: mg-kmip + namespace: demo +spec: + podTemplate: + spec: + containers: + - name: "mongodb" + volumeMounts: + - name: certs + mountPath: /etc/certs + volumes: + - name: certs + secret: + secretName: vault-tls-secret + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageType: Durable + deletionPolicy: WipeOut + version: "percona-5.0.23" + configSecret: + name: mg-configuration \ No newline at end of file diff --git a/docs/guides/mongodb/vault-integration/kmip-encryption/index.md b/docs/guides/mongodb/vault-integration/kmip-encryption/index.md index 800cb1c4e6..08feea2843 100644 --- a/docs/guides/mongodb/vault-integration/kmip-encryption/index.md +++ b/docs/guides/mongodb/vault-integration/kmip-encryption/index.md @@ -173,8 +173,8 @@ spec: storageType: Durable deletionPolicy: WipeOut version: "percona-5.0.23" - configSecret: - name: mg-configuration + configuration: + secretName: mg-configuration ``` ```bash diff --git a/docs/guides/mongodb/vault-integration/kmip-encryption/index.md.bak b/docs/guides/mongodb/vault-integration/kmip-encryption/index.md.bak new file mode 100644 index 0000000000..800cb1c4e6 --- /dev/null +++ b/docs/guides/mongodb/vault-integration/kmip-encryption/index.md.bak @@ -0,0 +1,297 @@ +--- +title: Encryption with Vault KMIP +menu: + docs_{{ .version }}: + identifier: guides-mongodb-integration-with-vault-kmip + name: Encryption with Vault KMIP 
+ parent: guides-mongodb-integration-with-vault + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +# Encrypt data in KubeDB MongoDB with Hashicorp Vault KMIP Secret Engine + +MongoDB uses [data encryption at rest](https://www.mongodb.com/docs/manual/tutorial/configure-encryption/) to protect sensitive data from unauthorized access and meet regulatory compliance. +Encryption safeguards data at rest and in transit, reducing the risk of breaches. + +[KMIP](https://en.wikipedia.org/wiki/Key_Management_Interoperability_Protocol) is chosen for its standardized approach to encryption key management, allowing secure generation, storage, and rotation of keys across various platforms. +It ensures interoperability and strengthens overall data security. + +[HashiCorp Vault KMIP secret engine](https://developer.hashicorp.com/vault/docs/secrets/kmip) is a powerful solution for managing encryption keys. +It offers automated key rotation, fine-grained access controls, and audit logging, making it a scalable and secure choice for MongoDB's encryption needs. + +To demonstrate how to configure KubeDB MongoDB with [HashiCorp Vault KMIP secret engine](https://developer.hashicorp.com/vault/docs/secrets/kmip) for encryption, you can follow this step-by-step example. This documentation will guide you through setting up Vault, configuring the KMIP secret engine, and then configuring KubeDB to use it for MongoDB data encryption. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Install KubeDB in your cluster following the steps [here](/docs/setup/README.md). + +- Install HashiCorp Vault CLI. You can follow this procedure [here](https://developer.hashicorp.com/vault/install) + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. Run the following command to prepare your cluster for this tutorial: + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +### Setup Hashicorp Vault KMIP secret engine + +User can setup Vault KMIP secret engine with [Vault Enterprise](https://developer.hashicorp.com/vault/tutorials/adp/kmip-engine?variants=vault-deploy%3Aenterprise) or [HCP Vault Dedicated](https://developer.hashicorp.com/vault/tutorials/adp/kmip-engine?variants=vault-deploy%3Ahcp). +For this demo we will use [Hashicorp Cloud Provider(HCP)](https://portal.cloud.hashicorp.com/) Vault Dedicated. + +So First we created a `Vault Plus` cluster in HCP. Then we need to configure Vault KMIP according to [this](https://developer.hashicorp.com/vault/tutorials/adp/kmip-engine?variants=vault-deploy%3Ahcp) documentation step by step. + +```bash +# setup vault environment +$ export VAULT_ADDR= +$ export VAULT_TOKEN= +$ export VAULT_NAMESPACE=admin + +# configure kmip secret engine +$ vault secrets enable kmip +Success! Enabled the kmip secrets engine at: kmip/ + +$ vault write kmip/config \ + listen_addrs=0.0.0.0:5696 \ + server_hostnames=$(echo ${VAULT_ADDR:8} | rev | cut -c6- | rev) +Success! Data written to: kmip/config + +# create scope +$ vault write -f kmip/scope/finance +Success! Data written to: kmip/scope/finance + +# create role +$ vault write kmip/scope/finance/role/accounting operation_all=true +Success! 
Data written to: kmip/scope/finance/role/accounting + +# store vault-ca.pem +$ vault read kmip/ca -format=json | jq -r '.data | .ca_pem' >> vault-ca.pem + +# generate and store client.pem +$ vault write -format=json \ + kmip/scope/finance/role/accounting/credential/generate \ + format=pem > credential.json + +$ jq -r .data.certificate < credential.json > cert.pem + +$ jq -r .data.private_key < credential.json > key.pem + +$ cat cert.pem key.pem > client.pem +``` +We will use this `client.pem` and `vault-ca.pem` files to configure KMIP in MongoDB. + +### Create MongoDB configuration with KMIP + +Now we need to make a `mongod.conf` file to use it as configuration folder for our `MongoDB`. + +```bash +$ cat mongod.conf +security: + enableEncryption: true + kmip: + serverName: vault-cluster-doc-public-vault-a33bb761.37131dd1.z1.hashicorp.cloud + port: 5696 + clientCertificateFile: /etc/certs/client.pem + serverCAFile: /etc/certs/ca.pem +``` + +Here, +- `serverName` is the public address of our HCP Vault Plus cluster without port +- `port` is listen address of KMIP secret engine +- `clientCertificateFile` is path to the client pem file to make connection +- `serverCAFile` is path to the ca pem file to verify server. + +To set up configuration in `mongod.conf` file for KMIP encryption, you can look into [MongoDB official documentation](https://www.mongodb.com/docs/manual/tutorial/configure-encryption/#key-manager). + +Here `/etc/certs/client.pem` and `/etc/certs/ca.pem` will be mounted by secret in KubeDB MongoDB main `mongodb` container. + +Now, create the secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo mg-configuration --from-file=./mongod.conf +secret/mg-configuration created +``` + +Verify the secret has the configuration file. +```bash +$ kubectl get secret -n demo mg-configuration -o yaml +apiVersion: v1 +data: + mongod.conf: c2VjdXJpdHk6CiAgZW5hYmxlRW5jcnlwdGlvbjogdHJ1ZQogIGttaXA6CiAgICBzZXJ2ZXJOYW1lOiB2YXVsdC1jbHVzdGVyLWRvYy1wdWJsaWMtdmF1bHQtYTMzYmI3NjEuMzcxMzFkZDEuejEuaGFzaGljb3JwLmNsb3VkCiAgICBwb3J0OiA1Njk2CiAgICBjbGllbnRDZXJ0aWZpY2F0ZUZpbGU6IC9ldGMvY2VydHMvY2xpZW50LnBlbQogICAgc2VydmVyQ0FGaWxlOiAvZXRjL2NlcnRzL2NhLnBlbQ== +kind: Secret +metadata: + creationTimestamp: "2024-09-24T09:10:55Z" + name: mg-configuration + namespace: demo + resourceVersion: "322831" + uid: 005f0cac-6bbb-4fb6-a728-87b0ca55785a +type: Opaque +``` + +### Create MongoDB + +Before creating `MongoDB`, we need to create a secret with `client.pem` and `vault-ca.pem` to use as volume for our `MongoDB` +```bash +$ kubectl create secret generic vault-tls-secret -n demo \ + --from-file=client.pem=client.pem \ + --from-file=ca.pem=vault-ca.pem +secret/vault-tls-secret created +``` + +Now lets create KubeDB MongoDB. Currently, we have KMIP encryption support for `percona-4.2.24`,`percona-4.2.26`,`percona-5.0.23`,`percona-6.0.12` and `percona-7.0.4` version of KubeDB managed MongoDB. + +We will use mongodb version `percona-5.0.23` for our demo purpose. 
+ +```yaml +apiVersion: kubedb.com/v1 +kind: MongoDB +metadata: + name: mg-kmip + namespace: demo +spec: + podTemplate: + spec: + containers: + - name: "mongodb" + volumeMounts: + - name: certs + mountPath: /etc/certs + volumes: + - name: certs + secret: + secretName: vault-tls-secret + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageType: Durable + deletionPolicy: WipeOut + version: "percona-5.0.23" + configSecret: + name: mg-configuration +``` + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guids/mongodb/vault-integration/kmip-enryption/examples/mg.yaml +mongodb.kubedb.com/mg-kmip created +``` + +Now, wait a few minutes. KubeDB operator will create necessary PVC, petset, services, secret etc. If everything goes well, we will see that a pod with the name `mg-kmip-0` has been created. + +Check that the petset's pod is running + +```bash +$ kubectl get pod -n demo mg-kmip-0 +NAME READY STATUS RESTARTS AGE +mg-kmip-0 1/1 Running 0 1m +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +To make sure that this `mg-kmip` MongoDB is KMIP encrypted, we can check the log of this `mg-kmip-0` pod + +```bash +kubectl logs -f --all-containers -n demo mg-kmip-0 +``` +We should see these logs which confirm that this `MongoDB` is setup with KMIP +```log +{"t":{"$date":"2024-09-24T09:26:13.551+00:00"},"s":"I", "c":"STORAGE", "id":29116, "ctx":"initandlisten","msg":"Master encryption key has been created on the key management facility","attr":{"keyManagementFacilityType":"KMIP server","keyIdentifier":{"kmipKeyIdentifier":"73ORm3aFQxGKZtJQ3196VXV5NmfT3AlG"}}} +{"t":{"$date":"2024-09-24T09:26:13.551+00:00"},"s":"I", "c":"STORAGE", "id":29037, "ctx":"initandlisten","msg":"Initializing KeyDB with wiredtiger_open config: {cfg}","attr":{"cfg":"create,config_base=false,extensions=[local=(entry=percona_encryption_extension_init,early_load=true,config=(cipher=AES256-CBC,rotation=false))],encryption=(name=percona,keyid=\"\"),log=(enabled,file_max=5MB),transaction_sync=(enabled=true,method=fsync),"}} +{"t":{"$date":"2024-09-24T09:26:13.799+00:00"},"s":"I", "c":"STORAGE", "id":29039, "ctx":"initandlisten","msg":"Encryption keys DB is initialized successfully"} +``` + + +Now, we can connect to this database through [mongo-shell](https://docs.mongodb.com/v4.2/mongo/). In this tutorial, we are connecting to the MongoDB server from inside the pod. 
+ +```bash +$ kubectl get secrets -n demo mg-kmip-auth -o jsonpath='{.data.\username}' | base64 -d +root + +$ kubectl get secrets -n demo mg-kmip-auth -o jsonpath='{.data.\password}' | base64 -d +bJI!1H!)V7!2U.wJ + +$ kubectl exec -it mg-kmip-0 -n demo -- bash + +> mongo admin + +> db.auth("root","bJI!1H!)V7!2U.wJ") +1 + +> db._adminCommand( {getCmdLineOpts: 1}) +{ + "argv" : [ + "mongod", + "--dbpath=/data/db", + "--auth", + "--port=27017", + "--ipv6", + "--bind_ip=::,0.0.0.0", + "--tlsMode=disabled", + "-f", + "/data/configdb/mongod.conf" + ], + "parsed" : { + "config" : "/data/configdb/mongod.conf", + "net" : { + "bindIp" : "::,0.0.0.0", + "ipv6" : true, + "port" : 27017, + "tls" : { + "mode" : "disabled" + } + }, + "security" : { + "authorization" : "enabled", + "enableEncryption" : true, + "kmip" : { + "clientCertificateFile" : "/etc/certs/client.pem", + "port" : 5696, + "serverCAFile" : "/etc/certs/ca.pem", + "serverName" : "vault-cluster-doc-public-vault-a33bb761.37131dd1.z1.hashicorp.cloud" + } + }, + "storage" : { + "dbPath" : "/data/db" + } + }, + "ok" : 1 +} +> exit +bye +``` + +We can see that in `parsed.security` field, encryption is enabled. + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete -n demo mg/mg-kmip + +kubectl delete -n demo secret mg-configuration +kubectl delete -n demo secret vault-tls-secret + +kubectl delete ns demo +``` + +## Next Steps + +- [Backup and Restore](/docs/guides/mongodb/backup/kubestash/overview/index.md) MongoDB databases using KubeStash. +- Initialize [MongoDB with Script](/docs/guides/mongodb/initialization/using-script.md). +- Monitor your MongoDB database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/mongodb/monitoring/using-prometheus-operator.md). +- Monitor your MongoDB database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/mongodb/monitoring/using-builtin-prometheus.md). +- Use [private Docker registry](/docs/guides/mongodb/private-registry/using-private-registry.md) to deploy MongoDB with KubeDB. +- Use [kubedb cli](/docs/guides/mongodb/cli/cli.md) to manage databases like kubectl for Kubernetes. +- Detail concepts of [MongoDB object](/docs/guides/mongodb/concepts/mongodb.md). +- Detail concepts of [MongoDBVersion object](/docs/guides/mongodb/concepts/catalog.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). + + + diff --git a/docs/guides/mssqlserver/concepts/mssqlserver.md b/docs/guides/mssqlserver/concepts/mssqlserver.md index 9efdfcde89..27d01e64ee 100644 --- a/docs/guides/mssqlserver/concepts/mssqlserver.md +++ b/docs/guides/mssqlserver/concepts/mssqlserver.md @@ -254,9 +254,9 @@ To learn how to configure `spec.storage`, please visit the links below: MSSQLServer managed by KubeDB can be monitored with Prometheus operator out-of-the-box. -### spec.configSecret +### spec.configuration -`spec.configSecret` is an optional field that allows users to provide custom configuration for MSSQLServer. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). You can use Kubernetes supported volume source `secret`. +`spec.configuration` is an optional field that allows users to provide custom configuration for MSSQLServer. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). You can use Kubernetes supported volume source `secret`. 
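As a minimal sketch (the configuration file name `mssql.conf` and its contents are illustrative assumptions, not taken from this guide), you could create a secret that carries the custom configuration file:

```bash
# create a secret holding the custom configuration file
$ kubectl create secret generic -n demo mssql-custom-config --from-file=./mssql.conf
secret/mssql-custom-config created
```

Then reference it from the MSSQLServer object:

```yaml
spec:
  configuration:
    secretName: mssql-custom-config
```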
### spec.topology diff --git a/docs/guides/mssqlserver/concepts/mssqlserver.md.bak b/docs/guides/mssqlserver/concepts/mssqlserver.md.bak new file mode 100644 index 0000000000..9efdfcde89 --- /dev/null +++ b/docs/guides/mssqlserver/concepts/mssqlserver.md.bak @@ -0,0 +1,594 @@ +--- +title: MSSQLServer CRD +menu: + docs_{{ .version }}: + identifier: ms-concepts-mssqlserver + name: MSSQLServer + parent: ms-concepts + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# MSSQLServer + +## What is MSSQLServer + +`MSSQLServer` is a Kubernetes `Custom Resource Definitions` (CRD). It provides declarative configuration for [Microsoft SQL Server](https://learn.microsoft.com/en-us/sql/sql-server/) in a Kubernetes native way. You only need to describe the desired database configuration in a MSSQLServer object, and the KubeDB operator will create Kubernetes objects in the desired state for you. + +## MSSQLServer Spec + +As with all other Kubernetes objects, a MSSQLServer needs `apiVersion`, `kind`, and `metadata` fields. It also needs a `.spec` section. + +Below is an example `MSSQLServer` object. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: MSSQLServer +metadata: + name: mssqlserver + namespace: demo +spec: + configuration: + secretName: mssql-custom-config + authSecret: + kind: Secret + name: mssql-admin-cred + topology: + availabilityGroup: + databases: + - agdb1 + - agdb2 + leaderElection: + electionTick: 10 + heartbeatTick: 1 + period: 300ms + transferLeadershipInterval: 1s + transferLeadershipTimeout: 1m0s + mode: AvailabilityGroup + podTemplate: + metadata: + annotations: + passMe: ToDatabasePod + controller: + annotations: + passMe: ToPetSet + spec: + serviceAccountName: my-custom-sa + schedulerName: my-scheduler + nodeSelector: + disktype: ssd + containers: + - name: mssql + resources: + limits: + memory: 4Gi + requests: + cpu: 500m + memory: 4Gi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + - name: mssql-coordinator + resources: + limits: + memory: 256Mi + requests: + cpu: 200m + memory: 256Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + - name: mssql-init + resources: + limits: + memory: 512Mi + requests: + cpu: 200m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + podPlacementPolicy: + name: default + securityContext: + fsGroup: 10001 + replicas: 3 + storage: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard + storageType: Durable + serviceTemplates: + - alias: primary + metadata: + annotations: + passMe: ToService + spec: + type: LoadBalancer + tls: + certificates: + - alias: server + emailAddresses: + - dev@appscode.com + secretName: mssqlserver-server-cert + subject: + organizationalUnits: + - server + organizations: + - kubedb + - alias: client + emailAddresses: + - abc@appscode.com + secretName: mssqlserver-client-cert + subject: + organizationalUnits: + - client + organizations: + - kubedb + - alias: endpoint + secretName: mssqlserver-endpoint-cert + subject: + 
organizationalUnits: + - endpoint + organizations: + - kubedb + clientTLS: true + issuerRef: + apiGroup: cert-manager.io + kind: Issuer + name: mssqlserver-ca-issuer + healthChecker: + periodSeconds: 15 + timeoutSeconds: 10 + failureThreshold: 2 + disableWriteCheck: false + monitor: + agent: prometheus.io/operator + prometheus: + serviceMonitor: + labels: + release: prometheus + interval: 10s + version: 2022-cu12 + deletionPolicy: Halt +``` + +### spec.version + +`spec.version` is a required field that specifies the name of the [MSSQLServerVersion](/docs/guides/mssqlserver/concepts/catalog.md) crd where the docker images are specified. Currently, when you install KubeDB, it creates the following `MSSQLServerVersion` resources, + +```bash +$ kubectl get msversion +NAME VERSION DB_IMAGE DEPRECATED AGE +2022-cu12 2022 mcr.microsoft.com/mssql/server:2022-CU12-ubuntu-22.04 2d +2022-cu14 2022 mcr.microsoft.com/mssql/server:2022-CU14-ubuntu-22.04 2d +``` +### spec.replicas + +`spec.replicas` specifies the total number of primary and secondary nodes in SQL Server Availability Group cluster configuration. One pod is selected as Primary and others act as secondary replicas. KubeDB uses `PodDisruptionBudget` to ensure that majority of the replicas are available during [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions). + +To learn more about how to set up a SQL Server Availability Group cluster (HA configuration) in KubeDB, please visit [here](/docs/guides/mssqlserver/clustering/ag_cluster.md). + +### spec.authSecret + +`spec.authSecret` is an optional field that points to a Secret used to hold credentials for `mssqlserver` database. If not set, KubeDB operator creates a new Secret with name `{mssqlserver-name}-auth` that hold _username_ and _password_ for `mssqlserver` database. + +If you want to use an existing or custom secret, please specify that when creating the MSSQLServer object using `spec.authSecret.name`. This Secret should contain superuser _username_ as `username` key and superuser _password_ as `password` key. Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator. + +Example: + +```bash +$ kubectl create secret generic mssqlserver-auth -n demo \ + --from-literal=username='sa' \ + --from-literal=password='Pa55w0rd!' +secret/mssqlserver-auth created +``` + +```bash +$ kubectl get secret -n demo mssqlserver-auth -oyaml +apiVersion: v1 +data: + password: UGE1NXcwcmQh + username: c2E= +kind: Secret +metadata: + creationTimestamp: "2024-10-10T06:47:06Z" + name: mssqlserver-auth + namespace: demo + resourceVersion: "315403" + uid: dafcce02-b6a2-4e65-bdd1-db6b9b6d4913 +type: Opaque +``` + +### spec.storageType + +`spec.storageType` is an optional field that specifies the type of storage to use for database. It can be either `Durable` or `Ephemeral`. The default value of this field is `Durable`. If `Ephemeral` is used then KubeDB will create MSSQLServer database using [emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) volume. In this case, you don't have to specify `spec.storage` field. + +### spec.storage + +If you don't set `spec.storageType:` to `Ephemeral` then `spec.storage` field is required. This field specifies the StorageClass of PVCs dynamically allocated to store data for the database. This storage spec will be passed to the PetSet created by KubeDB operator to run database pods. 
You can specify any StorageClass available in your cluster with appropriate resource requests. + +- `spec.storage.storageClassName` is the name of the StorageClass used to provision PVCs. PVCs don’t necessarily have to request a class. A PVC with its storageClassName set equal to "" is always interpreted to be requesting a PV with no class, so it can only be bound to PVs with no class (no annotation or one set equal to ""). A PVC with no storageClassName is not quite the same and is treated differently by the cluster depending on whether the DefaultStorageClass admission plugin is turned on. +- `spec.storage.accessModes` uses the same conventions as Kubernetes PVCs when requesting storage with specific access modes. +- `spec.storage.resources` can be used to request specific quantities of storage. This follows the same resource model used by PVCs. + +To learn how to configure `spec.storage`, please visit the links below: + +- https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims + +### spec.init + +`spec.init` is an optional section that can be used to initialize a newly created MSSQLServer database. MSSQLServer databases can be initialized from Snapshots. + +### spec.monitor + +MSSQLServer managed by KubeDB can be monitored with Prometheus operator out-of-the-box. + +### spec.configSecret + +`spec.configSecret` is an optional field that allows users to provide custom configuration for MSSQLServer. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). You can use Kubernetes supported volume source `secret`. + +### spec.topology + +The spec.topology field is the core of configuring your SQL Server cluster's architecture. It defines the operational mode, high-availability settings, and disaster recovery configurations of the SQL Server cluster. It defines how the cluster should behave, including the databases that should be included in the setup, and the leader election process for managing the primary-secondary roles. +```yaml +spec: + topology: + mode: DistributedAG + availabilityGroup: + # ... local AG settings ... + distributedAG: + # ... DAG settings ... +``` +#### spec.topology.mode + +The `spec.topology.mode` field determines the mode in which the SQL Server cluster operates. Currently, the supported mode is: + +`AvailabilityGroup`: Configures a standard SQL Server Always On Availability Group within a single Kubernetes cluster. This provides high availability and automatic failover for your databases. In this mode, the KubeDB operator sets up an Availability Group with one primary replica and multiple secondary replicas for high availability. The databases specified in `spec.topology.availabilityGroup.databases` are automatically created and added to the Availability Group. Users do not need to perform these tasks manually. + +`DistributedAG`: Configures a Distributed Availability Group. This mode links two separate AvailabilityGroup clusters, typically in different geographic locations or Kubernetes clusters, to provide a robust disaster recovery solution. + + + +#### spec.topology.availabilityGroup + +This section defines the configuration for the local SQL Server Availability Group (AG). It is required for both AvailabilityGroup and DistributedAG modes. It includes details about the databases to be added to the group and the leader election configurations. + +##### spec.topology.availabilityGroup.databases + +(string[]) An array of database names to be included in the Availability Group. 
KubeDB will automatically create these databases (if they don't exist) and add them to the AG during cluster initialization. For a DistributedAG in the Secondary role, this field must be empty, as databases will be seeded from the primary site. Users can modify this list later to add databases as needed.
+
+Example:
+
+```yaml
+availabilityGroup:
+  databases:
+    - "sales_db"
+    - "inventory_db"
+```
+In this example, `sales_db` and `inventory_db` are added to the Availability Group upon cluster setup.
+
+##### spec.topology.availabilityGroup.secondaryAccessMode
+(string) Controls how secondary replicas handle incoming connections. Default is `Passive`.
+KubeDB supports active and passive secondary replicas in Microsoft SQL Server Availability Groups, enabling cost-efficient deployments where passive replicas avoid licensing costs.
+
+Active/Passive Secondary Replicas:
+The `secondaryAccessMode` field in the MSSQLServer CRD under `spec.topology.availabilityGroup` allows control over secondary replica connection modes:
+- Passive: No client connections (default, ideal for DR or failover without licensing costs).
+- ReadOnly: Accepts read-intent connections only.
+- All: Allows all read-only connections.
+
+```yaml
+spec:
+  topology:
+    availabilityGroup:
+      secondaryAccessMode: Passive # or ReadOnly, All
+```
+
+T-SQL Mapping:
+- Passive: `SECONDARY_ROLE (ALLOW_CONNECTIONS = NO)`
+- ReadOnly: `SECONDARY_ROLE (ALLOW_CONNECTIONS = READ_ONLY)`
+- All: `SECONDARY_ROLE (ALLOW_CONNECTIONS = ALL)`
+
+
+##### spec.topology.availabilityGroup.leaderElection
+
+There are five fields under the MSSQLServer CRD's `spec.topology.availabilityGroup.leaderElection`. These values define how fast the leader election can happen.
+
+- `Period`: This is the period between each invocation of `Node.Tick`. It represents the time base for election actions. Default is `100ms`.
+
+- `ElectionTick`: This is the number of `Node.Tick` invocations that must pass between elections. If a follower does not receive any message from the leader during this period, it becomes a candidate and starts an election. It is recommended to set `ElectionTick = 10 * HeartbeatTick` to prevent unnecessary leader switching. Default is `10`.
+
+- `HeartbeatTick`: This defines the interval between heartbeats sent by the leader to maintain its leadership. A leader sends heartbeat messages every `HeartbeatTick` ticks. Default is `1`.
+
+- `TransferLeadershipInterval`: This specifies the retry interval for transferring leadership to the healthiest node. Default is `1s`.
+
+- `TransferLeadershipTimeout`: This specifies the retry timeout for transferring leadership to the healthiest node. Default is `60s`.
+
+You can increase the period and the electionTick if the system has high network latency.
+
+
+### spec.topology.distributedAG
+This section is required when `spec.topology.mode` is set to `DistributedAG`. It defines the configuration for the Distributed Availability Group.
+
+`spec.topology.distributedAG.self`
+This object defines the configuration for the local Availability Group's participation in the DAG.
+- role: (string) Specifies whether this local AG is the Primary or Secondary in the Distributed AG.
+- url: (string) The listener endpoint URL of this local AG (e.g., a LoadBalancer IP and port). This URL must be reachable from the remote site.
+
+`spec.topology.distributedAG.remote`
+This object defines the connection details for the remote Availability Group that this cluster will connect to.
+- name: (string) The actual name of the Availability Group on the remote cluster.
+- url: (string) The listener endpoint URL of the remote AG. This URL must be reachable from the SQL Server instances in the local cluster.
+
+
+### spec.podTemplate
+
+KubeDB allows providing a template for the database pod through `spec.podTemplate`. KubeDB operator will pass the information provided in `spec.podTemplate` to the PetSet created for MSSQLServer.
+
+KubeDB accepts the following fields to be set in `spec.podTemplate`:
+
+- metadata
+  - annotations (pod's annotation)
+- controller
+  - annotations (petset's annotation)
+- spec:
+  - containers
+  - volumes
+  - podPlacementPolicy
+  - serviceAccountName
+  - initContainers
+  - imagePullSecrets
+  - nodeSelector
+  - schedulerName
+  - tolerations
+  - priorityClassName
+  - priority
+  - securityContext
+
+You can check out the full list [here](https://github.com/kmodules/offshoot-api/blob/master/api/v2/types.go#L26C1-L279C1).
+Uses of some fields of `spec.podTemplate` are described below,
+
+#### spec.podTemplate.spec.tolerations
+
+The `spec.podTemplate.spec.tolerations` is an optional field. This can be used to specify the pod's tolerations.
+
+#### spec.podTemplate.spec.volumes
+
+The `spec.podTemplate.spec.volumes` is an optional field. This can be used to provide the list of volumes that can be mounted by containers belonging to the pod.
+
+#### spec.podTemplate.spec.podPlacementPolicy
+
+`spec.podTemplate.spec.podPlacementPolicy` is an optional field. This can be used to provide a reference to the `podPlacementPolicy`. The `name` of the podPlacementPolicy is referred to under this attribute. This will be used by our PetSet controller to place the db pods across regions, zones & nodes according to the policy. It utilizes Kubernetes affinity & podTopologySpreadConstraints features to do so.
+```yaml
+spec:
+  podPlacementPolicy:
+    name: default
+```
+
+#### spec.podTemplate.spec.containers
+
+The `spec.podTemplate.spec.containers` can be used to provide the list of containers and their configurations for the database pod. Some of the fields are described below,
+
+##### spec.podTemplate.spec.containers[].name
+The `spec.podTemplate.spec.containers[].name` field is used to specify the name of the container, specified as a `DNS_LABEL`. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
+
+##### spec.podTemplate.spec.containers[].args
+`spec.podTemplate.spec.containers[].args` is an optional field. This can be used to provide additional arguments to the database installation.
+
+##### spec.podTemplate.spec.containers[].env
+
+`spec.podTemplate.spec.containers[].env` is an optional field that specifies the environment variables to pass to the MSSQLServer docker image. To know about supported environment variables, please visit [here](https://hub.docker.com/r/microsoft/mssql-server).
+
+Note that, the KubeDB operator does not allow the `MSSQL_SA_USERNAME` and `MSSQL_SA_PASSWORD` environment variables to be set in `spec.podTemplate.spec.containers[].env`. If you want to set the superuser _username_ and _password_, please use `spec.authSecret` instead, as described earlier.
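+
+Environment variables other than these restricted credentials can be set as usual through the container spec. As a minimal sketch (assuming the main container is named `mssql`, as in the other examples in this guide, and using `MSSQL_AGENT_ENABLED`, one of the documented SQL Server on Linux variables):
+
+```yaml
+spec:
+  podTemplate:
+    spec:
+      containers:
+        - name: mssql
+          env:
+            - name: MSSQL_AGENT_ENABLED  # enable SQL Server Agent inside the container
+              value: "true"
+```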
+ +If you try to set `MSSQL_SA_USERNAME` or `MSSQL_SA_PASSWORD` environment variable in MSSQLServer CR, KubeDB operator will reject the request with following error, + +```ini +The MSSQLServer "mssqlserver" is invalid: spec.podTemplate: Invalid value: "mssqlserver": environment variable MSSQL_SA_PASSWORD is forbidden to use in MSSQLServer spec +``` + +Also, note that KubeDB does not allow to update the environment variables as updating them does not have any effect once the database is created. + +##### spec.podTemplate.spec.containers[].resources + +`spec.podTemplate.spec.containers[].resources` is an optional field. This can be used to request compute resources required by containers of the database pods. To learn more, visit [here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + +#### spec.podTemplate.spec.serviceAccountName + +`serviceAccountName` is an optional field supported by KubeDB Operator that can be used to specify a custom service account to fine tune role based access control. + +If this field is left empty, the KubeDB operator will create a service account name matching MSSQLServer CR name. Role and RoleBinding that provide necessary access permissions will also be generated automatically for this service account. + +If a service account name is given, but there's no existing service account by that name, the KubeDB operator will create one, and Role and RoleBinding that provide necessary access permissions will also be generated for this service account. + +If a service account name is given, and there's an existing service account by that name, the KubeDB operator will use that existing service account. Since this service account is not managed by KubeDB, users are responsible for providing necessary access permissions manually. + + +#### spec.podTemplate.spec.nodeSelector + +`spec.podTemplate.spec.nodeSelector` is an optional field that specifies a map of key-value pairs. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). To learn more, see [here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) . + + +### spec.tls + +`spec.tls` specifies the TLS/SSL configurations for the MSSQLServer. KubeDB uses [cert-manager](https://cert-manager.io/) v1 api to provision and manage TLS certificates. + +The following fields are configurable in the `spec.tls` section: + +- `issuerRef` is a reference to the `Issuer` or `ClusterIssuer` CR of [cert-manager](https://cert-manager.io/docs/concepts/issuer/) that will be used by `KubeDB` to generate necessary certificates. + + - `apiGroup` is the group name of the resource that is being referenced. Currently, the only supported value is `cert-manager.io`. + - `kind` is the type of resource that is being referenced. KubeDB supports both `Issuer` and `ClusterIssuer` as values for this field. + - `name` is the name of the resource (`Issuer` or `ClusterIssuer`) being referenced. + + +- `clientTLS` This setting determines whether TLS (Transport Layer Security) is enabled for the MS SQL Server. + - If set to `true`, the sql server will be provisioned with `TLS`, and you will need to install the [csi-driver-cacerts](https://github.com/kubeops/csi-driver-cacerts) which will be used to add self-signed ca certificates to the OS trusted certificate store (/etc/ssl/certs/ca-certificates.crt). + - If set to `false`, TLS will not be enabled for SQL Server. 
However, the Issuer will still be used to configure a TLS-enabled WAL-G proxy server, which is necessary for performing SQL Server backup operations. + + +- `certificates` (optional) are a list of certificates used to configure the server and/or client certificate. It has the following fields: + - `alias` represents the identifier of the certificate. It has the following possible value: + - `server` is used for server certificate identification. + - `client` is used for client certificate identification. + - `endpoint`: For endpoint certificate identification + - `exporter` is used for metrics exporter certificate identification. + - `secretName` (optional) specifies the k8s secret name that holds the certificates. + This field is optional. If the user does not specify this field, the default secret name will be created in the following format: `--cert`. + + - `subject` (optional) specifies an `X.509` distinguished name. It has the following possible field, + - `organizations` (optional) are the list of different organization names to be used on the Certificate. + - `organizationalUnits` (optional) are the list of different organization unit name to be used on the Certificate. + - `countries` (optional) are the list of country names to be used on the Certificate. + - `localities` (optional) are the list of locality names to be used on the Certificate. + - `provinces` (optional) are the list of province names to be used on the Certificate. + - `streetAddresses` (optional) are the list of a street address to be used on the Certificate. + - `postalCodes` (optional) are the list of postal code to be used on the Certificate. + - `serialNumber` (optional) is a serial number to be used on the Certificate. + You can find more details from [Here](https://golang.org/pkg/crypto/x509/pkix/#Name) + - `duration` (optional) is the period during which the certificate is valid. + - `renewBefore` (optional) is a specifiable time before expiration duration. + - `dnsNames` (optional) is a list of subject alt names to be used in the Certificate. + - `ipAddresses` (optional) is a list of IP addresses to be used in the Certificate. + - `uris` (optional) is a list of URI Subject Alternative Names to be set in the Certificate. + - `emailAddresses` (optional) is a list of email Subject Alternative Names to be set in the Certificate. + - `privateKey` (optional) specifies options to control private keys used for the Certificate. + - `encoding` (optional) is the private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and PKCS#8, respectively. It defaults to PKCS#1 if not specified. + + +### spec.serviceTemplates + +KubeDB creates two different services for each MSSQLServer instance. One of them is a primary service named `` and points to the MSSQLServer `Primary` pod/node. Another one is a secondary service named `-secondary` and points to MSSQLServer `secondary` replica pods/nodes. + +These `primary` and `secondary` services can be customized using [spec.serviceTemplates](#spec.servicetemplate). + +You can provide template for the services using `spec.serviceTemplates`. This will allow you to set the type and other properties of the service. If `spec.serviceTemplates` is not provided, KubeDB will create a `primary` service of type `ClusterIP` with minimal settings. + +KubeDB allows following fields to set in `spec.serviceTemplates`: +- `alias` represents the identifier of the service. 
It has the following possible value: + - `primary` is used for the primary service identification. + - `secondary` is used for the secondary service identification. + - `stats` is used for the exporter service identification. +- metadata: + - labels + - annotations +- spec: + - type + - ports + - clusterIP + - externalIPs + - loadBalancerIP + - loadBalancerSourceRanges + - externalTrafficPolicy + - healthCheckNodePort + - sessionAffinityConfig + +See [here](https://github.com/kmodules/offshoot-api/blob/kubernetes-1.21.1/api/v1/types.go#L237) to understand these fields in detail. + +## spec.healthChecker +It defines the attributes for the health checker. +- `spec.healthChecker.periodSeconds` specifies how often to perform the health check. +- `spec.healthChecker.timeoutSeconds` specifies the number of seconds after which the probe times out. +- `spec.healthChecker.failureThreshold` specifies minimum consecutive failures for the healthChecker to be considered failed. +- `spec.healthChecker.disableWriteCheck` specifies whether to disable the writeCheck or not. + +Know details about KubeDB Health checking from this [blog post](https://appscode.com/blog/post/kubedb-health-checker/). + + +### spec.deletionPolicy + +`deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `MSSQLServer` crd or which resources KubeDB should keep or delete when you delete `MSSQLServer` crd. KubeDB provides following four termination policies: + +- DoNotTerminate +- Halt +- Delete (`Default`) +- WipeOut + +When `deletionPolicy` is `DoNotTerminate`, KubeDB takes advantage of `ValidationWebhook` feature in Kubernetes 1.9.0 or later clusters to implement `DoNotTerminate` feature. If admission webhook is enabled, `DoNotTerminate` prevents users from deleting the database as long as the `spec.deletionPolicy` is set to `DoNotTerminate`. + +Following table show what KubeDB does when you delete MSSQLServer crd for different termination policies, + +| Behavior | DoNotTerminate | Halt | Delete | WipeOut | +|-------------------------------------| :------------: | :------: | :------: | :------: | +| 1. Block Delete operation | ✓ | ✗ | ✗ | ✗ | +| 2. Delete PetSet | ✗ | ✓ | ✓ | ✓ | +| 3. Delete Services | ✗ | ✓ | ✓ | ✓ | +| 4. Delete PVCs | ✗ | ✗ | ✓ | ✓ | +| 5. Delete Secrets | ✗ | ✗ | ✗ | ✓ | +| 6. Delete Snapshots | ✗ | ✗ | ✗ | ✓ | +| 7. Delete Snapshot data from bucket | ✗ | ✗ | ✗ | ✓ | + +If you don't specify `spec.deletionPolicy` KubeDB uses `Delete` termination policy by default. + +> For more details you can visit [here](https://appscode.com/blog/post/deletion-policy/) + +### spec.halted +Indicates that the database is halted and all offshoot Kubernetes resources except PVCs are deleted. + +### Configuring Environment Variables for SQL Server on Linux +You can use environment variables to configure SQL Server on Linux containers. +When deploying `Microsoft SQL Server` on Linux using `containers`, you need to specify the `product edition` through the [MSSQL_PID](https://mcr.microsoft.com/en-us/product/mssql/server/about#configuration:~:text=MSSQL_PID%20is%20the,documentation%20here.) environment variable. This variable determines which `SQL Server edition` will run inside the container. 
The acceptable values for `MSSQL_PID` are: +`Developer`: This will run the container using the Developer Edition (this is the default if no MSSQL_PID environment variable is supplied) +`Express`: This will run the container using the Express Edition +`Standard`: This will run the container using the Standard Edition +`Enterprise`: This will run the container using the Enterprise Edition +`EnterpriseCore`: This will run the container using the Enterprise Edition Core +``: This will run the container with the edition that is associated with the PID + +`ACCEPT_EULA` confirms your acceptance of the [End-User Licensing Agreement](https://go.microsoft.com/fwlink/?linkid=857698). + +For a complete list of environment variables that can be used, refer to the documentation [here](https://learn.microsoft.com/en-us/sql/linux/sql-server-linux-configure-environment-variables?view=sql-server-2017). + +Below is an example of how to configure the `MSSQL_PID` and `ACCEPT_EULA` environment variable in the KubeDB MSSQLServer Custom Resource Definition (CRD): +```bash +metadata: + name: mssqlserver + namespace: demo +spec: + podTemplate: + spec: + containers: + - name: mssql + env: + - name: ACCEPT_EULA + value: "Y" + - name: MSSQL_PID + value: Enterprise +``` +In this example, the SQL Server container will run the Enterprise Edition. + +## Next Steps + +- Learn how to use KubeDB to run a MSSQLServer database [here](/docs/guides/mssqlserver/README.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/mssqlserver/configuration/using-config-file.md b/docs/guides/mssqlserver/configuration/using-config-file.md index aae6e75a39..aad051746e 100644 --- a/docs/guides/mssqlserver/configuration/using-config-file.md +++ b/docs/guides/mssqlserver/configuration/using-config-file.md @@ -39,7 +39,7 @@ SQL Server allows configuring database via configuration file. The default confi > To learn available configuration option of SQL Server see [Configure SQL Server on Linux](https://learn.microsoft.com/en-us/sql/linux/sql-server-linux-configure-mssql-conf?view=sql-server-ver16). -At first, you have to create a config file named `mssql.conf` with your desired configuration. Then you have to create a [secret](https://kubernetes.io/docs/concepts/configuration/secret/) using this file. Then specify this secret name in `spec.configSecret.name` section while creating MSSQLServer CR. +At first, you have to create a config file named `mssql.conf` with your desired configuration. Then you have to create a [secret](https://kubernetes.io/docs/concepts/configuration/secret/) using this file. Then specify this secret name in `spec.configuration.secretName` section while creating MSSQLServer CR. KubeDB will create a secret named `{mssqlserver-name}-config` with configuration file contents as the value of the key `mssql.conf` and mount this secret into `/var/opt/mssql/` directory of the database pod. The secret named `{mssqlserver-name}-config` will contain your desired configurations with some default configurations. @@ -134,7 +134,7 @@ issuer.cert-manager.io/mssqlserver-ca-issuer created -Now, create MSSQLServer CR specifying `spec.configSecret` field. +Now, create MSSQLServer CR specifying `spec.configuration` field. 
```yaml apiVersion: kubedb.com/v1alpha2 diff --git a/docs/guides/mssqlserver/configuration/using-config-file.md.bak b/docs/guides/mssqlserver/configuration/using-config-file.md.bak new file mode 100644 index 0000000000..41399ff81e --- /dev/null +++ b/docs/guides/mssqlserver/configuration/using-config-file.md.bak @@ -0,0 +1,259 @@ +--- +title: Run MSSQLServer with Custom Configuration +menu: + docs_{{ .version }}: + identifier: ms-configuration-config-file + name: Config File + parent: ms-configuration + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Using Custom Configuration File + +KubeDB supports providing custom configuration for MSSQLServer. This tutorial will show you how to use KubeDB to run SQL Server with custom configuration. + +## Before You Begin + +- You need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). Make sure install with helm command including `--set global.featureGates.MSSQLServer=true` to ensure MSSQLServer CRD installation. + +- To configure TLS/SSL in `MSSQLServer`, `KubeDB` uses `cert-manager` to issue certificates. So first you have to make sure that the cluster has `cert-manager` installed. To install `cert-manager` in your cluster following steps [here](https://cert-manager.io/docs/installation/kubernetes/). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. Run the following command to prepare your cluster for this tutorial: + + ```bash + $ kubectl create ns demo + namespace/demo created + ``` + +> Note: The yaml files used in this tutorial are stored in [docs/examples/mssqlserver](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/mssqlserver) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +SQL Server allows configuring database via configuration file. The default configuration file for SQL Server deployed by `KubeDB` can be found in `/var/opt/mssql/mssql.conf`. When SQL Server starts, it will look for configuration file in `/var/opt/mssql/mssql.conf`. If configuration file exist, this configuration will overwrite the existing defaults. + +> To learn available configuration option of SQL Server see [Configure SQL Server on Linux](https://learn.microsoft.com/en-us/sql/linux/sql-server-linux-configure-mssql-conf?view=sql-server-ver16). + +At first, you have to create a config file named `mssql.conf` with your desired configuration. Then you have to create a [secret](https://kubernetes.io/docs/concepts/configuration/secret/) using this file. Then specify this secret name in `spec.configuration.secretName` section while creating MSSQLServer CR. + +KubeDB will create a secret named `{mssqlserver-name}-config` with configuration file contents as the value of the key `mssql.conf` and mount this secret into `/var/opt/mssql/` directory of the database pod. The secret named `{mssqlserver-name}-config` will contain your desired configurations with some default configurations. + +In this tutorial, we will configure sql server via a custom config file. 
+ +## Custom Configuration + +At first, create `mssql.conf` file containing required configuration settings. + +```ini +$ cat mssql.conf +[network] +tlsprotocols = 1.2 +forceencryption = 1 + +[language] +lcid = 1036 + +[memory] +memorylimitmb = 2304 +``` + +Here we have set +- [memory limit](https://learn.microsoft.com/en-us/sql/linux/sql-server-linux-configure-mssql-conf?view=sql-server-ver16#memorylimit), The `memory.memorylimitmb` setting controls the amount of physical memory (in MB) available to SQL Server. The default is 80% of the physical memory, to prevent out-of-memory (OOM) conditions. The above configuration changes the memory available to SQL Server to 2.25 GB (2,304 MB). +- [SQL Server Locale](https://learn.microsoft.com/en-us/sql/linux/sql-server-linux-configure-mssql-conf?view=sql-server-ver16#lcid), The language.lcid setting changes the SQL Server locale to any supported language identifier (LCID). The above example changes the locale to French (1036): +- [TLS](https://learn.microsoft.com/en-us/sql/linux/sql-server-linux-configure-mssql-conf?view=sql-server-ver16#tls) The `network.forceencryption` If 1, then SQL Server forces all connections to be encrypted. By default, this option is 0. The `network.tlsprotocols` A comma-separated list of which TLS protocols are allowed by SQL Server. SQL Server always attempts to negotiate the strongest allowed protocol. If a client doesn't support any allowed protocol, SQL Server rejects the connection attempt. For compatibility, all supported protocols are allowed by default (1.2, 1.1, 1.0). If your clients support TLS 1.2, Microsoft recommends allowing only TLS 1.2. + + + +Now, create the secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo ms-custom-config --from-file=./mssql.conf +secret/ms-custom-config created +``` + +Verify the secret has the configuration file. +```bash +$ kubectl get secret -n demo ms-custom-config -oyaml +``` + +```yaml +apiVersion: v1 +data: + mssql.conf: W25ldHdvcmtdCnRsc3Byb3RvY29scyA9IDEuMgpmb3JjZWVuY3J5cHRpb24gPSAxCgpbbGFuZ3VhZ2VdCmxjaWQgPSAxMDM2CgpbbWVtb3J5XQptZW1vcnlsaW1pdG1iID0gMjMwNA== +kind: Secret +metadata: + creationTimestamp: "2024-10-16T06:12:28Z" + name: ms-custom-config + namespace: demo + resourceVersion: "451820" + uid: e7242e3a-d5dc-4705-a0f3-20b0ff0a59d3 +type: Opaque +``` + + + +Now, we need to create an Issuer/ClusterIssuer which will be used to generate the certificate used for TLS configurations. + +### Create Issuer/ClusterIssuer + +Now, we are going to create an example `Issuer` that will be used throughout the duration of this tutorial. Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`. By following the below steps, we are going to create our desired issuer, + +- Start off by generating our ca-certificates using openssl, +```bash +openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=MSSQLServer/O=kubedb" +``` +- +- Create a secret using the certificate files we have just generated, +```bash +$ kubectl create secret tls mssqlserver-ca --cert=ca.crt --key=ca.key --namespace=demo +secret/mssqlserver-ca created +``` +Now, we are going to create an `Issuer` using the `mssqlserver-ca` secret that contains the ca-certificate we have just created. 
Below is the YAML of the `Issuer` CR that we are going to create, + +```yaml +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: mssqlserver-ca-issuer + namespace: demo +spec: + ca: + secretName: mssqlserver-ca +``` + +Let’s create the `Issuer` CR we have shown above, +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mssqlserver/standalone/mssqlserver-ca-issuer.yaml +issuer.cert-manager.io/mssqlserver-ca-issuer created +``` + + + +Now, create MSSQLServer CR specifying `spec.configSecret` field. + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: MSSQLServer +metadata: + name: mssql-custom-config + namespace: demo +spec: + version: "2022-cu12" + configuration: + secretName: ms-custom-config + replicas: 1 + tls: + issuerRef: + name: mssqlserver-ca-issuer + kind: Issuer + apiGroup: "cert-manager.io" + clientTLS: false + podTemplate: + spec: + containers: + - name: mssql + env: + - name: ACCEPT_EULA + value: "Y" + - name: MSSQL_PID + value: Evaluation # Change it + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + deletionPolicy: WipeOut +``` + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mssqlserver/configuration/mssql-custom-config.yaml +mssqlserver.kubedb.com/mssql-custom-config created +``` + +Now, wait a few minutes. KubeDB operator will create necessary PVC, petset, services, secrets etc. If everything goes well, we will see that a pod with the name `mssql-custom-config-0` has been created. + +Check that the petset's pod is running + +```bash +$ kubectl get pod -n demo mssql-custom-config-0 +NAME READY STATUS RESTARTS AGE +mssql-custom-config-0 1/1 Running 0 94s +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +Now, Let's connect to the MSSQLServer from inside the pod. + +```bash +$ kubectl get secrets -n demo mssql-custom-config-auth -o jsonpath='{.data.\username}' | base64 -d +sa + +$ kubectl get secrets -n demo mssql-custom-config-auth -o jsonpath='{.data.\password}' | base64 -d +AqRe6WIuqwKXLaWc + +$ kubectl exec -it mssql-custom-config-0 -n demo -c mssql -- bash +mssql@mssql-custom-config-0:/$ cat /var/opt/mssql/mssql.conf +[language] +lcid = 1036 +[network] +tlsprotocols = 1.2 +forceencryption = 1 +[memory] +memorylimitmb = 2304 +mssql@mssql-custom-config-0:/$ /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P AqRe6WIuqwKXLaWc +1> SELECT encrypt_option FROM sys.dm_exec_connections WHERE session_id = @@SPID; +2> go +encrypt_option +---------------------------------------- +TRUE + +(1 rows affected) +1> SELECT default_language_name FROM sys.server_principals WHERE name = 'sa'; +2> go +default_language_name +----------------------------------------------------------- +Français + +(1 rows affected) +1> SELECT physical_memory_kb / 1024 AS physical_memory_mb FROM sys.dm_os_sys_info; +2> go +physical_memory_mb +-------------------- +2304 +(1 rows affected) +1> +``` + + +As we can see from the configuration of running sql server, the configuration given in the config secret has been set successfully. 
+ +## Cleaning up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +$ kubectl patch -n demo ms/mssql-custom-config -p '{"spec":{"deletionPolicy":"WipeOut"}}' --type="merge" + +$ kubectl delete -n demo ms/mssql-custom-config +mssqlserver.kubedb.com "mssql-custom-config" deleted + +$ kubectl delete -n demo secret ms-custom-config +mssqlserver.kubedb.com "mssql-custom-config" deleted + +kubectl delete ns demo +``` + +## Next Steps + +- [Backup and Restore](/docs/guides/mssqlserver/backup/overview/index.md) MSSQLServer databases using KubeStash. +- Detail concepts of [MSSQLServer object](/docs/guides/mssqlserver/concepts/mssqlserver.md). +- Detail concepts of [MSSQLServerVersion object](/docs/guides/mssqlserver/concepts/catalog.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/mssqlserver/reconfigure/ag_cluster.md b/docs/guides/mssqlserver/reconfigure/ag_cluster.md index 7ce8d9289d..701030ee3f 100644 --- a/docs/guides/mssqlserver/reconfigure/ag_cluster.md +++ b/docs/guides/mssqlserver/reconfigure/ag_cluster.md @@ -97,7 +97,7 @@ $ kubectl create secret generic -n demo ms-custom-config --from-file=./mssql.con secret/ms-custom-config created ``` -In this section, we are going to create a MSSQLServer object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `MSSQLServer` CR that we are going to create, +In this section, we are going to create a MSSQLServer object specifying `spec.configuration` field to apply this custom configuration. Below is the YAML of the `MSSQLServer` CR that we are going to create, ```yaml apiVersion: kubedb.com/v1alpha2 diff --git a/docs/guides/mssqlserver/reconfigure/ag_cluster.md.bak b/docs/guides/mssqlserver/reconfigure/ag_cluster.md.bak new file mode 100644 index 0000000000..7ce8d9289d --- /dev/null +++ b/docs/guides/mssqlserver/reconfigure/ag_cluster.md.bak @@ -0,0 +1,570 @@ +--- +title: Reconfigure MSSQLServer Availability Group +menu: + docs_{{ .version }}: + identifier: ms-reconfigure-ag-cluster + name: Availability Group + parent: ms-reconfigure + weight: 30 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure MSSQLServer Availability Group + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a SQL Server Availability Group cluster. + +## Before You Begin + +- You need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). Make sure install with helm command including `--set global.featureGates.MSSQLServer=true` to ensure MSSQLServer CRD installation. + +- To configure TLS/SSL in `MSSQLServer`, `KubeDB` uses `cert-manager` to issue certificates. So first you have to make sure that the cluster has `cert-manager` installed. To install `cert-manager` in your cluster following steps [here](https://cert-manager.io/docs/installation/kubernetes/). 
+ +- You should be familiar with the following `KubeDB` concepts: + - [MSSQLServer](/docs/guides/mssqlserver/concepts/mssqlserver.md) + - [Availabilty Group](/docs/guides/mssqlserver/clustering/ag_cluster.md) + - [MSSQLServerOpsRequest](/docs/guides/mssqlserver/concepts/opsrequest.md) + - [Reconfigure Overview](/docs/guides/mssqlserver/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/mssqlserver](/docs/examples/mssqlserver/reconfigure) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `MSSQLServer` Availability Group using a supported version by `KubeDB` operator. Then we are going to apply `MSSQLServerOpsRequest` to reconfigure its configuration. + +### Prepare MSSQLServer Availability Group + +Now, we are going to deploy a `MSSQLServer` Availability Group with version `2022-cu12`. + +### Deploy MSSQLServer Availability Group Cluster + +At first, we need to create an Issuer/ClusterIssuer which will be used to generate the certificate used for TLS configurations. + +### Create Issuer/ClusterIssuer + +Now, we are going to create an example `Issuer` that will be used throughout the duration of this tutorial. Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`. By following the below steps, we are going to create our desired issuer, + +- Start off by generating our ca-certificates using openssl, +```bash +openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=MSSQLServer/O=kubedb" +``` +- +- Create a secret using the certificate files we have just generated, +```bash +$ kubectl create secret tls mssqlserver-ca --cert=ca.crt --key=ca.key --namespace=demo +secret/mssqlserver-ca created +``` +Now, we are going to create an `Issuer` using the `mssqlserver-ca` secret that contains the ca-certificate we have just created. Below is the YAML of the `Issuer` CR that we are going to create, + +```yaml +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: mssqlserver-ca-issuer + namespace: demo +spec: + ca: + secretName: mssqlserver-ca +``` + +Let’s create the `Issuer` CR we have shown above, +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mssqlserver/standalone/mssqlserver-ca-issuer.yaml +issuer.cert-manager.io/mssqlserver-ca-issuer created +``` +Now, we will create `mssql.conf` file containing required configuration settings. + +```ini +$ cat mssql.conf +[memory] +memorylimitmb = 2048 +``` +Here, `memorylimitmb` is set to `2048`, whereas the default value is `12280`. + +Now, we will create a secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo ms-custom-config --from-file=./mssql.conf +secret/ms-custom-config created +``` + +In this section, we are going to create a MSSQLServer object specifying `spec.configSecret` field to apply this custom configuration. 
Below is the YAML of the `MSSQLServer` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: MSSQLServer +metadata: + name: mssqlserver-ag-cluster + namespace: demo +spec: + version: "2022-cu12" + configuration: + secretName: ms-custom-config + replicas: 3 + topology: + mode: AvailabilityGroup + availabilityGroup: + databases: + - agdb1 + - agdb2 + tls: + issuerRef: + name: mssqlserver-ca-issuer + kind: Issuer + apiGroup: "cert-manager.io" + clientTLS: false + podTemplate: + spec: + containers: + - name: mssql + env: + - name: ACCEPT_EULA + value: "Y" + - name: MSSQL_PID + value: Evaluation + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + deletionPolicy: WipeOut +``` + +Let's create the `MSSQLServer` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mssqlserver/reconfigure/mssqlserver-ag-cluster.yaml +MSSQLServer.kubedb.com/mssqlserver-ag-cluster created +``` + +Now, wait until `mssqlserver-ag-cluster` has status `Ready`. i.e, + +```bash +$ kubectl get ms -n demo +NAME VERSION STATUS AGE +mssqlserver-ag-cluster 2022-cu12 Ready 5m47s +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +First we need to get the username and password to connect to a MSSQLServer instance, +```bash +$ kubectl get secrets -n demo mssqlserver-ag-cluster-auth -o jsonpath='{.data.\username}' | base64 -d +sa + +$ kubectl get secrets -n demo mssqlserver-ag-cluster-auth -o jsonpath='{.data.\password}' | base64 -d +gkBGX7RE0ap4yjHt +``` + +Now let's connect to the SQL Server instance and run internal command to check the configuration we have provided. + +```bash +$ kubectl exec -it -n demo mssqlserver-ag-cluster-0 -c mssql -- bash +mssql@mssqlserver-ag-cluster-0:/$ cat /var/opt/mssql/mssql.conf +[language] +lcid = 1033 +[memory] +memorylimitmb = 2048 +mssql@mssqlserver-ag-cluster-0:/$ /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P gkBGX7RE0ap4yjHt +1> SELECT physical_memory_kb / 1024 AS physical_memory_mb FROM sys.dm_os_sys_info; +2> go +physical_memory_mb +-------------------- + 2048 + +(1 rows affected) +``` + +As we can see from the configuration of running MSSQLServer, the value of `physical_memory_mb` has been set to `2048`. + +### Reconfigure using new config secret + +Now we will reconfigure this database to set `memorylimitmb` to `2560`. + +Now, we will edit the `mssql.conf` file containing required configuration settings. + +```ini +$ cat mssql.conf +[memory] +memorylimitmb = 2560 +``` + +Then, we will create a new secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo new-custom-config --from-file=./mssql.conf +secret/new-custom-config created +``` + +#### Create MSSQLServerOpsRequest + +Now, we will use this secret to replace the previous secret using a `MSSQLServerOpsRequest` CR. The `MSSQLServerOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MSSQLServerOpsRequest +metadata: + name: msops-reconfigure-ag + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: mssqlserver-ag-cluster + configuration: +- configSecret: +- name: new-custom-config ++ secretName: new-custom-config + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `mssqlserver-ag-cluster` database. 
+- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.secretName` specifies the name of the new secret. +- Have a look [here](/docs/guides/mssqlserver/concepts/opsrequest.md#spectimeout) on the respective sections to understand the `timeout` & `apply` fields. + +Let's create the `MSSQLServerOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mssqlserver/reconfigure/msops-reconfigure-ag.yaml +MSSQLServeropsrequest.ops.kubedb.com/msops-reconfigure-ag created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `MSSQLServer` object. + +Let's wait for `MSSQLServerOpsRequest` to be `Successful`. Run the following command to watch `MSSQLServerOpsRequest` CR, + +```bash +$ watch kubectl get MSSQLServeropsrequest -n demo +NAME TYPE STATUS AGE +msops-reconfigure-ag Reconfigure Successful 4m1s +``` + +We can see from the above output that the `MSSQLServerOpsRequest` has succeeded. If we describe the `MSSQLServerOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe MSSQLServeropsrequest -n demo msops-reconfigure-ag +Name: msops-reconfigure-ag +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: MSSQLServerOpsRequest +Metadata: + Creation Timestamp: 2024-11-11T13:07:49Z + Generation: 1 + Resource Version: 272883 + UID: 2bbc64b6-9d88-4adc-854e-de444c716f57 +Spec: + Apply: IfReady + Configuration: + Config Secret: + Name: new-custom-config + Database Ref: + Name: mssqlserver-ag-cluster + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-11-11T13:07:49Z + Message: MSSQLServer ops-request has started to reconfigure MSSQLServer nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-11-11T13:07:58Z + Message: successfully reconciled the mssqlserver with new configuration + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-11T13:08:03Z + Message: get pod; ConditionStatus:True; PodName:mssqlserver-ag-cluster-0 + Observed Generation: 1 + Status: True + Type: GetPod--mssqlserver-ag-cluster-0 + Last Transition Time: 2024-11-11T13:08:03Z + Message: evict pod; ConditionStatus:True; PodName:mssqlserver-ag-cluster-0 + Observed Generation: 1 + Status: True + Type: EvictPod--mssqlserver-ag-cluster-0 + Last Transition Time: 2024-11-11T13:08:38Z + Message: check pod running; ConditionStatus:True; PodName:mssqlserver-ag-cluster-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--mssqlserver-ag-cluster-0 + Last Transition Time: 2024-11-11T13:08:43Z + Message: get pod; ConditionStatus:True; PodName:mssqlserver-ag-cluster-1 + Observed Generation: 1 + Status: True + Type: GetPod--mssqlserver-ag-cluster-1 + Last Transition Time: 2024-11-11T13:08:43Z + Message: evict pod; ConditionStatus:True; PodName:mssqlserver-ag-cluster-1 + Observed Generation: 1 + Status: True + Type: EvictPod--mssqlserver-ag-cluster-1 + Last Transition Time: 2024-11-11T13:09:18Z + Message: check pod running; ConditionStatus:True; PodName:mssqlserver-ag-cluster-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--mssqlserver-ag-cluster-1 + Last Transition Time: 2024-11-11T13:09:23Z + Message: get pod; ConditionStatus:True; 
PodName:mssqlserver-ag-cluster-2 + Observed Generation: 1 + Status: True + Type: GetPod--mssqlserver-ag-cluster-2 + Last Transition Time: 2024-11-11T13:09:23Z + Message: evict pod; ConditionStatus:True; PodName:mssqlserver-ag-cluster-2 + Observed Generation: 1 + Status: True + Type: EvictPod--mssqlserver-ag-cluster-2 + Last Transition Time: 2024-11-11T13:09:58Z + Message: check pod running; ConditionStatus:True; PodName:mssqlserver-ag-cluster-2 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--mssqlserver-ag-cluster-2 + Last Transition Time: 2024-11-11T13:10:03Z + Message: Successfully Restarted Pods after reconfiguration + Observed Generation: 1 + Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2024-11-11T13:10:03Z + Message: Successfully completed reconfiguring for MSSQLServer + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +``` + +Now let's connect to SQL Server instance and run internal command to check the new configuration we have provided. + +```bash +$ kubectl exec -it -n demo mssqlserver-ag-cluster-0 -c mssql -- bash +mssql@mssqlserver-ag-cluster-0:/$ cat /var/opt/mssql/mssql.conf +[language] +lcid = 1033 +[memory] +memorylimitmb = 2560 +mssql@mssqlserver-ag-cluster-0:/$ /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P gkBGX7RE0ap4yjHt +1> SELECT physical_memory_kb / 1024 AS physical_memory_mb FROM sys.dm_os_sys_info; +2> go +physical_memory_mb +-------------------- + 2560 + +(1 rows affected) +``` + +As we can see from the configuration of running SQL Server, the value of `physical_memory_mb` has been changed from `2048` to `2560`. So the reconfiguration of the database is successful. + +### Reconfigure using apply config + +Now we will reconfigure this database again to set `memorylimitmb` to `3072`. This time we won't use a new secret. We will use the `applyConfig` field of the `MSSQLServerOpsRequest`. This will merge the new config in the existing secret. + +#### Create MSSQLServerOpsRequest + +Now, we will use the new configuration in the `applyConfig` field in the `MSSQLServerOpsRequest` CR. The `MSSQLServerOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MSSQLServerOpsRequest +metadata: + name: msops-reconfigure-ag-apply + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: mssqlserver-ag-cluster + configuration: + applyConfig: + mssql.conf: |- + [memory] + memorylimitmb = 3072 + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `mssqlserver-ag-cluster` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.applyConfig` specifies the new configuration that will be merged in the existing secret. + +Let's create the `MSSQLServerOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mssqlserver/reconfigure/msops-reconfigure-ag-apply.yaml +MSSQLServeropsrequest.ops.kubedb.com/msops-reconfigure-ag-apply created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. + +Let's wait for `MSSQLServerOpsRequest` to be `Successful`. 
Run the following command to watch `MSSQLServerOpsRequest` CR, + +---- + + +```bash +$ watch kubectl get MSSQLServeropsrequest -n demo +msops-reconfigure-ag-apply Reconfigure Successful 3m34s +``` + +We can see from the above output that the `MSSQLServerOpsRequest` has succeeded. If we describe the `MSSQLServerOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe MSSQLServeropsrequest -n demo msops-reconfigure-ag-apply +Name: msops-reconfigure-ag-apply +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: MSSQLServerOpsRequest +Metadata: + Creation Timestamp: 2024-11-11T13:16:11Z + Generation: 1 + Resource Version: 273846 + UID: 434d35ef-89e5-4d1a-aac2-22941346d77e +Spec: + Apply: IfReady + Configuration: + Apply Config: + mssql.conf: [memory] +memorylimitmb = 3072 + Database Ref: + Name: mssqlserver-ag-cluster + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-11-11T13:16:11Z + Message: MSSQLServer ops-request has started to reconfigure MSSQLServer nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-11-11T13:16:14Z + Message: Successfully prepared user provided custom config secret + Observed Generation: 1 + Reason: PrepareCustomConfig + Status: True + Type: PrepareCustomConfig + Last Transition Time: 2024-11-11T13:16:19Z + Message: successfully reconciled the mssqlserver with new configuration + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-11T13:16:24Z + Message: get pod; ConditionStatus:True; PodName:mssqlserver-ag-cluster-0 + Observed Generation: 1 + Status: True + Type: GetPod--mssqlserver-ag-cluster-0 + Last Transition Time: 2024-11-11T13:16:24Z + Message: evict pod; ConditionStatus:True; PodName:mssqlserver-ag-cluster-0 + Observed Generation: 1 + Status: True + Type: EvictPod--mssqlserver-ag-cluster-0 + Last Transition Time: 2024-11-11T13:16:59Z + Message: check pod running; ConditionStatus:True; PodName:mssqlserver-ag-cluster-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--mssqlserver-ag-cluster-0 + Last Transition Time: 2024-11-11T13:17:04Z + Message: get pod; ConditionStatus:True; PodName:mssqlserver-ag-cluster-1 + Observed Generation: 1 + Status: True + Type: GetPod--mssqlserver-ag-cluster-1 + Last Transition Time: 2024-11-11T13:17:04Z + Message: evict pod; ConditionStatus:True; PodName:mssqlserver-ag-cluster-1 + Observed Generation: 1 + Status: True + Type: EvictPod--mssqlserver-ag-cluster-1 + Last Transition Time: 2024-11-11T13:17:39Z + Message: check pod running; ConditionStatus:True; PodName:mssqlserver-ag-cluster-1 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--mssqlserver-ag-cluster-1 + Last Transition Time: 2024-11-11T13:17:44Z + Message: get pod; ConditionStatus:True; PodName:mssqlserver-ag-cluster-2 + Observed Generation: 1 + Status: True + Type: GetPod--mssqlserver-ag-cluster-2 + Last Transition Time: 2024-11-11T13:17:44Z + Message: evict pod; ConditionStatus:True; PodName:mssqlserver-ag-cluster-2 + Observed Generation: 1 + Status: True + Type: EvictPod--mssqlserver-ag-cluster-2 + Last Transition Time: 2024-11-11T13:18:19Z + Message: check pod running; ConditionStatus:True; PodName:mssqlserver-ag-cluster-2 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--mssqlserver-ag-cluster-2 + Last Transition Time: 2024-11-11T13:18:24Z + Message: Successfully 
Restarted Pods after reconfiguration + Observed Generation: 1 + Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2024-11-11T13:18:24Z + Message: Successfully completed reconfiguring for MSSQLServer + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +``` + +Now let's connect to the SQL Server instance and run a internal command to check the new configuration we have provided. + +```bash +$ kubectl exec -it -n demo mssqlserver-ag-cluster-0 -c mssql -- bash +mssql@mssqlserver-ag-cluster-0:/$ cat /var/opt/mssql/mssql.conf +[language] +lcid = 1033 +[memory] +memorylimitmb = 3072 +mssql@mssqlserver-ag-cluster-0:/$ /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P gkBGX7RE0ap4yjHt +1> SELECT physical_memory_kb / 1024 AS physical_memory_mb FROM sys.dm_os_sys_info; +2> go +physical_memory_mb +-------------------- + 3072 + +(1 rows affected) +``` + +As we can see from the configuration of running SQL Server, the value of `physical_memory_mb` has been changed from `2560` to `3072`. So the reconfiguration of the database using the `applyConfig` field is successful. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete ms -n demo mssqlserver-ag-cluster +kubectl delete msops -n demo msops-reconfigure-ag msops-reconfigure-ag-apply +kubectl delete issuer -n demo mssqlserver-ca-issuer +kubectl delete secret -n demo mssqlserver-ca +kubectl delete ns demo +``` \ No newline at end of file diff --git a/docs/guides/mssqlserver/reconfigure/standalone.md b/docs/guides/mssqlserver/reconfigure/standalone.md index 9fe1745e4b..766b55bf4e 100644 --- a/docs/guides/mssqlserver/reconfigure/standalone.md +++ b/docs/guides/mssqlserver/reconfigure/standalone.md @@ -97,7 +97,7 @@ $ kubectl create secret generic -n demo ms-custom-config --from-file=./mssql.con secret/ms-custom-config created ``` -In this section, we are going to create a MSSQLServer object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `MSSQLServer` CR that we are going to create, +In this section, we are going to create a MSSQLServer object specifying `spec.configuration` field to apply this custom configuration. Below is the YAML of the `MSSQLServer` CR that we are going to create, ```yaml apiVersion: kubedb.com/v1alpha2 @@ -107,8 +107,8 @@ metadata: namespace: demo spec: version: "2022-cu12" - configSecret: - name: ms-custom-config + configuration: + secretName: ms-custom-config replicas: 1 tls: issuerRef: diff --git a/docs/guides/mssqlserver/reconfigure/standalone.md.bak b/docs/guides/mssqlserver/reconfigure/standalone.md.bak new file mode 100644 index 0000000000..02bfb633df --- /dev/null +++ b/docs/guides/mssqlserver/reconfigure/standalone.md.bak @@ -0,0 +1,533 @@ +--- +title: Reconfigure Standalone MSSQLServer Database +menu: + docs_{{ .version }}: + identifier: ms-reconfigure-standalone + name: Standalone + parent: ms-reconfigure + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure MSSQLServer Standalone Database + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a MSSQLServer standalone database. + +## Before You Begin + +- You need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. 
If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). Make sure install with helm command including `--set global.featureGates.MSSQLServer=true` to ensure MSSQLServer CRD installation. + +- To configure TLS/SSL in `MSSQLServer`, `KubeDB` uses `cert-manager` to issue certificates. So first you have to make sure that the cluster has `cert-manager` installed. To install `cert-manager` in your cluster following steps [here](https://cert-manager.io/docs/installation/kubernetes/). + +- You should be familiar with the following `KubeDB` concepts: + - [MSSQLServer](/docs/guides/mssqlserver/concepts/mssqlserver.md) + - [MSSQLServerOpsRequest](/docs/guides/mssqlserver/concepts/opsrequest.md) + - [Reconfigure Overview](/docs/guides/mssqlserver/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/mssqlserver](/docs/examples/mssqlserver/reconfigure) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +Now, we are going to deploy a `MSSQLServer` standalone using a supported version by `KubeDB` operator. Then we are going to apply `MSSQLServerOpsRequest` to reconfigure its configuration. + +### Prepare MSSQLServer Standalone Database + +Now, we are going to deploy a `MSSQLServer` standalone database with version `2022-cu12`. + +### Deploy MSSQLServer standalone + +At first, we need to create an Issuer/ClusterIssuer which will be used to generate the certificate used for TLS configurations. + +### Create Issuer/ClusterIssuer + +Now, we are going to create an example `Issuer` that will be used throughout the duration of this tutorial. Alternatively, you can follow this [cert-manager tutorial](https://cert-manager.io/docs/configuration/ca/) to create your own `Issuer`. By following the below steps, we are going to create our desired issuer, + +- Start off by generating our ca-certificates using openssl, +```bash +openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./ca.key -out ./ca.crt -subj "/CN=MSSQLServer/O=kubedb" +``` +- +- Create a secret using the certificate files we have just generated, +```bash +$ kubectl create secret tls mssqlserver-ca --cert=ca.crt --key=ca.key --namespace=demo +secret/mssqlserver-ca created +``` +Now, we are going to create an `Issuer` using the `mssqlserver-ca` secret that contains the ca-certificate we have just created. Below is the YAML of the `Issuer` CR that we are going to create, + +```yaml +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: mssqlserver-ca-issuer + namespace: demo +spec: + ca: + secretName: mssqlserver-ca +``` + +Let’s create the `Issuer` CR we have shown above, +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mssqlserver/standalone/mssqlserver-ca-issuer.yaml +issuer.cert-manager.io/mssqlserver-ca-issuer created +``` + +Now, we will create `mssql.conf` file containing required configuration settings. + +```ini +$ cat mssql.conf +[memory] +memorylimitmb = 2048 +``` +Here, `memorylimitmb` is set to `2048`, whereas the default value is `12280`. + +Now, we will create a secret with this configuration file. 
+ +```bash +$ kubectl create secret generic -n demo ms-custom-config --from-file=./mssql.conf +secret/ms-custom-config created +``` + +In this section, we are going to create a MSSQLServer object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `MSSQLServer` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1alpha2 +kind: MSSQLServer +metadata: + name: ms-standalone + namespace: demo +spec: + version: "2022-cu12" + configuration: + secretName: ms-custom-config + replicas: 1 + tls: + issuerRef: + name: mssqlserver-ca-issuer + kind: Issuer + apiGroup: "cert-manager.io" + clientTLS: false + podTemplate: + spec: + containers: + - name: mssql + env: + - name: ACCEPT_EULA + value: "Y" + - name: MSSQL_PID + value: Evaluation + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + deletionPolicy: WipeOut +``` + +Let's create the `MSSQLServer` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mssqlserver/reconfigure/ms-standalone.yaml +MSSQLServer.kubedb.com/ms-standalone created +``` + +Now, wait until `ms-standalone` has status `Ready`. i.e, + +```bash +$ kubectl get ms -n demo +NAME VERSION STATUS AGE +ms-standalone 4.4.26 Ready 23s +``` + +Now, we will check if the database has started with the custom configuration we have provided. + +First we need to get the username and password to connect to a MSSQLServer instance, +```bash +$ kubectl get secrets -n demo ms-standalone-auth -o jsonpath='{.data.\username}' | base64 -d +sa + +$ kubectl get secrets -n demo ms-standalone-auth -o jsonpath='{.data.\password}' | base64 -d +SERtEyH1RMMEsvE0 +``` + +Now let's connect to the SQL Server instance and run internal command to check the configuration we have provided. + +```bash +$ kubectl exec -it -n demo ms-standalone-0 -c mssql -- bash +mssql@ms-standalone-0:/$ cat /var/opt/mssql/mssql.conf +[language] +lcid = 1033 +[memory] +memorylimitmb = 2048 +mssql@ms-standalone-0:/$ /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P SERtEyH1RMMEsvE0 +1> SELECT physical_memory_kb / 1024 AS physical_memory_mb FROM sys.dm_os_sys_info; +2> go +physical_memory_mb +-------------------- + 2048 + +(1 rows affected) +1> +``` + +As we can see from the configuration of running MSSQLServer, the value of `physical_memory_mb` has been set to `2048`. + +### Reconfigure using new secret + +Now we will reconfigure this database to set `memorylimitmb` to `2560`. + +Now, we will edit the `mssql.conf` file containing required configuration settings. + +```ini +$ cat mssql.conf +[memory] +memorylimitmb = 2560 +``` + +Then, we will create a new secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo new-custom-config --from-file=./mssql.conf +secret/new-custom-config created +``` + +#### Create MSSQLServerOpsRequest + +Now, we will use this secret to replace the previous secret using a `MSSQLServerOpsRequest` CR. The `MSSQLServerOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MSSQLServerOpsRequest +metadata: + name: msops-reconfigure-standalone + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: ms-standalone + configuration: + secretName: new-custom-config + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `ms-standalone` database. 
+- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.secretName` specifies the name of the new secret. +- Have a look [here](/docs/guides/mssqlserver/concepts/opsrequest.md#spectimeout) on the respective sections to understand the `timeout` & `apply` fields. + +Let's create the `MSSQLServerOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mssqlserver/reconfigure/mops-reconfigure-standalone.yaml +MSSQLServeropsrequest.ops.kubedb.com/mops-reconfigure-standalone created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `MSSQLServer` object. + +Let's wait for `MSSQLServerOpsRequest` to be `Successful`. Run the following command to watch `MSSQLServerOpsRequest` CR, + +```bash +$ watch kubectl get MSSQLServeropsrequest -n demo +Every 2.0s: kubectl get MSSQLServeropsrequest -n demo + +NAME TYPE STATUS AGE +msops-reconfigure-standalone Reconfigure Successful 2m42s +``` + +We can see from the above output that the `MSSQLServerOpsRequest` has succeeded. If we describe the `MSSQLServerOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe MSSQLServeropsrequest -n demo mops-reconfigure-standalone +Name: msops-reconfigure-standalone +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: MSSQLServerOpsRequest +Metadata: + Creation Timestamp: 2024-11-07T12:46:35Z + Generation: 1 + Resource Version: 160710 + UID: a3859b47-575c-40a5-84d4-38fb9f37a8ef +Spec: + Apply: IfReady + Configuration: + Secret Name: new-custom-config + Database Ref: + Name: ms-standalone + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-11-07T12:46:35Z + Message: MSSQLServer ops-request has started to reconfigure MSSQLServer nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-11-07T12:46:49Z + Message: successfully reconciled the mssqlserver with new configuration + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-07T12:46:54Z + Message: get pod; ConditionStatus:True; PodName:ms-standalone-0 + Observed Generation: 1 + Status: True + Type: GetPod--ms-standalone-0 + Last Transition Time: 2024-11-07T12:46:54Z + Message: evict pod; ConditionStatus:True; PodName:ms-standalone-0 + Observed Generation: 1 + Status: True + Type: EvictPod--ms-standalone-0 + Last Transition Time: 2024-11-07T12:47:34Z + Message: check pod running; ConditionStatus:True; PodName:ms-standalone-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--ms-standalone-0 + Last Transition Time: 2024-11-07T12:47:39Z + Message: Successfully Restarted Pods after reconfiguration + Observed Generation: 1 + Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2024-11-07T12:47:39Z + Message: Successfully completed reconfiguring for MSSQLServer + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 3m45s KubeDB Ops-manager Operator Start processing for MSSQLServerOpsRequest: demo/msops-reconfigure-standalone + Normal Starting 3m45s KubeDB Ops-manager Operator Pausing MSSQLServer 
database: demo/ms-standalone + Normal Successful 3m45s KubeDB Ops-manager Operator Successfully paused MSSQLServer database: demo/ms-standalone for MSSQLServerOpsRequest: msops-reconfigure-standalone + Normal UpdatePetSets 3m31s KubeDB Ops-manager Operator successfully reconciled the mssqlserver with new configuration + Warning get pod; ConditionStatus:True; PodName:ms-standalone-0 3m26s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:ms-standalone-0 + Warning evict pod; ConditionStatus:True; PodName:ms-standalone-0 3m26s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:ms-standalone-0 + Warning check pod running; ConditionStatus:False; PodName:ms-standalone-0 3m21s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:ms-standalone-0 + Warning check pod running; ConditionStatus:True; PodName:ms-standalone-0 2m46s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:ms-standalone-0 + Normal RestartPods 2m41s KubeDB Ops-manager Operator Successfully Restarted Pods after reconfiguration + Normal Starting 2m41s KubeDB Ops-manager Operator Resuming MSSQLServer database: demo/ms-standalone + Normal Successful 2m41s KubeDB Ops-manager Operator Successfully resumed MSSQLServer database: demo/ms-standalone for MSSQLServerOpsRequest: msops-reconfigure-standalone +``` + +Now let's connect to SQL Server instance and run a internal command to check the new configuration we have provided. + +```bash +$ kubectl exec -it -n demo ms-standalone-0 -c mssql -- bash +mssql@ms-standalone-0:/$ cat /var/opt/mssql/mssql.conf +[language] +lcid = 1033 +[memory] +memorylimitmb = 2560 +mssql@ms-standalone-0:/$ /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P SERtEyH1RMMEsvE0 +1> SELECT physical_memory_kb / 1024 AS physical_memory_mb FROM sys.dm_os_sys_info; +2> go +physical_memory_mb +-------------------- + 2560 + +(1 rows affected) +1> +``` + +As we can see from the configuration of running SQL Server, the value of `physical_memory_mb` has been changed from `2048` to `2560`. So the reconfiguration of the database is successful. + + +### Reconfigure using apply config + +Now we will reconfigure this database again to set `memorylimitmb` to `3072`. This time we won't use a new secret. We will use the `applyConfig` field of the `MSSQLServerOpsRequest`. This will merge the new config in the existing secret. + +#### Create MSSQLServerOpsRequest + +Now, we will use the new configuration in the `applyConfig` field in the `MSSQLServerOpsRequest` CR. The `MSSQLServerOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: MSSQLServerOpsRequest +metadata: + name: msops-reconfigure-standalone-apply + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: ms-standalone + configuration: + applyConfig: + mssql.conf: |- + [memory] + memorylimitmb = 3072 + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `ms-standalone` database. +- `spec.type` specifies that we are performing `Reconfigure` on our database. +- `spec.configuration.applyConfig` specifies the new configuration that will be merged in the existing secret. 
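Before creating the request, it can be handy to note which configuration secret the database object currently references so you can compare it after the merge. This is a quick sketch that assumes the new `.spec.configuration.secretName` field introduced by this API change:

```bash
# Show the configuration secret currently referenced by the MSSQLServer object
$ kubectl get ms -n demo ms-standalone -o jsonpath='{.spec.configuration.secretName}'
```

After the ops request succeeds, the referenced secret's `mssql.conf` should carry the merged `[memory]` section with `memorylimitmb = 3072`.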
+ +Let's create the `MSSQLServerOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/mssqlserver/reconfigure/msops-reconfigure-standalone-apply.yaml +MSSQLServeropsrequest.ops.kubedb.com/msops-reconfigure-standalone-apply created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. + +Let's wait for `MSSQLServerOpsRequest` to be `Successful`. Run the following command to watch `MSSQLServerOpsRequest` CR, + +```bash +$ watch kubectl get MSSQLServeropsrequest -n demo +Every 2.0s: kubectl get MSSQLServeropsrequest -n demo + +NAME TYPE STATUS AGE +msops-reconfigure-standalone-apply Reconfigure Successful 2m2s +``` + +We can see from the above output that the `MSSQLServerOpsRequest` has succeeded. If we describe the `MSSQLServerOpsRequest` we will get an overview of the steps that were followed to reconfigure the database. + +```bash +$ kubectl describe MSSQLServeropsrequest -n demo msops-reconfigure-standalone-apply +Name: msops-reconfigure-standalone-apply +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: MSSQLServerOpsRequest +Metadata: + Creation Timestamp: 2024-11-07T12:57:43Z + Generation: 1 + Resource Version: 161738 + UID: e9da170c-5d6b-41fd-ae05-b39c6c5f3029 +Spec: + Apply: IfReady + Configuration: + Apply Config: + mssql.conf: [memory] +memorylimitmb = 3072 + Database Ref: + Name: ms-standalone + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-11-07T12:57:43Z + Message: MSSQLServer ops-request has started to reconfigure MSSQLServer nodes + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-11-07T12:57:46Z + Message: Successfully prepared user provided custom config secret + Observed Generation: 1 + Reason: PrepareCustomConfig + Status: True + Type: PrepareCustomConfig + Last Transition Time: 2024-11-07T12:57:51Z + Message: successfully reconciled the mssqlserver with new configuration + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-07T12:57:56Z + Message: get pod; ConditionStatus:True; PodName:ms-standalone-0 + Observed Generation: 1 + Status: True + Type: GetPod--ms-standalone-0 + Last Transition Time: 2024-11-07T12:57:56Z + Message: evict pod; ConditionStatus:True; PodName:ms-standalone-0 + Observed Generation: 1 + Status: True + Type: EvictPod--ms-standalone-0 + Last Transition Time: 2024-11-07T12:58:31Z + Message: check pod running; ConditionStatus:True; PodName:ms-standalone-0 + Observed Generation: 1 + Status: True + Type: CheckPodRunning--ms-standalone-0 + Last Transition Time: 2024-11-07T12:58:36Z + Message: Successfully Restarted Pods after reconfiguration + Observed Generation: 1 + Reason: RestartPods + Status: True + Type: RestartPods + Last Transition Time: 2024-11-07T12:58:36Z + Message: Successfully completed reconfiguring for MSSQLServer + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2m40s KubeDB Ops-manager Operator Start processing for MSSQLServerOpsRequest: demo/msops-reconfigure-standalone-apply + Normal Starting 2m40s KubeDB Ops-manager Operator Pausing MSSQLServer database: demo/ms-standalone + 
Normal Successful 2m40s KubeDB Ops-manager Operator Successfully paused MSSQLServer database: demo/ms-standalone for MSSQLServerOpsRequest: msops-reconfigure-standalone-apply + Normal UpdatePetSets 2m32s KubeDB Ops-manager Operator successfully reconciled the mssqlserver with new configuration + Warning get pod; ConditionStatus:True; PodName:ms-standalone-0 2m27s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:ms-standalone-0 + Warning evict pod; ConditionStatus:True; PodName:ms-standalone-0 2m27s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:ms-standalone-0 + Warning check pod running; ConditionStatus:False; PodName:ms-standalone-0 2m22s KubeDB Ops-manager Operator check pod running; ConditionStatus:False; PodName:ms-standalone-0 + Warning check pod running; ConditionStatus:True; PodName:ms-standalone-0 112s KubeDB Ops-manager Operator check pod running; ConditionStatus:True; PodName:ms-standalone-0 + Normal RestartPods 107s KubeDB Ops-manager Operator Successfully Restarted Pods after reconfiguration + Normal Starting 107s KubeDB Ops-manager Operator Resuming MSSQLServer database: demo/ms-standalone + Normal Successful 107s KubeDB Ops-manager Operator Successfully resumed MSSQLServer database: demo/ms-standalone for MSSQLServerOpsRequest: msops-reconfigure-standalone-apply +``` + +Now let's connect to the SQL Server instance and run a internal command to check the new configuration we have provided. + +```bash +$ kubectl exec -it -n demo ms-standalone-0 -c mssql -- bash +mssql@ms-standalone-0:/$ cat /var/opt/mssql/mssql.conf +[language] +lcid = 1033 +[memory] +memorylimitmb = 3072 +mssql@ms-standalone-0:/$ /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P SERtEyH1RMMEsvE0 +1> SELECT physical_memory_kb / 1024 AS physical_memory_mb FROM sys.dm_os_sys_info; +2> go +physical_memory_mb +-------------------- + 3072 + +(1 rows affected) +1> +``` + +As we can see from the configuration of running SQL Server, the value of `physical_memory_mb` has been changed from `2560` to `3072`. So the reconfiguration of the database using the `applyConfig` field is successful. + + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete ms -n demo ms-standalone +kubectl delete MSSQLServeropsrequest -n demo mops-reconfigure-standalone msops-reconfigure-standalone-apply +``` \ No newline at end of file diff --git a/docs/guides/mysql/concepts/database/index.md b/docs/guides/mysql/concepts/database/index.md index 28af5d2070..bf763696f1 100644 --- a/docs/guides/mysql/concepts/database/index.md +++ b/docs/guides/mysql/concepts/database/index.md @@ -69,8 +69,8 @@ spec: - localhost ipAddresses: - "127.0.0.1" - configSecret: - name: my-custom-config + configuration: + secretName: my-custom-config podTemplate: metadata: annotations: @@ -274,9 +274,9 @@ The following fields are configurable in the `spec.tls` section: - `uriSANs` (optional) is a list of URI Subject Alternative Names to be set in the Certificate. - `emailSANs` (optional) is a list of email Subject Alternative Names to be set in the Certificate. -### spec.configSecret +### spec.configuration -`spec.configSecret` is an optional field that allows users to provide custom configuration for MySQL. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). So you can use any Kubernetes supported volume source such as `configMap`, `secret`, `azureDisk` etc. 
To learn more about how to use a custom configuration file see [here](/docs/guides/mysql/configuration/config-file/index.md). +`spec.configuration` is an optional field that allows users to provide custom configuration for MySQL. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). So you can use any Kubernetes supported volume source such as `configMap`, `secret`, `azureDisk` etc. To learn more about how to use a custom configuration file see [here](/docs/guides/mysql/configuration/config-file/index.md). ### spec.podTemplate diff --git a/docs/guides/mysql/concepts/database/index.md.bak b/docs/guides/mysql/concepts/database/index.md.bak new file mode 100644 index 0000000000..569ed72cba --- /dev/null +++ b/docs/guides/mysql/concepts/database/index.md.bak @@ -0,0 +1,446 @@ +--- +title: MySQL CRD +menu: + docs_{{ .version }}: + identifier: guides-mysql-concepts-database + name: MySQL + parent: guides-mysql-concepts + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# MySQL + +## What is MySQL + +`MySQL` is a Kubernetes `Custom Resource Definitions` (CRD). It provides declarative configuration for [MySQL](https://www.mysql.com/) in a Kubernetes native way. You only need to describe the desired database configuration in a MySQL object, and the KubeDB operator will create Kubernetes objects in the desired state for you. + +## MySQL Spec + +As with all other Kubernetes objects, a MySQL needs `apiVersion`, `kind`, and `metadata` fields. It also needs a `.spec` section. Below is an example MySQL object. + +```yaml +apiVersion: kubedb.com/v1 +kind: MySQL +metadata: + name: m1 + namespace: demo +spec: + version: "9.1.0" + topology: + mode: GroupReplication + authSecret: + kind: Secret + name: m1-auth + storageType: "Durable" + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + init: + script: + configMap: + name: my-init-script + monitor: + agent: prometheus.io/operator + prometheus: + serviceMonitor: + labels: + app: kubedb + interval: 10s + requireSSL: true + tls: + issuerRef: + apiGroup: cert-manager.io + kind: Issuer + name: mysql-issuer + certificates: + - alias: server + subject: + organizations: + - kubedb:server + dnsNames: + - localhost + ipAddresses: + - "127.0.0.1" + configuration: + secretName: my-custom-config + podTemplate: + metadata: + annotations: + passMe: ToDatabasePod + controller: + annotations: + passMe: ToPetSet + spec: + serviceAccountName: my-service-account + schedulerName: my-scheduler + nodeSelector: + disktype: ssd + imagePullSecrets: + - name: myregistrykey + containers: + - name: mysql + args: + - --character-set-server=utf8mb4 + env: + - name: MYSQL_DATABASE + value: myDB + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + serviceTemplates: + - alias: primary + metadata: + annotations: + passMe: ToService + spec: + type: NodePort + ports: + - name: http + port: 9200 + deletionPolicy: Halt +``` + +### spec.version + +`spec.version` is a required field specifying the name of the [MySQLVersion](/docs/guides/mysql/concepts/catalog/index.md) crd where the docker images are specified. 
Currently, when you install KubeDB, it creates the following `MySQLVersion` resources, + +- `9.0.1`, `9.1.0` +- `8.0.35`, `8.0.36`, `8.1.0`, `8.2.0`, `8.4.2`, `8.4.3` +- `8.0.31-innodb` +- `5.7.44`, `5.7.42-debian` + +### Handling `mbind: Operation not permitted` +On certain platforms (e.g., when using specific security profiles), for some versions of `mysql`, you may see log messages like: +`mbind: Operation not permitted` + +This indicates that the `mysql` container needs the `SYS_NICE` kernel capability to perform CPU‐affinity or real-time scheduling operations. You can grant this capability by extending your Pod spec as follows: +```yaml +spec: + podTemplate: + spec: + containers: + - name: mysql + securityContext: + capabilities: + add: ["SYS_NICE"] + drop: ["ALL"] +``` +This ensures that only the SYS_NICE capability is added—while all others are dropped—keeping your container’s security posture minimal. + +### spec.topology + +`spec.topology` is an optional field that provides a way to configure HA, fault-tolerant MySQL cluster. This field enables you to specify the clustering mode. Currently, we support only MySQL Group Replication. KubeDB uses `PodDisruptionBudget` to ensure that majority of the group replicas are available during [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions) so that quorum is maintained and no data loss has occurred. + +You can specify the following fields in `spec.topology` field, + +- `mode` specifies the clustering mode for MySQL. For now, the supported value is `"GroupReplication"` for MySQL Group Replication. This field is required if you want to deploy MySQL cluster. + +- `group` is an optional field to configure a group replication. It contains the following fields: + - `name` is an optional field to specify the name for the group. It must be a version 4 UUID if specified. + +### spec.authSecret + +`spec.authSecret` is an optional field that points to a Secret used to hold credentials for `mysql` root user. If not set, the KubeDB operator creates a new Secret `{mysql-object-name}-auth` for storing the password for `mysql` root user for each MySQL object. If you want to use an existing secret please specify that when creating the MySQL object using `spec.authSecret.name`. + +This secret contains a `user` key and a `password` key which contains the `username` and `password` respectively for `mysql` root user. Here, the value of `user` key is fixed to be `root`. + +Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator (version 0.13.0 and higher). + +Example: + +```bash +$ kubectl create secret generic m1-auth -n demo \ +--from-literal=user=root \ +--from-literal=password=6q8u_2jMOW-OOZXk +secret "m1-auth" created +``` + +```yaml +apiVersion: v1 +data: + password: NnE4dV8yak1PVy1PT1pYaw== + user: cm9vdA== +kind: Secret +metadata: + name: m1-auth + namespace: demo +type: Opaque +``` + +### spec.storageType + +`spec.storageType` is an optional field that specifies the type of storage to use for the database. It can be either `Durable` or `Ephemeral`. The default value of this field is `Durable`. If `Ephemeral` is used then KubeDB will create MySQL database using [emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) volume. In this case, you don't have to specify `spec.storage` field. 
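For illustration, a minimal sketch of an ephemeral setup is shown below; the object name is hypothetical, and no `spec.storage` section is required because the data is kept on an `emptyDir` volume (and is therefore removed when the pod is deleted):

```yaml
apiVersion: kubedb.com/v1
kind: MySQL
metadata:
  name: ephemeral-mysql    # hypothetical name, for illustration only
  namespace: demo
spec:
  version: "9.1.0"
  storageType: Ephemeral   # data lives on an emptyDir volume; no spec.storage needed
  deletionPolicy: WipeOut
```

Because the data does not survive pod deletion, this mode is generally only suitable for testing or demo workloads.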
+ +### spec.storage + +Since 0.9.0-rc.0, If you set `spec.storageType:` to `Durable`, then `spec.storage` is a required field that specifies the StorageClass of PVCs dynamically allocated to store data for the database. This storage spec will be passed to the PetSet created by KubeDB operator to run database pods. You can specify any StorageClass available in your cluster with appropriate resource requests. + +- `spec.storage.storageClassName` is the name of the StorageClass used to provision PVCs. PVCs don’t necessarily have to request a class. A PVC with its storageClassName set equal to "" is always interpreted to be requesting a PV with no class, so it can only be bound to PVs with no class (no annotation or one set equal to ""). A PVC with no storageClassName is not quite the same and is treated differently by the cluster depending on whether the DefaultStorageClass admission plugin is turned on. +- `spec.storage.accessModes` uses the same conventions as Kubernetes PVCs when requesting storage with specific access modes. +- `spec.storage.resources` can be used to request specific quantities of storage. This follows the same resource model used by PVCs. + +To learn how to configure `spec.storage`, please visit the links below: + +- https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims + +### spec.init + +`spec.init` is an optional section that can be used to initialize a newly created MySQL database. MySQL databases can be initialized in one of two ways: + +- Initialize from Script + +#### Initialize via Script + +To initialize a MySQL database using a script (shell script, sql script, etc.), set the `spec.init.script` section when creating a MySQL object. It will execute files alphabetically with extensions `.sh` , `.sql` and `.sql.gz` that is found in the repository. The scripts inside child folders will be skipped. script must have the following information: + +- [VolumeSource](https://kubernetes.io/docs/concepts/storage/volumes/#types-of-volumes): Where your script is loaded from. + +Below is an example showing how a script from a configMap can be used to initialize a MySQL database. + +```yaml +apiVersion: kubedb.com/v1 +kind: MySQL +metadata: + name: m1 + namespace: demo +spec: + version: 9.1.0 + init: + script: + configMap: + name: mysql-init-script +``` + +In the above example, KubeDB operator will launch a Job to execute all js script of `mysql-init-script` in alphabetical order once PetSet pods are running. For more details tutorial on how to initialize from script, please visit [here](/docs/guides/mysql/initialization/using_script.md). + +### spec.monitor + +MySQL managed by KubeDB can be monitored with builtin-Prometheus and Prometheus operator out-of-the-box. To learn more, + +- [Monitor MySQL with builtin Prometheus](/docs/guides/mysql/monitoring/builtin-prometheus/index.md) +- [Monitor MySQL with Prometheus operator](/docs/guides/mysql/monitoring/prometheus-operator/index.md) + +### spec.requireSSL + +`spec.requireSSL` specifies whether the client connections require SSL. If `spec.requireSSL` is `true` then the server permits only TCP/IP connections that use SSL, or connections that use a socket file (on Unix) or shared memory (on Windows). The server rejects any non-secure connection attempt. For more details, please visit [here](https://dev.mysql.com/doc/refman/5.7/en/using-encrypted-connections.html) + +### spec.tls + +`spec.tls` specifies the TLS/SSL configurations for the MySQL. 
+ +The following fields are configurable in the `spec.tls` section: + +- `issuerRef` is a reference to the `Issuer` or `ClusterIssuer` CR of [cert-manager](https://cert-manager.io/docs/concepts/issuer/) that will be used by `KubeDB` to generate necessary certificates. + + - `apiGroup` is the group name of the resource being referenced. The value for `Issuer` or `ClusterIssuer` is "cert-manager.io" (cert-manager v0.12.0 and later). + - `kind` is the type of resource being referenced. KubeDB supports both `Issuer` and `ClusterIssuer` as values for this field. + - `name` is the name of the resource (`Issuer` or `ClusterIssuer`) being referenced. + +- `certificates` (optional) are a list of certificates used to configure the server and/or client certificate. It has the following fields: + + - `alias` represents the identifier of the certificate. It has the following possible value: + - `server` is used for server certificate identification. + - `client` is used for client certificate identification. + - `metrics-exporter` is used for metrics exporter certificate identification. + - `secretName` (optional) specifies the k8s secret name that holds the certificates. + >This field is optional. If the user does not specify this field, the default secret name will be created in the following format: `--cert`. + - `subject` (optional) specifies an `X.509` distinguished name. It has the following possible field, + - `organizations` (optional) are the list of different organization names to be used on the Certificate. + - `organizationalUnits` (optional) are the list of different organization unit name to be used on the Certificate. + - `countries` (optional) are the list of country names to be used on the Certificate. + - `localities` (optional) are the list of locality names to be used on the Certificate. + - `provinces` (optional) are the list of province names to be used on the Certificate. + - `streetAddresses` (optional) are the list of a street address to be used on the Certificate. + - `postalCodes` (optional) are the list of postal code to be used on the Certificate. + - `serialNumber` (optional) is a serial number to be used on the Certificate. + You can found more details from [Here](https://golang.org/pkg/crypto/x509/pkix/#Name) + + - `duration` (optional) is the period during which the certificate is valid. + - `renewBefore` (optional) is a specifiable time before expiration duration. + - `dnsNames` (optional) is a list of subject alt names to be used in the Certificate. + - `ipAddresses` (optional) is a list of IP addresses to be used in the Certificate. + - `uriSANs` (optional) is a list of URI Subject Alternative Names to be set in the Certificate. + - `emailSANs` (optional) is a list of email Subject Alternative Names to be set in the Certificate. + +### spec.configSecret + +`spec.configSecret` is an optional field that allows users to provide custom configuration for MySQL. This field accepts a [`VolumeSource`](https://github.com/kubernetes/api/blob/release-1.11/core/v1/types.go#L47). So you can use any Kubernetes supported volume source such as `configMap`, `secret`, `azureDisk` etc. To learn more about how to use a custom configuration file see [here](/docs/guides/mysql/configuration/config-file/index.md). + +### spec.podTemplate + +KubeDB allows providing a template for database pod through `spec.podTemplate`. KubeDB operator will pass the information provided in `spec.podTemplate` to the PetSet created for the MySQL database. 
+ +KubeDB accepts the following fields to set in `spec.podTemplate:` + +- metadata: + - annotations (pod's annotation) +- controller: + - annotations (petset's annotation) +- spec: + - containers + - volumes + - podPlacementPolicy + - initContainers + - imagePullSecrets + - nodeSelector + - serviceAccountName + - schedulerName + - tolerations + - priorityClassName + - priority + - securityContext + +You can check out the full list [here](https://github.com/kmodules/offshoot-api/blob/master/api/v2/types.go#L26C1-L279C1). +Uses of some field of `spec.podTemplate` is described below, + +#### spec.podTemplate.spec.tolerations + +The `spec.podTemplate.spec.tolerations` is an optional field. This can be used to specify the pod's tolerations. + +#### spec.podTemplate.spec.volumes + +The `spec.podTemplate.spec.volumes` is an optional field. This can be used to provide the list of volumes that can be mounted by containers belonging to the pod. + +#### spec.podTemplate.spec.podPlacementPolicy + +`spec.podTemplate.spec.podPlacementPolicy` is an optional field. This can be used to provide the reference of the `podPlacementPolicy`. `name` of the podPlacementPolicy is referred under this attribute. This will be used by our Petset controller to place the db pods throughout the region, zone & nodes according to the policy. It utilizes kubernetes affinity & podTopologySpreadContraints feature to do so. +```yaml +spec: + podPlacementPolicy: + name: default +``` + + + +#### spec.podTemplate.spec.containers + +The `spec.podTemplate.spec.containers` can be used to provide the list containers and their configurations for to the database pod. some of the fields are described below, + +##### spec.podTemplate.spec.containers[].name +The `spec.podTemplate.spec.containers[].name` field used to specify the name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + +##### spec.podTemplate.spec.containers[].args +`spec.podTemplate.spec.containers[].args` is an optional field. This can be used to provide additional arguments to database installation. + +##### spec.podTemplate.spec.containers[].env + +`spec.podTemplate.spec.containers[].env` is an optional field that specifies the environment variables to pass to the MySQL docker image. To know about supported environment variables, please visit [here](https://hub.docker.com/_/mysql/). + +Note that, KubeDB does not allow `MYSQL_ROOT_PASSWORD`, `MYSQL_ALLOW_EMPTY_PASSWORD`, `MYSQL_RANDOM_ROOT_PASSWORD`, and `MYSQL_ONETIME_PASSWORD` environment variables to set in `spec.env`. If you want to set the root password, please use `spec.authSecret` instead described earlier. + +If you try to set any of the forbidden environment variables i.e. `MYSQL_ROOT_PASSWORD` in MySQL crd, Kubed operator will reject the request with the following error, + +```ini +Error from server (Forbidden): error when creating "./mysql.yaml": admission webhook "mysql.validators.kubedb.com" denied the request: environment variable MYSQL_ROOT_PASSWORD is forbidden to use in MySQL spec +``` + +Also, note that KubeDB does not allow to update the environment variables as updating them does not have any effect once the database is created. If you try to update environment variables, KubeDB operator will reject the request with the following error, + +```ini +Error from server (BadRequest): error when applying patch: +... 
+for: "./mysql.yaml": admission webhook "mysql.validators.kubedb.com" denied the request: precondition failed for: +...At least one of the following was changed: + apiVersion + kind + name + namespace + spec.authSecret + spec.init + spec.storageType + spec.storage + spec.podTemplate.spec.nodeSelector + spec.podTemplate.spec.env +``` + +##### spec.podTemplate.spec.containers[].resources + +`spec.podTemplate.spec.containers[].resources` is an optional field. This can be used to request compute resources required by containers of the database pods. To learn more, visit [here](http://kubernetes.io/docs/user-guide/compute-resources/). + +#### spec.podTemplate.spec.imagePullSecrets + +`KubeDB` provides the flexibility of deploying MySQL database from a private Docker registry. `spec.podTemplate.spec.imagePullSecrets` is an optional field that points to secrets to be used for pulling docker image if you are using a private docker registry. To learn how to deploy MySQL from a private registry, please visit [here](/docs/guides/mysql/private-registry/index.md). + +#### spec.podTemplate.spec.nodeSelector + +`spec.podTemplate.spec.nodeSelector` is an optional field that specifies a map of key-value pairs. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). To learn more, see [here](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) . + +#### spec.podTemplate.spec.serviceAccountName + + `serviceAccountName` is an optional field supported by KubeDB Operator (version 0.13.0 and higher) that can be used to specify a custom service account to fine-tune role-based access control. + + If this field is left empty, the KubeDB operator will create a service account name matching MySQL crd name. Role and RoleBinding that provide necessary access permissions will also be generated automatically for this service account. + + If a service account name is given, but there's no existing service account by that name, the KubeDB operator will create one, and Role and RoleBinding that provide necessary access permissions will also be generated for this service account. + + If a service account name is given, and there's an existing service account by that name, the KubeDB operator will use that existing service account. Since this service account is not managed by KubeDB, users are responsible for providing necessary access permissions manually. Follow the guide [here](/docs/guides/mysql/custom-rbac/index.md) to grant necessary permissions in this scenario. + +### spec.serviceTemplates + +You can also provide a template for the services created by KubeDB operator for MySQL database through `spec.serviceTemplates`. This will allow you to set the type and other properties of the services. + +KubeDB allows following fields to set in `spec.serviceTemplates`: + +- metadata: + - annotations +- spec: + - type + - ports + - clusterIP + - externalIPs + - loadBalancerIP + - loadBalancerSourceRanges + - externalTrafficPolicy + - healthCheckNodePort + - sessionAffinityConfig + +See [here](https://github.com/kmodules/offshoot-api/blob/kubernetes-1.16.3/api/v1/types.go#L163) to understand these fields in detail. + +### spec.halted + +`spec.halted` is an optional field. This field will be used to halt the kubeDB operator. When you set `spec.halted` to `true`, the KubeDB operator doesn't perform any operation on `MySQL` object. + +Suppose you want to delete the `MySQL` resources(`PetSet`, `Service` etc.) 
except `MySQL` object, `PVCs` and `Secret` then you need to set `spec.halted` to `true`. If you set `spec.halted` to `true` then the `deletionPolicy` in `MySQL` object will be set `Halt` by-default. + +### spec.deletionPolicy + +`deletionPolicy` gives flexibility whether to `nullify`(reject) the delete operation of `MySQL` crd or which resources KubeDB should keep or delete when you delete `MySQL` crd. KubeDB provides the following four termination policies: + +- DoNotTerminate +- Halt +- Delete (`Default`) +- WipeOut + +When `deletionPolicy` is `DoNotTerminate`, KubeDB takes advantage of `ValidationWebhook` feature in Kubernetes 1.9.0 or later clusters to implement `DoNotTerminate` feature. If admission webhook is enabled, `DoNotTerminate` prevents users from deleting the database as long as the `spec.deletionPolicy` is set to `DoNotTerminate`. + +Following table show what KubeDB does when you delete MySQL crd for different termination policies, + +| Behavior | DoNotTerminate | Halt | Delete | WipeOut | +| ----------------------------------- | :------------: | :------: | :------: | :------: | +| 1. Block Delete operation | ✓ | ✗ | ✗ | ✗ | +| 2. Delete PetSet | ✗ | ✓ | ✓ | ✓ | +| 3. Delete Services | ✗ | ✓ | ✓ | ✓ | +| 4. Delete PVCs | ✗ | ✗ | ✓ | ✓ | +| 5. Delete Secrets | ✗ | ✗ | ✗ | ✓ | +| 6. Delete Snapshots | ✗ | ✗ | ✗ | ✓ | + +If you don't specify `spec.deletionPolicy` KubeDB uses `Delete` termination policy by default. + +> For more details you can visit [here](https://appscode.com/blog/post/deletion-policy/) + +## Next Steps + +- Learn how to use KubeDB to run a MySQL database [here](/docs/guides/mysql/README.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/mysql/configuration/config-file/index.md b/docs/guides/mysql/configuration/config-file/index.md index 10b0649359..e329d0288e 100644 --- a/docs/guides/mysql/configuration/config-file/index.md +++ b/docs/guides/mysql/configuration/config-file/index.md @@ -39,7 +39,7 @@ KubeDB supports providing custom configuration for MySQL. This tutorial will sho MySQL allows to configure database via configuration file. The default configuration for MySQL can be found in `/etc/mysql/my.cnf` file. When MySQL starts, it will look for custom configuration file in `/etc/mysql/conf.d` directory. If configuration file exist, MySQL instance will use combined startup setting from both `/etc/mysql/my.cnf` and `*.cnf` files in `/etc/mysql/conf.d` directory. This custom configuration will overwrite the existing default one. To know more about configuring MySQL see [here](https://dev.mysql.com/doc/refman/8.0/en/server-configuration.html). -At first, you have to create a config file with `.cnf` extension with your desired configuration. Then you have to put this file into a [volume](https://kubernetes.io/docs/concepts/storage/volumes/). You have to specify this volume in `spec.configSecret` section while creating MySQL crd. KubeDB will mount this volume into `/etc/mysql/conf.d` directory of the database pod. +At first, you have to create a config file with `.cnf` extension with your desired configuration. Then you have to put this file into a [volume](https://kubernetes.io/docs/concepts/storage/volumes/). You have to specify this volume in `spec.configuration` section while creating MySQL crd. KubeDB will mount this volume into `/etc/mysql/conf.d` directory of the database pod. 
In this tutorial, we will configure [max_connections](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_connections) and [read_buffer_size](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_read_buffer_size) via a custom config file. We will use configMap as volume source. @@ -87,7 +87,7 @@ type: Opaque ``` -Now, create MySQL crd specifying `spec.configSecret` field. +Now, create MySQL crd specifying `spec.configuration` field. ```bash $ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/mysql/configuration/config-file/yamls/mysql-custom.yaml @@ -104,8 +104,8 @@ metadata: namespace: demo spec: version: "9.1.0" - configSecret: - name: my-configuration + configuration: + secretName: my-configuration storage: storageClassName: "standard" accessModes: diff --git a/docs/guides/mysql/configuration/config-file/index.md.bak b/docs/guides/mysql/configuration/config-file/index.md.bak new file mode 100644 index 0000000000..2459e5dfa7 --- /dev/null +++ b/docs/guides/mysql/configuration/config-file/index.md.bak @@ -0,0 +1,231 @@ +--- +title: Run MySQL with Custom Configuration +menu: + docs_{{ .version }}: + identifier: guides-mysql-configuration-using-config-file + name: Config File + parent: guides-mysql-configuration + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Using Custom Configuration File + +KubeDB supports providing custom configuration for MySQL. This tutorial will show you how to use KubeDB to run a MySQL database with custom configuration. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). + +- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md). + +- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout this tutorial. + + ```bash + $ kubectl create ns demo + namespace/demo created + + $ kubectl get ns demo + NAME STATUS AGE + demo Active 5s + ``` + +> Note: YAML files used in this tutorial are stored in [docs/guides/mysql/configuration/config-file/yamls](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/guides/mysql/configuration/config-file/yamls) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). + +## Overview + +MySQL allows to configure database via configuration file. The default configuration for MySQL can be found in `/etc/mysql/my.cnf` file. When MySQL starts, it will look for custom configuration file in `/etc/mysql/conf.d` directory. If configuration file exist, MySQL instance will use combined startup setting from both `/etc/mysql/my.cnf` and `*.cnf` files in `/etc/mysql/conf.d` directory. This custom configuration will overwrite the existing default one. To know more about configuring MySQL see [here](https://dev.mysql.com/doc/refman/8.0/en/server-configuration.html). + +At first, you have to create a config file with `.cnf` extension with your desired configuration. Then you have to put this file into a [volume](https://kubernetes.io/docs/concepts/storage/volumes/). You have to specify this volume in `spec.configSecret` section while creating MySQL crd. 
KubeDB will mount this volume into `/etc/mysql/conf.d` directory of the database pod. + +In this tutorial, we will configure [max_connections](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_connections) and [read_buffer_size](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_read_buffer_size) via a custom config file. We will use configMap as volume source. + +## Custom Configuration + +At first, let's create `my-config.cnf` file setting `max_connections` and `read_buffer_size` parameters. + +```bash +cat < my-config.cnf +[mysqld] +max_connections = 200 +read_buffer_size = 1048576 +EOF + +$ cat my-config.cnf +[mysqld] +max_connections = 200 +read_buffer_size = 1048576 +``` + +Here, `read_buffer_size` is set to 1MB in bytes. + +Now, create a secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo my-configuration --from-file=./my-config.cnf +configmap/my-configuration created +``` + +Verify the secret has the configuration file. + +```yaml +$ kubectl get secret -n demo my-configuration -o yaml +apiVersion: v1 +data: + my-config.cnf: W215c3FsZF0KbWF4X2Nvbm5lY3Rpb25zID0gMjAwCnJlYWRfYnVmZmVyX3NpemUgPSAxMDQ4NTc2Cg== +kind: Secret +metadata: + creationTimestamp: "2022-06-28T13:20:42Z" + name: my-configuration + namespace: demo + resourceVersion: "1601408" + uid: 82e1a722-d80f-448e-89b5-c64de81ed262 +type: Opaque + +``` + +Now, create MySQL crd specifying `spec.configSecret` field. + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/mysql/configuration/config-file/yamls/mysql-custom.yaml +mysql.kubedb.com/custom-mysql created +``` + +Below is the YAML for the MySQL crd we just created. + +```yaml +apiVersion: kubedb.com/v1 +kind: MySQL +metadata: + name: custom-mysql + namespace: demo +spec: + version: "9.1.0" + configuration: + secretName: my-configuration + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` + +Now, wait a few minutes. KubeDB operator will create necessary PVC, petset, services, secret etc. If everything goes well, we will see that a pod with the name `custom-mysql-0` has been created. + +Check that the petset's pod is running + +```bash +$ kubectl get pod -n demo +NAME READY STATUS RESTARTS AGE +custom-mysql-0 1/1 Running 0 44s +``` + +Check the pod's log to see if the database is ready + +```bash +$ kubectl logs -f -n demo custom-mysql-0 +2022-06-28 13:22:10+00:00 [Note] [Entrypoint]: Entrypoint script for MySQL Server 9.1.0-1debian10 started. +2022-06-28 13:22:10+00:00 [Note] [Entrypoint]: Switching to dedicated user 'mysql' +.... + +2022-06-28 13:22:20+00:00 [Note] [Entrypoint]: Database files initialized +2022-06-28 13:22:20+00:00 [Note] [Entrypoint]: Starting temporary server +2022-06-28T13:22:20.233556Z 0 [System] [MY-010116] [Server] /usr/sbin/mysqld (mysqld 9.1.0) starting as process 92 +2022-06-28T13:22:20.252075Z 1 [System] [MY-013576] [InnoDB] InnoDB initialization has started. +2022-06-28T13:22:20.543772Z 1 [System] [MY-013577] [InnoDB] InnoDB initialization has ended. +... +2022-06-28 13:22:22+00:00 [Note] [Entrypoint]: Stopping temporary server +2022-06-28T13:22:22.354537Z 10 [System] [MY-013172] [Server] Received SHUTDOWN from user root. Shutting down mysqld (Version: 9.1.0). +2022-06-28T13:22:24.495121Z 0 [System] [MY-010910] [Server] /usr/sbin/mysqld: Shutdown complete (mysqld 9.1.0) MySQL Community Server - GPL. 
+2022-06-28 13:22:25+00:00 [Note] [Entrypoint]: Temporary server stopped + +2022-06-28 13:22:25+00:00 [Note] [Entrypoint]: MySQL init process done. Ready for start up. + +.... +2022-06-28T13:22:26.064259Z 0 [Warning] [MY-011810] [Server] Insecure configuration for --pid-file: Location '/var/run/mysqld' in the path is accessible to all OS users. Consider choosing a different directory. +2022-06-28T13:22:26.076352Z 0 [System] [MY-011323] [Server] X Plugin ready for connections. Bind-address: '::' port: 33060, socket: /var/run/mysqld/mysqlx.sock +2022-06-28T13:22:26.076407Z 0 [System] [MY-010931] [Server] /usr/sbin/mysqld: ready for connections. Version: '9.1.0' socket: '/var/run/mysqld/mysqld.sock' port: 3306 MySQL Community Server - GPL. + +.... +``` + +Once we see `[Note] /usr/sbin/mysqld: ready for connections.` in the log, the database is ready. + +Now, we will check if the database has started with the custom configuration we have provided. + +First, deploy [phpMyAdmin](https://hub.docker.com/r/phpmyadmin/phpmyadmin/) to connect with the MySQL database we have just created. + +```bash + $ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/guides/mysql/configuration/config-file/yamls/phpmyadmin.yaml +deployment.extensions/myadmin created +service/myadmin created +``` + +Then, open your browser and go to the following URL: _http://{node-ip}:{myadmin-svc-nodeport}_. For kind cluster, you can get this URL by running the following command: + +```bash +$ kubectl get svc -n demo myadmin -o json | jq '.spec.ports[].nodePort' +30942 + +$ kubectl get node -o json | jq '.items[].status.addresses[].address' +"172.18.0.3" +"kind-control-plane" +"172.18.0.4" +"kind-worker" +"172.18.0.2" +"kind-worker2" + +# expected url will be: +url: http://172.18.0.4:30942 +``` + +Now, let's connect to the database from the phpMyAdmin dashboard using the database pod IP and MySQL user password. + +```bash +$ kubectl get pods custom-mysql-0 -n demo -o yaml | grep IP + hostIP: 10.0.2.15 + podIP: 172.17.0.6 + +$ kubectl get secrets -n demo custom-mysql-auth -o jsonpath='{.data.\user}' | base64 -d +root + +$ kubectl get secrets -n demo custom-mysql-auth -o jsonpath='{.data.\password}' | base64 -d +MLO5_fPVKcqPiEu9 +``` + +Once, you have connected to the database with phpMyAdmin go to **Variables** tab and search for `max_connections` and `read_buffer_size`. Here are some screenshot showing those configured variables. +![max_connections](/docs/images/mysql/max_connection.png) + +![read_buffer_size](/docs/images/mysql/read_buffer_size.png) + +## Cleaning up + +To cleanup the Kubernetes resources created by this tutorial, run: + +```bash +kubectl patch -n demo my/custom-mysql -p '{"spec":{"deletionPolicy":"WipeOut"}}' --type="merge" +kubectl delete -n demo my/custom-mysql + +kubectl delete deployment -n demo myadmin +kubectl delete service -n demo myadmin + +kubectl delete ns demo +``` + +If you would like to uninstall KubeDB operator, please follow the steps [here](/docs/setup/README.md). + +## Next Steps + +- [Quickstart MySQL](/docs/guides/mysql/quickstart/index.md) with KubeDB Operator. +- Initialize [MySQL with Script](/docs/guides/mysql/initialization/using_script.md). +- Monitor your MySQL database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/mysql/monitoring/prometheus-operator/index.md). +- Monitor your MySQL database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/mysql/monitoring/builtin-prometheus/index.md). 
+- Use [private Docker registry](/docs/guides/mysql/private-registry/index.md) to deploy MySQL with KubeDB. +- Use [kubedb cli](/docs/guides/mysql/cli/index.md) to manage databases like kubectl for Kubernetes. +- Detail concepts of [MySQL object](/docs/guides/mysql/concepts/database/index.md). +- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/mysql/configuration/config-file/yamls/mysql-custom.yaml b/docs/guides/mysql/configuration/config-file/yamls/mysql-custom.yaml index 6ffa1df958..5e438f2a1b 100644 --- a/docs/guides/mysql/configuration/config-file/yamls/mysql-custom.yaml +++ b/docs/guides/mysql/configuration/config-file/yamls/mysql-custom.yaml @@ -5,8 +5,8 @@ metadata: namespace: demo spec: version: "9.1.0" - configSecret: - name: my-configuration + configuration: + secretName: my-configuration storage: storageClassName: "standard" accessModes: diff --git a/docs/guides/mysql/configuration/config-file/yamls/mysql-custom.yaml.bak b/docs/guides/mysql/configuration/config-file/yamls/mysql-custom.yaml.bak new file mode 100644 index 0000000000..6ffa1df958 --- /dev/null +++ b/docs/guides/mysql/configuration/config-file/yamls/mysql-custom.yaml.bak @@ -0,0 +1,16 @@ +apiVersion: kubedb.com/v1 +kind: MySQL +metadata: + name: custom-mysql + namespace: demo +spec: + version: "9.1.0" + configSecret: + name: my-configuration + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/docs/guides/mysql/reconfigure/reconfigure-steps/index.md b/docs/guides/mysql/reconfigure/reconfigure-steps/index.md index d7e2fe5cf7..2317dbb200 100644 --- a/docs/guides/mysql/reconfigure/reconfigure-steps/index.md +++ b/docs/guides/mysql/reconfigure/reconfigure-steps/index.md @@ -60,7 +60,7 @@ $ kubectl create secret generic -n demo my-configuration --from-file=./my-config secret/my-configuration created ``` -In this section, we are going to create a MySQL object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `MySQL` CR that we are going to create, +In this section, we are going to create a MySQL object specifying `spec.configuration` field to apply this custom configuration. Below is the YAML of the `MySQL` CR that we are going to create,