diff --git a/internal/controller/clickhouse/config.go b/internal/controller/clickhouse/config.go index d6eee8d..842493a 100644 --- a/internal/controller/clickhouse/config.go +++ b/internal/controller/clickhouse/config.go @@ -19,7 +19,9 @@ import ( var ( //go:embed templates/base.yaml.tmpl - baseConfigTemplateStr string + baseTemplateStr string + //go:embed templates/named_collections.yaml.tmpl + namedCollectionsTemplateStr string //go:embed templates/network.yaml.tmpl networkConfigTemplateStr string //go:embed templates/log_tables.yaml.tmpl @@ -33,17 +35,51 @@ var ( ) func init() { + templateFuncs := template.FuncMap{ + "yaml": func(v any) (string, error) { + data, err := yaml.Marshal(v) + return string(data), err + }, + "indent": func(countRaw any, strRaw any) (string, error) { + count, ok := countRaw.(int) + if !ok { + return "", fmt.Errorf("indent: expected int for indentation value, got %T", countRaw) + } + + str, ok := strRaw.(string) + if !ok { + return "", fmt.Errorf("indent: expected string for content value, got %T", strRaw) + } + + builder := strings.Builder{} + indentation := strings.Repeat(" ", count) + + for line := range strings.SplitSeq(str, "\n") { + if _, err := builder.WriteString(fmt.Sprintf("%s%s\n", indentation, line)); err != nil { + return "", fmt.Errorf("failed to write indented line: %w", err) + } + } + + return builder.String(), nil + }, + } + + baseTmpl := template.Must(template.New("").Funcs(templateFuncs).Parse(baseTemplateStr)) + + generators = append(generators, &templateConfigGenerator{ + path: ConfigPath, + filename: ConfigFileName, + template: baseTmpl, + generator: executeBaseConfig, + }) + for _, templateSpec := range []struct { Path string Filename string Raw string Generator configGeneratorFunc + Enabled func(r *clickhouseReconciler) bool }{{ - Path: ConfigPath, - Filename: ConfigFileName, - Raw: baseConfigTemplateStr, - Generator: baseConfigGenerator, - }, { Path: path.Join(ConfigPath, ConfigDPath), Filename: 
"00-network.yaml", Raw: networkConfigTemplateStr, @@ -53,6 +89,14 @@ func init() { Filename: "00-logs-tables.yaml", Raw: logTablesConfigTemplateStr, Generator: logTablesConfigGenerator, + }, { + Path: path.Join(ConfigPath, ConfigDPath), + Filename: "00-named-collections.yaml", + Raw: namedCollectionsTemplateStr, + Generator: namedCollectionsConfigGenerator, + Enabled: func(r *clickhouseReconciler) bool { + return versionAtLeast(r.Cluster.Status.Version, MinVersionNamedCollections) + }, }, { Path: ConfigPath, Filename: UsersFileName, @@ -64,43 +108,14 @@ func init() { Raw: clientConfigTemplateStr, Generator: clientConfigGenerator, }} { - tmpl := template.New("").Funcs(template.FuncMap{ - "yaml": func(v any) (string, error) { - data, err := yaml.Marshal(v) - return string(data), err - }, - "indent": func(countRaw any, strRaw any) (string, error) { - count, ok := countRaw.(int) - if !ok { - return "", fmt.Errorf("indent: expected int for indentation value, got %T", countRaw) - } - - str, ok := strRaw.(string) - if !ok { - return "", fmt.Errorf("indent: expected string for content value, got %T", strRaw) - } - - builder := strings.Builder{} - indentation := strings.Repeat(" ", count) - - for line := range strings.SplitSeq(str, "\n") { - if _, err := builder.WriteString(fmt.Sprintf("%s%s\n", indentation, line)); err != nil { - return "", fmt.Errorf("failed to write indented line: %w", err) - } - } - - return builder.String(), nil - }, - }) - if _, err := tmpl.Parse(templateSpec.Raw); err != nil { - panic(fmt.Sprintf("failed to parse template %s: %v", templateSpec.Filename, err)) - } + tmpl := template.Must(template.New("").Funcs(templateFuncs).Parse(templateSpec.Raw)) generators = append(generators, &templateConfigGenerator{ filename: templateSpec.Filename, path: templateSpec.Path, template: tmpl, generator: templateSpec.Generator, + enabled: templateSpec.Enabled, }) } @@ -108,16 +123,12 @@ func init() { &extraConfigGenerator{ Name: ExtraConfigFileName, ConfigSubPath: 
ConfigDPath, - Getter: func(r *clickhouseReconciler) []byte { - return r.Cluster.Spec.Settings.ExtraConfig.Raw - }, + Getter: func(r *clickhouseReconciler) []byte { return r.Cluster.Spec.Settings.ExtraConfig.Raw }, }, &extraConfigGenerator{ Name: ExtraUsersConfigFileName, ConfigSubPath: UsersDPath, - Getter: func(r *clickhouseReconciler) []byte { - return r.Cluster.Spec.Settings.ExtraUsersConfig.Raw - }, + Getter: func(r *clickhouseReconciler) []byte { return r.Cluster.Spec.Settings.ExtraUsersConfig.Raw }, }) } @@ -125,7 +136,7 @@ type configGenerator interface { Filename() string Path() string ConfigKey() string - Exists(r *clickhouseReconciler) bool + Enabled(r *clickhouseReconciler) bool Generate(r *clickhouseReconciler, id v1.ClickHouseReplicaID) (string, error) } @@ -134,6 +145,7 @@ type templateConfigGenerator struct { path string template *template.Template generator configGeneratorFunc + enabled func(r *clickhouseReconciler) bool } func (g *templateConfigGenerator) Filename() string { @@ -148,8 +160,8 @@ func (g *templateConfigGenerator) ConfigKey() string { return controllerutil.PathToName(path.Join(g.path, g.filename)) } -func (g *templateConfigGenerator) Exists(*clickhouseReconciler) bool { - return true +func (g *templateConfigGenerator) Enabled(r *clickhouseReconciler) bool { + return g.enabled == nil || g.enabled(r) } func (g *templateConfigGenerator) Generate(r *clickhouseReconciler, id v1.ClickHouseReplicaID) (string, error) { @@ -194,7 +206,7 @@ type keeperNode struct { Secure bool } -func baseConfigGenerator(tmpl *template.Template, r *clickhouseReconciler, id v1.ClickHouseReplicaID) (string, error) { +func executeBaseConfig(tmpl *template.Template, r *clickhouseReconciler, id v1.ClickHouseReplicaID) (string, error) { keeperNodes := make([]keeperNode, 0, r.keeper.Replicas()) for _, host := range r.keeper.Hostnames() { if r.keeper.Spec.Settings.TLS.Enabled { @@ -389,6 +401,25 @@ func clientConfigGenerator(tmpl *template.Template, r 
*clickhouseReconciler, _ v return builder.String(), nil } +type namedCollectionsConfigParams struct { + NamedCollectionsKeyEnv string + NamedCollectionsPath string +} + +func namedCollectionsConfigGenerator(tmpl *template.Template, _ *clickhouseReconciler, _ v1.ClickHouseReplicaID) (string, error) { + params := namedCollectionsConfigParams{ + NamedCollectionsKeyEnv: EnvNamedCollectionsKey, + NamedCollectionsPath: KeeperPathNamedCollections, + } + + builder := strings.Builder{} + if err := tmpl.Execute(&builder, params); err != nil { + return "", fmt.Errorf("template named collections config: %w", err) + } + + return builder.String(), nil +} + type extraConfigGenerator struct { Name string ConfigSubPath string @@ -407,12 +438,12 @@ func (g *extraConfigGenerator) ConfigKey() string { return g.Name } -func (g *extraConfigGenerator) Exists(r *clickhouseReconciler) bool { +func (g *extraConfigGenerator) Enabled(r *clickhouseReconciler) bool { return len(g.Getter(r)) > 0 } func (g *extraConfigGenerator) Generate(r *clickhouseReconciler, _ v1.ClickHouseReplicaID) (string, error) { - if !g.Exists(r) { + if !g.Enabled(r) { return "", errors.New("extra config generator called, but no extra config provided") } diff --git a/internal/controller/clickhouse/config_test.go b/internal/controller/clickhouse/config_test.go index 7f9008c..a8ae130 100644 --- a/internal/controller/clickhouse/config_test.go +++ b/internal/controller/clickhouse/config_test.go @@ -31,6 +31,9 @@ var _ = Describe("ConfigGenerator", func() { }, }, }, + Status: v1.ClickHouseClusterStatus{ + Version: "25.12.1.1", + }, }, }, keeper: v1.KeeperCluster{ @@ -42,7 +45,7 @@ var _ = Describe("ConfigGenerator", func() { for _, generator := range generators { It("should generate config: "+generator.Filename(), func() { - Expect(generator.Exists(&ctx)).To(BeTrue()) + Expect(generator.Enabled(&ctx)).To(BeTrue()) data, err := generator.Generate(&ctx, v1.ClickHouseReplicaID{}) Expect(err).ToNot(HaveOccurred()) diff --git 
a/internal/controller/clickhouse/constants.go b/internal/controller/clickhouse/constants.go index c049e2e..0c62439 100644 --- a/internal/controller/clickhouse/constants.go +++ b/internal/controller/clickhouse/constants.go @@ -1,9 +1,18 @@ package clickhouse import ( + "fmt" + "github.com/blang/semver/v4" + + v1 "github.com/ClickHouse/clickhouse-operator/api/v1alpha1" + "github.com/ClickHouse/clickhouse-operator/internal/controllerutil" + "github.com/ClickHouse/clickhouse-operator/internal/upgrade" ) +// MinVersionNamedCollections is the minimum ClickHouse version that supports keeper_encrypted for named collections. +var MinVersionNamedCollections = upgrade.ClickHouseVersion{Major: 25, Minor: 12} //nolint:mnd + const ( PortManagement = 9001 PortNative = 9000 @@ -32,10 +41,11 @@ const ( LogPath = "/var/log/clickhouse-server/" - DefaultClusterName = "default" - KeeperPathUsers = "/clickhouse/access" - KeeperPathUDF = "/clickhouse/user_defined" - KeeperPathDistributedDDL = "/clickhouse/task_queue/ddl" + DefaultClusterName = "default" + KeeperPathUsers = "/clickhouse/access" + KeeperPathUDF = "/clickhouse/user_defined" + KeeperPathDistributedDDL = "/clickhouse/task_queue/ddl" + KeeperPathNamedCollections = "/clickhouse/named_collections" ContainerName = "clickhouse-server" DefaultRevisionHistory = 10 @@ -49,27 +59,64 @@ const ( EnvDefaultUserPassword = "CLICKHOUSE_DEFAULT_USER_PASSWORD" EnvKeeperIdentity = "CLICKHOUSE_KEEPER_IDENTITY" EnvClusterSecret = "CLICKHOUSE_CLUSTER_SECRET" + EnvNamedCollectionsKey = "CLICKHOUSE_NAMED_COLLECTIONS_KEY" SecretKeyInterserverPassword = "interserver-password" SecretKeyManagementPassword = "management-password" SecretKeyKeeperIdentity = "keeper-identity" SecretKeyClusterSecret = "cluster-secret" + SecretKeyNamedCollectionsKey = "named-collections-key" + + // NamedCollectionsKeyByteLen is the AES-128 key size in bytes (16 bytes = 32 hex chars). 
+ NamedCollectionsKeyByteLen = 16 ) +// versionAtLeast returns true if the actual version string is >= min. +// Returns false for empty, unparsable, or unknown version strings. +func versionAtLeast(actual string, minVersion upgrade.ClickHouseVersion) bool { + v, err := upgrade.ParseBareVersion(actual) + if err != nil { + return false + } + + return v.Compare(minVersion) >= 0 +} + +type secretSpec struct { + Key string + Env string + Format string + Generate func() any + Enabled func(cluster *v1.ClickHouseCluster) bool +} + +func (s *secretSpec) generate() []byte { + var arg any + if s.Generate != nil { + arg = s.Generate() + } else { + arg = controllerutil.GeneratePassword() + } + + return fmt.Appendf(nil, s.Format, arg) +} + +func (s *secretSpec) enabled(cluster *v1.ClickHouseCluster) bool { + return s.Enabled == nil || s.Enabled(cluster) +} + var ( breakingStatefulSetVersion, _ = semver.Parse("0.0.1") - secretsToGenerate = map[string]string{ - SecretKeyInterserverPassword: "%s", - SecretKeyManagementPassword: "%s", - SecretKeyKeeperIdentity: "clickhouse:%s", - SecretKeyClusterSecret: "%s", - } - secretsToEnvMapping = []struct { - Key string - Env string - }{ - {Key: SecretKeyInterserverPassword, Env: EnvInterserverPassword}, - {Key: SecretKeyKeeperIdentity, Env: EnvKeeperIdentity}, - {Key: SecretKeyClusterSecret, Env: EnvClusterSecret}, + clusterSecrets = []secretSpec{ + {Key: SecretKeyInterserverPassword, Env: EnvInterserverPassword, Format: "%s"}, + {Key: SecretKeyManagementPassword, Format: "%s"}, + {Key: SecretKeyKeeperIdentity, Env: EnvKeeperIdentity, Format: "clickhouse:%s"}, + {Key: SecretKeyClusterSecret, Env: EnvClusterSecret, Format: "%s"}, + {Key: SecretKeyNamedCollectionsKey, Env: EnvNamedCollectionsKey, Format: "%x", + Generate: func() any { return controllerutil.GenerateRandomBytes(NamedCollectionsKeyByteLen) }, + Enabled: func(cluster *v1.ClickHouseCluster) bool { + return versionAtLeast(cluster.Status.Version, MinVersionNamedCollections) + }, + 
}, } ) diff --git a/internal/controller/clickhouse/controller_test.go b/internal/controller/clickhouse/controller_test.go index d982830..87693ce 100644 --- a/internal/controller/clickhouse/controller_test.go +++ b/internal/controller/clickhouse/controller_test.go @@ -59,6 +59,9 @@ var _ = When("reconciling ClickHouseCluster", Ordered, func() { "test-annotation": "test-val", }, }, + Status: v1.ClickHouseClusterStatus{ + Version: "26.1.1.1", + }, } ) @@ -88,6 +91,8 @@ var _ = When("reconciling ClickHouseCluster", Ordered, func() { Status: metav1.ConditionTrue, Reason: string(v1.KeeperConditionReasonStandaloneReady), }) + // Unblocks CommonResources (secrets/commander); version-gated ClickHouse config uses ClickHouseCluster.status.version. + keeper.Status.Version = "26.1.1.1" Expect(suite.Client.Status().Update(ctx, keeper)).To(Succeed()) }) @@ -225,9 +230,13 @@ var _ = When("reconciling ClickHouseCluster", Ordered, func() { }) It("should generate all secret values", func() { - for key := range secretsToGenerate { - Expect(secrets.Items[0].Data).To(HaveKey(key)) - Expect(secrets.Items[0].Data[key]).To(Not(BeEmpty())) + for _, spec := range clusterSecrets { + if spec.Enabled != nil { + continue + } + + Expect(secrets.Items[0].Data).To(HaveKey(spec.Key)) + Expect(secrets.Items[0].Data[spec.Key]).To(Not(BeEmpty())) } }) diff --git a/internal/controller/clickhouse/sync.go b/internal/controller/clickhouse/sync.go index 3789958..10b96b2 100644 --- a/internal/controller/clickhouse/sync.go +++ b/internal/controller/clickhouse/sync.go @@ -231,6 +231,19 @@ func (r *clickhouseReconciler) reconcileCommonResources(ctx context.Context, log } } + var keeperForVersion v1.KeeperCluster + if err := r.GetClient().Get(ctx, types.NamespacedName{ + Namespace: r.Cluster.Namespace, + Name: r.Cluster.Spec.KeeperClusterRef.Name, + }, &keeperForVersion); err != nil { + return nil, fmt.Errorf("get keeper cluster %q: %w", r.Cluster.Spec.KeeperClusterRef.Name, err) + } + + if 
keeperForVersion.Status.Version == "" { + log.Info("keeper version is not yet known, waiting") + return &ctrl.Result{RequeueAfter: chctrl.RequeueOnRefreshTimeout}, nil + } + getErr := r.GetClient().Get(ctx, types.NamespacedName{ Namespace: r.Cluster.Namespace, Name: r.Cluster.SecretName(), @@ -239,7 +252,7 @@ func (r *clickhouseReconciler) reconcileCommonResources(ctx context.Context, log return nil, fmt.Errorf("get ClickHouse cluster secret %q: %w", r.Cluster.SecretName(), getErr) } - isSecretUpdated := templateClusterSecrets(r.Cluster, &r.secret) + isSecretUpdated := templateClusterSecrets(r.Cluster, &r.secret, r.Cluster) if err := ctrl.SetControllerReference(r.Cluster, &r.secret, r.GetScheme()); err != nil { return nil, fmt.Errorf("set controller reference for cluster secret %q: %w", r.Cluster.SecretName(), err) } @@ -328,7 +341,9 @@ func (r *clickhouseReconciler) reconcileClusterRevisions(ctx context.Context, lo } r.versionProbe = probeResult - r.Cluster.Status.Version = r.versionProbe.Version + if probeResult.Version != "" { + r.Cluster.Status.Version = probeResult.Version + } return nil, nil } @@ -358,7 +373,7 @@ func (r *clickhouseReconciler) reconcileActiveReplicaStatus(ctx context.Context, pinged := false version := "" - if !hasError { + if !hasError && r.commander != nil { ctx, cancel := context.WithTimeout(ctx, chctrl.LoadReplicaStateTimeout) defer cancel() @@ -397,6 +412,10 @@ func (r *clickhouseReconciler) reconcileActiveReplicaStatus(ctx context.Context, } } + if r.commander == nil { + return &ctrl.Result{RequeueAfter: chctrl.RequeueOnRefreshTimeout}, nil + } + return nil, nil } @@ -479,6 +498,11 @@ func (r *clickhouseReconciler) reconcileReplicateSchema(ctx context.Context, log return nil, nil } + if r.commander == nil { + log.Info("commander not initialized, skipping database replication") + return &ctrl.Result{RequeueAfter: chctrl.RequeueOnRefreshTimeout}, nil + } + hasNotSynced := false replicaDatabases := ctrlutil.ExecuteParallel(readyReplicas, 
func(id v1.ClickHouseReplicaID) (v1.ClickHouseReplicaID, map[string]databaseDescriptor, error) { if err := r.commander.EnsureDefaultDatabaseEngine(ctx, log, id); err != nil { @@ -600,6 +624,11 @@ func (r *clickhouseReconciler) reconcileCleanUp(ctx context.Context, log ctrluti replicasToRemove[id.ShardID][id.Index] = state } + if len(replicasToRemove) > 0 && r.commander == nil { + log.Info("commander not initialized, deferring scale-down cleanup that requires shard sync") + return &ctrl.Result{RequeueAfter: chctrl.RequeueOnRefreshTimeout}, nil + } + shardsInSync := ctrlutil.ExecuteParallel(slices.Collect(maps.Keys(replicasToRemove)), func(shardID int32) (int32, struct{}, error) { log.Info("Pre scale-down shard sync", "shard_id", shardID) @@ -647,7 +676,7 @@ func (r *clickhouseReconciler) reconcileCleanUp(ctx context.Context, log ctrluti } } - if r.Cluster.Spec.Settings.EnableDatabaseSync { + if r.Cluster.Spec.Settings.EnableDatabaseSync && r.commander != nil { if err := r.commander.CleanupDatabaseReplicas(ctx, log, runningStaleReplicas); err != nil { log.Warn("failed to cleanup database replicas", "error", err) diff --git a/internal/controller/clickhouse/templates.go b/internal/controller/clickhouse/templates.go index 59eeb3d..a695280 100644 --- a/internal/controller/clickhouse/templates.go +++ b/internal/controller/clickhouse/templates.go @@ -104,7 +104,7 @@ func templatePodDisruptionBudget(cr *v1.ClickHouseCluster, shardID int32) *polic return pdb } -func templateClusterSecrets(cr *v1.ClickHouseCluster, secret *corev1.Secret) bool { +func templateClusterSecrets(cr *v1.ClickHouseCluster, secret *corev1.Secret, cluster *v1.ClickHouseCluster) bool { secret.Name = cr.SecretName() secret.Namespace = cr.Namespace secret.Type = corev1.SecretTypeOpaque @@ -130,15 +130,29 @@ func templateClusterSecrets(cr *v1.ClickHouseCluster, secret *corev1.Secret) boo secret.Data = map[string][]byte{} } - for key, template := range secretsToGenerate { - if _, ok := secret.Data[key]; !ok 
{ + knownKeys := make(map[string]struct{}, len(clusterSecrets)) + for i := range clusterSecrets { + spec := &clusterSecrets[i] + knownKeys[spec.Key] = struct{}{} + + if !spec.enabled(cluster) { + if _, ok := secret.Data[spec.Key]; ok { + changed = true + + delete(secret.Data, spec.Key) + } + + continue + } + + if _, ok := secret.Data[spec.Key]; !ok { changed = true - secret.Data[key] = fmt.Appendf(nil, template, controllerutil.GeneratePassword()) + secret.Data[spec.Key] = spec.generate() } } for key := range secret.Data { - if _, ok := secretsToGenerate[key]; !ok { + if _, ok := knownKeys[key]; !ok { changed = true delete(secret.Data, key) @@ -269,7 +283,7 @@ func templateStatefulSet(r *clickhouseReconciler, id v1.ClickHouseReplicaID) (*a func generateConfigForSingleReplica(r *clickhouseReconciler, id v1.ClickHouseReplicaID) (map[string]string, error) { configFiles := map[string]string{} for _, generator := range generators { - if !generator.Exists(r) { + if !generator.Enabled(r) { continue } @@ -292,7 +306,7 @@ func templatePodSpec(r *clickhouseReconciler, id v1.ClickHouseReplicaID) (corev1 return corev1.PodSpec{}, fmt.Errorf("build volumes: %w", err) } - container, err := templateContainer(cr, id, volumeMounts) + container, err := templateContainer(r, id, volumeMounts) if err != nil { return corev1.PodSpec{}, fmt.Errorf("template container: %w", err) } @@ -400,7 +414,8 @@ func templatePodSpec(r *clickhouseReconciler, id v1.ClickHouseReplicaID) (corev1 return podSpec, nil } -func templateContainer(cr *v1.ClickHouseCluster, id v1.ClickHouseReplicaID, volumeMounts []corev1.VolumeMount) (corev1.Container, error) { +func templateContainer(r *clickhouseReconciler, id v1.ClickHouseReplicaID, volumeMounts []corev1.VolumeMount) (corev1.Container, error) { + cr := r.Cluster containerTemplate := cr.Spec.ContainerTemplate.DeepCopy() protocols := buildProtocols(cr) @@ -471,15 +486,20 @@ func templateContainer(cr *v1.ClickHouseCluster, id v1.ClickHouseReplicaID, volu }, } - 
for _, secret := range secretsToEnvMapping { + for i := range clusterSecrets { + spec := &clusterSecrets[i] + if spec.Env == "" || !spec.enabled(r.Cluster) { + continue + } + container.Env = append(container.Env, corev1.EnvVar{ - Name: secret.Env, + Name: spec.Env, ValueFrom: &corev1.EnvVarSource{ SecretKeyRef: &corev1.SecretKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ Name: cr.SecretName(), }, - Key: secret.Key, + Key: spec.Key, }, }, }) @@ -631,7 +651,7 @@ func buildVolumes(r *clickhouseReconciler, id v1.ClickHouseReplicaID) ([]corev1. configVolumes := map[string]corev1.Volume{} for _, generator := range generators { - if !generator.Exists(r) { + if !generator.Enabled(r) { continue } diff --git a/internal/controller/clickhouse/templates/named_collections.yaml.tmpl b/internal/controller/clickhouse/templates/named_collections.yaml.tmpl new file mode 100644 index 0000000..e620540 --- /dev/null +++ b/internal/controller/clickhouse/templates/named_collections.yaml.tmpl @@ -0,0 +1,6 @@ +named_collections_storage: + type: keeper_encrypted + key_hex: + "@from_env": {{ .NamedCollectionsKeyEnv }} + algorithm: aes_128_ctr + path: {{ .NamedCollectionsPath }} diff --git a/internal/controllerutil/common.go b/internal/controllerutil/common.go index 792c6a6..a9e62bf 100644 --- a/internal/controllerutil/common.go +++ b/internal/controllerutil/common.go @@ -168,6 +168,16 @@ const ( length = 32 ) +// GenerateRandomBytes generates n cryptographically random bytes. +func GenerateRandomBytes(n int) []byte { + b := make([]byte, n) + if _, err := rand.Read(b); err != nil { + panic(fmt.Sprintf("read random source: %v", err)) + } + + return b +} + // GeneratePassword generates a random password of fixed length using a predefined alphabet. 
func GeneratePassword() string { password := make([]byte, length) diff --git a/internal/upgrade/checker.go b/internal/upgrade/checker.go index 3f2e4c6..1e3d954 100644 --- a/internal/upgrade/checker.go +++ b/internal/upgrade/checker.go @@ -68,6 +68,11 @@ func (ch ClickHouseVersion) Release() ClickHouseRelease { } } +// Compare returns -1, 0, or 1 comparing v to other. +func (ch ClickHouseVersion) Compare(other ClickHouseVersion) int { + return compareVersions(ch, other) +} + func compareVersions(a, b ClickHouseVersion) int { if c := cmp.Compare(a.Major, b.Major); c != 0 { return c diff --git a/test/e2e/clickhouse_e2e_test.go b/test/e2e/clickhouse_e2e_test.go index f406174..bade92b 100644 --- a/test/e2e/clickhouse_e2e_test.go +++ b/test/e2e/clickhouse_e2e_test.go @@ -5,6 +5,7 @@ import ( "fmt" "math" "math/rand/v2" + "path" "strconv" "strings" "time" @@ -112,6 +113,71 @@ var _ = Describe("ClickHouse controller", Label("clickhouse"), func() { Entry("scale up to 2 replicas", v1.ClickHouseClusterSpec{Replicas: ptr.To[int32](2)}), ) + It("should support named collections in Keeper with encryption", func(ctx context.Context) { + cr := v1.ClickHouseCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: fmt.Sprintf("named-colls-%d", rand.Uint32()), //nolint:gosec + }, + Spec: v1.ClickHouseClusterSpec{ + Replicas: ptr.To[int32](1), + ContainerTemplate: v1.ContainerTemplateSpec{ + Image: v1.ContainerImage{Tag: ClickHouseBaseVersion}, + }, + DataVolumeClaimSpec: &defaultStorage, + KeeperClusterRef: &corev1.LocalObjectReference{Name: keeper.Name}, + }, + } + + By("creating cluster CR") + Expect(k8sClient.Create(ctx, &cr)).To(Succeed()) + DeferCleanup(func(ctx context.Context) { + Expect(k8sClient.Delete(ctx, &cr)).To(Succeed()) + }) + WaitClickHouseUpdatedAndReady(ctx, &cr, time.Minute, false) + + By("verifying named collections secret key exists") + + var secret corev1.Secret + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Namespace: cr.Namespace, + 
Name: cr.SecretName(), + }, &secret)).To(Succeed()) + Expect(secret.Data).To(HaveKey(chctrl.SecretKeyNamedCollectionsKey)) + Expect(secret.Data[chctrl.SecretKeyNamedCollectionsKey]).NotTo(BeEmpty()) + + By("verifying named collections config exists") + + var configMap corev1.ConfigMap + + cfgName := cr.ConfigMapNameByReplicaID(v1.ClickHouseReplicaID{ShardID: 0, Index: 0}) + Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: cr.Namespace, Name: cfgName}, &configMap)).To(Succeed()) + + ncConfigKey := controllerutil.PathToName( + path.Join(chctrl.ConfigPath, chctrl.ConfigDPath, "00-named-collections.yaml"), + ) + Expect(configMap.Data).To(HaveKey(ncConfigKey)) + Expect(configMap.Data[ncConfigKey]).NotTo(BeEmpty()) + + By("connecting to cluster") + + chClient, err := testutil.NewClickHouseClient(ctx, config, &cr) + Expect(err).NotTo(HaveOccurred()) + + defer chClient.Close() + + By("creating named collection") + Expect(chClient.Exec(ctx, "CREATE NAMED COLLECTION e2e_test_named_coll AS test_key = 'test_value'")).To(Succeed()) + + By("verifying named collection exists") + + var name string + + query := "SELECT name FROM system.named_collections WHERE name = 'e2e_test_named_coll'" + Expect(chClient.QueryRow(ctx, query, &name)).To(Succeed()) + Expect(name).To(Equal("e2e_test_named_coll")) + }) + DescribeTable("ClickHouse cluster updates", func( ctx context.Context, baseReplicas int, diff --git a/test/e2e/compatibility_e2e_test.go b/test/e2e/compatibility_e2e_test.go index 22db98f..0aae310 100644 --- a/test/e2e/compatibility_e2e_test.go +++ b/test/e2e/compatibility_e2e_test.go @@ -2,6 +2,8 @@ package e2e import ( "context" + "fmt" + "math/rand/v2" "os" "strings" "time" @@ -33,10 +35,11 @@ var _ = Context("Compatibility", Label("compatibility"), func() { Expect(err).NotTo(HaveOccurred()) By("running on Kubernetes " + serverVersion.GitVersion) + suffix := rand.Uint32() //nolint:gosec keeper := v1.KeeperCluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: testNamespace, - 
Name: "compat-keeper-" + version, + Name: fmt.Sprintf("compat-keeper-%s-%d", version, suffix), }, Spec: v1.KeeperClusterSpec{ Replicas: ptr.To[int32](3), @@ -51,7 +54,7 @@ clickhouse := v1.ClickHouseCluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: testNamespace, - Name: "compat-ch-" + version, + Name: fmt.Sprintf("compat-ch-%s-%d", version, suffix), }, Spec: v1.ClickHouseClusterSpec{ Replicas: ptr.To[int32](3),