diff --git a/api/v1alpha1/catalog_types.go b/api/v1alpha1/catalog_types.go index 443292e40..8dd014380 100644 --- a/api/v1alpha1/catalog_types.go +++ b/api/v1alpha1/catalog_types.go @@ -8,6 +8,7 @@ import ( "time" sourcev1 "github.com/fluxcd/source-controller/api/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" greenhousemetav1alpha1 "github.com/cloudoperators/greenhouse/api/meta/v1alpha1" @@ -91,6 +92,19 @@ type CatalogOverrides struct { // Repository is the repository to override in the PluginDefinition .spec.helmChart.repository // +Optional Repository string `json:"repository,omitempty"` + // OptionsOverride are the option values to override in the PluginDefinition .spec.options[] + // +Optional + OptionsOverride []OptionsOverride `json:"optionsOverride,omitempty"` +} + +type OptionsOverride struct { + // Name is the name of the option value to override in the PluginDefinition + // +kubebuilder:validation:MinLength=1 + // +Required + Name string `json:"name"` + // Value is the value to set as Default in the PluginDefinition option + // +Required + Value *apiextensionsv1.JSON `json:"value"` } type GitRef struct { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 27c7fb570..feb839ebc 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -101,6 +101,13 @@ func (in *CatalogList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CatalogOverrides) DeepCopyInto(out *CatalogOverrides) { *out = *in + if in.OptionsOverride != nil { + in, out := &in.OptionsOverride, &out.OptionsOverride + *out = make([]OptionsOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogOverrides. 
@@ -134,7 +141,9 @@ func (in *CatalogSource) DeepCopyInto(out *CatalogSource) { if in.Overrides != nil { in, out := &in.Overrides, &out.Overrides *out = make([]CatalogOverrides, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } if in.Interval != nil { in, out := &in.Interval, &out.Interval @@ -824,6 +833,26 @@ func (in *OIDCConfig) DeepCopy() *OIDCConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionsOverride) DeepCopyInto(out *OptionsOverride) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionsOverride. +func (in *OptionsOverride) DeepCopy() *OptionsOverride { + if in == nil { + return nil + } + out := new(OptionsOverride) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Organization) DeepCopyInto(out *Organization) { *out = *in diff --git a/charts/manager/crds/greenhouse.sap_catalogs.yaml b/charts/manager/crds/greenhouse.sap_catalogs.yaml index 8af97450b..5f27fab2d 100755 --- a/charts/manager/crds/greenhouse.sap_catalogs.yaml +++ b/charts/manager/crds/greenhouse.sap_catalogs.yaml @@ -73,6 +73,25 @@ spec: to patch with an alias minLength: 1 type: string + optionsOverride: + description: OptionsOverride are the option values to + override in the PluginDefinition .spec.options[] + items: + properties: + name: + description: Name is the name of the option value + to override in the PluginDefinition + minLength: 1 + type: string + value: + description: Value is the value to set as Default + in the PluginDefinition option + x-kubernetes-preserve-unknown-fields: true + required: + - name + - value + type: object + type: array repository: description: Repository is the repository to override in the PluginDefinition .spec.helmChart.repository diff --git a/cmd/greenhouse/README.md b/cmd/greenhouse/README.md index ae63e2949..872f25012 100644 --- a/cmd/greenhouse/README.md +++ b/cmd/greenhouse/README.md @@ -20,4 +20,20 @@ If you are using `postgres` as the dex storage backend, you need to set the foll - `PG_PORT=` ex: `5432` (defaults to `5432` if not set) - `PG_USER=` ex: `postgres` (defaults to `postgres` if not set) - `PG_HOST=` ex: `localhost` (required) -- `PG_PASSWORD=` ex: `password` (required) \ No newline at end of file +- `PG_PASSWORD=` ex: `password` (required) + +### Running Catalog Controller Locally + +To run the Catalog Controller locally, you need to port-forward flux `source-watcher` SVC to your localhost. 
+ +Example command: + +```shell +kubectl -n flux-system port-forward svc/source-watcher 5050:80 +``` + +Then set the following environment variable when running the operator (IDE Debugger or Shell): + +```shell +ARTIFACT_DOMAIN=localhost:5050 +``` diff --git a/cmd/greenhouse/controllers.go b/cmd/greenhouse/controllers.go index 83699717d..ec037848b 100644 --- a/cmd/greenhouse/controllers.go +++ b/cmd/greenhouse/controllers.go @@ -36,9 +36,7 @@ var knownControllers = map[string]func(controllerName string, mgr ctrl.Manager) "plugin": startPluginReconciler, "pluginPreset": (&plugincontrollers.PluginPresetReconciler{}).SetupWithManager, - "catalog": (&catalog.CatalogReconciler{ - Log: ctrl.Log.WithName("controllers").WithName("catalogs"), - }).SetupWithManager, + "catalog": startCatalogReconciler, "pluginDefinition": (&plugindefinitioncontroller.PluginDefinitionReconciler{}).SetupWithManager, "clusterPluginDefinition": (&plugindefinitioncontroller.ClusterPluginDefinitionReconciler{}).SetupWithManager, @@ -107,3 +105,11 @@ func startClusterReconciler(name string, mgr ctrl.Manager) error { RenewRemoteClusterBearerTokenAfter: renewRemoteClusterBearerTokenAfter, }).SetupWithManager(name, mgr) } + +func startCatalogReconciler(name string, mgr ctrl.Manager) error { + return (&catalog.CatalogReconciler{ + Log: ctrl.Log.WithName("controllers").WithName("catalogs"), + StoragePath: artifactStoragePath, + HttpRetry: artifactRetries, + }).SetupWithManager(name, mgr) +} diff --git a/cmd/greenhouse/main.go b/cmd/greenhouse/main.go index 45d5855ec..5f0115a00 100644 --- a/cmd/greenhouse/main.go +++ b/cmd/greenhouse/main.go @@ -74,6 +74,8 @@ const ( flagLeaseDuration = "leader-election-lease-duration" flagRenewDeadline = "leader-election-renew-deadline" flagRetryPeriod = "leader-election-retry-period" + flagArtifactStoragePath = "catalog-artifact-storage-path" + flagArtifactRetries = "catalog-http-retry" ) var ( @@ -83,8 +85,10 @@ var ( enabledControllers []string 
remoteClusterBearerTokenValidity, renewRemoteClusterBearerTokenAfter time.Duration - kubeClientOpts clientutil.RuntimeOptions - featureFlags *features.Features + kubeClientOpts clientutil.RuntimeOptions + featureFlags *features.Features + artifactStoragePath string + artifactRetries int ) func init() { @@ -121,6 +125,8 @@ func main() { flag.DurationVar(&leaseDuration, flagLeaseDuration, 60*time.Second, "Leader election lease duration") flag.DurationVar(&renewDeadline, flagRenewDeadline, 30*time.Second, "Leader election renew deadline") flag.DurationVar(&retryPeriod, flagRetryPeriod, 5*time.Second, "Leader election retry period") + flag.StringVar(&artifactStoragePath, flagArtifactStoragePath, "/tmp/data", "The path to store catalog artifacts") + flag.IntVar(&artifactRetries, flagArtifactRetries, 5, "Max number of retries to acquire artifact, default: 5 attempts") opts := zap.Options{ Development: true, diff --git a/config/samples/catalog/extensions-catalog.yaml b/config/samples/catalog/extensions-catalog.yaml index 1950a1c07..a8040e6be 100644 --- a/config/samples/catalog/extensions-catalog.yaml +++ b/config/samples/catalog/extensions-catalog.yaml @@ -1,6 +1,5 @@ # SPDX-FileCopyrightText: 2025 SAP SE or an SAP affiliate company and Greenhouse contributors # SPDX-License-Identifier: Apache-2.0 - apiVersion: greenhouse.sap/v1alpha1 kind: Catalog metadata: @@ -8,39 +7,31 @@ metadata: namespace: greenhouse spec: sources: - - repository: https://github.com/cloudoperators/greenhouse-extensions + - ref: + branch: main + repository: https://github.com/cloudoperators/greenhouse-extensions resources: - - alerts/plugindefinition.yaml - - audit-logs/plugindefinition.yaml - cert-manager/plugindefinition.yaml - - exposed-services/plugindefinition.yaml - - external-dns/plugindefinition.yaml - - repo-guard/plugindefinition.yaml - ingress-nginx/plugindefinition.yaml - kube-monitoring/plugindefinition.yaml - - openbao/plugindefinition.yaml - - logs/plugindefinition.yaml - - 
perses/plugindefinition.yaml - - plutono/plugindefinition.yaml - - service-proxy/plugindefinition.yaml - - teams2slack/plugindefinition.yaml - - thanos/plugindefinition.yaml - ref: - branch: main - secretName: github-com-token overrides: - - name: perses - alias: perses-some-registry-chart - repository: oci://some-registry.io/some-repo/perses-chart - - name: kube-monitoring - alias: kube-monitoring-some-registry-chart - repository: oci://some-registry.io/some-repo/kube-monitoring-chart - - repository: https://github.com/cloudoperators/greenhouse-extensions + - name: cert-manager + alias: cert-manager-overridden + optionsOverride: + - name: "cert-manager.installCRDs" + value: true + - name: cert-manager.webhook.timeoutSeconds + value: 15 + - ref: + sha: ed3ff23bca21eef6cffc0956df338d8185340fc5 + repository: https://github.com/cloudoperators/greenhouse-extensions resources: - - disco/plugindefinition.yaml - ref: - sha: eafaee1d74774f74e20101307d25b29a3eb26102 + - perses/plugindefinition.yaml overrides: - - name: disco - alias: disco-some-registry-chart - repository: oci://some-registry.io/some-repo/perses-chart + - name: perses + alias: perses-overridden + optionsOverride: + - name: "perses.serviceMonitor.selector.matchLabels" + value: + foo: bar + baz: qux \ No newline at end of file diff --git a/config/samples/flux/kustomization.yaml b/config/samples/flux/kustomization.yaml index 3b6013e97..a6904cc29 100644 --- a/config/samples/flux/kustomization.yaml +++ b/config/samples/flux/kustomization.yaml @@ -15,6 +15,22 @@ images: newName: ghcr.io/fluxcd/source-watcher patches: + # allow greenhouse namespace to access flux artifacts + - target: + kind: NetworkPolicy + name: allow-egress + namespace: flux-system + patch: | + - op: add + path: /spec/ingress/0/from/- + value: + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: greenhouse + podSelector: + matchLabels: + app.kubernetes.io/name: greenhouse + # do not apply image-reflector-controller and 
image-automation-controller - target: kind: Deployment diff --git a/docs/reference/api/index.html b/docs/reference/api/index.html index 6812a38ae..660150b04 100644 --- a/docs/reference/api/index.html +++ b/docs/reference/api/index.html @@ -184,6 +184,19 @@

CatalogOverrides

Repository is the repository to override in the PluginDefinition .spec.helmChart.repository

+ + +optionsOverride
+ + +[]OptionsOverride + + + + +

OptionsOverride are the option values to override in the PluginDefinition .spec.options[]

+ + @@ -1899,6 +1912,50 @@

OIDCConfig +

OptionsOverride +

+

+(Appears on: +CatalogOverrides) +

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+name
+ +string + +
+

Name is the name of the option value to override in the PluginDefinition

+
+value
+ + +Kubernetes apiextensions/v1.JSON + + +
+

Value is the value to set as Default in the PluginDefinition option

+
+
+

Organization

Organization is the Schema for the organizations API

diff --git a/docs/reference/api/openapi.yaml b/docs/reference/api/openapi.yaml index 717ddbe14..c96cd39cb 100755 --- a/docs/reference/api/openapi.yaml +++ b/docs/reference/api/openapi.yaml @@ -54,6 +54,22 @@ components: description: Name is the name of the PluginDefinition to patch with an alias minLength: 1 type: string + optionsOverride: + description: OptionsOverride are the option values to override in the PluginDefinition .spec.options[] + items: + properties: + name: + description: Name is the name of the option value to override in the PluginDefinition + minLength: 1 + type: string + value: + description: Value is the value to set as Default in the PluginDefinition option + x-kubernetes-preserve-unknown-fields: true + required: + - name + - value + type: object + type: array repository: description: Repository is the repository to override in the PluginDefinition .spec.helmChart.repository type: string diff --git a/e2e/catalog/e2e_test.go b/e2e/catalog/e2e_test.go index b011db98a..1d9141af8 100644 --- a/e2e/catalog/e2e_test.go +++ b/e2e/catalog/e2e_test.go @@ -7,6 +7,7 @@ package catalog import ( "context" + "strings" "testing" "time" @@ -71,7 +72,7 @@ var _ = Describe("Catalog E2E", Ordered, func() { env = env.WithGitHubSecret(ctx, adminClient, secretName, secretType) } testNamespace := env.TestNamespace - scenario := scenarios.NewScenario(adminClient, catalogYamlPath) + scenario := scenarios.NewScenario(adminClient, catalogYamlPath, secretName, strings.TrimSpace(catalogYamlPath) == "") execute(scenario, testNamespace) }, Entry("Catalog Branch scenario", @@ -123,5 +124,12 @@ var _ = Describe("Catalog E2E", Ordered, func() { shared.GitHubSecretTypeFake, func(s scenarios.IScenario, ns string) { s.ExecuteGitAuthFailScenario(ctx, ns) }, ), + Entry("Catalog Options Override scenario", + e2eOrgYaml, + "", + "github-com-app", + shared.GitHubSecretTypeAPP, + func(s scenarios.IScenario, ns string) { s.ExecuteOptionsOverrideScenario(ctx, ns) }, + ), ) }) diff --git 
a/e2e/catalog/scenarios/catalog_option_overrides.go b/e2e/catalog/scenarios/catalog_option_overrides.go new file mode 100644 index 000000000..80cf769ba --- /dev/null +++ b/e2e/catalog/scenarios/catalog_option_overrides.go @@ -0,0 +1,121 @@ +// SPDX-FileCopyrightText: 2025 SAP SE or an SAP affiliate company and Greenhouse contributors +// SPDX-License-Identifier: Apache-2.0 + +package scenarios + +import ( + "context" + "strings" + + "github.com/google/go-cmp/cmp" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + greenhouseapis "github.com/cloudoperators/greenhouse/api" + greenhousev1alpha1 "github.com/cloudoperators/greenhouse/api/v1alpha1" + "github.com/cloudoperators/greenhouse/internal/test" +) + +// baseCatalog returns a base Catalog with a single PluginDefinition resource with no overrides. +// the PluginDefinition applied from this Catalog will be used to compare diff against Catalog that applies +// option overrides on the same PluginDefinition with an alias. 
+func baseCatalog(name, namespace, secretName string) (*greenhousev1alpha1.Catalog, greenhousev1alpha1.CatalogSource) { + GinkgoHelper() + catalog := test.NewCatalog(name, namespace) + source := test.NewCatalogSource( + test.WithRepository("https://github.com/cloudoperators/extensions-e2e"), + test.WithRepositoryBranch("main"), + test.WithCatalogResources([]string{ + "plugindefinitions/pd-cert-manager.yaml", + }), + ) + source.SecretName = ptr.To[string](secretName) + catalog.SetLabels(map[string]string{ + "greenhouse.sap/managed-by": "e2e", + }) + return catalog, source +} + +func overriddenCatalog(name, namespace, secretName string) (*greenhousev1alpha1.Catalog, greenhousev1alpha1.CatalogSource) { + GinkgoHelper() + catalog, source := baseCatalog(name, namespace, secretName) + source.Ref.Branch = ptr.To[string]("dev") + source.Overrides = append(source.Overrides, greenhousev1alpha1.CatalogOverrides{ + Name: "cert-manager", + Alias: "cert-manager-override", + Repository: "oci://quay.io/jetstack/charts/cert-manager", + OptionsOverride: []greenhousev1alpha1.OptionsOverride{ + { + Name: "cert-manager.installCRDs", + Value: test.MustReturnJSONFor(true), + }, + { + Name: "cert-manager.webhook.timeoutSeconds", + Value: test.MustReturnJSONFor(30), + }, + }, + }) + return catalog, source +} + +func (s *scenario) ExecuteOptionsOverrideScenario(ctx context.Context, namespace string) { + GinkgoHelper() + By("creating initial Catalog without overrides") + var initSource, overriddenSource greenhousev1alpha1.CatalogSource + initCatalog, initSource := baseCatalog("catalog-no-override", namespace, s.secretName) + initPluginDefinition := s.createCatalogAndVerifySuccess(ctx, initCatalog, initSource) + + By("creating another Catalog with option overrides") + overriddenCatalog, overriddenSource := overriddenCatalog("catalog-opt-override", namespace, s.secretName) + overriddenPluginDefinition := s.createCatalogAndVerifySuccess(ctx, overriddenCatalog, overriddenSource) + + 
By("comparing the PluginDefinition options before and after override") + Expect(initPluginDefinition.Spec.Options).ShouldNot(Equal(overriddenPluginDefinition.Spec.Options), "the PluginDefinition options should not be equal after override") + diff := cmp.Diff(initPluginDefinition.Spec.Options, overriddenPluginDefinition.Spec.Options) + GinkgoWriter.Printf("PluginDefinition options diff after override:\n%s\n", diff) + + By("cleaning up all PluginDefinitions created in the scenario") + s.deleteAllScenarioPluginDefinitions(ctx, []string{initCatalog.Name, overriddenCatalog.Name}, namespace) +} + +func (s *scenario) createCatalogAndVerifySuccess(ctx context.Context, catalog *greenhousev1alpha1.Catalog, source greenhousev1alpha1.CatalogSource) *greenhousev1alpha1.PluginDefinition { + GinkgoHelper() + s.catalog = catalog + s.catalog.Spec.Sources = append(s.catalog.Spec.Sources, source) + Expect(s.createCatalogIfNotExists(ctx)).ToNot(HaveOccurred(), "there should be no error creating the initial Catalog") + s.verifySuccess(ctx) + groupKey, err := getSourceGroupHash(source, catalog.Name) + Expect(err).ToNot(HaveOccurred(), "there should be no error getting the source group hash for initial catalog") + kustomization := s.getKustomizationObject(groupKey) + err = s.k8sClient.Get(ctx, client.ObjectKeyFromObject(kustomization), kustomization) + Expect(err).ToNot(HaveOccurred(), "there should be no error getting the Kustomization for initial catalog") + key := types.NamespacedName{ + Name: strings.Split(kustomization.Status.Inventory.Entries[0].ID, "_")[1], + Namespace: catalog.Namespace, + } + return checkIfPDExists(ctx, s.k8sClient, key) +} + +func (s *scenario) deleteAllScenarioPluginDefinitions(ctx context.Context, labelVals []string, namespace string) { + GinkgoHelper() + selector := labels.NewSelector() + req, err := labels.NewRequirement( + greenhouseapis.LabelKeyCatalog, + selection.In, + labelVals, + ) + Expect(err).ToNot(HaveOccurred(), "there should be no error creating 
label requirement for cleanup") + selector = selector.Add(*req) + err = s.k8sClient.DeleteAllOf( + ctx, + &greenhousev1alpha1.PluginDefinition{}, + client.InNamespace(namespace), + client.MatchingLabelsSelector{Selector: selector}, + ) + Expect(err).ToNot(HaveOccurred(), "there should be no error deleting all PluginDefinitions in the namespace") +} diff --git a/e2e/catalog/scenarios/catalog_success.go b/e2e/catalog/scenarios/catalog_success.go index f08797357..b313ef8c4 100644 --- a/e2e/catalog/scenarios/catalog_success.go +++ b/e2e/catalog/scenarios/catalog_success.go @@ -24,6 +24,17 @@ func (s *scenario) ExecuteSuccessScenario(ctx context.Context, namespace string) s.catalog.SetNamespace(namespace) err := s.createCatalogIfNotExists(ctx) Expect(err).ToNot(HaveOccurred(), "there should be no error creating the Catalog for multi-source scenario") + s.verifySuccess(ctx) + By("cleaning up Catalog") + for _, source := range s.catalog.Spec.Sources { + groupKey, err := getSourceGroupHash(source, s.catalog.Name) + Expect(err).ToNot(HaveOccurred(), "there should be no error getting the source group hash") + s.deletePluginDefinitions(ctx, groupKey) + } + s.deleteCatalog(ctx) +} + +func (s *scenario) verifySuccess(ctx context.Context) { for _, source := range s.catalog.Spec.Sources { groupKey, err := getSourceGroupHash(source, s.catalog.Name) Expect(err).ToNot(HaveOccurred(), "there should be no error getting the source group hash") @@ -55,14 +66,6 @@ func (s *scenario) ExecuteSuccessScenario(ctx context.Context, namespace string) g.Expect(catalogReady.Status).To(Equal(metav1.ConditionTrue), "the Ready condition status should be True") g.Expect(catalogReady.Reason).To(Equal(greenhousev1alpha1.CatalogReadyReason), "the Ready condition reason should be CatalogReady") }).Should(Succeed(), "the Catalog should have a Ready=True condition") - - By("cleaning up Catalog") - for _, source := range s.catalog.Spec.Sources { - groupKey, err := getSourceGroupHash(source, s.catalog.Name) 
- Expect(err).ToNot(HaveOccurred(), "there should be no error getting the source group hash") - s.deletePluginDefinitions(ctx, groupKey) - } - s.deleteCatalog(ctx) } func getSourceGroupHash(source greenhousev1alpha1.CatalogSource, catalogName string) (groupKey string, err error) { diff --git a/e2e/catalog/scenarios/scenario.go b/e2e/catalog/scenarios/scenario.go index c789ab67f..c6f730853 100644 --- a/e2e/catalog/scenarios/scenario.go +++ b/e2e/catalog/scenarios/scenario.go @@ -5,8 +5,10 @@ package scenarios import ( "context" + "os" "slices" "strings" + "time" kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1" fluxmeta "github.com/fluxcd/pkg/apis/meta" @@ -21,6 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" + greenhouseapis "github.com/cloudoperators/greenhouse/api" greenhousev1alpha1 "github.com/cloudoperators/greenhouse/api/v1alpha1" "github.com/cloudoperators/greenhouse/e2e/shared" "github.com/cloudoperators/greenhouse/internal/lifecycle" @@ -31,23 +34,28 @@ type IScenario interface { ExecuteCPDFailScenario(ctx context.Context, namespace string) ExecuteArtifactFailScenario(ctx context.Context, namespace string) ExecuteGitAuthFailScenario(ctx context.Context, namespace string) + ExecuteOptionsOverrideScenario(ctx context.Context, namespace string) } type scenario struct { - k8sClient client.Client - catalog *greenhousev1alpha1.Catalog + k8sClient client.Client + catalog *greenhousev1alpha1.Catalog + secretName string } -func NewScenario(adminClient client.Client, catalogYamlPath string) IScenario { +func NewScenario(adminClient client.Client, catalogYamlPath, secretName string, skipTestData bool) IScenario { GinkgoHelper() catalog := &greenhousev1alpha1.Catalog{} - catalogBytes, err := shared.ReadFileContent(catalogYamlPath) - Expect(err).ToNot(HaveOccurred(), "there should be no error reading the catalog yaml file for branch scenario") - err = shared.FromYamlToK8sObject(string(catalogBytes), catalog) - Expect(err).ToNot(HaveOccurred(), 
"there should be no error converting catalog yaml to k8s object for branch scenario") + if !skipTestData { + catalogBytes, err := os.ReadFile(catalogYamlPath) + Expect(err).ToNot(HaveOccurred(), "there should be no error reading the catalog yaml file for branch scenario") + err = shared.FromYamlToK8sObject(string(catalogBytes), catalog) + Expect(err).ToNot(HaveOccurred(), "there should be no error converting catalog yaml to k8s object for branch scenario") + } return &scenario{ - k8sClient: adminClient, - catalog: catalog, + k8sClient: adminClient, + catalog: catalog, + secretName: secretName, } } @@ -359,14 +367,22 @@ func (s *scenario) expectStatusPropagationInCatalogInventory(ctx context.Context default: Fail("unsupported kind for propagation check: " + resource.Kind) } - err := s.k8sClient.Get(ctx, client.ObjectKeyFromObject(fluxObj), fluxObj) - if apierrors.IsNotFound(err) && ignoreNotFound { - // ignore not found errors - continue - } - Expect(err).ToNot(HaveOccurred(), "there should be no error getting the catalog flux resource: "+fluxObj.GetName()) By("checking if Catalog inventory contains the flux resource status for " + resource.Kind + "/" + resource.Name) Eventually(func(g Gomega) { + err := s.k8sClient.Get(ctx, client.ObjectKeyFromObject(fluxObj), fluxObj) + if apierrors.IsNotFound(err) && ignoreNotFound { + // ignore not found errors + return + } + g.Expect(err).ToNot(HaveOccurred(), "there should be no error getting the catalog flux resource: "+fluxObj.GetName()) + annotations := fluxObj.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + annotations[greenhouseapis.FluxReconcileRequestAnnotation] = time.Now().String() + fluxObj.SetAnnotations(annotations) + err = s.k8sClient.Update(ctx, fluxObj) + g.Expect(err).ToNot(HaveOccurred(), "there should be no error triggering reconcile: "+fluxObj.GetName()) fluxCondition := meta.FindStatusCondition(fluxObj.GetConditions(), fluxmeta.ReadyCondition) 
g.Expect(fluxCondition).ToNot(BeNil(), "the underlying resource should have a Ready condition: "+fluxObj.GetName()) fluxConditionMsg := fluxCondition.Message diff --git a/e2e/shared/e2e.go b/e2e/shared/e2e.go index a8bfaf9f1..2a6bfe49f 100644 --- a/e2e/shared/e2e.go +++ b/e2e/shared/e2e.go @@ -95,13 +95,13 @@ func NewExecutionEnv() *TestEnv { Log("Running on real cluster\n") remoteKubeCfgPath, err = fromEnv(RemoteKubeConfigPathEnv) Expect(err).NotTo(HaveOccurred(), "error getting remote kubeconfig path") - remoteKubeCfgBytes, err = ReadFileContent(remoteKubeCfgPath) + remoteKubeCfgBytes, err = os.ReadFile(remoteKubeCfgPath) Expect(err).NotTo(HaveOccurred(), "error reading remote kubeconfig file") } else { Log("Running on local cluster\n") remoteIntKubeCfgPath, err := fromEnv(remoteIntKubeConfigPathEnv) Expect(err).NotTo(HaveOccurred(), "error getting remote internal kubeconfig path") - remoteKubeCfgBytes, err = ReadFileContent(remoteIntKubeCfgPath) + remoteKubeCfgBytes, err = os.ReadFile(remoteIntKubeCfgPath) Expect(err).NotTo(HaveOccurred(), "error reading remote internal kubeconfig file") } return &TestEnv{ @@ -122,7 +122,7 @@ func isRealCluster() bool { func (env *TestEnv) WithOrganization(ctx context.Context, k8sClient client.Client, samplePath string) *TestEnv { org := &greenhousev1alpha1.Organization{} - orgBytes, err := ReadFileContent(samplePath) + orgBytes, err := os.ReadFile(samplePath) Expect(err).NotTo(HaveOccurred(), "error reading organization sample data") err = FromYamlToK8sObject(string(orgBytes), org) Expect(err).NotTo(HaveOccurred(), "error converting organization yaml to k8s object") @@ -140,7 +140,7 @@ func (env *TestEnv) WithOrganization(ctx context.Context, k8sClient client.Clien func clientGetter(kubeconfigEnv string) *clientutil.RestClientGetter { kubeconfigPath, err := fromEnv(kubeconfigEnv) Expect(err).NotTo(HaveOccurred(), "error getting kubeconfig path from env") - kubeconfigBytes, err := ReadFileContent(kubeconfigPath) + 
kubeconfigBytes, err := os.ReadFile(kubeconfigPath) Expect(err).NotTo(HaveOccurred(), "error reading kubeconfig file") config, err := clientcmd.RESTConfigFromKubeConfig(kubeconfigBytes) Expect(err).NotTo(HaveOccurred(), "error getting rest config from kubeconfig") diff --git a/e2e/shared/utils.go b/e2e/shared/utils.go index 89f844a96..97ecd3007 100644 --- a/e2e/shared/utils.go +++ b/e2e/shared/utils.go @@ -6,7 +6,6 @@ package shared import ( "bytes" "io" - "os" kyaml "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/klog/v2" @@ -25,10 +24,6 @@ func LogErr(format string, args ...any) { klog.InfofDepth(1, "===== 😵 "+format, args...) } -func ReadFileContent(path string) ([]byte, error) { - return os.ReadFile(path) -} - // FromYamlToK8sObject - Converts a YAML document to a Kubernetes object // if yaml contains multiple documents, then corresponding kubernetes objects should be provided func FromYamlToK8sObject(doc string, resources ...any) error { diff --git a/go.mod b/go.mod index b1ea1f378..33add8ce7 100644 --- a/go.mod +++ b/go.mod @@ -40,6 +40,7 @@ require ( github.com/google/cel-go v0.26.1 github.com/google/go-cmp v0.7.0 github.com/google/go-github/v77 v77.0.0 + github.com/hashicorp/go-retryablehttp v0.7.8 github.com/jeremywohl/flatten/v2 v2.0.0-20211013061545-07e4a09fb8e4 github.com/oklog/run v1.2.0 github.com/onsi/ginkgo/v2 v2.26.0 @@ -89,6 +90,7 @@ require ( github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mattn/go-sqlite3 v1.14.32 // indirect github.com/miekg/dns v1.1.58 // indirect diff --git a/go.sum b/go.sum index 67b2c947e..e82a9a681 100644 --- a/go.sum +++ b/go.sum @@ -219,8 +219,14 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1ns github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= +github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= diff --git a/internal/controller/catalog/artifact.go b/internal/controller/catalog/artifact.go new file mode 100644 index 000000000..65be7aa3d --- /dev/null +++ b/internal/controller/catalog/artifact.go @@ -0,0 +1,284 @@ +// SPDX-FileCopyrightText: 2025 SAP SE or an SAP affiliate company and Greenhouse contributors +// SPDX-License-Identifier: Apache-2.0 + +package catalog + +import ( + "archive/tar" + "compress/gzip" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "time" + + "github.com/go-logr/logr" + retry "github.com/hashicorp/go-retryablehttp" +) + +type artifactory struct { + log logr.Logger + client *retry.Client + artifactID string + storageBasePath string +} + +type IArtifactory 
interface { + Save(content []byte, digest string) error + Get(ctx context.Context, url, digest string) ([]byte, error) + DeleteAllExcept(digest string) error +} + +// noopLogger adapts a logr.Logger to the retryablehttp LeveledLogger interface (despite the name, it does emit log output) +type noopLogger struct{ log logr.Logger } + +func (l noopLogger) Error(msg string, keysAndValues ...interface{}) { + l.log.Error(errors.New(msg), "http error", keysAndValues...) +} +func (l noopLogger) Info(msg string, keysAndValues ...interface{}) { + l.log.V(1).Info(msg, keysAndValues...) +} +func (l noopLogger) Debug(msg string, keysAndValues ...interface{}) { + l.log.V(2).Info(msg, keysAndValues...) +} +func (l noopLogger) Warn(msg string, keysAndValues ...interface{}) { + l.log.V(1).Info("WARN: "+msg, keysAndValues...) +} + +// newArtifactory creates a new Artifactory instance with retryable HTTP client +func newArtifactory(log logr.Logger, artifactID, storagePath string, retries int) IArtifactory { + retryClient := retry.NewClient() + retryClient.RetryMax = retries // maximum number of retries + retryClient.RetryWaitMin = 3 * time.Second // initial delay + retryClient.RetryWaitMax = 15 * time.Second // max backoff delay + retryClient.Logger = retry.LeveledLogger(noopLogger{log}) + + // note: this hook only logs context cancellation; retryablehttp itself stops retrying when the request context is canceled + retryClient.RequestLogHook = func(_ retry.Logger, req *http.Request, retry int) { + select { + case <-req.Context().Done(): + log.Info("context canceled, aborting retries", "url", req.URL) + default: + } + } + return &artifactory{ + log: log, + client: retryClient, + artifactID: artifactID, + storageBasePath: storagePath, + } +} + +func (a *artifactory) Get(ctx context.Context, srcURL, digest string) ([]byte, error) { + // 1. 
Try local fetch first + data, err := a.fetchFromFileSystem(digest) + if err == nil { + a.log.V(1).Info("artifact found in local cache", "digest", digest) + return data, nil + } + if !errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("failed to fetch artifact from filesystem: %w", err) + } + + a.log.V(1).Info("artifact not found locally, fetching from remote source", "url", srcURL) + + // 2. Fetch from remote source (tar.gz → map[string][]byte) + files, err := a.fetchFromSource(ctx, srcURL) + if err != nil { + return nil, fmt.Errorf("failed to fetch artifact from source: %w", err) + } + + // 3. Marshal map into []byte and return + content, err := json.Marshal(files) + if err != nil { + return nil, fmt.Errorf("failed to marshal artifact content: %w", err) + } + + a.log.V(1).Info("artifact fetched successfully from remote", "digest", digest) + return content, nil +} + +func (a *artifactory) Save(content []byte, digest string) error { + return a.saveToFileSystem(content, digest) +} + +func (a *artifactory) DeleteAllExcept(digest string) error { + return a.deleteAllExceptFromFileSystem(digest) +} + +func (a *artifactory) saveToFileSystem(content []byte, digest string) error { + if digest == "" { + return errors.New("digest must not be empty") + } + if len(content) == 0 { + return errors.New("content must not be empty") + } + + filePath := filepath.Join(a.storageBasePath, a.artifactID, digest) + + // Ensure parent directory exists + if err := os.MkdirAll(filepath.Dir(filePath), 0o755); err != nil { + return fmt.Errorf("failed to create artifact directory: %w", err) + } + + // Direct write + if err := os.WriteFile(filePath, content, 0o644); err != nil { + return fmt.Errorf("failed to write artifact file: %w", err) + } + + a.log.V(1).Info("artifact saved to disk", + "path", filePath, + "digest", digest, + "artifactID", a.artifactID, + ) + return nil +} + +func (a *artifactory) deleteAllExceptFromFileSystem(keepDigest string) error { + dir := 
filepath.Join(a.storageBasePath, a.artifactID) + + entries, err := os.ReadDir(dir) + if err != nil { + if os.IsNotExist(err) { + a.log.V(1).Info("no artifact directory found to clean up", "path", dir) + return nil + } + return fmt.Errorf("failed to read artifact directory: %w", err) + } + + for _, e := range entries { + if e.IsDir() { + continue + } + + name := e.Name() + if name == keepDigest { + continue + } + + target := filepath.Join(dir, name) + if err := os.Remove(target); err != nil { + if os.IsNotExist(err) { + continue + } + return fmt.Errorf("failed to delete artifact file %s: %w", target, err) + } + + a.log.V(1).Info("deleted old artifact digest", "digest", name) + } + + return nil +} + +func (a *artifactory) fetchFromFileSystem(digest string) ([]byte, error) { + if digest == "" { + return nil, errors.New("digest must not be empty") + } + + filePath := filepath.Join(a.storageBasePath, a.artifactID, digest) + + data, err := os.ReadFile(filePath) + if err != nil { + if os.IsNotExist(err) { + return nil, os.ErrNotExist + } + return nil, fmt.Errorf("failed to read artifact file: %w", err) + } + + return data, nil +} + +func (a *artifactory) fetchFromSource(ctx context.Context, srcURL string) (map[string][]byte, error) { + srcURL = replaceArtifactDomain(srcURL) + a.log.V(1).Info("fetching artifact", "srcUrl", srcURL) + req, err := retry.NewRequestWithContext(ctx, http.MethodGet, srcURL, http.NoBody) + if err != nil { + return nil, err + } + resp, err := a.client.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to download file: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("bad response: %s", resp.Status) + } + + gzr, err := gzip.NewReader(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to create gzip reader: %w", err) + } + defer gzr.Close() + + tr := tar.NewReader(gzr) + files := make(map[string][]byte) + + for { + header, err := tr.Next() + if err == io.EOF { + break // 
done + } + if err != nil { + return nil, fmt.Errorf("error reading tar: %w", err) + } + + switch header.Typeflag { + case tar.TypeReg: + // Read the file contents for this entry into memory. + // NOTE: io.ReadAll streams from the tar reader; we don't buffer the whole tarball. + data, err := io.ReadAll(tr) + if err != nil { + return nil, fmt.Errorf("failed to read file %q from tar: %w", header.Name, err) + } + files[header.Name] = data + + case tar.TypeDir: + // Nothing to do; directories are implicit in map form. + continue + } + } + return files, nil +} + +// replaceArtifactDomain rewrites the host portion of the given artifact URL +// if ARTIFACT_DOMAIN is set. It preserves scheme and path. +// +// Examples: +// +// ARTIFACT_DOMAIN=localhost:5050 +// → http://localhost:5050/externalartifact/foo/bar.tar.gz +// ARTIFACT_DOMAIN=http://127.0.0.1:5050 +// → http://127.0.0.1:5050/externalartifact/foo/bar.tar.gz +func replaceArtifactDomain(artifactURL string) string { + override, ok := os.LookupEnv("ARTIFACT_DOMAIN") + if !ok || override == "" { + // not set, return original URL + return artifactURL + } + + parsed, err := url.Parse(artifactURL) + if err != nil || parsed.Host == "" { + // fallback to original if parsing fails + return artifactURL + } + + // Parse override domain (to check if it has a scheme) + overrideURL, err := url.Parse(override) + if err == nil && overrideURL.Host != "" { + // override has a scheme (e.g., http://localhost:5050) + parsed.Scheme = overrideURL.Scheme + parsed.Host = overrideURL.Host + } else { + // override is just host[:port] (e.g., localhost:5050) + parsed.Host = override + // Keep original scheme + } + + return parsed.String() +} diff --git a/internal/controller/catalog/catalog_controller.go b/internal/controller/catalog/catalog_controller.go index 5d0dd5bc1..62d0d748f 100644 --- a/internal/controller/catalog/catalog_controller.go +++ b/internal/controller/catalog/catalog_controller.go @@ -34,9 +34,11 @@ import ( type 
CatalogReconciler struct { client.Client - Scheme *runtime.Scheme - Log logr.Logger - recorder record.EventRecorder + Scheme *runtime.Scheme + Log logr.Logger + recorder record.EventRecorder + StoragePath string + HttpRetry int } func (r *CatalogReconciler) SetupWithManager(name string, mgr ctrl.Manager) error { @@ -258,6 +260,12 @@ func (r *CatalogReconciler) EnsureCreated(ctx context.Context, obj lifecycle.Run continue } + ready, _ := sourcer.objectReadiness(ctx, externalArtifact) + if ready != metav1.ConditionTrue { + r.Log.Info("external artifact not ready yet, retry in next reconciliation loop", "namespace", catalog.Namespace, "name", sourcer.getArtifactName()) + continue + } + if err = sourcer.reconcileKustomization(ctx, externalArtifact); err != nil { r.Log.Error(err, "failed to reconcile kustomization for catalog source", "namespace", catalog.Namespace, "name", sourcer.getKustomizationName()) allErrors = append(allErrors, err) diff --git a/internal/controller/catalog/source.go b/internal/controller/catalog/source.go index 3956b4916..212214a85 100644 --- a/internal/controller/catalog/source.go +++ b/internal/controller/catalog/source.go @@ -5,8 +5,11 @@ package catalog import ( "context" + "encoding/json" "fmt" + "slices" "strings" + "time" kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1" "github.com/fluxcd/pkg/apis/kustomize" @@ -26,6 +29,7 @@ import ( greenhousev1alpha1 "github.com/cloudoperators/greenhouse/api/v1alpha1" "github.com/cloudoperators/greenhouse/internal/common" "github.com/cloudoperators/greenhouse/internal/flux" + "github.com/cloudoperators/greenhouse/internal/helm" "github.com/cloudoperators/greenhouse/internal/lifecycle" "github.com/cloudoperators/greenhouse/internal/rbac" ) @@ -51,19 +55,21 @@ const ( type source struct { client.Client - scheme *runtime.Scheme - log logr.Logger - recorder record.EventRecorder - commonLabels map[string]string - catalog *greenhousev1alpha1.Catalog - source greenhousev1alpha1.CatalogSource - 
sourceHash string - sourceGroup string - gitRepo greenhousev1alpha1.SourceStatus - generator greenhousev1alpha1.SourceStatus - externalArtifact greenhousev1alpha1.SourceStatus - kustomize greenhousev1alpha1.SourceStatus - lastReconciledAt string + scheme *runtime.Scheme + log logr.Logger + recorder record.EventRecorder + commonLabels map[string]string + catalog *greenhousev1alpha1.Catalog + source greenhousev1alpha1.CatalogSource + sourceHash string + sourceGroup string + gitRepo greenhousev1alpha1.SourceStatus + generator greenhousev1alpha1.SourceStatus + externalArtifact greenhousev1alpha1.SourceStatus + kustomize greenhousev1alpha1.SourceStatus + lastReconciledAt string + artifactory IArtifactory + externalArtifactManifest []byte } // validateSources - ensures there are no duplicate sources in the catalog spec @@ -99,6 +105,7 @@ func (r *CatalogReconciler) newCatalogSource(catalogSource greenhousev1alpha1.Ca if err != nil { return nil, err } + hashGroup := fmt.Sprintf("%s-%s-%s-%s", host, owner, repo, ref) s := &source{ Client: r.Client, scheme: r.Scheme, @@ -108,14 +115,16 @@ func (r *CatalogReconciler) newCatalogSource(catalogSource greenhousev1alpha1.Ca greenhouseapis.LabelKeyCatalog: catalog.Name, greenhouseapis.LabelKeyCatalogSource: gitRepoArtifactPrefix + "-" + hash, }, - catalog: catalog, - source: catalogSource, - sourceHash: hash, - sourceGroup: fmt.Sprintf("%s-%s-%s-%s", host, owner, repo, ref), - gitRepo: greenhousev1alpha1.SourceStatus{Kind: sourcev1.GitRepositoryKind, Name: gitRepoArtifactPrefix + "-" + hash}, - generator: greenhousev1alpha1.SourceStatus{Kind: sourcev2.ArtifactGeneratorKind, Name: generatorArtifactPrefix + "-" + hash}, - externalArtifact: greenhousev1alpha1.SourceStatus{Kind: sourcev1.ExternalArtifactKind, Name: externalArtifactPrefix + "-" + hash}, - kustomize: greenhousev1alpha1.SourceStatus{Kind: kustomizev1.KustomizationKind, Name: kustomizeArtifactPrefix + "-" + hash}, + catalog: catalog, + source: catalogSource, + sourceHash: 
hash, + sourceGroup: hashGroup, + gitRepo: greenhousev1alpha1.SourceStatus{Kind: sourcev1.GitRepositoryKind, Name: gitRepoArtifactPrefix + "-" + hash}, + generator: greenhousev1alpha1.SourceStatus{Kind: sourcev2.ArtifactGeneratorKind, Name: generatorArtifactPrefix + "-" + hash}, + externalArtifact: greenhousev1alpha1.SourceStatus{Kind: sourcev1.ExternalArtifactKind, Name: externalArtifactPrefix + "-" + hash}, + externalArtifactManifest: nil, + kustomize: greenhousev1alpha1.SourceStatus{Kind: kustomizev1.KustomizationKind, Name: kustomizeArtifactPrefix + "-" + hash}, + artifactory: newArtifactory(r.Log.WithName("artifactory"), catalog.Namespace+"/"+hashGroup+"-"+hash, r.StoragePath, r.HttpRetry), } if lastReconciledAt, ok := lifecycle.ReconcileAnnotationValue(catalog); ok { @@ -152,6 +161,14 @@ func (s *source) setInventory(kind, name, msg string, ready metav1.ConditionStat s.catalog.SetInventory(s.getSourceGroupHash(), kind, name, msg, ready) } +// hasOptionOverrides checks if the source has at least one option override +// ext artifact archive is only fetched when there exists at least one option override +func (s *source) hasOptionOverrides() (exists bool) { + return slices.ContainsFunc(s.source.Overrides, func(override greenhousev1alpha1.CatalogOverrides) bool { + return len(override.OptionsOverride) > 0 + }) +} + // getSourceSecret - retrieves the Secret resource referenced in the Catalog.Spec.Sources[].SecretName if it exists func (s *source) getSourceSecret(ctx context.Context) (*corev1.Secret, error) { if s.source.SecretName == nil { @@ -285,6 +302,16 @@ func (s *source) reconcileArtifactGeneration(ctx context.Context) (*sourcev1.Ext if err != nil { return nil, err } + labels := extArtifact.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + labels[greenhouseapis.LabelKeyCatalog] = s.commonLabels[greenhouseapis.LabelKeyCatalog] + extArtifact.SetLabels(labels) + err = s.Update(ctx, extArtifact) + if err != nil { + return nil, err + } 
return extArtifact, nil } @@ -294,6 +321,12 @@ func (s *source) reconcileKustomization(ctx context.Context, extArtifact *source kustomization.SetName(s.kustomize.Name) kustomization.SetNamespace(s.catalog.Namespace) var err error + if s.hasOptionOverrides() { + err = s.fetchArtifact(extArtifact) + if err != nil { + return err + } + } var patches []kustomize.Patch if len(s.source.Overrides) > 0 { if patches, err = flux.PrepareKustomizePatches(s.source.Overrides, greenhousev1alpha1.GroupVersion.Group); err != nil { @@ -303,9 +336,9 @@ func (s *source) reconcileKustomization(ctx context.Context, extArtifact *source ggvk := extArtifact.GroupVersionKind() // ServiceAccount for the organization's PluginDefinitionCatalog operations serviceAccountName := rbac.OrgCatalogServiceAccountName(s.catalog.Namespace) - spec, err := flux.NewKustomizationSpecBuilder(s.log). + builder := flux.NewKustomizationSpecBuilder(s.log). WithSourceRef(ggvk.String(), ggvk.Kind, extArtifact.Name, extArtifact.Namespace). - WithCommonLabels(s.commonLabels). // TODO: Verify labels are propagated in E2E + WithCommonLabels(s.commonLabels). WithServiceAccountName(serviceAccountName). WithPatches(patches). // this is necessary for kustomize to apply namespaced resources without errors, @@ -316,12 +349,28 @@ func (s *source) reconcileKustomization(ctx context.Context, extArtifact *source // but on kustomize deletion the label stays behind since prune policy is to Retain. // WithCommonLabels(s.commonArtifactLabels). WithPath(artifactToDir). - WithSuspend(false).Build() - if err != nil { - return err + WithSuspend(false) + + for _, override := range s.source.Overrides { + if len(override.OptionsOverride) > 0 { + optionPatches, err := s.getOptionOverridePatches(override.OptionsOverride, override.Name) + if err != nil { + return err + } + if len(optionPatches) > 0 { + patches = append(patches, optionPatches...) 
+ } + } } + + builder = builder.WithPatches(patches) + // when flux resources are being updated by greenhouse controller and in parallel by flux controller, we need to retryOnConflict result, err := controllerutil.CreateOrPatch(ctx, s.Client, kustomization, func() error { + spec, err := builder.Build() + if err != nil { + return err + } kustomization.Spec = spec common.EnsureAnnotation(kustomization, fluxmeta.ReconcileRequestAnnotation, s.lastReconciledAt) return controllerutil.SetControllerReference(s.catalog, kustomization, s.scheme) @@ -342,6 +391,128 @@ func (s *source) reconcileKustomization(ctx context.Context, extArtifact *source return nil } +func (s *source) fetchArtifact(extArtifact *sourcev1.ExternalArtifact) error { + digest := extArtifact.GetArtifact().Digest + + ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(15*time.Second)) + defer cancel() + + // Fetch manifest from artifactory + manifestBytes, err := s.artifactory.Get(ctx, extArtifact.GetArtifact().URL, digest) + if err != nil { + return err + } + + // Save manifest in artifactory + err = s.artifactory.Save(manifestBytes, digest) + if err != nil { + return err + } + + // delete all artifacts from artifactory except the new digest + err = s.artifactory.DeleteAllExcept(digest) + if err != nil { + return err + } + s.externalArtifactManifest = manifestBytes + return nil +} + +func (s *source) getOptionOverridePatches(optionOverrides []greenhousev1alpha1.OptionsOverride, pluginDefinitionName string) ([]kustomize.Patch, error) { + spec, err := s.findPluginDefinition(s.externalArtifactManifest, pluginDefinitionName) + if err != nil { + s.log.Error(err, "failed to find plugin definition in store", "name", pluginDefinitionName) + return nil, err + } + if spec == nil { + s.log.Info("plugin definition not found in artifact for option overrides", "name", pluginDefinitionName) + return nil, nil + } + patches := make([]kustomize.Patch, 0, len(optionOverrides)) + + for _, ov := range 
optionOverrides { + idx := slices.IndexFunc(spec.Options, func(option greenhousev1alpha1.PluginOption) bool { + return option.Name == ov.Name + }) + if idx == -1 { + err = fmt.Errorf("failed to find plugin option override %s in plugin definition %s/%s", ov.Name, s.catalog.Namespace, pluginDefinitionName) + s.log.Info("option not found in plugin definition for override", "pluginDefinition", pluginDefinitionName, "option", ov.Name) + return nil, err + } + spec.Options[idx].Default = ov.Value + spec.Options[idx].Description += " (overridden by Catalog)" + p, err := flux.BuildJSONPatchReplace( + spec.Options[idx], + idx, + greenhousev1alpha1.GroupVersion.Group, + pluginDefinitionName, + ) + if err != nil { + return nil, err + } + patches = append(patches, kustomize.Patch{ + Patch: p.Patch, + Target: p.Target, + }) + } + return patches, nil +} + +func (s *source) findPluginDefinition(manifestBytes []byte, name string) (spec *greenhousev1alpha1.PluginDefinitionSpec, err error) { + var manifestMap map[string][]byte + if err = json.Unmarshal(manifestBytes, &manifestMap); err != nil { + s.log.Error(err, "failed to unmarshal manifest bytes", "name", name) + return + } + var combinedManifests strings.Builder + for _, manifest := range manifestMap { + combinedManifests.Write(manifest) + combinedManifests.WriteString("\n---\n") + } + filters := &helm.ManifestMultipleObjectFilter{ + Filters: []helm.ManifestObjectFilter{ + { + APIVersion: greenhousev1alpha1.GroupVersion.Version, + Kind: greenhousev1alpha1.PluginDefinitionKind, + Name: name, + }, + { + APIVersion: greenhousev1alpha1.GroupVersion.Version, + Kind: greenhousev1alpha1.ClusterPluginDefinitionKind, + Name: name, + }, + }, + } + objectMaps, err := helm.ObjectMapFromLocalManifest(filters, combinedManifests.String()) + if err != nil { + s.log.Error(err, "failed to get object map from manifest", "name", name) + return + } + for _, helmObj := range objectMaps { + obj := helmObj.Object + gvk := 
obj.GetObjectKind().GroupVersionKind() + + // Try PluginDefinition + if gvk.Kind == greenhousev1alpha1.PluginDefinitionKind { + pd := &greenhousev1alpha1.PluginDefinition{} + if err := s.scheme.Convert(obj, pd, nil); err == nil { + spec = &pd.Spec + break + } + } + + // Try ClusterPluginDefinition + if gvk.Kind == greenhousev1alpha1.ClusterPluginDefinitionKind { + cpd := &greenhousev1alpha1.ClusterPluginDefinition{} + if err := s.scheme.Convert(obj, cpd, nil); err == nil { + spec = &cpd.Spec + break + } + } + } + return +} + // objectReadiness - checks the Ready condition of a catalog object (GitRepository, ArtifactGenerator, ExternalArtifact, Kustomization) // if not Ready, then the controller adds the Catalog object to requeue func (s *source) objectReadiness(ctx context.Context, obj client.Object) (ready metav1.ConditionStatus, msg string) { diff --git a/internal/flux/kustomize_builder.go b/internal/flux/kustomize_builder.go index d33ce31ba..2527d5d5a 100644 --- a/internal/flux/kustomize_builder.go +++ b/internal/flux/kustomize_builder.go @@ -4,58 +4,35 @@ package flux import ( - "bytes" + "encoding/json" "errors" - "strings" - "text/template" + "fmt" kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1" fluxkust "github.com/fluxcd/pkg/apis/kustomize" "github.com/go-logr/logr" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - greenhouseapisv1alpha1 "github.com/cloudoperators/greenhouse/api/v1alpha1" + greenhousev1alpha1 "github.com/cloudoperators/greenhouse/api/v1alpha1" ) const ( - kustomizeOperationReplace = "replace" - kustomizeMetadataNamePath = "/metadata/name" - kustomizeHelmRepoPatch = "/spec/helmChart/repository" + kustomizeOperationTest = "test" + kustomizeReplacePluginOption = "/spec/options/%d" + kustomizeTestPluginOption = "/spec/options/%d/name" + kustomizeOperationReplace = "replace" + kustomizeMetadataNamePath = "/metadata/name" + kustomizeHelmRepoPatch = "/spec/helmChart/repository" ) // Operation model for patch operations type Operation 
struct { - Op string - Path string - Value string + Op string `json:"op"` + Path string `json:"path"` + Value any `json:"value,omitempty"` } -// Data holds all the patch ops -type Data struct { - Operations []Operation -} - -const opsTmpl = `{{- range .Operations}} -- op: {{ .Op }} - path: {{ .Path }} - value: {{ .Value }} -{{- end}}` - -// indent is a small helper to indent a multi-line string -func indent(spaces int, s string) string { - prefix := bytes.Repeat([]byte(" "), spaces) - out := &bytes.Buffer{} - for i, line := range bytes.Split([]byte(s), []byte("\n")) { - if i > 0 { - out.WriteByte('\n') - } - out.Write(prefix) - out.Write(line) - } - return out.String() -} - -func constructPatchOperations(op, path, value string) Operation { +func constructPatchOperations(op, path string, value any) Operation { return Operation{ Op: op, Path: path, @@ -63,26 +40,25 @@ func constructPatchOperations(op, path, value string) Operation { } } -func patchTemplate(ops []Operation) (string, error) { - tmpl, err := template.New("patchOps").Funcs(template.FuncMap{ - "indent": indent, - }).Parse(opsTmpl) - if err != nil { - return "", err - } - d := Data{ - Operations: ops, +func BuildJSONPatchReplace(opt greenhousev1alpha1.PluginOption, index int, group, name string) (fluxkust.Patch, error) { + ops := []Operation{ + constructPatchOperations(kustomizeOperationTest, fmt.Sprintf(kustomizeTestPluginOption, index), opt.Name), + constructPatchOperations(kustomizeOperationReplace, fmt.Sprintf(kustomizeReplacePluginOption, index), opt), } - var buf bytes.Buffer - if err := tmpl.Execute(&buf, d); err != nil { - return "", err + raw, err := json.Marshal(ops) + if err != nil { + return fluxkust.Patch{}, err } - - // Remove leading newline characters from the output - return strings.TrimLeft(buf.String(), "\n"), nil + return fluxkust.Patch{ + Patch: string(raw), + Target: &fluxkust.Selector{ + Group: group, + Name: name, + }, + }, nil } -func PrepareKustomizePatches(overrides 
[]greenhouseapisv1alpha1.CatalogOverrides, group string) ([]fluxkust.Patch, error) { +func PrepareKustomizePatches(overrides []greenhousev1alpha1.CatalogOverrides, group string) ([]fluxkust.Patch, error) { patches := make([]fluxkust.Patch, 0) for _, override := range overrides { if override.Alias == "" && override.Repository == "" { @@ -95,12 +71,12 @@ func PrepareKustomizePatches(overrides []greenhouseapisv1alpha1.CatalogOverrides if override.Repository != "" { operations = append(operations, constructPatchOperations(kustomizeOperationReplace, kustomizeHelmRepoPatch, override.Repository)) } - patched, err := patchTemplate(operations) + patched, err := json.Marshal(operations) if err != nil { return nil, err } patch := fluxkust.Patch{ - Patch: patched, + Patch: string(patched), Target: &fluxkust.Selector{ Group: group, Name: override.Name, diff --git a/internal/helm/manifest.go b/internal/helm/manifest.go index 7254398c6..5aa505014 100644 --- a/internal/helm/manifest.go +++ b/internal/helm/manifest.go @@ -31,9 +31,8 @@ type ManifestObject struct { // ManifestObjectFilter is used to filter for objects in a Helm manifest. 
type ManifestObjectFilter struct { - APIVersion, - Kind string - Annotations map[string]string + APIVersion, Kind, Name string + Annotations map[string]string } type ObjectList struct { @@ -68,6 +67,9 @@ func (o *ManifestObjectFilter) Matches(obj *resource.Info) bool { if o.APIVersion != "" && o.APIVersion != gvk.Version { return false } + if o.Name != "" && o.Name != obj.Name { + return false + } if o.Annotations != nil { metaAccessor, err := meta.Accessor(obj.Object) if err != nil { @@ -98,16 +100,34 @@ func ObjectMapFromManifest(restClientGetter genericclioptions.RESTClientGetter, if err != nil { return nil, fmt.Errorf("error loading manifest: %w", err) } - allObjects := make(map[ObjectKey]*ManifestObject, 0) - err = r.Visit(func(info *resource.Info, err error) error { + return objectMapping(r, f) +} + +func ObjectMapFromLocalManifest(f ManifestFilter, manifest string) (map[ObjectKey]*ManifestObject, error) { + r, err := loadLocalManifest(manifest) + if err != nil { + return nil, fmt.Errorf("error loading local manifest: %w", err) + } + return objectMapping(r, f) +} + +func objectMapping(r *resource.Result, f ManifestFilter) (map[ObjectKey]*ManifestObject, error) { + allObjects := make(map[ObjectKey]*ManifestObject) + err := r.Visit(func(info *resource.Info, err error) error { if err != nil { return err } if f != nil && !f.Matches(info) { return nil } + var gvk schema.GroupVersionKind + if info.Mapping != nil { + gvk = info.Mapping.GroupVersionKind + } else { + gvk = info.Object.GetObjectKind().GroupVersionKind() + } key := ObjectKey{ - GVK: info.Mapping.GroupVersionKind, + GVK: gvk, Namespace: info.Namespace, Name: info.Name, } @@ -121,6 +141,22 @@ func ObjectMapFromManifest(restClientGetter genericclioptions.RESTClientGetter, return allObjects, err } +func loadLocalManifest(manifest string) (*resource.Result, error) { + reader := strings.NewReader(manifest) + r := resource. + NewLocalBuilder(). + Unstructured(). + Stream(reader, "manifest"). 
+ ContinueOnError(). + Flatten(). + Do(). + IgnoreErrors(meta.IsNoMatchError) + if err := r.Err(); err != nil { + return nil, err + } + return r, nil +} + // loadManifest loads a manifest string into a resource.Result. It ignores unknown schema errors if the CRD is not yet present. func loadManifest(restClientGetter genericclioptions.RESTClientGetter, namespace, manifest string) (*resource.Result, error) { reader := strings.NewReader(manifest) diff --git a/types/typescript/schema.d.ts b/types/typescript/schema.d.ts index a07f11676..531e89f37 100644 --- a/types/typescript/schema.d.ts +++ b/types/typescript/schema.d.ts @@ -64,6 +64,13 @@ export interface components { alias?: string; /** @description Name is the name of the PluginDefinition to patch with an alias */ name: string; + /** @description OptionsOverride are the option values to override in the PluginDefinition .spec.options[] */ + optionsOverride?: { + /** @description Name is the name of the option value to override in the PluginDefinition */ + name: string; + /** @description Value is the value to set as Default in the PluginDefinition option */ + value: unknown; + }[]; /** @description Repository is the repository to override in the PluginDefinition .spec.helmChart.repository */ repository?: string; }[];