Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 33 additions & 0 deletions e2e/adapter/adapter_with_maestro.go
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,12 @@ var _ = ginkgo.Describe("[Suite: adapter][maestro-transport] Adapter Framework -
Expect(resourceBundle.Metadata.Labels).To(HaveKey(client.KeyAdapter))
Expect(resourceBundle.Metadata.Labels[client.KeyAdapter]).To(Equal(adapterName))

// Verify Go template conditional label: platformType captured from cluster spec ({{ if .platformType }})
Expect(resourceBundle.Metadata.Labels).To(HaveKey("hyperfleet.io/platform-type"),
"ManifestWork should have platform-type label from {{ if .platformType }} Go template")
Expect(resourceBundle.Metadata.Labels["hyperfleet.io/platform-type"]).To(Equal("gcp"),
"platform-type label should match cluster spec.platform.type")

// Verify annotations
Expect(resourceBundle.Metadata.Annotations).To(HaveKey(client.KeyGeneration))
Expect(resourceBundle.Metadata.Annotations[client.KeyGeneration]).To(Equal("1"))
Expand Down Expand Up @@ -182,6 +188,33 @@ var _ = ginkgo.Describe("[Suite: adapter][maestro-transport] Adapter Framework -
g.Expect(cm.Data).To(HaveKey("cluster_name"))
g.Expect(cm.Data["cluster_name"]).To(Equal(clusterName))

// Verify Go template {{ if }}/{{ else }} conditional:
// platformType is captured from spec.platform.type via CEL; cluster payload has type="gcp"
// so {{ if eq .platformType "gcp" }} renders platform_tier="cloud", else "onprem"
g.Expect(cm.Data).To(HaveKeyWithValue("platform_tier", "cloud"),
"ConfigMap should have platform_tier=cloud from {{ if eq .platformType \"gcp\" }} Go template")

// Verify Go template {{ range }} over dynamic subnet list captured from cluster spec
// Each subnet in spec.platform.gcp.subnets produces 3 keys: subnet_{id}_name, subnet_{id}_cidr, subnet_{id}_role
expectedSubnets := []struct {
id, name, cidr, role string
}{
{"subnet-control-plane-01", "control-plane", "10.0.1.0/24", "control-plane"},
{"subnet-worker-01", "worker-nodes", "10.0.2.0/24", "worker"},
{"subnet-service-01", "service-mesh", "10.0.3.0/24", "service"},
}
for _, subnet := range expectedSubnets {
nameKey := fmt.Sprintf("subnet_%s_name", subnet.id)
cidrKey := fmt.Sprintf("subnet_%s_cidr", subnet.id)
roleKey := fmt.Sprintf("subnet_%s_role", subnet.id)
g.Expect(cm.Data).To(HaveKeyWithValue(nameKey, subnet.name),
"ConfigMap should have %s=%s from {{ range .subnets }} Go template", nameKey, subnet.name)
g.Expect(cm.Data).To(HaveKeyWithValue(cidrKey, subnet.cidr),
"ConfigMap should have %s=%s from {{ range .subnets }} Go template", cidrKey, subnet.cidr)
g.Expect(cm.Data).To(HaveKeyWithValue(roleKey, subnet.role),
"ConfigMap should have %s=%s from {{ range .subnets }} Go template", roleKey, subnet.role)
}

ginkgo.GinkgoWriter.Printf("Verified K8s resources created: namespace=%s, configmap=%s\n",
namespaceName, configmapName)
}, h.Cfg.Timeouts.Adapter.Processing, h.Cfg.Polling.Interval).Should(Succeed())
Expand Down
45 changes: 42 additions & 3 deletions test-design/testcases/adapter-with-maestro-transport.md
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,11 @@ kubectl port-forward -n hyperfleet svc/hyperfleet-api 8000:8000 &

This test validates the complete Maestro transport happy path. Creating a cluster via the HyperFleet API triggers the adapter to create a ManifestWork (resource bundle) on the Maestro server. The Maestro agent then applies the ManifestWork content to the target cluster (verified via kubectl). Finally, the adapter discovers the ManifestWork and its nested sub-resources via statusFeedback, evaluates post-processing CEL expressions, and reports the final status back to the HyperFleet API.

This test also validates Go template structural syntax in manifest resource refs:
- **`{{ if .platformType }}`**: Conditional label rendered when `platformType` is captured from cluster spec
- **`{{ if eq .platformType "gcp" }}` / `{{ else }}`**: Conditional with else branch for ConfigMap data (`platform_tier`)
- **`{{ range $i, $subnet := .subnets }}`**: Iteration over a dynamic list (subnets captured from cluster spec via CEL)

---

| **Field** | **Value** |
Expand All @@ -76,7 +81,7 @@ This test validates the complete Maestro transport happy path: creating a cluste
| **Automation** | Automated |
| **Version** | MVP |
| **Created** | 2026-02-12 |
| **Updated** | 2026-03-02 |
| **Updated** | 2026-04-03 |

---

Expand All @@ -87,6 +92,8 @@ This test validates the complete Maestro transport happy path: creating a cluste
3. At least one Maestro consumer is registered (e.g., `${MAESTRO_CONSUMER}`)
4. Adapter is deployed in Maestro transport mode (`transport.client: "maestro"`)
5. Adapter task config defines nestedDiscoveries (`namespace0`, `configmap0`) and post-processing CEL expressions
6. Adapter task config captures `platformType` and `subnets` from cluster spec via CEL expressions
7. Manifest resource ref uses Go template structural syntax: `{{ if }}`, `{{ else }}`, `{{ range }}`

---

Expand All @@ -100,10 +107,24 @@ CLUSTER_ID=$(curl -s -X POST ${API_URL}/api/hyperfleet/v1/clusters \
-d '{
"kind": "Cluster",
"name": "maestro-happy-path-'$(date +%Y%m%d-%H%M%S)'",
"labels": {
"environment": "test"
},
"spec": {
"platform": {
"type": "gcp",
"gcp": {"projectID": "test-project", "region": "us-central1"}
"gcp": {
"projectID": "test-project",
"region": "us-central1",
"zone": "us-central1-a",
"network": "default",
"subnet": "default-subnet",
"subnets": [
{"id": "subnet-control-plane-01", "name": "control-plane", "cidr": "10.0.1.0/24", "role": "control-plane"},
{"id": "subnet-worker-01", "name": "worker-nodes", "cidr": "10.0.2.0/24", "role": "worker"},
{"id": "subnet-service-01", "name": "service-mesh", "cidr": "10.0.3.0/24", "role": "service"}
]
}
},
"release": {"version": "4.14.0"}
}
Expand Down Expand Up @@ -170,13 +191,18 @@ kubectl exec -n maestro deployment/maestro -- \
- Labels: `hyperfleet.io/cluster-id`, `hyperfleet.io/adapter`
- Annotations: `hyperfleet.io/managed-by`

3. **Go template conditional label** (rendered from `{{ if .platformType }}`):
- `hyperfleet.io/platform-type`: set to cluster's `spec.platform.type` value (e.g., `"gcp"`)
- This label is only present when `platformType` is non-empty (captured via CEL from cluster spec)

Example output:
```json
{
"labels": {
"hyperfleet.io/cluster-id": "${CLUSTER_ID}",
"hyperfleet.io/generation": "1, code logic: set from cluster generation",
"hyperfleet.io/adapter": "${ADAPTER_NAME}, template config: identifies the adapter"
"hyperfleet.io/adapter": "${ADAPTER_NAME}, template config: identifies the adapter",
"hyperfleet.io/platform-type": "gcp, Go template conditional: {{ if .platformType }}"
},
"annotations": {
"hyperfleet.io/generation": "1, code logic: used for idempotency check",
Expand Down Expand Up @@ -246,6 +272,19 @@ kubectl get configmap ${CLUSTER_ID}-${ADAPTER_NAME}-configmap \
**Expected Result:**
- Namespace `${CLUSTER_ID}-${ADAPTER_NAME}-namespace` exists and is `Active`
- ConfigMap `${CLUSTER_ID}-${ADAPTER_NAME}-configmap` exists in the namespace
- ConfigMap data contains Go template rendered values:
- `cluster_id`: matches `${CLUSTER_ID}`
- `cluster_name`: matches the cluster name
- `platform_tier`: `"cloud"` (from `{{ if eq .platformType "gcp" }}` Go template conditional — cluster spec has `platform.type: "gcp"`)
- `subnet_subnet-control-plane-01_name`: `"control-plane"` (from `{{ range .subnets }}` Go template iteration)
- `subnet_subnet-control-plane-01_cidr`: `"10.0.1.0/24"`
- `subnet_subnet-control-plane-01_role`: `"control-plane"`
- `subnet_subnet-worker-01_name`: `"worker-nodes"`
- `subnet_subnet-worker-01_cidr`: `"10.0.2.0/24"`
- `subnet_subnet-worker-01_role`: `"worker"`
- `subnet_subnet-service-01_name`: `"service-mesh"`
- `subnet_subnet-service-01_cidr`: `"10.0.3.0/24"`
- `subnet_subnet-service-01_role`: `"service"`

#### Step 6: Verify adapter status report to HyperFleet API
**Action:**
Expand Down
144 changes: 11 additions & 133 deletions testdata/adapter-configs/cl-maestro/adapter-task-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ params:
source: "env.NAMESPACE"
type: "string"


# Preconditions with valid operators and CEL expressions
preconditions:
- name: "clusterStatus"
Expand All @@ -41,6 +40,14 @@ preconditions:
: "False"
- name: "placementClusterName"
expression: "\"cluster1\"" # TBC coming from placement adapter
- name: "platformType"
expression: |
has(spec.platform) && has(spec.platform.type) ? spec.platform.type : ""
- name: "subnets"
expression: |
has(spec.platform) && has(spec.platform.gcp) && has(spec.platform.gcp.subnets)
? spec.platform.gcp.subnets
: []


# Structured conditions with valid operators
Expand All @@ -62,139 +69,10 @@ resources:
maestro:
target_cluster: "{{ .placementClusterName }}"

# ManifestWork is a kind of manifest that can be used to create resources on the cluster.
# It is a collection of resources that are created together.
# ManifestWork manifest ref with Go template structural syntax ({{ if }}, {{ else }})
# This validates that Go template conditionals work with Maestro transport
manifest:
apiVersion: work.open-cluster-management.io/v1
kind: ManifestWork
metadata:
# ManifestWork name - must be unique within consumer namespace
name: "{{ .clusterId }}-{{ .adapter.name }}"

# Labels for identification, filtering, and management
labels:
# HyperFleet tracking labels
hyperfleet.io/cluster-id: "{{ .clusterId }}"
hyperfleet.io/adapter: "{{ .adapter.name }}"
hyperfleet.io/component: "infrastructure"
hyperfleet.io/generation: "{{ .generation }}"
hyperfleet.io/resource-group: "cluster-setup"

# Maestro-specific labels
maestro.io/source-id: "{{ .adapter.name }}"
maestro.io/resource-type: "manifestwork"
maestro.io/priority: "normal"

# Standard Kubernetes application labels
app.kubernetes.io/name: "aro-hcp-cluster"
app.kubernetes.io/instance: "{{ .clusterId }}"
app.kubernetes.io/version: "v1.0.0"
app.kubernetes.io/component: "infrastructure"
app.kubernetes.io/part-of: "hyperfleet"
app.kubernetes.io/managed-by: "cl-maestro"
app.kubernetes.io/created-by: "{{ .adapter.name }}"

# Annotations for metadata and operational information
annotations:
# Tracking and lifecycle
hyperfleet.io/created-by: "cl-maestro-framework"
hyperfleet.io/managed-by: "{{ .adapter.name }}"
hyperfleet.io/generation: "{{ .generation }}"
hyperfleet.io/cluster-id: "{{ .clusterId }}"
hyperfleet.io/cluster-name: "{{ .clusterName }}"
hyperfleet.io/deployment-time: "{{ .timestamp }}"

# Maestro-specific annotations
maestro.io/applied-time: "{{ .timestamp }}"
maestro.io/source-adapter: "{{ .adapter.name }}"

# Documentation
description: "Complete cluster setup including namespace, configuration, and RBAC"

# ManifestWork specification
spec:
# ============================================================================
# Workload - Contains the Kubernetes manifests to deploy
# ============================================================================
workload:
# Kubernetes manifests array - injected by framework from business logic config
manifests:
- apiVersion: v1
kind: Namespace
metadata:
name: "{{ .clusterId | lower }}-{{ .adapter.name }}-namespace"
labels:
app.kubernetes.io/component: adapter-task-config
app.kubernetes.io/instance: "{{ .adapter.name }}"
app.kubernetes.io/name: cl-maestro
app.kubernetes.io/transport: maestro
annotations:
hyperfleet.io/generation: "{{ .generation }}"
- apiVersion: v1
kind: ConfigMap
data:
cluster_id: "{{ .clusterId }}"
cluster_name: "{{ .clusterName }}"
metadata:
name: "{{ .clusterId | lower }}-{{ .adapter.name }}-configmap"
namespace: "{{ .clusterId | lower }}-{{ .adapter.name }}-namespace"
labels:
app.kubernetes.io/component: adapter-task-config
app.kubernetes.io/instance: "{{ .adapter.name }}"
app.kubernetes.io/name: cl-maestro
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/transport: maestro
annotations:
hyperfleet.io/generation: "{{ .generation }}"

# ============================================================================
# Delete Options - How resources should be removed
# ============================================================================
deleteOption:
# Propagation policy for resource deletion
# - "Foreground": Wait for dependent resources to be deleted first
# - "Background": Delete immediately, let cluster handle dependents
# - "Orphan": Leave resources on cluster when ManifestWork is deleted
propagationPolicy: "Foreground"

# Grace period for graceful deletion (seconds)
gracePeriodSeconds: 30

# ============================================================================
# Manifest Configurations - Per-resource settings for update and feedback
# ============================================================================
manifestConfigs:
- resourceIdentifier:
group: "" # Core API group (empty for v1 resources)
resource: "namespaces" # Resource type
name: "{{ .clusterId | lower }}-{{ .adapter.name }}-namespace" # Specific resource name
updateStrategy:
type: "ServerSideApply" # Use server-side apply for namespaces
feedbackRules:
- type: "JSONPaths" # Use JSON path expressions for status feedback
jsonPaths:
- name: "phase"
path: ".status.phase"
# ========================================================================
# Configuration for Namespace resources
# ========================================================================
- resourceIdentifier:
group: "" # Core API group (empty for v1 resources)
resource: "configmaps" # Resource type
name: "{{ .clusterId | lower }}-{{ .adapter.name }}-configmap" # Specific resource name
namespace: "{{ .clusterId | lower }}-{{ .adapter.name }}-namespace"
updateStrategy:
type: "ServerSideApply" # Use server-side apply for namespaces
serverSideApply:
fieldManager: "cl-maestro" # Field manager name for conflict resolution
force: false # Don't force conflicts (fail on conflicts)
feedbackRules:
- type: "JSONPaths" # Use JSON path expressions for status feedback
jsonPaths:
- name: "data"
path: ".data"
- name: "resourceVersion"
path: ".metadata.resourceVersion"
ref: "/etc/adapter/manifestwork.yaml"
# Discover the ResourceBundle (ManifestWork) by name from Maestro
discovery:
by_name: "{{ .clusterId }}-{{ .adapter.name }}"
Expand Down
Loading