From 6b8c19b8d4f3855d2a27579de9d28519c3d0480a Mon Sep 17 00:00:00 2001 From: tingxueca Date: Thu, 26 Feb 2026 11:42:23 -0500 Subject: [PATCH 1/3] adopt telco-reference release 421 --- ansible/rhacm-downstream-deploy-421.yml | 43 +++ ansible/rhacm-ztp-setup-421.yml | 18 + .../rhacm-ztp-patches-421/tasks/main.yml | 39 ++ .../rhacm-ztp-setup-421/defaults/main.yml | 18 + .../roles/rhacm-ztp-setup-421/tasks/main.yml | 105 ++++++ .../telco-ran-du-ztp-421/defaults/main.yml | 74 ++++ .../roles/telco-ran-du-ztp-421/tasks/Untitled | 1 + .../roles/telco-ran-du-ztp-421/tasks/main.yml | 197 ++++++++++ .../98-var-lib-containers-partitioned.yaml | 46 +++ .../templates/999-sync-time-once-master.yaml | 26 ++ .../templates/999-sync-time-once-worker.yaml | 26 ++ .../templates/BareMetalHostOverride.yaml | 24 ++ .../KlusterletAddonConfigOverride.yaml.j2 | 25 ++ .../templates/cluster-kustomization.yaml | 15 + .../templates/cluster-ns-application.yaml | 33 ++ .../templates/enable-crun-master.yaml | 10 + .../templates/enable-crun-worker.yaml | 10 + .../templates/hub-template-maps.yaml | 22 ++ .../templates/ns-upgrade.yaml | 24 ++ .../telco-ran-du-ztp-421/templates/ns.yaml | 40 ++ .../pgt-du-4.21/common-mno-ranGen.yaml | 18 + .../templates/pgt-du-4.21/common-ranGen.yaml | 210 +++++++++++ .../pgt-du-4.21/group-du-3node-ranGen.yaml | 155 ++++++++ .../group-du-3node-validator-ranGen.yaml | 28 ++ .../pgt-du-4.21/group-du-sno-ranGen.yaml | 355 ++++++++++++++++++ .../group-du-sno-validator-ranGen.yaml | 28 ++ .../pgt-du-4.21/group-du-standard-ranGen.yaml | 145 +++++++ .../group-du-standard-validator-ranGen.yaml | 28 ++ .../templates/policy-common-and-group.yaml | 21 ++ .../templates/policy-kustomization.yaml | 78 ++++ .../templates/s3-creds.j2 | 3 + .../templates/seedgen-auth.j2 | 1 + .../templates/sno-site.yaml | 49 +++ .../source-crs/DataProtectionApplication.yaml | 36 ++ .../source-crs/ImageBasedUpgrade.yaml | 11 + .../source-crs/LcaOperatorStatus.yaml | 27 ++ 
.../templates/source-crs/LcaSecret.yaml | 9 + .../templates/source-crs/LcaSubscription.yaml | 16 + .../source-crs/LcaSubscriptionNS.yaml | 10 + .../source-crs/LcaSubscriptionOperGroup.yaml | 11 + .../OadpBackupStorageLocationStatus.yaml | 11 + .../templates/source-crs/OadpCm.yaml | 44 +++ .../source-crs/OadpOperatorStatus.yaml | 27 ++ .../templates/source-crs/OadpSecret.yaml | 9 + .../source-crs/OadpSubscription.yaml | 16 + .../source-crs/OadpSubscriptionNS.yaml | 10 + .../source-crs/OadpSubscriptionOperGroup.yaml | 11 + 47 files changed, 2163 insertions(+) create mode 100644 ansible/rhacm-downstream-deploy-421.yml create mode 100644 ansible/rhacm-ztp-setup-421.yml create mode 100644 ansible/roles/rhacm-ztp-patches-421/tasks/main.yml create mode 100644 ansible/roles/rhacm-ztp-setup-421/defaults/main.yml create mode 100644 ansible/roles/rhacm-ztp-setup-421/tasks/main.yml create mode 100644 ansible/roles/telco-ran-du-ztp-421/defaults/main.yml create mode 100644 ansible/roles/telco-ran-du-ztp-421/tasks/Untitled create mode 100644 ansible/roles/telco-ran-du-ztp-421/tasks/main.yml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/98-var-lib-containers-partitioned.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/999-sync-time-once-master.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/999-sync-time-once-worker.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/BareMetalHostOverride.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/KlusterletAddonConfigOverride.yaml.j2 create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/cluster-kustomization.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/cluster-ns-application.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/enable-crun-master.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/enable-crun-worker.yaml create mode 100644 
ansible/roles/telco-ran-du-ztp-421/templates/hub-template-maps.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/ns-upgrade.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/ns.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/common-mno-ranGen.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/common-ranGen.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-3node-ranGen.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-3node-validator-ranGen.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-sno-ranGen.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-sno-validator-ranGen.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-standard-ranGen.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-standard-validator-ranGen.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/policy-common-and-group.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/policy-kustomization.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/s3-creds.j2 create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/seedgen-auth.j2 create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/sno-site.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/source-crs/DataProtectionApplication.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/source-crs/ImageBasedUpgrade.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaOperatorStatus.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSecret.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSubscription.yaml create mode 100644 
ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSubscriptionNS.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSubscriptionOperGroup.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpBackupStorageLocationStatus.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpCm.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpOperatorStatus.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpSecret.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpSubscription.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpSubscriptionNS.yaml create mode 100644 ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpSubscriptionOperGroup.yaml diff --git a/ansible/rhacm-downstream-deploy-421.yml b/ansible/rhacm-downstream-deploy-421.yml new file mode 100644 index 0000000..fe3b37f --- /dev/null +++ b/ansible/rhacm-downstream-deploy-421.yml @@ -0,0 +1,43 @@ +--- +# Playbook to install RHACM Downstream Build +# +# Example Usage: +# +# time ansible-playbook -i ansible/inventory/cloud30.local ansible/rhacm-downstream-deploy.yml +# + +- name: Setup downstream RHACM Mirror + hosts: bastion + vars_files: + - vars/all.yml + vars: + wait_for_machineconfigpool_update: false + roles: + - rhacm-hub-mirror + +# Konflux builds no longer require this workaround +- name: Fix ipv6/disconnected nodes /etc/containers/registries.conf for tag mirroring + hosts: fix_icsp_nodes + vars_files: + - vars/all.yml + roles: + - icsp-enable-tag-mirror + +- name: Install downstream RHACM + hosts: bastion + vars_files: + - vars/all.yml + roles: + - rhacm-downstream-deploy + - role: rhacm-observability + when: setup_rhacm_observability + - role: talm-deploy + when: setup_talm_operator or setup_talm_repo + - role: rhacm-ztp-patches-421 + when: setup_rhacm_ztp_patches + - role: 
rhacm-siteconfig-operator + when: acm_enable_siteconfig + - mce-assisted-installer + - role: mce-image-based-install + when: mce_enable_ibio + - mce-add-clusterimagesets diff --git a/ansible/rhacm-ztp-setup-421.yml b/ansible/rhacm-ztp-setup-421.yml new file mode 100644 index 0000000..ef6d7e5 --- /dev/null +++ b/ansible/rhacm-ztp-setup-421.yml @@ -0,0 +1,18 @@ +--- +# Playbook to setup ZTP for RHACM +# +# Example Usage: +# +# time ansible-playbook -i ansible/inventory/cloud30.local ansible/rhacm-ztp-setup.yml +# + +- name: Setup RHACM ZTP + hosts: bastion + vars_files: + - vars/all.yml + roles: + - rhacm-ztp-setup-421 + - role: telco-core-ztp + when: setup_core_ztp + - role: telco-ran-du-ztp-421 + when: setup_ran_du_ztp diff --git a/ansible/roles/rhacm-ztp-patches-421/tasks/main.yml b/ansible/roles/rhacm-ztp-patches-421/tasks/main.yml new file mode 100644 index 0000000..1466b0a --- /dev/null +++ b/ansible/roles/rhacm-ztp-patches-421/tasks/main.yml @@ -0,0 +1,39 @@ +--- +# rhacm-ztp-patches tasks + +- name: Apply the ArgoCD Deployment kustomization + shell: | + KUBECONFIG={{ hub_cluster_kubeconfig }} oc apply -k {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/deployment + +- name: Apply ArgoCD telco-reference argocd-openshift-gitops-patch.json + shell: | + KUBECONFIG={{ hub_cluster_kubeconfig }} oc patch argocd openshift-gitops -n openshift-gitops --patch-file {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/deployment/argocd-openshift-gitops-patch.json --type=merge + +- name: Wait for openshift-gitops-repo-server pod running + shell: | + KUBECONFIG={{ hub_cluster_kubeconfig }} oc get pods -n openshift-gitops -l app.kubernetes.io/name=openshift-gitops-repo-server -o jsonpath='{.items[0].status.phase}' + retries: 60 + delay: 2 + register: as_pod + until: as_pod.stdout == "Running" + +# Although we wait for the new repo-server pod to be running, we can still apply the cluster and policy applications too 
quickly +- name: Pause for 15s + pause: + seconds: 15 + +# View the resources with oc get argocd -n openshift-gitops openshift-gitops -o json | jq '.spec.redis.resources' +- name: Patch openshift-gitops redis memory requests/limits + when: gitops_redis_mem_patch + shell: | + KUBECONFIG={{ hub_cluster_kubeconfig }} oc patch argocd -n openshift-gitops openshift-gitops --type json -p '[{"op": "replace", "path": "/spec/redis/resources/limits/memory", "value": "8Gi"}, {"op": "replace", "path": "/spec/redis/resources/requests/memory", "value": "256Mi"}]' + +- name: Apply ArgoCD Cluster Applications + when: setup_ztp_cluster_applications + shell: | + KUBECONFIG={{ hub_cluster_kubeconfig }} oc apply -f {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster-applications/ + +- name: Apply the ArgoCD Policy Applications + when: setup_ztp_policy_application + shell: | + KUBECONFIG={{ hub_cluster_kubeconfig }} oc apply -f {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy-applications/ diff --git a/ansible/roles/rhacm-ztp-setup-421/defaults/main.yml b/ansible/roles/rhacm-ztp-setup-421/defaults/main.yml new file mode 100644 index 0000000..39776c8 --- /dev/null +++ b/ansible/roles/rhacm-ztp-setup-421/defaults/main.yml @@ -0,0 +1,18 @@ +--- +# rhacm-ztp-setup default vars + +# Base repo with branch to host both cluster and policy applications and data +telco_reference_repo: https://github.com/openshift-kni/telco-reference.git +telco_reference_branch: release-4.21 + +# Gogs service to commit repo to +gogs_host: "[fc00:1000::1]" +gogs_port: 10880 +gogs_username: testadmin +gogs_password: testadmin + +# The ztp-site-generator container image tag +ztp_site_generator_image_tag: v4.21.0-1 + +# RHACM Policy Generator Tag +rhacm_policy_generator_image_tag: v2.13.4-2 diff --git a/ansible/roles/rhacm-ztp-setup-421/tasks/main.yml b/ansible/roles/rhacm-ztp-setup-421/tasks/main.yml new file mode 100644 index 
0000000..70af0d9 --- /dev/null +++ b/ansible/roles/rhacm-ztp-setup-421/tasks/main.yml @@ -0,0 +1,105 @@ +--- +# rhacm-ztp-setup tasks + +- name: Clear old telco-reference clone + file: + path: "{{ item }}" + state: absent + loop: + - "{{ install_directory }}/rhacm-ztp/telco-reference" + +- name: Create directories for rhacm-ztp + file: + path: "{{ item }}" + state: directory + loop: + - "{{ install_directory }}/rhacm-ztp" + - "{{ install_directory }}/rhacm-ztp/telco-reference" + +- name: Clone telco-reference + git: + repo: "{{ telco_reference_repo }}" + dest: "{{ install_directory }}/rhacm-ztp/telco-reference" + force: true + version: "{{ telco_reference_branch }}" + +# Perhaps we can detect if the repo exists already instead of ignoring errors +- name: Create telco-reference repo in gogs + uri: + url: "http://{{ gogs_host }}:{{ gogs_port }}/api/v1/admin/users/{{ gogs_username }}/repos" + user: "{{ gogs_username }}" + password: "{{ gogs_password }}" + force_basic_auth: true + method: POST + headers: + content-type: application/json + Accept: application/json + body: {"name": "telco-reference", "description": "test repo", "private": false} + body_format: json + validate_certs: no + status_code: 201 + return_content: yes + ignore_errors: true + +# The --force flag on the git push forces the gogs fork of the repo to be reset to the above "clone" +- name: Push telco-reference into gogs + shell: | + cd {{ install_directory }}/rhacm-ztp/telco-reference + git remote add origin-gogs http://{{ gogs_username }}:{{ gogs_password }}@{{ gogs_host }}:{{ gogs_port }}/testadmin/telco-reference.git + git push -u origin-gogs --all --force + + + +- name: Patches for telco-reference argocd-openshift-gitops-patch.json + replace: + path: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/deployment/argocd-openshift-gitops-patch.json" + replace: "{{ item.replace }}" + regexp: "{{ item.regexp }}" + when: item.enabled | default(true) + loop: + - replace: "{{ 
rhacm_disconnected_registry }}:{{ rhacm_disconnected_registry_port }}" + regexp: "quay.io" + enabled: "{{ rhacm_disconnected_registry and rhacm_disconnected_registry|length > 1 }}" + # - replace: "openshift-kni" + # regexp: "redhat_emp1" + # For rhacm policy generator release-4.14 branch + - replace: "{{ rhacm_disconnected_registry }}:{{ rhacm_disconnected_registry_port }}" + regexp: "registry.redhat.io" + enabled: "{{ rhacm_disconnected_registry and rhacm_disconnected_registry|length > 1 }}" + # - replace: "acm-d" + # regexp: "rhacm2" + # enabled: "{{ rhacm_disconnected_registry and rhacm_disconnected_registry|length > 1 }}" + # release-4.17 telco-reference + - replace: "{{ rhacm_policy_generator_image_tag }}" + regexp: "v2.11" + - replace: "{{ rhacm_policy_generator_image_tag }}" + regexp: "v2.7" + # release-4.11 telco-reference + - replace: "{{ ztp_site_generator_image_tag }}" + regexp: "latest" + # release-4.10 telco-reference + - replace: "{{ ztp_site_generator_image_tag }}" + regexp: "4.10.0" + +- name: Remove the cluster/policies app from telco-reference argocd deployment + file: + state: absent + path: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/deployment/{{ item }}" + loop: + - clusters-app.yaml + - policies-app.yaml + +- name: Remove clusters-app.yaml and policies-app.yaml from telco-reference argocd deployment kustomization.yaml + replace: + path: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/deployment/kustomization.yaml" + replace: "" + regexp: "{{ item }}" + loop: + - ".*- clusters-app.yaml" + - ".*- policies-app.yaml" + +- name: Commit and push ZTP initial configuration to telco-reference + shell: | + cd {{ install_directory }}/rhacm-ztp/telco-reference + git commit -a -m "Add common ZTP inital configuration" + git push origin-gogs diff --git a/ansible/roles/telco-ran-du-ztp-421/defaults/main.yml b/ansible/roles/telco-ran-du-ztp-421/defaults/main.yml new file mode 100644 index 
0000000..a160461 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/defaults/main.yml @@ -0,0 +1,74 @@ +--- +# telco-ran-du-ztp default vars + +# Pre-creates manifests for the desired number of argocd cluster applications +cluster_applications_count: 40 + +siteconfigs_per_application: 100 + +siteconfigs_directories: +- /root/hv-vm/sno/ai-siteconfig +- /root/hv-vm/compact/ai-siteconfig +- /root/hv-vm/standard/ai-siteconfig + +##### +# Siteconfig extra-manifests to include: +##### +# Include the crun container runtime manifest with day-0 install via siteconfig extra-manifests directory +include_crun_extra_manifests: true +# Include a modified sync-time-once chronyd manifest with day-0 install instead of the ztp generator included version +# Used in conjunction with "siteconfig_exclude_sync_time_once" to prevent chronyd start and time drift issue +# See https://issues.redhat.com/browse/OCPBUGS-21740 +include_synctimeonce_extra_manifests: false +# Include a manifest to partition /var/lib/containers for IBU +include_varlibcontainers_partitioned_extra_manifests: false + +##### +# DU Profile Options +##### +# Currently can choose between 4.20, 4.19, 4.18, 4.17, 4.16, 4.15, 4.14, 4.13, and 4.12 +du_profile_version: 4.21 + +disconnected_operator_index_name: redhat/redhat-operator-index +operator_index_tag: v4.21 + +# Initialize the siteconfig list so it can be sorted later +siteconfig_list: [] + +# Adjust the name of the du profile catalog source so it does not conflict with default names +# https://bugzilla.redhat.com/show_bug.cgi?id=2074612 +common_catalogsource_name: rh-du-operators + +# These policy names were adjusted to increase the number of ztp generated policies to match as if performanceprofile +# was enabled. The original names are commented below. 
+group_policy_logforwarder_name: "config-log-policy" +group_policy_storage_name: "config-storage-policy" +# group_policy_logforwarder_name: "config-policy" +# group_policy_storage_name: "config-policy" + +# Image Based Upgrades requires the oadp operator to be installed on SNOs +include_oadp_operator: false +# Adjust URL to match your cluster's minio route +oadp_s3Url: http://minio-minio.apps.bm.example.com +s3_access_key_id: minio +s3_secret_access_key: minio123 + +# Image Based Upgrades requires the Lifecycle-agent operator to be installed on SNOs +include_lca_operator: false +# alpha for 4.15, stable for 4.16 and brew +lifecycle_agent_channel: stable +# v1alpha1 (alpha installed operator), v1 for brew (stable) +ibu_source_crs_apiversion: v1 + +# Only implemented for SNOs, organizes the CRs into 13-18 policies instead of the default of 5 policies +manyPolicies: false + +# When enabled, creates extra annotations in common PGT which use hub side templating +extraHubCommonTemplates: false +# When enabled, creates extra annotations in group PGT which use hub side templating +extraHubGroupTemplates: false +# When enabled, creates extra annotations from site specific ConfigMaps on hub in group PGT +extraHubSiteTemplates: false + +# When enabled use ACM PolicyGenerator instead of RAN PolicyGenTemplate to generate ACM policies +acm_policygenerator: false diff --git a/ansible/roles/telco-ran-du-ztp-421/tasks/Untitled b/ansible/roles/telco-ran-du-ztp-421/tasks/Untitled new file mode 100644 index 0000000..359e3d9 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/tasks/Untitled @@ -0,0 +1 @@ +source-crs \ No newline at end of file diff --git a/ansible/roles/telco-ran-du-ztp-421/tasks/main.yml b/ansible/roles/telco-ran-du-ztp-421/tasks/main.yml new file mode 100644 index 0000000..5c8cde3 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/tasks/main.yml @@ -0,0 +1,197 @@ +--- +# telco-ran-du-ztp tasks + +- name: Create cluster and policy directories for RAN DU ZTP + 
file: + path: "{{ item }}" + state: directory + loop: + - "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster-applications/" + - "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/" + - "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy-applications/" + - "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/" + - "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/" + - "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/source-crs" + +- name: Create override and extra-manifest files + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + loop: + - src: KlusterletAddonConfigOverride.yaml.j2 + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/KlusterletAddonConfigOverride.yaml" + - src: BareMetalHostOverride.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/BareMetalHostOverride.yaml" + - src: enable-crun-master.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/enable-crun-master.yaml" + - src: enable-crun-worker.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/enable-crun-worker.yaml" + - src: 98-var-lib-containers-partitioned.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/98-var-lib-containers-partitioned.yaml" + - src: 999-sync-time-once-master.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/999-sync-time-once-master.yaml" + - src: 999-sync-time-once-worker.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/999-sync-time-once-worker.yaml" 
+ +- name: Create cluster application directories in telco-reference and copy files for each + shell: | + {% for item in range(1, cluster_applications_count + 1) %} + mkdir -p "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/ztp-clusters-{{ '%02d' | format(item) }}" + cp {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/BareMetalHostOverride.yaml {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/ztp-clusters-{{ '%02d' | format(item) }}/BareMetalHostOverride.yaml + cp {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/KlusterletAddonConfigOverride.yaml {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/ztp-clusters-{{ '%02d' | format(item) }}/KlusterletAddonConfigOverride.yaml + mkdir -p "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/ztp-clusters-{{ '%02d' | format(item) }}/extra-manifests" + {% if include_crun_extra_manifests %} + cp {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/enable-crun-master.yaml {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/ztp-clusters-{{ '%02d' | format(item) }}/extra-manifests/enable-crun-master.yaml + cp {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/enable-crun-worker.yaml {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/ztp-clusters-{{ '%02d' | format(item) }}/extra-manifests/enable-crun-worker.yaml + {% endif %} + {% if include_synctimeonce_extra_manifests %} + cp {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/999-sync-time-once-master.yaml {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/ztp-clusters-{{ '%02d' | format(item) 
}}/extra-manifests/999-sync-time-once-master.yaml + cp {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/999-sync-time-once-worker.yaml {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/ztp-clusters-{{ '%02d' | format(item) }}/extra-manifests/999-sync-time-once-worker.yaml + {% endif %} + {% if include_varlibcontainers_partitioned_extra_manifests %} + cp {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/98-var-lib-containers-partitioned.yaml {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/ztp-clusters-{{ '%02d' | format(item) }}/extra-manifests/98-var-lib-containers-partitioned.yaml + {% endif %} + {% endfor %} + +- name: Patch app-project.yaml for clusterinstances (4.15, 4.16) + blockinfile: + path: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/deployment/app-project.yaml" + insertafter: " kind: SiteConfig" + block: |2 + - group: 'siteconfig.open-cluster-management.io' + kind: ClusterInstance + marker: "# {mark} ACM-deploy-load appended" + when: du_profile_version is version('4.16', '<=') + +# ignore_errors because not all directories will exist +- name: Copy siteconfigs + copy: + src: "{{ item }}" + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/" + remote_src: true + loop: "{{ siteconfigs_directories }}" + ignore_errors: true + +- name: Get siteconfig manifest names + find: + paths: + - "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/ai-siteconfig" + use_regex: true + patterns: ['.*-siteconfig.yml'] + register: siteconfigs_find + +- name: Set siteconfig list + set_fact: + siteconfig_list: "{{ siteconfig_list + [item.path | basename | replace('-siteconfig.yml', '')] }}" + loop: "{{ siteconfigs_find.files }}" + +- name: Sort siteconfig list + set_fact: + siteconfig_list: "{{ 
siteconfig_list | sort }}" + +- name: Create RAN DU cluster kustomization files in telco-reference + template: + src: cluster-kustomization.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/ztp-clusters-{{ '%02d' | format(item) }}/kustomization.yaml" + loop: "{{ range(1, cluster_applications_count + 1) | list }}" + +# Similar to the above kustomization file template, every cluster app's directory must contain the actual siteconfig and not just a relative path "down" to the resource +- name: Loop through copying correct siteconfigs into each cluster application directory + shell: | + {% for sc in range((item - 1) * siteconfigs_per_application, item * siteconfigs_per_application ) %} + {% if siteconfig_list | length > sc %} + cp {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/ai-siteconfig/{{ siteconfig_list[sc] }}-siteconfig.yml {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/ztp-clusters-{{ '%02d' | format(item) }}/{{ siteconfig_list[sc] }}-siteconfig.yml + cp {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/ai-siteconfig/{{ siteconfig_list[sc] }}-resources.yml {{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster/ztp-clusters-{{ '%02d' | format(item) }}/{{ siteconfig_list[sc] }}-resources.yml + {% endif %} + {% endfor %} + loop: "{{ range(1, cluster_applications_count + 1) | list }}" + when: siteconfig_list | length > ((item - 1) * siteconfigs_per_application) + +- name: Create RAN DU cluster application files in telco-reference + template: + src: cluster-ns-application.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/cluster-applications/ztp-clusters-{{ '%02d' | format(item) }}.yaml" + loop: "{{ range(1, cluster_applications_count + 1) | list }}" + +- name: Copy files required when using PolicyGenerator + block: 
+ - name: Copy source-crs when using PolicyGenerator + copy: + src: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/source-crs" + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/" + + - name: Copy schema.openapi when using PolicyGenerator + copy: + src: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/example/acmpolicygenerator/schema.openapi" + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/" + when: acm_policygenerator + +- name: Template the DU Profile policy manifests into telco-reference + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + loop: + - src: "{{ acm_policygenerator | ternary('pg','pgt') }}-du-{{ du_profile_version }}/common-ranGen.yaml" + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/common-ranGen.yaml" + - src: "{{ acm_policygenerator | ternary('pg','pgt') }}-du-{{ du_profile_version }}/common-mno-ranGen.yaml" + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/common-mno-ranGen.yaml" + - src: "{{ acm_policygenerator | ternary('pg','pgt') }}-du-{{ du_profile_version }}/group-du-sno-ranGen.yaml" + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/group-du-sno-ranGen.yaml" + - src: "{{ acm_policygenerator | ternary('pg','pgt') }}-du-{{ du_profile_version }}/group-du-sno-validator-ranGen.yaml" + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/group-du-sno-validator-ranGen.yaml" + - src: sno-site.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/sno-site.yaml" + - src: "{{ acm_policygenerator | ternary('pg','pgt') }}-du-{{ 
du_profile_version }}/group-du-3node-ranGen.yaml" + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/group-du-3node-ranGen.yaml" + - src: "{{ acm_policygenerator | ternary('pg','pgt') }}-du-{{ du_profile_version }}/group-du-3node-validator-ranGen.yaml" + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/group-du-3node-validator-ranGen.yaml" + - src: "{{ acm_policygenerator | ternary('pg','pgt') }}-du-{{ du_profile_version }}/group-du-standard-ranGen.yaml" + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/group-du-standard-ranGen.yaml" + - src: "{{ acm_policygenerator | ternary('pg','pgt') }}-du-{{ du_profile_version }}/group-du-standard-validator-ranGen.yaml" + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/group-du-standard-validator-ranGen.yaml" + - src: policy-kustomization.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/kustomization.yaml" + - src: ns.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/ns.yaml" + - src: ns-upgrade.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/ns-upgrade.yaml" + - src: policy-common-and-group.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy-applications/common-and-group.yaml" + - src: hub-template-maps.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/hub-template-maps.yaml" + - src: source-crs/OadpSubscriptionNS.yaml + dest: "{{ install_directory 
}}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/source-crs/OadpSubscriptionNS.yaml" + - src: source-crs/OadpSubscriptionOperGroup.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/source-crs/OadpSubscriptionOperGroup.yaml" + - src: source-crs/OadpSubscription.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/source-crs/OadpSubscription.yaml" + - src: source-crs/OadpOperatorStatus.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/source-crs/OadpOperatorStatus.yaml" + - src: source-crs/OadpSecret.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/source-crs/OadpSecret.yaml" + - src: source-crs/OadpCm.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/source-crs/OadpCm.yaml" + - src: source-crs/DataProtectionApplication.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/source-crs/DataProtectionApplication.yaml" + - src: source-crs/OadpBackupStorageLocationStatus.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/source-crs/OadpBackupStorageLocationStatus.yaml" + - src: source-crs/LcaSubscriptionNS.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/source-crs/LcaSubscriptionNS.yaml" + - src: source-crs/LcaSubscriptionOperGroup.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/source-crs/LcaSubscriptionOperGroup.yaml" + - src: source-crs/LcaSubscription.yaml + dest: "{{ install_directory 
}}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/source-crs/LcaSubscription.yaml" + - src: source-crs/LcaOperatorStatus.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/source-crs/LcaOperatorStatus.yaml" + - src: source-crs/LcaSecret.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/source-crs/LcaSecret.yaml" + - src: source-crs/ImageBasedUpgrade.yaml + dest: "{{ install_directory }}/rhacm-ztp/telco-reference/telco-ran/configuration/argocd/policy/common-and-group/source-crs/ImageBasedUpgrade.yaml" + +- name: Commit and push RAN DU ZTP to telco-reference + shell: | + cd {{ install_directory }}/rhacm-ztp/telco-reference + git add telco-ran/configuration/argocd/cluster/ telco-ran/configuration/argocd/cluster-applications/ + git add telco-ran/configuration/argocd/policy/ telco-ran/configuration/argocd/policy-applications/ + git commit -a -m "Add RAN DU SiteConfigs, Manifests, and Policies" + git push origin-gogs diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/98-var-lib-containers-partitioned.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/98-var-lib-containers-partitioned.yaml new file mode 100644 index 0000000..78f0727 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/98-var-lib-containers-partitioned.yaml @@ -0,0 +1,46 @@ +### https://cloud.redhat.com/blog/a-guide-to-creating-a-separate-disk-partition-at-installation-time +### https://docs.openshift.com/container-platform/4.13/installing/installing_vsphere/installing-vsphere.html#installation-disk-partitioning_installing-vsphere +apiVersion: machineconfiguration.openshift.io/v1 +kind: MachineConfig +metadata: + labels: + machineconfiguration.openshift.io/role: master + name: 98-var-lib-containers-partitioned +spec: + config: + ignition: + version: 3.2.0 + storage: + disks: + - device: /dev/sda + partitions: + - 
label: var-lib-containers + startMiB: 50000 # Leave room for rootfs + sizeMiB: 0 # Use available space + filesystems: + - device: /dev/disk/by-partlabel/var-lib-containers + format: xfs + mountOptions: + - defaults + - prjquota + path: /var/lib/containers + wipeFilesystem: true + systemd: + units: + - contents: |- + # Generated by Butane + [Unit] + Before=local-fs.target + Requires=systemd-fsck@dev-disk-by\x2dpartlabel-var\x2dlib\x2dcontainers.service + After=systemd-fsck@dev-disk-by\x2dpartlabel-var\x2dlib\x2dcontainers.service + + [Mount] + Where=/var/lib/containers + What=/dev/disk/by-partlabel/var-lib-containers + Type=xfs + Options=defaults,prjquota + + [Install] + RequiredBy=local-fs.target + enabled: true + name: var-lib-containers.mount diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/999-sync-time-once-master.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/999-sync-time-once-master.yaml new file mode 100644 index 0000000..61bab7e --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/999-sync-time-once-master.yaml @@ -0,0 +1,26 @@ +apiVersion: machineconfiguration.openshift.io/v1 +kind: MachineConfig +metadata: + labels: + machineconfiguration.openshift.io/role: master + name: 999-sync-time-once-master +spec: + config: + ignition: + version: 3.2.0 + systemd: + units: + - contents: | + [Unit] + Description=Sync time once + After=network.service + [Service] + Type=oneshot + TimeoutStartSec=300 + ExecCondition=/bin/bash -c 'systemctl is-enabled chronyd.service --quiet && exit 1 || exit 0' + ExecStart=/usr/sbin/chronyd -n -f /etc/chrony.conf -q + RemainAfterExit=yes + [Install] + WantedBy=multi-user.target + enabled: true + name: sync-time-once.service diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/999-sync-time-once-worker.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/999-sync-time-once-worker.yaml new file mode 100644 index 0000000..842da72 --- /dev/null +++ 
b/ansible/roles/telco-ran-du-ztp-421/templates/999-sync-time-once-worker.yaml @@ -0,0 +1,26 @@ +apiVersion: machineconfiguration.openshift.io/v1 +kind: MachineConfig +metadata: + labels: + machineconfiguration.openshift.io/role: worker + name: 999-sync-time-once-worker +spec: + config: + ignition: + version: 3.2.0 + systemd: + units: + - contents: | + [Unit] + Description=Sync time once + After=network.service + [Service] + Type=oneshot + TimeoutStartSec=300 + ExecCondition=/bin/bash -c 'systemctl is-enabled chronyd.service --quiet && exit 1 || exit 0' + ExecStart=/usr/sbin/chronyd -n -f /etc/chrony.conf -q + RemainAfterExit=yes + [Install] + WantedBy=multi-user.target + enabled: true + name: sync-time-once.service diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/BareMetalHostOverride.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/BareMetalHostOverride.yaml new file mode 100644 index 0000000..0d7ecb2 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/BareMetalHostOverride.yaml @@ -0,0 +1,24 @@ +apiVersion: metal3.io/v1alpha1 +kind: BareMetalHost +metadata: + name: "{% raw %}{{ .Node.HostName }}{% endraw %}" + namespace: "{% raw %}{{ .Cluster.ClusterName }}{% endraw %}" + annotations: + argocd.argoproj.io/sync-wave: "1" + bmac.agent-install.openshift.io/hostname: "{% raw %}{{ .Node.HostName }}{% endraw %}" + bmac.agent-install.openshift.io/installer-args: "{% raw %}{{ .Node.InstallerArgs }}{% endraw %}" + bmac.agent-install.openshift.io/ignition-config-overrides: "{% raw %}{{ .Node.IgnitionConfigOverride }}{% endraw %}" + bmac.agent-install.openshift.io/role: "{% raw %}{{ .Node.Role }}{% endraw %}" + labels: + infraenvs.agent-install.openshift.io: "{% raw %}{{ .Cluster.ClusterName }}{% endraw %}" +spec: + bootMode: "{% raw %}{{ .Node.BootMode }}{% endraw %}" + bmc: + address: "{% raw %}{{ .Node.BmcAddress }}{% endraw %}" + disableCertificateVerification: true + credentialsName: "{% raw %}{{ .Node.BmcCredentialsName.Name }}{% endraw %}" + 
bootMACAddress: "{% raw %}{{ .Node.BootMACAddress }}{% endraw %}" + automatedCleaningMode: disabled + online: true + rootDeviceHints: "{% raw %}{{ .Node.RootDeviceHints }}{% endraw %}" + userData: "{% raw %}{{ .Node.UserData }}{% endraw %}" diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/KlusterletAddonConfigOverride.yaml.j2 b/ansible/roles/telco-ran-du-ztp-421/templates/KlusterletAddonConfigOverride.yaml.j2 new file mode 100644 index 0000000..5b3f32e --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/KlusterletAddonConfigOverride.yaml.j2 @@ -0,0 +1,25 @@ +apiVersion: agent.open-cluster-management.io/v1 +kind: KlusterletAddonConfig +metadata: + annotations: + argocd.argoproj.io/sync-wave: "2" + name: "{% raw %}{{ .Cluster.ClusterName }}{% endraw %}" + namespace: "{% raw %}{{ .Cluster.ClusterName }}{% endraw %}" +spec: + clusterName: "{% raw %}{{ .Cluster.ClusterName }}{% endraw %}" + clusterNamespace: "{% raw %}{{ .Cluster.ClusterName }}{% endraw %}" + applicationManager: + enabled: false + certPolicyController: + enabled: false + iamPolicyController: + enabled: false + policyController: + enabled: true + searchCollector: + enabled: {{ setup_ztp_searchcollector_enabled }} + +# Note: This override replaces the entire KlusterletAddonConfig, and so must be +# provided in full. The templated portions will be substituted to match the +# cluster as defined in the SiteConfig, so it should be sufficient to adjust +# the 'enabled' flags to match the desired configuration. 
diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/cluster-kustomization.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/cluster-kustomization.yaml new file mode 100644 index 0000000..e747bb2 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/cluster-kustomization.yaml @@ -0,0 +1,15 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +generators: +{% for sc in range((item - 1) * siteconfigs_per_application, item * siteconfigs_per_application ) %} +{% if siteconfig_list | length > sc %} +#- ./{{ siteconfig_list[sc] }}-siteconfig.yml +{% endif %} +{% endfor %} + +resources: +{% for sc in range((item - 1) * siteconfigs_per_application, item * siteconfigs_per_application ) %} +{% if siteconfig_list | length > sc %} +#- ./{{ siteconfig_list[sc] }}-resources.yml +{% endif %} +{% endfor %} diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/cluster-ns-application.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/cluster-ns-application.yaml new file mode 100644 index 0000000..b5186e9 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/cluster-ns-application.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ztp-clusters-{{ '%02d' | format(item) }} +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: ztp-clusters-{{ '%02d' | format(item) }} + namespace: openshift-gitops +spec: + destination: + server: https://kubernetes.default.svc + namespace: ztp-clusters-{{ '%02d' | format(item) }} + project: ztp-app-project + source: + path: telco-ran/configuration/argocd/cluster/ztp-clusters-{{ '%02d' | format(item) }} + repoURL: http://{{ gogs_username }}:{{ gogs_password }}@{{ gogs_host }}:{{ gogs_port }}/testadmin/telco-reference.git + targetRevision: {{ telco_reference_branch }} + ignoreDifferences: # recommended way to allow ACM controller to manage its fields. 
alternative approach documented below (1) + - group: cluster.open-cluster-management.io + kind: ManagedCluster + managedFieldsManagers: + - controller + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - PrunePropagationPolicy=background + - RespectIgnoreDifferences=true diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/enable-crun-master.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/enable-crun-master.yaml new file mode 100644 index 0000000..8d88653 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/enable-crun-master.yaml @@ -0,0 +1,10 @@ +apiVersion: machineconfiguration.openshift.io/v1 +kind: ContainerRuntimeConfig +metadata: + name: enable-crun-master +spec: + machineConfigPoolSelector: + matchLabels: + pools.operator.machineconfiguration.openshift.io/master: "" + containerRuntimeConfig: + defaultRuntime: crun diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/enable-crun-worker.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/enable-crun-worker.yaml new file mode 100644 index 0000000..d10bd86 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/enable-crun-worker.yaml @@ -0,0 +1,10 @@ +apiVersion: machineconfiguration.openshift.io/v1 +kind: ContainerRuntimeConfig +metadata: + name: enable-crun-worker +spec: + machineConfigPoolSelector: + matchLabels: + pools.operator.machineconfiguration.openshift.io/worker: "" + containerRuntimeConfig: + defaultRuntime: crun diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/hub-template-maps.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/hub-template-maps.yaml new file mode 100644 index 0000000..97c7b89 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/hub-template-maps.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: common-template-map + namespace: ztp-common +data: + key1: "value1" + key2: "value2" + key3: "value3" + key4: "value4" +--- +apiVersion: v1 +kind: 
ConfigMap +metadata: + name: group-template-map + namespace: ztp-group +data: + key1: "group-value1" + key2: "group-value2" + key3: "group-value3" + key4: "group-value4" diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/ns-upgrade.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/ns-upgrade.yaml new file mode 100644 index 0000000..fbdf17d --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/ns-upgrade.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ztp-group-du-sno +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ztp-platform-upgrade-prep +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ztp-platform-upgrade +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ztp-operator-upgrade-prep +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ztp-operator-upgrade diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/ns.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/ns.yaml new file mode 100644 index 0000000..b4a4690 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/ns.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ztp-common +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ztp-group +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ztp-site +{% if acm_policygenerator %} +--- +apiVersion: cluster.open-cluster-management.io/v1beta2 +kind: ManagedClusterSetBinding +metadata: + name: global + namespace: ztp-common +spec: + clusterSet: global +--- +apiVersion: cluster.open-cluster-management.io/v1beta2 +kind: ManagedClusterSetBinding +metadata: + name: global + namespace: ztp-group +spec: + clusterSet: global +--- +apiVersion: cluster.open-cluster-management.io/v1beta2 +kind: ManagedClusterSetBinding +metadata: + name: global + namespace: ztp-site +spec: + clusterSet: global +{% endif %} diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/common-mno-ranGen.yaml 
b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/common-mno-ranGen.yaml new file mode 100644 index 0000000..b6f60dc --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/common-mno-ranGen.yaml @@ -0,0 +1,18 @@ +# For MNO (multi-node cluster), both common-ranGen.yaml and common-mno-ranGen.yaml need to be applied +# For SNO, please avoid adding common-mno-ranGen.yaml +--- +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + name: "common-mno-latest" + namespace: "ztp-common" +spec: + bindingRules: + # These policies will correspond to all clusters with this label: + common: "true" + common-mno: "true" + #du-profile: "latest" + sourceFiles: + # Create operators policies that will be installed in all clusters + - fileName: OperatorHub.yaml + policyName: "config-policy" diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/common-ranGen.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/common-ranGen.yaml new file mode 100644 index 0000000..a5b9f34 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/common-ranGen.yaml @@ -0,0 +1,210 @@ +--- +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + name: "common-latest" + namespace: "ztp-common" +spec: + bindingRules: + # These policies will correspond to all clusters with this label: + common: "true" + # du-profile: "latest" + sourceFiles: + # Create operators policies that will be installed in all clusters + - fileName: sriov-operator/SriovSubscriptionNS.yaml + policyName: "{{ 'sriov-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: sriov-operator/SriovSubscriptionOperGroup.yaml + policyName: "{{ 'sriov-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: sriov-operator/SriovSubscription.yaml + policyName: "{{ 'sriov-subs' if manyPolicies else 'subscriptions' }}-policy" +{% if extraHubCommonTemplates %} + metadata: + annotations: + {%- raw %} + scale-test-label-1: '{{hub
fromConfigMap "" "common-template-map" "key1" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "common-template-map" "key2" hub}}' + {%- endraw +%} +{% endif %} +{% if rhacm_disconnected_registry and rhacm_disconnected_registry|length > 1 %} + spec: + source: {{ common_catalogsource_name }} +{% endif %} + - fileName: sriov-operator/SriovOperatorStatus.yaml + policyName: "{{ 'sriov-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: ptp-operator/PtpSubscriptionNS.yaml + policyName: "{{ 'ptp-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: ptp-operator/PtpSubscriptionOperGroup.yaml + policyName: "{{ 'ptp-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: ptp-operator/PtpSubscription.yaml + policyName: "{{ 'ptp-subs' if manyPolicies else 'subscriptions' }}-policy" +{% if extraHubCommonTemplates %} + metadata: + annotations: + {%- raw %} + scale-test-label-1: '{{hub fromConfigMap "" "common-template-map" "key3" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "common-template-map" "key4" hub}}' + {%- endraw +%} +{% endif %} +{% if rhacm_disconnected_registry and rhacm_disconnected_registry|length > 1 %} + spec: + source: {{ common_catalogsource_name }} +{% endif %} + - fileName: ptp-operator/PtpOperatorStatus.yaml + policyName: "{{ 'ptp-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: cluster-logging/ClusterLogNS.yaml + policyName: "{{ 'log-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: cluster-logging/ClusterLogOperGroup.yaml + policyName: "{{ 'log-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: cluster-logging/ClusterLogSubscription.yaml + policyName: "{{ 'log-subs' if manyPolicies else 'subscriptions' }}-policy" +{% if extraHubCommonTemplates %} + metadata: + annotations: + {%- raw %} + scale-test-label-1: '{{hub fromConfigMap "" "common-template-map" "key3" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "common-template-map" "key4" hub}}' + 
{%- endraw +%} +{% endif %} +{% if rhacm_disconnected_registry and rhacm_disconnected_registry|length > 1 %} + spec: + source: {{ common_catalogsource_name }} + channel: stable-6.4 +{% endif %} + - fileName: cluster-logging/ClusterLogOperatorStatus.yaml + policyName: "{{ 'log-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: cluster-logging/ClusterLogServiceAccount.yaml + policyName: "{{ 'log-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: cluster-logging/ClusterLogServiceAccountAuditBinding.yaml + policyName: "{{ 'log-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: cluster-logging/ClusterLogServiceAccountInfrastructureBinding.yaml + policyName: "{{ 'log-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: storage-lso/StorageNS.yaml + policyName: "{{ 'storage-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: storage-lso/StorageOperGroup.yaml + policyName: "{{ 'storage-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: storage-lso/StorageSubscription.yaml + policyName: "{{ 'storage-subs' if manyPolicies else 'subscriptions' }}-policy" +{% if extraHubCommonTemplates %} + metadata: + annotations: + {%- raw %} + scale-test-label-1: '{{hub fromConfigMap "" "common-template-map" "key1" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "common-template-map" "key2" hub}}' + {%- endraw +%} +{% endif %} +{% if rhacm_disconnected_registry and rhacm_disconnected_registry|length > 1 %} + spec: + source: {{ common_catalogsource_name }} +{% endif %} + - fileName: storage-lso/StorageOperatorStatus.yaml + policyName: "{{ 'storage-subs' if manyPolicies else 'subscriptions' }}-policy" + # - fileName: AmqSubscriptionNS.yaml + # policyName: "subscriptions-policy" + # - fileName: AmqSubscriptionOperGroup.yaml + # policyName: "subscriptions-policy" + # - fileName: AmqSubscription.yaml + # policyName: "subscriptions-policy" + # + # LCA operator is used for orchestrating Image 
Based Upgrade for SNO +{% if include_lca_operator %} + - fileName: lca/LcaSubscriptionNS.yaml + policyName: "{{ 'lca-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: lca/LcaSubscriptionOperGroup.yaml + policyName: "{{ 'lca-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: lca/LcaSubscription.yaml + policyName: "{{ 'lca-subs' if manyPolicies else 'subscriptions' }}-policy" + spec: + channel: {{ lifecycle_agent_channel }} +{% if rhacm_disconnected_registry and rhacm_disconnected_registry|length > 1 %} + source: {{ common_catalogsource_name }} +{% endif %} + - fileName: lca/LcaOperatorStatus.yaml + policyName: "{{ 'lca-subs' if manyPolicies else 'subscriptions' }}-policy" + +{% else %} + # - fileName: LcaSubscriptionNS.yaml + # policyName: "subscriptions-policy" + # - fileName: LcaSubscriptionOperGroup.yaml + # policyName: "subscriptions-policy" + # - fileName: LcaSubscription.yaml + # policyName: "subscriptions-policy" + # - fileName: LcaOperatorStatus.yaml + # policyName: "subscriptions-policy" + # +{% endif %} + # OADP operator is used for backing up and restoring application during Image Based Upgrade +{% if include_oadp_operator %} + - fileName: data-protection/OadpSubscriptionNS.yaml + policyName: "{{ 'oadp-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: data-protection/OadpSubscriptionOperGroup.yaml + policyName: "{{ 'oadp-subs' if manyPolicies else 'subscriptions' }}-policy" + - fileName: data-protection/OadpSubscription.yaml + policyName: "{{ 'oadp-subs' if manyPolicies else 'subscriptions' }}-policy" +{% if rhacm_disconnected_registry and rhacm_disconnected_registry|length > 1 %} + spec: + source: {{ common_catalogsource_name }} +{% endif %} + - fileName: data-protection/OadpOperatorStatus.yaml + policyName: "{{ 'oadp-subs' if manyPolicies else 'subscriptions' }}-policy" + +{% else %} + # - fileName: OadpSubscriptionNS.yaml + # policyName: "subscriptions-policy" + # - fileName: 
OadpSubscriptionOperGroup.yaml + # policyName: "subscriptions-policy" + # - fileName: OadpSubscription.yaml + # policyName: "subscriptions-policy" + # - fileName: OadpOperatorStatus.yaml + # policyName: "subscriptions-policy" + # +{% endif %} + - fileName: cluster-tuning/monitoring-configuration/ReduceMonitoringFootprint.yaml + policyName: "{{ 'monitoring-config' if manyPolicies else 'config' }}-policy" +{% if rhacm_disconnected_registry and rhacm_disconnected_registry|length > 1 %} + # + # These CRs are in support of installation from a disconnected registry + # + - fileName: disconnected-registry/DefaultCatsrc.yaml + policyName: "config-policy" + # The Subscriptions all point to redhat-operators-disconnected. The OperatorHub CR + # disables the defaults and this CR replaces redhat-operators-disconnected with a + # CatalogSource pointing to the disconnected registry. Including both of + # these in the same policy orders their application to the cluster. + # Tip: for RH sources `image: registry.redhat.io/redhat/redhat-operator-index:v4.xx` + metadata: + name: {{ common_catalogsource_name }} +{% if extraHubCommonTemplates %} + annotations: + {%- raw %} + scale-test-label-1: '{{hub fromConfigMap "" "common-template-map" "key3" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "common-template-map" "key4" hub}}' + {%- endraw +%} +{% endif %} + labels: + lca.openshift.io/target-ocp-version: "4.21.0" + spec: + displayName: disconnected-redhat-operators + image: {{ rhacm_disconnected_registry }}:{{ rhacm_disconnected_registry_port }}/{{ disconnected_operator_index_name }}:{{ operator_index_tag }} + status: + connectionState: + lastObservedState: READY + - fileName: disconnected-registry/DisconnectedIDMS.yaml + policyName: "config-policy" + spec: + imageDigestMirrors: + - mirrors: + - {{ rhacm_disconnected_registry }}:{{ rhacm_disconnected_registry_port }} + source: registry.redhat.io + - mirrors: + - {{ rhacm_disconnected_registry }}:{{ 
rhacm_disconnected_registry_port }} + source: brew.registry.redhat.io + - mirrors: + - {{ rhacm_disconnected_registry }}:{{ rhacm_disconnected_registry_port }} + source: registry-proxy.engineering.redhat.com + - mirrors: + - {{ rhacm_disconnected_registry }}:{{ rhacm_disconnected_registry_port }}/ocp4/openshift4 + source: quay.io/openshift-release-dev/ocp-v4.0-art-dev + - mirrors: + - {{ rhacm_disconnected_registry }}:{{ rhacm_disconnected_registry_port }}/ocp4/openshift4 + source: quay.io/openshift-release-dev/ocp-release +{% endif %} diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-3node-ranGen.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-3node-ranGen.yaml new file mode 100644 index 0000000..72fe559 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-3node-ranGen.yaml @@ -0,0 +1,155 @@ +--- +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + name: "group-du-3node-latest" + namespace: "ztp-group" +spec: + bindingRules: + # These policies will correspond to all clusters with this label: + group-du-3node: "" + # du-profile: "latest" + # Because 3-node clusters are both workers and masters, and the MCP pool for master binds more strongly than that for worker, + # the Performance Profile needs to be set up to apply to the master MCP: + mcp: "master" + sourceFiles: + - fileName: ptp-operator/configuration/PtpConfigSlave.yaml # Change to PtpConfigSlaveCvl.yaml for ColumbiaVille NIC + policyName: "{{ 'ptp-config' if manyPolicies else 'config' }}-policy" + metadata: + name: "du-ptp-slave" +{%- if extraHubGroupTemplates or extraHubSiteTemplates +%} + annotations: +{%- if extraHubGroupTemplates %} + {% raw %} + scale-test-label-1: '{{hub fromConfigMap "" "group-template-map" "key1" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "group-template-map" "key2" hub}}' + {%- endraw %} +{% endif %} +{% if extraHubSiteTemplates %} + {% raw %} + site-test-label-1: 
'{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey1" hub}}' + site-test-label-2: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey2" hub}}' + {%- endraw %} +{% endif %} +{% endif +%} + spec: + profile: + - name: "slave" + # This interface must match the hardware in this group + interface: "ens5f0" + ptp4lOpts: "-2 -s --summary_interval -4" + phc2sysOpts: "-a -r -n 24" + - fileName: sriov-operator/SriovOperatorConfig.yaml + policyName: "{{ 'sriov-config' if manyPolicies else 'config' }}-policy" +{%- if extraHubGroupTemplates or extraHubSiteTemplates +%} + metadata: + annotations: +{%- if extraHubGroupTemplates %} + {% raw %} + scale-test-label-1: '{{hub fromConfigMap "" "group-template-map" "key1" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "group-template-map" "key2" hub}}' + {%- endraw %} +{% endif %} +{% if extraHubSiteTemplates %} + {% raw %} + site-test-label-1: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey1" hub}}' + site-test-label-2: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey2" hub}}' + {%- endraw %} +{% endif %} +{% endif +%} +{% if setup_ztp_enable_performanceprofile %} + - fileName: node-tuning-operator/x86_64/PerformanceProfile.yaml + policyName: "{{ 'tuning-config' if manyPolicies else 'config' }}-policy" + spec: + cpu: + # These must be tailored for the specific hardware platform + isolated: "2-19,22-39" + reserved: "0-1,20-21" + hugepages: + defaultHugepagesSize: 1G + pages: + - size: 1G + count: 32 + - fileName: node-tuning-operator/TunedPerformancePatch.yaml + policyName: "{{ 'tuning-config' if manyPolicies else 'config' }}-policy" +{%- if extraHubGroupTemplates or extraHubSiteTemplates +%} + metadata: + annotations: +{%- if extraHubGroupTemplates %} + {% raw %} + scale-test-label-1: '{{hub fromConfigMap "" "group-template-map" "key1" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "group-template-map" "key2" hub}}' + {%- endraw %} +{% 
endif %} +{% if extraHubSiteTemplates %} + {% raw %} + site-test-label-1: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey1" hub}}' + site-test-label-2: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey2" hub}}' + {%- endraw %} +{% endif %} +{% endif +%} +{% else %} +# - fileName: PerformanceProfile.yaml +# policyName: "{{ 'tuning-config' if manyPolicies else 'config' }}-policy" +# spec: +# cpu: +# # These must be tailored for the specific hardware platform +# isolated: "2-19,22-39" +# reserved: "0-1,20-21" +# hugepages: +# defaultHugepagesSize: 1G +# pages: +# - size: 1G +# count: 32 +# - fileName: TunedPerformancePatch.yaml +# policyName: "{{ 'tuning-config' if manyPolicies else 'config' }}-policy" +{%- if extraHubGroupTemplates or extraHubSiteTemplates +%} +# metadata: +# annotations: +{%- if extraHubGroupTemplates %} + {% raw %} +# scale-test-label-1: '{{hub fromConfigMap "" "group-template-map" "key1" hub}}' +# scale-test-label-2: '{{hub fromConfigMap "" "group-template-map" "key2" hub}}' + {%- endraw %} +{% endif %} +{% if extraHubSiteTemplates %} + {% raw %} +# site-test-label-1: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey1" hub}}' +# site-test-label-2: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey2" hub}}' + {%- endraw %} +{% endif %} +{% endif +%} +{% endif %} + # + # These CRs are to enable crun on master and worker nodes for 4.13+ only + # + # Include these CRs in the group PGT instead of the common PGT to make sure + # they are applied after the operators have been successfully installed, + # however, it's strongly recommended to include these CRs as day-0 extra manifests + # to avoid the risk of an extra reboot.
+ - fileName: optional-extra-manifest/enable-crun-master.yaml + policyName: "config-policy" +{% if manyPolicies %} + metadata: + annotations: + ran.openshift.io/ztp-deploy-wave: "10" +{% endif %} + - fileName: optional-extra-manifest/enable-crun-worker.yaml + policyName: "config-policy" +{% if manyPolicies %} + metadata: + annotations: + ran.openshift.io/ztp-deploy-wave: "10" +{% endif %} + # + # NMState operator is used for IPsec configuration with NMState + # - fileName: NMStateSubscriptionNS.yaml + # policyName: "subscriptions-policy" + # - fileName: NMStateSubscriptionOperGroup.yaml + # policyName: "subscriptions-policy" + # - fileName: NMStateSubscription.yaml + # policyName: "subscriptions-policy" + # - fileName: NMStateOperatorStatus.yaml + # policyName: "subscriptions-policy" + # - fileName: NMState.yaml + # policyName: "subscriptions-policy" diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-3node-validator-ranGen.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-3node-validator-ranGen.yaml new file mode 100644 index 0000000..a605061 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-3node-validator-ranGen.yaml @@ -0,0 +1,28 @@ +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + # The name will be used to generate the placementBinding name as {name}-placementBinding, the placementRule name as {name}-placementRule, + # and the policy name as {name}-{spec.sourceFiles[x].policyName} + name: "group-du-3node-validator-latest" + namespace: "ztp-group" +spec: + # This policy will correspond to all clusters with label specified in bindingRules and + # without label specified in bindingExcludedRules. + bindingRules: + group-du-3node: "" + # du-profile: "latest" + bindingExcludedRules: + # The ztp-done label is used in coordination with the Topology Aware Lifecycle Operator(TALO). + # Please do not change this label. 
+ ztp-done: "" + mcp: "master" + sourceFiles: + # Create inform policy to validate configuration CRs that will be applied to all 3-node clusters + - fileName: validatorCRs/informDuValidator.yaml + remediationAction: inform + policyName: "du-policy" + + # This low setting is only valid if the validation policy is disconnected from the cluster at steady-state + # using a bindingExcludeRules entry with ztp-done + evaluationInterval: + compliant: 5s diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-sno-ranGen.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-sno-ranGen.yaml new file mode 100644 index 0000000..5246a4e --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-sno-ranGen.yaml @@ -0,0 +1,355 @@ +--- +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + # The name will be used to generate the placementBinding and placementRule names as {name}-placementBinding and {name}-placementRule + name: "group-du-sno-latest" + namespace: "ztp-group" +spec: + bindingRules: + # These policies will correspond to all clusters with this label: + group-du-sno: "" + # du-profile: "latest" + mcp: "master" + sourceFiles: +{% if include_oadp_operator %} + - fileName: data-protection/OadpSecret.yaml + policyName: "{{ 'oadp-config' if manyPolicies else 'config' }}-policy" + data: + cloud: {{ lookup('template', './s3-creds.j2') | b64encode }} + - fileName: DataProtectionApplication.yaml + policyName: "{{ 'oadp-config' if manyPolicies else 'config' }}-policy" + spec: + backupLocations: + - velero: + config: + region: minio + s3Url: {{ oadp_s3Url }} + objectStorage: + bucket: '{% raw %}{{hub (printf "%s" .ManagedClusterName) hub}}{% endraw %}-ibu' + - fileName: OadpBackupStorageLocationStatus.yaml + policyName: "{{ 'oadp-config' if manyPolicies else 'config' }}-policy" + - fileName: OadpCm.yaml + policyName: "config-policy" +{% endif %} +{% if include_lca_operator %} + - fileName: 
LcaSecret.yaml + policyName: "{{ 'lca-config' if manyPolicies else 'config' }}-policy" + data: + seedAuth: {{ lookup('template', './seedgen-auth.j2') | to_json | b64encode }} + .dockerconfigjson: {{ lookup('template', './seedgen-auth.j2') | to_json | b64encode }} +{% endif %} + - fileName: cluster-tuning/DisableOLMPprof.yaml + policyName: "{{ 'olm-config' if manyPolicies else 'config' }}-policy" +{%- if extraHubGroupTemplates or extraHubSiteTemplates +%} + metadata: + annotations: +{%- if extraHubGroupTemplates %} + {% raw %} + scale-test-label-1: '{{hub fromConfigMap "" "group-template-map" "key1" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "group-template-map" "key2" hub}}' + {%- endraw %} +{% endif %} +{% if extraHubSiteTemplates %} + {% raw %} + site-test-label-1: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey1" hub}}' + site-test-label-2: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey2" hub}}' + {%- endraw %} +{% endif %} +{% endif +%} + # Set ClusterLogForwarder & ClusterLogging as example might be better to create another policyTemp-Group + - fileName: cluster-logging/ClusterLogForwarder.yaml + policyName: "{{ group_policy_logforwarder_name }}" +{%- if extraHubGroupTemplates or extraHubSiteTemplates +%} + metadata: + annotations: +{%- if extraHubGroupTemplates %} + {% raw %} + scale-test-label-1: '{{hub fromConfigMap "" "group-template-map" "key1" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "group-template-map" "key2" hub}}' + {%- endraw %} +{% endif %} +{% if extraHubSiteTemplates %} + {% raw %} + site-test-label-1: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey1" hub}}' + site-test-label-2: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey2" hub}}' + {%- endraw %} +{% endif %} +{% endif +%} + spec: + outputs: + - type: "kafka" + name: kafka-open + # below url is an example + kafka: + url: tcp://10.46.55.190:9092/test + filters: + - name: 
test-labels + type: openshiftLabels + openshiftLabels: + label1: test1 + label2: test2 + label3: test3 + label4: test4 + pipelines: + - name: all-to-default + inputRefs: + - audit + - infrastructure + filterRefs: + - test-labels + outputRefs: + - kafka-open + # The setting below overrides the default "worker" selector predefined in + # the source-crs. The change is recommended on SNOs configured with PTP + # event notification for forward compatibility with possible SNO expansion. + # When the default setting is left intact, then in case of an SNO + # expansion with one or more workers, PTP operator + # would not create linuxptp-daemon containers on the worker node(s). Any + # attempt to change the daemonsetNodeSelector will result in ptp daemon + # restart and time synchronization loss. + # After complying with the policy, complianceType can be set to a safer "musthave" + # - fileName: PtpOperatorConfigForEvent.yaml + # policyName: "config-policy" + # complianceType: mustonlyhave + # spec: + # daemonNodeSelector: + # node-role.kubernetes.io/worker: "" + + - fileName: ptp-operator/configuration/PtpConfigSlave.yaml # Change to PtpConfigSlaveCvl.yaml for ColumbiaVille NIC + policyName: "{{ 'ptp-config' if manyPolicies else 'config' }}-policy" + metadata: + name: "du-ptp-slave" +{%- if extraHubGroupTemplates or extraHubSiteTemplates +%} + annotations: +{%- if extraHubGroupTemplates %} + {% raw %} + scale-test-label-1: '{{hub fromConfigMap "" "group-template-map" "key1" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "group-template-map" "key2" hub}}' + {%- endraw %} +{% endif %} +{% if extraHubSiteTemplates %} + {% raw %} + site-test-label-1: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey1" hub}}' + site-test-label-2: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey2" hub}}' + {%- endraw %} +{% endif %} +{% endif +%} + spec: + profile: + - name: "slave" + # This interface must match the hardware in this group + 
interface: "ens5f0" + ptp4lOpts: "-2 -s --summary_interval -4" + phc2sysOpts: "-a -r -n 24" + - fileName: sriov-operator/SriovOperatorConfigForSNO.yaml + policyName: "{{ 'sriov-config' if manyPolicies else 'config' }}-policy" + # For existing clusters with node selector set as "master", + # change the complianceType to "mustonlyhave". + # After complying with the policy, the complianceType can + # be reverted to "musthave" + complianceType: musthave +{%- if extraHubGroupTemplates or extraHubSiteTemplates +%} + metadata: + annotations: +{%- if extraHubGroupTemplates %} + {% raw %} + scale-test-label-1: '{{hub fromConfigMap "" "group-template-map" "key1" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "group-template-map" "key2" hub}}' + {%- endraw %} +{% endif %} +{% if extraHubSiteTemplates %} + {% raw %} + site-test-label-1: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey1" hub}}' + site-test-label-2: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey2" hub}}' + {%- endraw %} +{% endif %} +{% endif +%} + spec: + configDaemonNodeSelector: + node-role.kubernetes.io/worker: "" + - fileName: storage/StorageLV.yaml + policyName: "{{ group_policy_storage_name }}" +{%- if extraHubGroupTemplates or extraHubSiteTemplates +%} + metadata: + annotations: +{%- if extraHubGroupTemplates %} + {% raw %} + scale-test-label-1: '{{hub fromConfigMap "" "group-template-map" "key1" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "group-template-map" "key2" hub}}' + {%- endraw %} +{% endif %} +{% if extraHubSiteTemplates %} + {% raw %} + site-test-label-1: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey1" hub}}' + site-test-label-2: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey2" hub}}' + {%- endraw %} +{% endif %} +{% endif +%} + spec: + storageClassDevices: + - storageClassName: "example-storage-class-1" + volumeMode: Filesystem + fsType: xfs + devicePaths: + - 
/dev/disk/by-path/pci-0000:88:00.0-nvme-1 + - storageClassName: "example-storage-class-2" + volumeMode: Filesystem + fsType: xfs + devicePaths: + - /dev/disk/by-path/pci-0000:89:00.0-nvme-1 + - fileName: cluster-tuning/disabling-network-diagnostics/DisableSnoNetworkDiag.yaml + policyName: "{{ 'network-config' if manyPolicies else 'config' }}-policy" +{%- if extraHubGroupTemplates or extraHubSiteTemplates +%} + metadata: + annotations: +{%- if extraHubGroupTemplates %} + {% raw %} + scale-test-label-1: '{{hub fromConfigMap "" "group-template-map" "key1" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "group-template-map" "key2" hub}}' + {%- endraw %} +{% endif %} +{% if extraHubSiteTemplates %} + {% raw %} + site-test-label-1: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey1" hub}}' + site-test-label-2: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey2" hub}}' + {%- endraw %} +{% endif %} +{% endif +%} +{% if setup_ztp_enable_performanceprofile %} + - fileName: node-tuning-operator/x86_64/PerformanceProfile.yaml + policyName: "{{ 'tuning-config' if manyPolicies else 'config' }}-policy" + spec: + cpu: + # These must be tailored for the specific hardware platform + isolated: "{{ setup_ztp_perfprofile_isolated_cpus }}" + reserved: "{{ setup_ztp_perfprofile_reserved_cpus }}" + hugepages: + defaultHugepagesSize: 1G + pages: + - size: 1G + count: {{ setup_ztp_perfprofile_hugepage_count }} + realTimeKernel: + enabled: {{ setup_ztp_perfprofile_realtime | lower }} + - fileName: node-tuning-operator/TunedPerformancePatch.yaml + policyName: "{{ 'tuning-config' if manyPolicies else 'config' }}-policy" +{%- if extraHubGroupTemplates or extraHubSiteTemplates +%} + metadata: + annotations: +{%- if extraHubGroupTemplates %} + {% raw %} + scale-test-label-1: '{{hub fromConfigMap "" "group-template-map" "key1" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "group-template-map" "key2" hub}}' + {%- endraw %} +{% endif %} +{% if 
extraHubSiteTemplates %} +      {% raw %} +          site-test-label-1: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey1" hub}}' +          site-test-label-2: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey2" hub}}' +      {%- endraw %} +{% endif %} +{% endif +%} +{% else %} +#    - fileName: PerformanceProfile.yaml +#      policyName: "{{ 'tuning-config' if manyPolicies else 'config' }}-policy" +#      spec: +#        cpu: +#          # These must be tailored for the specific hardware platform +#          isolated: "{{ setup_ztp_perfprofile_isolated_cpus }}" +#          reserved: "{{ setup_ztp_perfprofile_reserved_cpus }}" +#        hugepages: +#          defaultHugepagesSize: 1G +#          pages: +#            - size: 1G +#              count: {{ setup_ztp_perfprofile_hugepage_count }} +#        realTimeKernel: +#          enabled: {{ setup_ztp_perfprofile_realtime | lower }} +#    - fileName: TunedPerformancePatch.yaml +#      policyName: "{{ 'tuning-config' if manyPolicies else 'config' }}-policy" +{%- if extraHubGroupTemplates or extraHubSiteTemplates +%} +#      metadata: +#        annotations: +{%- if extraHubGroupTemplates %} +      {% raw %} +#          scale-test-label-1: '{{hub fromConfigMap "" "group-template-map" "key1" hub}}' +#          scale-test-label-2: '{{hub fromConfigMap "" "group-template-map" "key2" hub}}' +      {%- endraw %} +{% endif %} +{% if extraHubSiteTemplates %} +      {% raw %} +#          site-test-label-1: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey1" hub}}' +#          site-test-label-2: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey2" hub}}' +      {%- endraw %} +{% endif %} +{% endif +%} +{% endif %} +    # +    # These CRs are to enable crun on master and worker nodes for 4.13+ only +    # +    # Include these CRs in the group PGT instead of the common PGT to make sure +    # they are applied after the operators have been successfully installed, +    # however, it's strongly recommended to include these CRs as day-0 extra manifests +    # to avoid the risk of an extra reboot. 
+ - fileName: optional-extra-manifest/enable-crun-master.yaml + policyName: "config-policy" + - fileName: optional-extra-manifest/enable-crun-worker.yaml + policyName: "config-policy" + # # AmqInstance is required if PTP and BMER operators use AMQP transport + # - fileName: AmqInstance.yaml +# --- sources needed for image registry (check ImageRegistry.md for more details)---- +# - fileName: StorageClass.yaml +# policyName: "config-policy" +# metadata: +# name: image-registry-sc +# - fileName: StoragePVC.yaml +# policyName: "config-policy" +# metadata: +# name: image-registry-pvc +# namespace: openshift-image-registry +# spec: +# accessModes: +# - ReadWriteMany +# resources: +# requests: +# storage: 100Gi +# storageClassName: image-registry-sc +# volumeMode: Filesystem +# - fileName: ImageRegistryPV.yaml # this is assuming that mount_point is set to `/var/imageregistry` in SiteConfig + # using StorageClass `image-registry-sc` (see the first sc-file) +# policyName: "config-policy" +# - fileName: ImageRegistryConfig.yaml +# policyName: "config-policy" +# spec: +# storage: +# pvc: +# claim: "image-registry-pvc" +# ---- sources needed for image registry ends here ---- + +# --- sources needed for updating CRI-O workload-partitioning ---- +# more info here: on the base64 content https://docs.openshift.com/container-platform/4.11/scalability_and_performance/sno-du-enabling-workload-partitioning-on-single-node-openshift.html +# - fileName: MachineConfigGeneric.yaml +# policyName: "config-policy" +# complianceType: mustonlyhave # This is to update array entry as opposed to appending a new entry. +# metadata: +# name: 02-master-workload-partitioning +# spec: +# config: +# storage: +# files: +# - contents: +# # crio cpuset config goes below. This value needs to updated and matched PerformanceProfile. Check the link for more info on the content. 
+# source: data:text/plain;charset=utf-8;base64,W2NyaW8ucnVudGltZS53b3JrbG9hZHMubWFuYWdlbWVudF0KYWN0aXZhdGlvbl9hbm5vdGF0aW9uID0gInRhcmdldC53b3JrbG9hZC5vcGVuc2hpZnQuaW8vbWFuYWdlbWVudCIKYW5ub3RhdGlvbl9wcmVmaXggPSAicmVzb3VyY2VzLndvcmtsb2FkLm9wZW5zaGlmdC5pbyIKcmVzb3VyY2VzID0geyAiY3B1c2hhcmVzIiA9IDAsICJjcHVzZXQiID0gIjAtMSw1Mi01MyIgfQo= +# mode: 420 +# overwrite: true +# path: /etc/crio/crio.conf.d/01-workload-partitioning +# user: +# name: root +# - contents: +# # openshift cpuset config goes below. This value needs to be updated and matched with crio cpuset (array entry above this). Check the link for more info on the content. +# source: data:text/plain;charset=utf-8;base64,ewogICJtYW5hZ2VtZW50IjogewogICAgImNwdXNldCI6ICIwLTEsNTItNTMiCiAgfQp9Cg== +# mode: 420 +# overwrite: true +# path: /etc/kubernetes/openshift-workload-pinning +# user: +# name: root +# ---- sources needed for updating CRI-O workload-partitioning ends here ---- diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-sno-validator-ranGen.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-sno-validator-ranGen.yaml new file mode 100644 index 0000000..6b63fe3 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-sno-validator-ranGen.yaml @@ -0,0 +1,28 @@ +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + # The name will be used to generate the placementBinding name as {name}-placementBinding, the placementRule name as {name}-placementRule, + # and the policy name as {name}-{spec.sourceFiles[x].policyName} + name: "group-du-sno-validator-latest" + namespace: "ztp-group" +spec: + bindingRules: + # This policy will correspond to all clusters with label specified in bindingRules and + # without label specified in bindingExcludedRules. + group-du-sno: "" + # du-profile: "latest" + bindingExcludedRules: + # The ztp-done label is used in coordination with the Topology Aware Lifecycle Operator(TALO). 
+    # Please do not change this label. +    ztp-done: "" +  mcp: "master" +  sourceFiles: +    # Create inform policy to validate configuration CRs that will be applied to all SNO clusters +    - fileName: validatorCRs/informDuValidator.yaml +      remediationAction: inform +      policyName: "du-policy" + +      # This low setting is only valid if the validation policy is disconnected from the cluster at steady-state +      # using a bindingExcludedRules entry with ztp-done +      evaluationInterval: +        compliant: 5s diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-standard-ranGen.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-standard-ranGen.yaml new file mode 100644 index 0000000..320ab59 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-standard-ranGen.yaml @@ -0,0 +1,145 @@ +--- +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: +  name: "group-du-standard-latest" +  namespace: "ztp-group" +spec: +  bindingRules: +    # These policies will correspond to all clusters with this label: +    group-du-standard: "" +    # du-profile: "latest" +  mcp: "worker" +  sourceFiles: +    - fileName: ptp-operator/PtpOperatorConfig.yaml +      policyName: "config-policy" +    - fileName: ptp-operator/configuration/PtpConfigSlave.yaml # Change to PtpConfigSlaveCvl.yaml for ColumbiaVille NIC +      policyName: "{{ 'ptp-config' if manyPolicies else 'config' }}-policy" +      metadata: +        name: "du-ptp-slave" +{%- if extraHubGroupTemplates or extraHubSiteTemplates +%} +        annotations: +{%- if extraHubGroupTemplates %} +      {% raw %} +          scale-test-label-1: '{{hub fromConfigMap "" "group-template-map" "key1" hub}}' +          scale-test-label-2: '{{hub fromConfigMap "" "group-template-map" "key2" hub}}' +      {%- endraw %} +{% endif %} +{% if extraHubSiteTemplates %} +      {% raw %} +          site-test-label-1: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey1" hub}}' +          site-test-label-2: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) 
"sitekey2" hub}}' + {%- endraw %} +{% endif %} +{% endif +%} + spec: + profile: + - name: "slave" + # This interface must match the hardware in this group + interface: "ens5f0" + ptp4lOpts: "-2 -s --summary_interval -4" + phc2sysOpts: "-a -r -n 24" + - fileName: sriov-operator/SriovOperatorConfig.yaml + policyName: "{{ 'sriov-config' if manyPolicies else 'config' }}-policy" +{%- if extraHubGroupTemplates or extraHubSiteTemplates +%} + metadata: + annotations: +{%- if extraHubGroupTemplates %} + {% raw %} + scale-test-label-1: '{{hub fromConfigMap "" "group-template-map" "key1" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "group-template-map" "key2" hub}}' + {%- endraw %} +{% endif %} +{% if extraHubSiteTemplates %} + {% raw %} + site-test-label-1: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey1" hub}}' + site-test-label-2: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey2" hub}}' + {%- endraw %} +{% endif %} +{% endif +%} +{% if setup_ztp_enable_performanceprofile %} + - fileName: node-tuning-operator/x86_64/PerformanceProfile.yaml + policyName: "{{ 'tuning-config' if manyPolicies else 'config' }}-policy" + spec: + cpu: + # These must be tailored for the specific hardware platform + isolated: "2-19,22-39" + reserved: "0-1,20-21" + hugepages: + defaultHugepagesSize: 1G + pages: + - size: 1G + count: 32 + - fileName: node-tuning-operator/TunedPerformancePatch.yaml + policyName: "{{ 'tuning-config' if manyPolicies else 'config' }}-policy" +{%- if extraHubGroupTemplates or extraHubSiteTemplates +%} + metadata: + annotations: +{%- if extraHubGroupTemplates %} + {% raw %} + scale-test-label-1: '{{hub fromConfigMap "" "group-template-map" "key1" hub}}' + scale-test-label-2: '{{hub fromConfigMap "" "group-template-map" "key2" hub}}' + {%- endraw %} +{% endif %} +{% if extraHubSiteTemplates %} + {% raw %} + site-test-label-1: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey1" hub}}' + 
site-test-label-2: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey2" hub}}' + {%- endraw %} +{% endif %} +{% endif +%} +{% else %} +# - fileName: PerformanceProfile.yaml +# policyName: "{{ 'tuning-config' if manyPolicies else 'config' }}-policy" +# spec: +# cpu: +# # These must be tailored for the specific hardware platform +# isolated: "2-19,22-39" +# reserved: "0-1,20-21" +# hugepages: +# defaultHugepagesSize: 1G +# pages: +# - size: 1G +# count: 32 +# - fileName: TunedPerformancePatch.yaml +# policyName: "{{ 'tuning-config' if manyPolicies else 'config' }}-policy" +{%- if extraHubGroupTemplates or extraHubSiteTemplates +%} +# metadata: +# annotations: +{%- if extraHubGroupTemplates %} + {% raw %} +# scale-test-label-1: '{{hub fromConfigMap "" "group-template-map" "key1" hub}}' +# scale-test-label-2: '{{hub fromConfigMap "" "group-template-map" "key2" hub}}' + {%- endraw %} +{% endif %} +{% if extraHubSiteTemplates %} + {% raw %} +# site-test-label-1: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey1" hub}}' +# site-test-label-2: '{{hub fromConfigMap "" (printf "site-%s" .ManagedClusterName) "sitekey2" hub}}' + {%- endraw %} +{% endif %} +{% endif +%} +{% endif %} + # + # These CRs are to enable crun on master and worker nodes for 4.13+ only + # + # Include these CRs in the group PGT instead of the common PGT to make sure + # they are applied after the operators have been successfully installed, + # however, it's strongly recommended to include these CRs as day-0 extra manifests + # to avoid an extra reboot of the master nodes. 
+ - fileName: optional-extra-manifest/enable-crun-master.yaml + policyName: "config-policy" + - fileName: optional-extra-manifest/enable-crun-worker.yaml + policyName: "config-policy" + # + # NMState operator is used for IPsec configuration with NMState + # - fileName: NMStateSubscriptionNS.yaml + # policyName: "subscriptions-policy" + # - fileName: NMStateSubscriptionOperGroup.yaml + # policyName: "subscriptions-policy" + # - fileName: NMStateSubscription.yaml + # policyName: "subscriptions-policy" + # - fileName: NMStateOperatorStatus.yaml + # policyName: "subscriptions-policy" + # - fileName: NMState.yaml + # policyName: "subscriptions-policy" diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-standard-validator-ranGen.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-standard-validator-ranGen.yaml new file mode 100644 index 0000000..502754f --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-standard-validator-ranGen.yaml @@ -0,0 +1,28 @@ +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + # The name will be used to generate the placementBinding name as {name}-placementBinding, the placementRule name as {name}-placementRule, + # and the policy name as {name}-{spec.sourceFiles[x].policyName} + name: "group-du-standard-validator-latest" + namespace: "ztp-group" +spec: + # This policy will correspond to all clusters with label specified in bindingRules and + # without label specified in bindingExcludedRules. + bindingRules: + group-du-standard: "" + # du-profile: "latest" + bindingExcludedRules: + # The ztp-done label is used in coordination with the Topology Aware Lifecycle Operator(TALO). + # Please do not change this label. 
+    ztp-done: "" +  mcp: "worker" +  sourceFiles: +    # Create inform policy to validate configuration CRs that will be applied to all standard clusters +    - fileName: validatorCRs/informDuValidator.yaml +      remediationAction: inform +      policyName: "du-policy" + +      # This low setting is only valid if the validation policy is disconnected from the cluster at steady-state +      # using a bindingExcludedRules entry with ztp-done +      evaluationInterval: +        compliant: 5s diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/policy-common-and-group.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/policy-common-and-group.yaml new file mode 100644 index 0000000..a34702e --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/policy-common-and-group.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: +  name: policy-common-and-group +  namespace: openshift-gitops +spec: +  destination: +    server: https://kubernetes.default.svc +    namespace: ztp-site +  project: policy-app-project +  source: +    path: telco-ran/configuration/argocd/policy/common-and-group +    repoURL: http://{{ gogs_username }}:{{ gogs_password }}@{{ gogs_host }}:{{ gogs_port }}/testadmin/telco-reference.git +    targetRevision: {{ telco_reference_branch }} +  syncPolicy: +    automated: +      prune: true +      selfHeal: true +    syncOptions: +    - CreateNamespace=true diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/policy-kustomization.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/policy-kustomization.yaml new file mode 100644 index 0000000..69db0d8 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/policy-kustomization.yaml @@ -0,0 +1,78 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +generators: +# This is common to all RAN deployments +{% if setup_ztp_common_policy %} +- common-ranGen.yaml +{% if du_profile_version is version('4.15', '>=') %} +# This is common for MNO (Multi-Node OpenShift) RAN deployments (3-node and standard clusters) du >= 4.15 
+- common-mno-ranGen.yaml +{% endif %} +{% else %} +# - common-ranGen.yaml +# This is common for MNO (Multi-Node OpenShift) RAN deployments (3-node and standard clusters) du >= 4.15 +# - common-mno-ranGen.yaml +{% endif %} + +# This group policy is for all single-node deployments: +{% if setup_ztp_sno_policy %} +- group-du-sno-ranGen.yaml +# This group validator policy is for all single-node deployments: +{% if setup_ztp_enable_performanceprofile %} +- group-du-sno-validator-ranGen.yaml +{% else %} +# - group-du-sno-validator-ranGen.yaml +{% endif %} +# This site policy is for all clusters with the label scalelab +{% if setup_ztp_sno_site_policy %} +- sno-site.yaml +{% else %} +# - sno-site.yaml +{% endif %} +{% else %} +# - group-du-sno-ranGen.yaml +# This group validator policy is for all single-node deployments: +# - group-du-sno-validator-ranGen.yaml +# This site policy is for all clusters with the label scalelab +# - sno-site.yaml +{% endif %} + +# This group policy is for all compressed 3-node cluster deployments: +{% if setup_ztp_compact_policy %} +- group-du-3node-ranGen.yaml +# This group validator policy is for all compressed 3-node cluster deployments: +{% if setup_ztp_enable_performanceprofile %} +- group-du-3node-validator-ranGen.yaml +{% else %} +# - group-du-3node-validator-ranGen.yaml +{% endif %} +{% else %} +# - group-du-3node-ranGen.yaml +# This group validator policy is for all compressed 3-node cluster deployments: +# - group-du-3node-validator-ranGen.yaml +{% endif %} + +# This group policy is for all standard cluster deployments: +{% if setup_ztp_standard_policy %} +- group-du-standard-ranGen.yaml +# This group validator policy is for all standard cluster deployments: +{% if setup_ztp_enable_performanceprofile %} +- group-du-standard-validator-ranGen.yaml +{% else %} +# - group-du-standard-validator-ranGen.yaml +{% endif %} +{% else %} +# - group-du-standard-ranGen.yaml +# This group validator policy is for all standard cluster deployments: 
+# - group-du-standard-validator-ranGen.yaml +{% endif %} + +# Commented out by default for now +# - du-upgrade.yaml +# - du-operator-upgrade.yaml +# - du-ibu.yaml + +resources: +- ns.yaml +- ns-upgrade.yaml +- hub-template-maps.yaml diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/s3-creds.j2 b/ansible/roles/telco-ran-du-ztp-421/templates/s3-creds.j2 new file mode 100644 index 0000000..eaec080 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/s3-creds.j2 @@ -0,0 +1,3 @@ +[default] +aws_access_key_id={{ s3_access_key_id }} +aws_secret_access_key={{ s3_secret_access_key }} diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/seedgen-auth.j2 b/ansible/roles/telco-ran-du-ztp-421/templates/seedgen-auth.j2 new file mode 100644 index 0000000..f59e211 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/seedgen-auth.j2 @@ -0,0 +1 @@ +{"auths": {"{{ rhacm_disconnected_registry }}:{{ rhacm_disconnected_registry_port }}": {"auth": "{{ (registry_user + ':' + registry_password) | b64encode }}"}}} diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/sno-site.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/sno-site.yaml new file mode 100644 index 0000000..475c303 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/sno-site.yaml @@ -0,0 +1,49 @@ +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + name: "site" + namespace: "ztp-site" +spec: + bindingRules: + # These policies will correspond to all clusters with this label: + site: "scalelab" + mcp: "master" + sourceFiles: + - fileName: SriovNetwork.yaml + policyName: "config-policy" + metadata: + name: "sriov-nw-du-fh" + spec: + resourceName: du_fh + vlan: 140 + # - fileName: SriovNetworkNodePolicy.yaml + # policyName: "config-policy" + # metadata: + # name: "sriov-nnp-du-fh" + # spec: + # deviceType: netdevice + # isRdma: false + # nicSelector: + # pfNames: ["ens5f0"] + # numVfs: 8 + # priority: 10 + # resourceName: du_fh + - fileName: 
SriovNetwork.yaml + policyName: "config-policy" + metadata: + name: "sriov-nw-du-mh" + spec: + resourceName: du_mh + vlan: 150 + # - fileName: SriovNetworkNodePolicy.yaml + # policyName: "config-policy" + # metadata: + # name: "sriov-nnp-du-mh" + # spec: + # deviceType: vfio-pci + # isRdma: false + # nicSelector: + # pfNames: ["ens7f0"] + # numVfs: 8 + # priority: 10 + # resourceName: du_mh diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/DataProtectionApplication.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/DataProtectionApplication.yaml new file mode 100644 index 0000000..c565383 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/DataProtectionApplication.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: oadp.openshift.io/v1alpha1 +kind: DataProtectionApplication +metadata: + name: dataprotectionapplication + namespace: openshift-adp + annotations: + ran.openshift.io/ztp-deploy-wave: "10" +spec: + configuration: + velero: + defaultPlugins: + - aws + - openshift + resourceTimeout: 10m + backupLocations: + - velero: + config: + profile: "default" + region: minio + s3Url: $placeholder_url + insecureSkipTLSVerify: "true" + s3ForcePathStyle: "true" + provider: aws + default: true + credential: + key: cloud + name: cloud-credentials + objectStorage: + bucket: $placeholder_bucket + prefix: velero +status: + conditions: + - reason: Complete + status: "True" + type: Reconciled diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/ImageBasedUpgrade.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/ImageBasedUpgrade.yaml new file mode 100644 index 0000000..c32a426 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/ImageBasedUpgrade.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: lca.openshift.io/{{ ibu_source_crs_apiversion }} +kind: ImageBasedUpgrade +metadata: + name: upgrade +spec: + stage: Idle + # When setting `stage: Prep`, remember to add the seed image reference object 
below. + # seedImageRef: + # image: $image + # version: $version diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaOperatorStatus.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaOperatorStatus.yaml new file mode 100644 index 0000000..588f643 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaOperatorStatus.yaml @@ -0,0 +1,27 @@ +--- +# This CR verifies the installation/upgrade of the LCA +apiVersion: operators.coreos.com/v1 +kind: Operator +metadata: + name: lifecycle-agent.openshift-lifecycle-agent + annotations: + ran.openshift.io/ztp-deploy-wave: "2" +status: + components: + refs: + - kind: Subscription + namespace: openshift-lifecycle-agent + conditions: + - type: CatalogSourcesUnhealthy + status: "False" + - kind: InstallPlan + namespace: openshift-lifecycle-agent + conditions: + - type: Installed + status: "True" + - kind: ClusterServiceVersion + namespace: openshift-lifecycle-agent + conditions: + - type: Succeeded + status: "True" + reason: InstallSucceeded diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSecret.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSecret.yaml new file mode 100644 index 0000000..5b5b810 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSecret.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: seedgen + namespace: openshift-lifecycle-agent + annotations: + ran.openshift.io/ztp-deploy-wave: "100" +type: Opaque diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSubscription.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSubscription.yaml new file mode 100644 index 0000000..d08318a --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSubscription.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: lifecycle-agent + namespace: openshift-lifecycle-agent + 
annotations: + ran.openshift.io/ztp-deploy-wave: "2" +spec: + channel: stable + name: lifecycle-agent + source: redhat-operators + sourceNamespace: openshift-marketplace + installPlanApproval: Manual +status: + state: AtLatestKnown diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSubscriptionNS.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSubscriptionNS.yaml new file mode 100644 index 0000000..cdd7da4 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSubscriptionNS.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-lifecycle-agent + annotations: + workload.openshift.io/allowed: management + ran.openshift.io/ztp-deploy-wave: "2" + labels: + kubernetes.io/metadata.name: openshift-lifecycle-agent diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSubscriptionOperGroup.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSubscriptionOperGroup.yaml new file mode 100644 index 0000000..27bc48a --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSubscriptionOperGroup.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: openshift-lifecycle-agent + namespace: openshift-lifecycle-agent + annotations: + ran.openshift.io/ztp-deploy-wave: "2" +spec: + targetNamespaces: + - openshift-lifecycle-agent diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpBackupStorageLocationStatus.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpBackupStorageLocationStatus.yaml new file mode 100644 index 0000000..29a23ed --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpBackupStorageLocationStatus.yaml @@ -0,0 +1,11 @@ +--- +# This CR verifies the availability of backup storage locations created by OADP +apiVersion: velero.io/v1 +kind: BackupStorageLocation +metadata: + name: dataprotectionapplication-1 + 
namespace: openshift-adp + annotations: + ran.openshift.io/ztp-deploy-wave: "10" +status: + phase: Available diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpCm.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpCm.yaml new file mode 100644 index 0000000..c4a8273 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpCm.yaml @@ -0,0 +1,44 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: oadp-cm + namespace: openshift-adp + annotations: + ran.openshift.io/ztp-deploy-wave: "10" +data: + backup_acm_klusterlet.yaml: | + apiVersion: velero.io/v1 + kind: Backup + metadata: + name: acm-klusterlet + annotations: + lca.openshift.io/apply-label: "apps/v1/deployments/open-cluster-management-agent/klusterlet,v1/secrets/open-cluster-management-agent/bootstrap-hub-kubeconfig,rbac.authorization.k8s.io/v1/clusterroles/klusterlet,v1/serviceaccounts/open-cluster-management-agent/klusterlet,rbac.authorization.k8s.io/v1/clusterroles/open-cluster-management:klusterlet-admin-aggregate-clusterrole,rbac.authorization.k8s.io/v1/clusterrolebindings/klusterlet,operator.open-cluster-management.io/v1/klusterlets/klusterlet,apiextensions.k8s.io/v1/customresourcedefinitions/klusterlets.operator.open-cluster-management.io,v1/secrets/open-cluster-management-agent/open-cluster-management-image-pull-credentials" + labels: + velero.io/storage-location: default + namespace: openshift-adp + spec: + includedNamespaces: + - open-cluster-management-agent + includedClusterScopedResources: + - klusterlets.operator.open-cluster-management.io + - clusterclaims.cluster.open-cluster-management.io + - clusterroles.rbac.authorization.k8s.io + - clusterrolebindings.rbac.authorization.k8s.io + includedNamespaceScopedResources: + - deployments + - serviceaccounts + - secrets + restore_acm_klusterlet.yaml: | + apiVersion: velero.io/v1 + kind: Restore + metadata: + name: acm-klusterlet + namespace: openshift-adp + labels: + 
velero.io/storage-location: default + annotations: + lca.openshift.io/apply-wave: "1" + spec: + backupName: + acm-klusterlet diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpOperatorStatus.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpOperatorStatus.yaml new file mode 100644 index 0000000..6b81af0 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpOperatorStatus.yaml @@ -0,0 +1,27 @@ +--- +# This CR verifies the installation/upgrade of the OADP +apiVersion: operators.coreos.com/v1 +kind: Operator +metadata: + name: redhat-oadp-operator.openshift-adp + annotations: + ran.openshift.io/ztp-deploy-wave: "2" +status: + components: + refs: + - kind: Subscription + namespace: openshift-adp + conditions: + - type: CatalogSourcesUnhealthy + status: "False" + - kind: InstallPlan + namespace: openshift-adp + conditions: + - type: Installed + status: "True" + - kind: ClusterServiceVersion + namespace: openshift-adp + conditions: + - type: Succeeded + status: "True" + reason: InstallSucceeded diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpSecret.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpSecret.yaml new file mode 100644 index 0000000..49cd31d --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpSecret.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-credentials + namespace: openshift-adp + annotations: + ran.openshift.io/ztp-deploy-wave: "10" +type: Opaque diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpSubscription.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpSubscription.yaml new file mode 100644 index 0000000..e425138 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpSubscription.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: openshift-adp + namespace: 
openshift-adp + annotations: + ran.openshift.io/ztp-deploy-wave: "2" +spec: + channel: stable + name: redhat-oadp-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + installPlanApproval: Manual +status: + state: AtLatestKnown diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpSubscriptionNS.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpSubscriptionNS.yaml new file mode 100644 index 0000000..ddcad97 --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpSubscriptionNS.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-adp + annotations: + workload.openshift.io/allowed: management + ran.openshift.io/ztp-deploy-wave: "2" + labels: + kubernetes.io/metadata.name: openshift-adp diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpSubscriptionOperGroup.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpSubscriptionOperGroup.yaml new file mode 100644 index 0000000..003bb4e --- /dev/null +++ b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/OadpSubscriptionOperGroup.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: openshift-adp + namespace: openshift-adp + annotations: + ran.openshift.io/ztp-deploy-wave: "2" +spec: + targetNamespaces: + - openshift-adp From 5cdd700cd57a1f7570b0f4141d36c3871af65da2 Mon Sep 17 00:00:00 2001 From: tingxueca Date: Thu, 26 Feb 2026 17:10:21 -0500 Subject: [PATCH 2/3] fix the wave --- .../templates/pgt-du-4.21/group-du-sno-ranGen.yaml | 2 +- .../telco-ran-du-ztp-421/templates/source-crs/LcaSecret.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-sno-ranGen.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-sno-ranGen.yaml index 5246a4e..14704f7 100644 --- 
a/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-sno-ranGen.yaml +++ b/ansible/roles/telco-ran-du-ztp-421/templates/pgt-du-4.21/group-du-sno-ranGen.yaml @@ -13,7 +13,7 @@ spec: mcp: "master" sourceFiles: {% if include_oadp_operator %} - - fileName: data-protection/OadpSecret.yaml + - fileName: OadpSecret.yaml policyName: "{{ 'oadp-config' if manyPolicies else 'config' }}-policy" data: cloud: {{ lookup('template', './s3-creds.j2') | b64encode }} diff --git a/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSecret.yaml b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSecret.yaml index 5b5b810..2b0aae2 100644 --- a/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSecret.yaml +++ b/ansible/roles/telco-ran-du-ztp-421/templates/source-crs/LcaSecret.yaml @@ -5,5 +5,5 @@ metadata: name: seedgen namespace: openshift-lifecycle-agent annotations: - ran.openshift.io/ztp-deploy-wave: "100" + ran.openshift.io/ztp-deploy-wave: "10" type: Opaque From 2fd5a57f393f683428476e3f3735663c06cf62ff Mon Sep 17 00:00:00 2001 From: tingxueca Date: Fri, 27 Feb 2026 15:57:42 -0500 Subject: [PATCH 3/3] Update ai-clusterinstance to include 4.21 --- .../sno-create-manifests/templates/ai-clusterinstance.yml.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/sno-create-manifests/templates/ai-clusterinstance.yml.j2 b/ansible/roles/sno-create-manifests/templates/ai-clusterinstance.yml.j2 index 2d034d9..d28eac2 100644 --- a/ansible/roles/sno-create-manifests/templates/ai-clusterinstance.yml.j2 +++ b/ansible/roles/sno-create-manifests/templates/ai-clusterinstance.yml.j2 @@ -143,7 +143,7 @@ spec: holdInstallation: false {% if sno_du_profile == "4.15" %} installConfigOverrides: "{\"capabilities\":{\"baselineCapabilitySet\": \"None\", \"additionalEnabledCapabilities\": [ \"OperatorLifecycleManager\", \"NodeTuning\" ] }}" -{% elif sno_du_profile in ["4.16", "4.17", "4.18", "4.19", "4.20"] %} +{% elif sno_du_profile in 
["4.16", "4.17", "4.18", "4.19", "4.20", "4.21"] %} installConfigOverrides: "{\"capabilities\":{\"baselineCapabilitySet\": \"None\", \"additionalEnabledCapabilities\": [ \"OperatorLifecycleManager\", \"Ingress\", \"NodeTuning\" ] }}" ignitionConfigOverride: "{\"ignition\":{\"version\":\"3.2.0\"},\"storage\":{\"files\":[{\"path\":\"/etc/containers/policy.json\",\"mode\":420,\"overwrite\":true,\"contents\":{\"source\":\"data:text/plain;charset=utf-8;base64,ewogICAgImRlZmF1bHQiOiBbCiAgICAgICAgewogICAgICAgICAgICAidHlwZSI6ICJpbnNlY3VyZUFjY2VwdEFueXRoaW5nIgogICAgICAgIH0KICAgIF0sCiAgICAidHJhbnNwb3J0cyI6CiAgICAgICAgewogICAgICAgICAgICAiZG9ja2VyLWRhZW1vbiI6CiAgICAgICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAgICAgIiI6IFt7InR5cGUiOiJpbnNlY3VyZUFjY2VwdEFueXRoaW5nIn1dCiAgICAgICAgICAgICAgICB9CiAgICAgICAgfQp9\"}}]}}" {% endif %}