diff --git a/.ansible-lint b/.ansible-lint
new file mode 100644
index 00000000..4018263d
--- /dev/null
+++ b/.ansible-lint
@@ -0,0 +1,20 @@
+---
+# .ansible-lint
+
+profile: null # min, basic, moderate, safety, shared, production
+
+# exclude_paths included in this file are parsed relative to this file's location
+# and not relative to the CWD of execution. CLI arguments passed to the --exclude
+# option are parsed relative to the CWD of execution.
+exclude_paths:
+ - .github/
+ - docs/
+# parseable: true
+# quiet: true
+# strict: true
+verbosity: 1
+
+skip_list:
+ - no-changed-when
+ - name[casing]
+ - var-naming[no-role-prefix]
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 00000000..4b6c3526
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,31 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+[Dockerfile]
+indent_size = 4
+
+# Tab indentation (no size specified)
+[Makefile]
+indent_style = tab
+
+# Don't mess with markdown files
+[*.md]
+trim_trailing_whitespace = false
+
+[*.py]
+indent_size = 4
+
+[*.sh]
+indent_size = 4
+
+[*.{yml,yaml}]
+indent_size = 2
diff --git a/.gitattributes b/.gitattributes
index dfe07704..165cd6be 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,2 +1,4 @@
# Auto detect text files and perform LF normalization
* text=auto
+*.yaml linguist-detectable=true
+*.yaml linguist-language=YAML
diff --git a/.github/dco.yml b/.github/dco.yml
new file mode 100644
index 00000000..de6cd3bf
--- /dev/null
+++ b/.github/dco.yml
@@ -0,0 +1,4 @@
+# This enables DCO bot for you, please take a look https://github.com/probot/dco
+# for more details.
+require:
+ members: false
diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml
new file mode 100644
index 00000000..b8dd0bf4
--- /dev/null
+++ b/.github/workflows/main.yaml
@@ -0,0 +1,41 @@
+name: Publish docs via GitHub Pages
+on:
+ workflow_dispatch:
+
+ push:
+ branches:
+ - main
+ paths:
+ - 'mkdocs.yaml'
+ - 'docs/**'
+
+jobs:
+ build:
+ name: Deploy docs
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout main
+ uses: actions/checkout@v4
+ with:
+ ref: main
+ fetch-depth: 0
+
+ - name: Setup Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: '3.x'
+
+ - name: Fetch Latest gh-pages Branch
+ run: |
+ git fetch origin gh-pages
+ git checkout gh-pages
+ git pull origin gh-pages
+ git checkout main
+
+ - name: Install mkdocs
+ run: pip install mkdocs
+
+ - name: Deploy docs
+ run: python3 -m mkdocs gh-deploy
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.gitignore b/.gitignore
index e43b0f98..f05903c7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,4 @@
.DS_Store
+.iso
+.vscode
+site
\ No newline at end of file
diff --git a/.releaserc.json b/.releaserc.json
new file mode 100644
index 00000000..73bb5647
--- /dev/null
+++ b/.releaserc.json
@@ -0,0 +1,30 @@
+{
+ "branches": [
+ {"name": "main"}
+ ],
+ "plugins": [
+ ["@semantic-release/commit-analyzer", {
+ "preset": "conventionalcommits",
+ "releaseRules": [
+ {"type": "chore", "release": "patch"},
+ {"type": "docs", "release": "patch"},
+ {"type": "test", "release": "patch"},
+ {"type": "perf", "release": "patch"}
+ ]
+ }],
+ ["@semantic-release/release-notes-generator", {
+ "preset": "conventionalcommits",
+ "presetConfig": {
+ "types": [
+ {"type": "feat", "section": "Features"},
+ {"type": "fix", "section": "Bug Fixes"},
+ {"type": "chore", "section": "Miscellaneous"},
+ {"type": "docs", "section": "Documentation"},
+ {"type": "perf", "section": "Performance"},
+ {"type": "test", "section": "Tests", "hidden": false}
+ ]
+ }
+ }],
+ "@semantic-release/github"
+ ]
+}
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..d4e8b942
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021, 2023 IBM Corporation. All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/README.md b/README.md
index 10d0a3ae..47b4d04c 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,2 @@
-# Ansible-OpenShift-Provisioning
-
-Phillip adding comment via lesson 3
-
+# Ansible-Automated OpenShift Provisioning on KVM on IBM zSystems / LinuxONE
+The documentation for this project can be found [here](https://ibm.github.io/Ansible-OpenShift-Provisioning/).
diff --git a/ansible.cfg b/ansible.cfg
index 5d2ee146..f783e28b 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -1,5 +1,11 @@
[defaults]
-inventory = inventory
-private_key_file = ~/.ssh/ansible
-
+private_key_file=~/.ssh/ansible-ocpz
+inventory=inventories/default/
+roles_path=roles
+#collections_path=collections
+interpreter_python=auto
+host_key_checking=False
+deprecation_warnings=False
+[inventory]
+cache=True
diff --git a/bootstrap.yml b/bootstrap.yml
deleted file mode 100644
index 64b68c77..00000000
--- a/bootstrap.yml
+++ /dev/null
@@ -1,45 +0,0 @@
----
-
-- hosts: all
- become: true
- pre_tasks:
-
- - name: install updates (CentOS)
- tags: always
- dnf:
- update_only: yes
- update_cache: yes
- when: ansible_distribution == "CentOS"
-
- - name: install updates (Ubuntu)
- tags: always
- apt:
- upgrade: dist
- update_cache: yes
- when: ansible_distribution == "Ubuntu"
-
-- hosts: all
- become: true
- tasks:
-
- - name: create simone user
- tags: always
- user:
- name: simone
- groups: root
-
- - name: add ssh key for simone
- tags: always
- authorized_key:
- user: simone
- key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKHEBRxXjvVaYY8mg0S05qqUWJQaDLbzO4w5uwN8ogJ2 ansible"
-
- - name: add sudeoers file for simone
- tags: always
- copy:
- src: sudoer_simone
- dest: /etc/sudoers.d/simone
- owner: root
- group: root
- mode: 0440
-
diff --git a/build_script.sh b/build_script.sh
deleted file mode 100644
index a073334b..00000000
--- a/build_script.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-# Created by Phillip
-
-
-
-#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap-0.qcow2 100G
-#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G
-#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G
-#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G
-#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G
-qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G
-
-#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole
-
-#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole
-
-#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole
-
-#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole
-
-#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole
-
-virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole
diff --git a/collections/ansible/posix/ansible-posix-1.4.0.tar.gz b/collections/ansible/posix/ansible-posix-1.4.0.tar.gz
new file mode 100644
index 00000000..8fe36513
Binary files /dev/null and b/collections/ansible/posix/ansible-posix-1.4.0.tar.gz differ
diff --git a/collections/community/crypto/community-crypto-2.3.4.tar.gz b/collections/community/crypto/community-crypto-2.3.4.tar.gz
new file mode 100644
index 00000000..a11f6b82
Binary files /dev/null and b/collections/community/crypto/community-crypto-2.3.4.tar.gz differ
diff --git a/collections/community/general/community-general-5.2.0.tar.gz b/collections/community/general/community-general-5.2.0.tar.gz
new file mode 100644
index 00000000..6781d2e1
Binary files /dev/null and b/collections/community/general/community-general-5.2.0.tar.gz differ
diff --git a/collections/community/libvirt/community-libvirt-1.1.0.tar.gz b/collections/community/libvirt/community-libvirt-1.1.0.tar.gz
new file mode 100644
index 00000000..84ad2278
Binary files /dev/null and b/collections/community/libvirt/community-libvirt-1.1.0.tar.gz differ
diff --git a/collections/ibm/ibm_zhmc/ibm-ibm_zhmc-1.1.0.tar.gz b/collections/ibm/ibm_zhmc/ibm-ibm_zhmc-1.1.0.tar.gz
new file mode 100644
index 00000000..1fce388c
Binary files /dev/null and b/collections/ibm/ibm_zhmc/ibm-ibm_zhmc-1.1.0.tar.gz differ
diff --git a/copy-image.yml b/copy-image.yml
deleted file mode 100644
index 13be2c2c..00000000
--- a/copy-image.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# tasks to check if kvm image file is present and copy if it is not
-
-- hosts: all
- become: true
- tasks:
-
- - name: check to see if kvm image file is present
- stat:
- path: /tmp/rhel-guest-image-8.3-400.s390x.qcow2
- get_checksum: no
- get_mime: no
- get_attributes: no
- register: os_disk_file
-
- - name: fail if image file exists
- fail:
- msg: "Image file exists"
- when: os_disk_file is true
-
- - name: copy kvm image to kvm host(s)
- copy:
- src: rhel-guest-image-8.3-400.s390x.qcow2
- dest: /tmp/rhel-guest-image-8.3-400.s390x.qcow2
- owner: root
- owner: root
- mode: 0644
-
diff --git a/docs/acknowledgements.md b/docs/acknowledgements.md
new file mode 100644
index 00000000..33087f0d
--- /dev/null
+++ b/docs/acknowledgements.md
@@ -0,0 +1,13 @@
+Phillip Wilson
+Filipe Miranda
+Patrick Fruth
+Wasif Mohammad
+Stuart Tener
+Fred Bader
+Ken Morse
+Nico Boehr
+Trevor Vardeman
+Matt Mondics
+Klaus Smolin
+Amadeus Podvratnik
+Miao Zhang-Cohen
\ No newline at end of file
diff --git a/docs/before-you-begin.md b/docs/before-you-begin.md
new file mode 100644
index 00000000..362d9d85
--- /dev/null
+++ b/docs/before-you-begin.md
@@ -0,0 +1,30 @@
+# Before You Begin
+## Description
+* This project automates the User-Provisioned Infrastructure (UPI) method for deploying Red Hat OpenShift Container Platform (RHOCP) on IBM zSystems / LinuxONE using Kernel-based Virtual Machine (KVM) as the hypervisor.
+## Support
+* This is an unofficial project created by IBMers.
+* This installation method is not officially supported by either Red Hat or IBM.
+* However, once installation is complete, the resulting cluster is supported by Red Hat. UPI is the only supported method for RHOCP on IBM zSystems.
+## Difficulty
+* This process is much easier than doing so manually, but still not an easy task. You will likely encounter errors, but you will reach those errors quicker and understand the problem faster than if you were doing this process manually. After using these playbooks once, successive deployments will be much easier.
+ * A very basic understanding of what Ansible does is recommended. Advanced understanding is helpful for further customization of the playbooks.
+ * A basic understanding of the command-line is required.
+ * A basic understanding of git is recommended, especially for creating your organization's own fork of the repository for further customization.
+ * An advanced understanding of your computing environment is required for setting the environment variables.
+* These Ansible Playbooks automate a User-Provisioned Infrastructure (UPI) deployment of Red Hat OpenShift Container Platform (RHOCP). This process, when done manually, is extremely tedious, time-consuming, and requires high levels of Linux AND IBM zSystems expertise.
+* UPI is currently the only supported method for deploying RHOCP on IBM zSystems.
+## Why Free and Open-Source?
+* Trust:
+ * IBM zSystems run some of the most highly-secure workloads in the world. Trust is paramount.
+ * Developing and using code transparently builds trust between developers and users, so that users feel safe using it on their highly sensitive systems.
+* Customization:
+ * IBM zSystems exist in environments that can be highly complex and vary drastically from one datacenter to another.
+ * Using code that isn't in a proprietary black box allows you to see exactly what is being done so that you can change any part of it to meet your specific needs.
+* Collaboration:
+ * If users encounter a problem, or have a feature request, they can get in contact with the developers directly.
+ * Submit an issue or pull request on GitHub or email jacob.emery@ibm.com.
+ * Collaboration is highly encouraged!
+* Lower Barriers to Entry:
+ * The easier it is to get RHOCP on IBM zSystems up and running, the better - for you, IBM and Red Hat!
+ * It is free because RHOCP is an incredible product that should have the least amount of barriers to entry as possible.
+ * The world needs open-source, private, and hybrid cloud.
\ No newline at end of file
diff --git a/docs/get-info.md b/docs/get-info.md
new file mode 100644
index 00000000..cf432c2b
--- /dev/null
+++ b/docs/get-info.md
@@ -0,0 +1,42 @@
+# Step 1: Get Info
+## Get Repository
+* Open the terminal
+* Navigate to a folder (AKA directory) where you would like to store this project.
+ * Either do so graphically, or use the command-line.
+ * Here are some helpful commands for doing so:
+ * `pwd` to see what directory you're currently in
+ * `ls` to list child directories
+ * `cd ` to change directories (`cd ..` to go up to the parent directory)
+ * `mkdir ` to create a new directory
+* Copy/paste the following and hit enter:
+`git clone https://github.com/IBM/Ansible-OpenShift-Provisioning.git`
+* Change into the newly created directory
+* The commands and output should resemble the following example:
+```
+$ pwd
+/Users/example-user
+$ mkdir ansible-project
+$ cd ansible-project/
+$ git clone https://github.com/IBM/Ansible-OpenShift-Provisioning.git
+Cloning into 'Ansible-OpenShift-Provisioning'...
+remote: Enumerating objects: 3472, done.
+remote: Counting objects: 100% (200/200), done.
+remote: Compressing objects: 100% (57/57), done.
+remote: Total 3472 (delta 152), reused 143 (delta 143), pack-reused 3272
+Receiving objects: 100% (3472/3472), 506.29 KiB | 1.27 MiB/s, done.
+Resolving deltas: 100% (1699/1699), done.
+$ ls
+Ansible-OpenShift-Provisioning
+$ cd Ansible-OpenShift-Provisioning/
+$ ls
+CHANGELOG.md README.md docs mkdocs.yaml roles
+LICENSE ansible.cfg inventories playbooks
+```
+## Get Pull Secret
+* In a web browser, navigate to Red Hat's [Hybrid Cloud Console](https://console.redhat.com/openshift/install/ibmz/user-provisioned), click the text that says 'Copy pull secret' and save it for the next step.
+## Gather Environment Information
+* You will need a lot of information about the environment this cluster will be set-up in.
+* You will need the help of at least your IBM zSystems infrastructure team so they can provision you a storage group. You'll also need them to provide you with
+  an IP address range, hostnames, subnet, gateway, how much disk space you have to work with, etc.
+* A full list of variables needed are found on the next page. Many of them are filled in with defaults or are optional.
+* Please take your time. I would recommend having someone on stand-by in case you need more information or need to ask a question about the environment.
\ No newline at end of file
diff --git a/docs/images/ansible-logo.png b/docs/images/ansible-logo.png
new file mode 100644
index 00000000..16b6dd61
Binary files /dev/null and b/docs/images/ansible-logo.png differ
diff --git a/docs/images/overview.png b/docs/images/overview.png
new file mode 100644
index 00000000..b2abf456
Binary files /dev/null and b/docs/images/overview.png differ
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 00000000..8dd9f1d9
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,8 @@
+# Ansible-Automated OpenShift Provisioning on KVM on IBM zSystems / LinuxONE
+## Overview
+These Ansible Playbooks automate the setup and deployment of a Red Hat OpenShift Container Platform (RHOCP) cluster on IBM zSystems / LinuxONE with Kernel Virtual Machine (KVM) as the hypervisor.
+
+## Ready to Start?
+Use the left-hand panel to navigate the site. Start with the [Before You Begin](before-you-begin.md) page.
+## Need Help?
+Contact Jacob Emery at jacob.emery@ibm.com
\ No newline at end of file
diff --git a/docs/prerequisites.md b/docs/prerequisites.md
new file mode 100644
index 00000000..f0bcb886
--- /dev/null
+++ b/docs/prerequisites.md
@@ -0,0 +1,74 @@
+
+# Prerequisites
+## Red Hat
+* Account ([Sign Up](https://www.redhat.com/wapps/ugc/register.html?_flowId=register-flow&_flowExecutionKey=e1s1))
+* [License](https://access.redhat.com/products/red-hat-openshift-container-platform/) or [free trial](https://www.redhat.com/en/technologies/cloud-computing/openshift/try-it) of Red Hat OpenShift Container Platform for IBM Z systems - s390x architecture (comes with the required licenses for Red Hat Enterprise Linux (RHEL) and CoreOS)
+## IBM zSystems
+* Hardware Management Console (HMC) access on IBM zSystems or LinuxONE
+* In order to use the [playbook](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/2_create_kvm_host.yaml) that automates the creation of the KVM host Dynamic Partition Manager (DPM) mode is required.
+ * If DPM mode is not an option for your environment, that playbook can be skipped, but a bare-metal RHEL server must be set-up on an LPAR manually (Filipe Miranda's [how-to article](https://www.linkedin.com/pulse/demystifying-install-process-red-hat-enterprise-linux-filipe-miranda/)) before moving on. Once that is done, continue with the [playbook 3](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/3_setup_kvm_host.yaml) that sets up the KVM host.
+* For a minimum installation, at least:
+ * 6 Integrated Facilities for Linux (IFLs) with SMT2 enabled
+ * 85 GB of RAM
+ * An FCP storage group created with 1 TB of disk space
+ * 8 IPv4 addresses
+## File Server
+* A file server accessible from your IBM zSystems / LinuxONE server.
+* Use the [setup_file_server playbook](https://github.com/jacobemery/Ansible-OpenShift-Provisioning/blob/main/playbooks/setup_file_server.yaml) to help you setup the file server, or at least better understand what is required, if needed. It will work most reliably against a RHEL server. You will be prompted for an active RHEL 8.x 'Binary DVD' download link, see instructions for getting that in the next bullet point.
+ ```
+ ansible-playbook playbooks/setup_file_server.yaml
+ ```
+* A DVD ISO file of Red Hat Enterprise Linux (RHEL) 8 for s390x architecture mounted in an accessible folder (e.g. /home/user/rhel/ for FTP or /var/www/html/rhel for HTTP)
+ * If you do not have RHEL for s390x yet, go to the Red Hat [Customer Portal](https://access.redhat.com/downloads/content) and download it.
+ * Under 'Product Variant' use the drop-down menu to select 'Red Hat Enterprise Linux for IBM z Systems'
+ * Double-check it's for version 8 and for s390x architecture.
+ * Then scroll down to Red Hat Enterprise Linux 8.x Binary DVD and click on the 'Download Now' button, or right click the button and click 'Copy Link'.
+* A folder created to store config files (e.g. /home/user/ocp-config for FTP or /var/www/html/ocp-config for http)
+* A user with sudo and SSH access.
+## Ansible Controller
+* The computer/virtual machine running Ansible, sometimes referred to as localhost.
+* Must be running the MacOS or Linux operating systems.
+* Network access to your IBM zSystems / LinuxONE hardware
+* All you need to run Ansible is a terminal and a text editor. However, an IDE like [VS Code](https://code.visualstudio.com/download) is highly recommended for an integrated, user-friendly experience with helpful extensions like [YAML](https://marketplace.visualstudio.com/items?itemName=redhat.vscode-yaml).
+* [Python3](https://realpython.com/installing-python/) installed:
+ * MacOS, first install [Homebrew](https://brew.sh/) package manager:
+ ```
+ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
+ ```
+ then install Python3
+ ```
+ brew install python3 #MacOS
+ ```
+ * Fedora:
+ ```
+ sudo dnf install python3 #Fedora
+ ```
+ * Debian:
+ ```
+ sudo apt install python3 #Debian
+ ```
+* Once Python3 is installed, you also need [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) version 2.9 or above:
+```
+pip3 install ansible
+```
+* Once Ansible is installed, you will need a few collections from Ansible Galaxy:
+```
+ansible-galaxy collection install community.general community.crypto ansible.posix community.libvirt
+```
+* If you will be using these playbooks to automate the creation of the LPAR(s) that will act as KVM host(s) for the cluster, you will also need:
+```
+ansible-galaxy collection install ibm.ibm_zhmc
+```
+* If you are using MacOS, you also need to have [Xcode](https://apps.apple.com/us/app/xcode/id497799835?mt=12):
+```
+xcode-select --install
+```
+## Jumphost for NAT network
+* If NAT is used for the KVM network instead of macvtap, an SSH tunnel through a jumphost is required to access the OCP cluster. To configure the SSH tunnel, the `expect` package is required on the jumphost. Expect will be installed during the setup of the bastion (4_setup_bastion.yaml playbook). In case of missing access to install additional packages, install it manually on the jumphost by executing the following command:
+```
+yum install expect
+```
+In addition make sure that python3 is installed on the jumphost otherwise ansible might fail to run the tasks. You can install python3 manually by executing the following command:
+```
+yum install python3
+```
diff --git a/docs/run-the-playbooks-for-hypershift.md b/docs/run-the-playbooks-for-hypershift.md
new file mode 100644
index 00000000..152e77b2
--- /dev/null
+++ b/docs/run-the-playbooks-for-hypershift.md
@@ -0,0 +1,134 @@
+# Run the Playbooks
+## Prerequisites
+* Running OCP Cluster ( Management Cluster )
+* KVM host with root user access or user with sudo privileges
+
+### Network Prerequisites
+* DNS entries to resolve api.${cluster}.${domain}, api-int.${cluster}.${domain}, and *.apps.${cluster}.${domain} to a load balancer deployed to redirect incoming traffic to the ingress pods (Bastion).
+* If using dynamic IPs for agents, make sure the DHCP server has entries mapping the MAC addresses used in the installation to IPv4 addresses, and that the DHCP server configures those IPs to use the nameserver you have configured.
+## Note:
+* As of now, only macvtap is supported for the HyperShift agent-based installation.
+
+## Step-1: Setup Ansible Vault for Management Cluster Credentials
+### Overview
+* Creating an encrypted file for storing Management Cluster Credentials and other passwords.
+### Steps:
+* The ansible-vault create command is used to create the encrypted file.
+* Create an encrypted file in the playbooks directory and set the Vault password (the command below will prompt for the Vault password).
+```
+ansible-vault create playbooks/secrets.yaml
+```
+
+* Give the credentials of Management Cluster in the encrypted file (created above) in following format.
+```
+kvm_host_password: ''
+bastion_root_pw: ''
+api_server: ':'
+user_name: ''
+password: ''
+```
+
+* You can edit the encrypted file using below command
+```
+ansible-vault edit playbooks/secrets.yaml
+```
+* Make sure you entered the Management Cluster credentials properly; incorrect credentials will cause problems while logging in to the cluster in further steps.
+
+## Step-2: Initial Setup for Hypershift
+* Navigate to the [root folder of the cloned Git repository](https://github.com/IBM/Ansible-OpenShift-Provisioning) in your terminal (`ls` should show [ansible.cfg](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/ansible.cfg)).
+* Update all the variables in Section-16 ( Hypershift ) and Section-3 ( File Server : ip , protocol and iso_mount_dir ) in [all.yaml](https://github.com/veera-damisetti/Ansible-OpenShift-Provisioning/blob/main/inventories/default/group_vars/all.yaml.template) before running the playbooks.
+* First playbook to be run is setup_for_hypershift.yaml which will create inventory file for hypershift and will add ssh key to the kvm host.
+
+* Run this shell command:
+```
+ansible-playbook playbooks/setup_for_hypershift.yaml --ask-vault-pass
+```
+
+## Step-3: Create Hosted Cluster
+* Run each part step-by-step by running one playbook at a time, or all at once using [hypershift.yaml](https://github.com/veera-damisetti/Ansible-OpenShift-Provisioning/blob/main/playbooks/hypershift.yaml).
+* Here's the full list of playbooks to be run in order, full descriptions of each can be found further down the page:
+ * create_hosted_cluster.yaml ([code](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/create_hosted_cluster.yaml))
+ * create_agents_and_wait_for_install_complete.yaml ([code](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/create_agents_and_wait_for_install_complete.yaml))
+
+* Watch Ansible as it completes the installation, correcting errors if they arise.
+* To look at what tasks are running in detail, open the playbook or roles/role-name/tasks/main.yaml
+* Alternatively, to run all the playbooks at once, start the master playbook by running this shell command:
+```
+ansible-playbook playbooks/hypershift.yaml --ask-vault-pass
+```
+
+# Description for Playbooks
+
+## setup_for_hypershift Playbook
+### Overview
+* First-time setup of the Ansible Controller, the machine running Ansible.
+### Outcomes
+* Inventory file for hypershift to be created.
+* SSH key generated for Ansible passwordless authentication.
+* Ansible SSH key is copied to kvm host.
+### Notes
+* You can use an existing SSH key as your Ansible key, or have Ansible create one for you.
+
+## create_hosted_cluster Playbook
+### Overview
+* Creates and configures bastion
+* Creating AgentServiceConfig, HostedControlPlane, InfraEnv Resources, Download Images
+### Outcomes
+* Install prerequisites on kvm_host
+* Create bastion
+* Configure bastion
+* Log in to Management Cluster
+* Creates AgentServiceConfig resource and required configmaps
+* Deploys HostedControlPlane
+* Creates InfraEnv resource and wait till ISO generation
+* Download required Images to kvm_host (initrd.img and kernel.img)
+* Download rootfs.img and configure httpd on bastion.
+
+## create_agents_and_wait_for_install_complete Playbook
+### Overview
+* Boots the Agents
+* Scale the Nodepool and monitor all the resources required.
+### Outcomes
+* Boot Agents
+* Monitor the attachment of agents
+* Approves the agents
+* Scale up the nodepool
+* Monitor agentmachines and machines creation
+* Monitor the worker nodes attachment
+* Configure HAProxy for Hosted workers
+* Monitor the Cluster operators
+* Display Login Credentials for Hosted Cluster
+
+
+
+# Destroy the Hosted Cluster
+
+### Overview
+* Destroy the Hosted Control Plane and other resources created as part of installation
+
+### Procedure
+* Run the playbook [destroy_cluster_hypershift.yaml](https://github.com/veera-damisetti/Ansible-OpenShift-Provisioning/blob/main/playbooks/destroy_cluster_hypershift.yaml) to destroy all the resources created while installation
+```
+ansible-playbook playbooks/destroy_cluster_hypershift.yaml --ask-vault-pass
+```
+
+## destroy_cluster_hypershift Playbook
+### Overview
+* Delete all the resources on Hosted Cluster
+* Destroy the Hosted Control Plane
+### Outcomes
+* Scale in the nodepool to 0
+* Monitors the deletion of workers, agent machines and machines.
+* Deletes the agents
+* Deletes InfraEnv Resource
+* Destroys the Hosted Control Plane
+* Deletes AgentServiceConfig
+* Deletes the images downloaded on kvm host
+* Destroys VMs of Bastion and Agents
+
+## Notes
+#### Overriding OCP Release Image for HCP
+* If you want to use any other image as OCP release image for HCP , you can override it by environment variable.
+```
+export HCP_RELEASE_IMAGE=""
+```
diff --git a/docs/run-the-playbooks.md b/docs/run-the-playbooks.md
new file mode 100644
index 00000000..60d663ef
--- /dev/null
+++ b/docs/run-the-playbooks.md
@@ -0,0 +1,220 @@
+# Step 4: Run the Playbooks
+## Overview
+* Navigate to the [root folder of the cloned Git repository](https://github.com/IBM/Ansible-OpenShift-Provisioning) in your terminal (`ls` should show [ansible.cfg](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/ansible.cfg)).
+* Run this shell command:
+```
+ansible-playbook playbooks/0_setup.yaml
+```
+
+* Run each part step-by-step by running one playbook at a time, or all at once using [playbooks/site.yaml](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/site.yaml).
+* Here's the full list of playbooks to be run in order, full descriptions of each can be found further down the page:
+ * 0_setup.yaml ([code](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/0_setup.yaml))
+ * 1_create_lpar.yaml ([code](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/1_create_lpar.yaml))
+ * 2_create_kvm_host.yaml ([code](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/2_create_kvm_host.yaml))
+ * 3_setup_kvm_host.yaml ([code](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/3_setup_kvm_host.yaml))
+ * 4_create_bastion.yaml ([code](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/4_create_bastion.yaml))
+ * 5_setup_bastion.yaml ([code](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/5_setup_bastion.yaml))
+ * 6_create_nodes.yaml ([code](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/6_create_nodes.yaml))
+ * 7_ocp_verification.yaml ([code](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/7_ocp_verification.yaml))
+* Watch Ansible as it completes the installation, correcting errors if they arise.
+* To look at what tasks are running in detail, open the playbook or roles/role-name/tasks/main.yaml
+* Alternatively, to run all the playbooks at once, start the master playbook by running this shell command:
+```
+ansible-playbook playbooks/site.yaml
+```
+* If the process fails in error, go through the steps in the [troubleshooting](troubleshooting.md) page.
+* At the end of the last playbook, follow the printed instructions for first-time login to the cluster.
+
+## 0 Setup Playbook
+### Overview
+First-time setup of the Ansible Controller, the machine running Ansible.
+### Outcomes
+* Packages and Ansible Galaxy collections are confirmed to be installed properly.
+* host_vars files are confirmed to match KVM host(s) hostnames.
+* Ansible inventory is templated out and working properly.
+* SSH key generated for Ansible passwordless authentication.
+* SSH agent is setup on the Ansible Controller.
+* Ansible SSH key is copied to the file server.
+### Notes
+* You can use an existing SSH key as your Ansible key, or have Ansible create one for you. It is highly recommended to use one without a passphrase.
+## 1 Create LPAR Playbook
+### Overview
+Creation of one to three Logical Partitions (LPARs), depending on your configuration. Uses the Hardware Management Console (HMC) API, so your system must be in Dynamic Partition Manager (DPM) mode.
+### Outcomes
+* One to three LPARs created.
+* One to two Networking Interface Cards (NICs) attached per LPAR.
+* One to two storage groups attached per LPAR.
+* LPARs are in 'Stopped' state.
+### Notes
+* Recommend opening the HMC via web-browser to watch the LPARs come up.
+## 2 Create KVM Host Playbook
+### Overview
+First-time start-up of Red Hat Enterprise Linux installed natively on the LPAR(s). Uses the Hardware Management Console (HMC) API, so your system must be in Dynamic Partition Manager (DPM) mode. Configuration files are passed to the file server and RHEL is booted and then kickstarted for fully automated setup.
+### Outcomes
+* LPAR(s) started up in 'Active' state.
+* Configuration files (cfg, ins, prm) for the KVM host(s) are on the file server in the provided configs directory.
+### Notes
+* Recommended to open the HMC via web-browser to watch the Operating System Messages for each LPAR as they boot in order to debug any potential problems.
+## 3 Setup KVM Host Playbook
+### Overview
+Configures the RHEL server(s) installed natively on the LPAR(s) to act as virtualization hypervisor(s) to host the virtual machines that make up the eventual cluster.
+### Outcomes
+* Ansible SSH key is copied to all KVM hosts for passwordless authentication.
+* RHEL subscription is auto-attached to all KVM hosts.
+* Software packages specified in group_vars/all.yaml have been installed.
+* Cockpit console enabled for Graphical User Interface via web browser. Go to http://kvm-ip-here:9090 to view it.
+* Libvirt is started and enabled.
+* Logical volume group that was created during kickstart is extended to fill all available space.
+* A macvtap bridge has been created on the host's networking interface.
+### Notes
+* If you're using a pre-existing LPAR, take a look at roles/configure_storage/tasks/main.yaml to make sure that the commands that will be run to extend the logical volume will work. Storage configurations can vary widely. The values there are the defaults from using autopart during kickstart. Also be aware that if lpar.storage_group_2.auto_config is True, the role roles/configure_storage/tasks/main.yaml will be non-idempotent. Meaning, it will fail if you run it twice.
+## 4 Create Bastion Playbook
+### Overview
+Creates the bastion KVM guest on the first KVM host. The bastion hosts essential services for the cluster. If you already have a bastion server, that can be used instead of running this playbook.
+### Outcomes
+* Bastion configs are templated out to the file server.
+* Bastion is booted using virt-install.
+* Bastion is kickstarted for fully automated setup of the operating system.
+### Notes
+* This can be a particularly sticky part of the process.
+* If any of the variables used in the virt-install or kickstart are off, the bastion won't be able to boot.
+* Recommend watching it come up from the first KVM host's cockpit. Go to http://kvm-ip-here:9090 via web-browser to view it. You'll have to sign in, enable administrative access (top right), and then click on the virtual machines tab on the left-hand toolbar.
+## 5 Setup Bastion Playbook
+### Overview
+Configuration of the bastion to host essential infrastructure services for the cluster. Can be first-time setup or use an existing server.
+### Outcomes
+* Ansible SSH key copied to bastion for passwordless authentication.
+* Software packages specified in group_vars/all.yaml have been installed.
+* An OCP-specific SSH key is generated for passing into the install-config (then passed to the nodes).
+* Firewall is configured to permit traffic through the necessary ports.
+* Domain Name Server (DNS) configured to resolve cluster's IP addresses and APIs. Only done if env.bastion.options.dns is true.
+* DNS is checked to make sure all the necessary Fully Qualified Domain Names, including APIs resolve properly. Also ensures outside access is working.
+* High Availability Proxy (HAProxy) load balancer is configured. Only done if env.bastion.options.loadbalancer.on_bastion is true.
+* If the cluster is to be highly available (meaning spread across more than one LPAR), an OpenVPN server is setup on the bastion to allow for the KVM hosts to communicate between each other. OpenVPN clients are configured on the KVM hosts.
+* CoreOS rootfs is pulled to the bastion if not already there.
+* OCP client and installer are pulled down if not there already.
+* oc, kubectl and openshift-install binaries are installed.
+* OCP install-config is templated and backed up.
+* Manifests are created.
+* OCP install directory found at /root/ocpinst/ is created and populated with necessary files.
+* Ignition files for the bootstrap, control, and compute nodes are transferred to HTTP-accessible directory for booting nodes.
+### Notes
+* The stickiest part is DNS setup and get_ocp role at the end.
+## 6 Create Nodes Playbook
+### Overview
+OCP cluster's nodes are created and the control plane is bootstrapped.
+### Outcomes
+* CoreOS initramfs and kernel are pulled down.
+* Control nodes are created and bootstrapped.
+* Bootstrap has been created, done its job connecting the control plane, and is then destroyed.
+* Compute nodes are created, as many as is specified in groups_vars/all.yaml.
+* Infra nodes, if defined in group_vars/all.yaml have been created, but are at this point essentially just compute nodes.
+### Notes
+* To watch the bootstrap do its job connecting the control plane: first, SSH to the bastion, then change to root (sudo -i), from there SSH to the bootstrap node as user 'core' (e.g. ssh core@bootstrap-ip). Once you're in the bootstrap run 'journalctl -b -f -u release-image.service -u bootkube.service'. Expect many errors as the control planes come up. You're waiting for the message 'bootkube.service complete'
+* If the cluster is highly available, the bootstrap node will be created on the last (usually third) KVM host in the group. Since the bastion is on the first host, this was done to spread out the load.
+## 7 OCP Verification Playbook
+### Overview
+Final steps of waiting for and verifying the OpenShift cluster to complete its installation.
+### Outcomes
+* Certificate Signing Requests (CSRs) have been approved.
+* All nodes are in ready state.
+* All cluster operators are available.
+* OpenShift installation is verified to be complete.
+* Temporary credentials and URL are printed to allow easy first-time login to the cluster.
+### Notes
+* These steps may take a long time and the tasks are very repetitive because of that.
+* If your cluster has a very large number of compute nodes or insufficient resources, more rounds of approvals and time may be needed for these tasks.
+* If you made it this far, congratulations!
+* To install a new cluster, copy your inventory directory, change the default in the ansible.cfg, change the variables, and start again. With all the customizations to the playbooks you made along the way still intact.
+
+# Additional Playbooks
+
+## Create additional compute nodes (create_compute_node.yaml) and delete compute nodes (delete_compute_node.yaml)
+### Overview
+
+* In case you want to add additional compute nodes in a day-2 operation to your cluster or delete existing compute nodes in your cluster,
+run these playbooks. Currently we support only **env.network_mode** `macvtap` for these two playbooks.
We recommend creating a new config file for the additional compute node with such parameters:
+
+ ```yaml
+ ---
+ day2_compute_node:
+ vm_name: worker-4
+ vm_hostname: worker-4
+ vm_ip: 172.192.100.101
+ hostname: kvm01
+ host_arch: s390x
+
+ # rhcos_download_url with '/' at the end !
+ rhcos_download_url: "https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.13/4.13.0/"
+ # RHCOS live image filenames
+ rhcos_live_kernel: "rhcos-4.13.0-s390x-live-kernel-s390x"
+ rhcos_live_initrd: "rhcos-4.13.0-s390x-live-initramfs.s390x.img"
+ rhcos_live_rootfs: "rhcos-4.13.0-s390x-live-rootfs.s390x.img"
+ ```
+
+Make sure that the hostname where you want to create the additional compute node is defined in the `inventories/default/hosts` file.
+Now you can execute the `add_compute_node` playbook with this command and parameter:
+
+```shell
+ansible-playbook playbooks/add_compute_node.yaml --extra-vars "@compute-node.yaml"
+````
+
+### Outcomes
+* The defined compute node will be added or deleted, depending on which playbook you have executed.
+
+## Master Playbook (site.yaml)
+### Overview
+* Use this playbook to run all required playbooks (0-7) all at once.
+### Outcomes
+* Same as all the above outcomes for all required playbooks.
+* At the end you will have an OpenShift cluster deployed and first-time login credentials.
+
+## Pre-Existing Host Master Playbook (pre-existing_site.yaml)
+### Overview
+* Use this version of the master playbook if you are using a pre-existing LPAR(s) with RHEL already installed.
+### Outcomes
+* Same as all the above outcomes for all playbooks excluding 1 & 2.
+* This will not create LPAR(s) nor boot your RHEL KVM host(s).
+* At the end you will have an OpenShift cluster deployed and first-time login credentials.
+
+## Reinstall Cluster Playbook (reinstall_cluster.yaml)
+### Overview
+* In case the cluster needs to be completely reinstalled, run this playbook. It will refresh the ignitions that expire after 24 hours, teardown the nodes and re-create them, and then verify the installation.
+### Outcomes
+* get_ocp role runs.
+ * Delete the folders /var/www/html/bin and /var/www/html/ignition.
+  * CoreOS rootfs is pulled to the bastion.
+ * OCP client and installer are pulled down.
+ * oc, kubectl and openshift-install binaries are installed.
+ * OCP install-config is created from scratch, templated and backed up.
+  * Manifests are created.
+ * OCP install directory found at /root/ocpinst/ is deleted, re-created and populated with necessary files.
+ * Ignition files for the bootstrap, control, and compute nodes are transferred to HTTP-accessible directory for booting nodes.
+* 6 Create Nodes playbook runs, tearing down and recreating cluster nodes.
+* 7 OCP Verification playbook runs, verifying new deployment.
+
+## Setup File Server Playbook (setup_file_server.yaml)
+### Overview
+* If needed, use this playbook to setup the file server to be compatible with these playbooks.
+* Generally, it must be run after 0_setup.yaml and before 4_create_bastion.yaml, with a few specific cases:
+ * If using the file server to boot KVM hosts, it must be run before 2_create_kvm_host.yaml
+ * If using the KVM host as the file server (NAT-based networking only), it must be run after 3_setup_kvm_host.yaml
+### Outcomes
+* Prompt user for an active download link (expires after a few hours) of RHEL for IBM zSystems Binary DVD (iso) from Red Hat's Customer Portal ( https://access.redhat.com/downloads/content ) website.
+* The interactive prompt can be avoided by defining it with extra-vars when running ansible-playbook on the command-line, i.e:
+ ```
  ansible-playbook playbooks/setup_file_server.yaml --extra-vars "iso_link=https://access.cdn.redhat.com/content/[...]"
+ ```
+* Install httpd or vsftpd packages, depending on the env.file_server.protocol variable. As well as wget and firewalld. If install of packages fails, try registering with Red Hat, and then re-try installing packages after registration.
+* Download RHEL ISO from user-provided link.
+* Verify the SHA-256 checksum of the downloaded ISO.
+* Create a directory to store configuration files at the path provided by env.file_server.cfgs_dir
+* Create a directory for mounting the downloaded RHEL ISO at the path provided by env.file_server.iso_mount_dir
+* Mount RHEL ISO to path provided by env.file_server.iso_mount_dir
+* Start and enable either http or ftp service, based on the env.file_server.protocol variable.
+* Allow http or ftp traffic through the firewall, based on the env.file_server.protocol variable.
+
+## Test Playbook (test.yaml)
+### Overview
+* Use this playbook for your testing purposes, if needed.
diff --git a/docs/set-variables-group-vars.md b/docs/set-variables-group-vars.md
new file mode 100644
index 00000000..bd57a9ac
--- /dev/null
+++ b/docs/set-variables-group-vars.md
@@ -0,0 +1,242 @@
+# Step 2: Set Variables (group_vars)
+## Overview
+* In a text editor of your choice, open the template of the [environment variables file](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/inventories/default/group_vars/all.yaml.template). Make a copy of it called all.yaml and paste it into the same directory with its template.
+* all.yaml is your master variables file and you will likely reference it many times throughout the process. The default inventory can be found at [inventories/default](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/inventories/default).
+* The variables marked with an `X` are required to be filled in. Many values are pre-filled or are optional. Optional values are commented out; in order to use them, remove the `#` and fill them in.
+* This is the most important step in the process. Take the time to make sure everything here is correct.
+* Note on YAML syntax: Only the lowest value in each hierarchy needs to be filled in. For example, at the top of the variables file env and z don't need to be filled in, but the cpc_name does. There are X's where input is required to help you with this.
+* Scroll the table to the right to see examples for each variable.
+
+## 1 - Controller
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**env.controller.sudo_pass** | The password to the machine running Ansible (localhost).
This will only be used for two things. To ensure you've installed the
pre-requisite packages if you're on Linux, and to add the login URL
to your /etc/hosts file. | Pas$w0rd!
+
+## 2 - LPAR(s)
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**env.z.high_availability** | Is this cluster spread across three LPARs? If yes, mark True. If not (just in
one LPAR), mark False | True
+**env.z.ip_forward** | This variable specifies if ip forwarding is enabled or not if NAT network is selected. If ip_forwarding is set to 0, the installed OCP cluster will not be able to access external services. This setting will be configured during 3_setup_kvm playbook. If NAT will be configured after 3_setup_kvm playbook, the setup needs to be done manually before bastion is being created, configured or reconfigured by running the 3_setup_kvm playbook with parameter: --tags cfg_ip_forward | 1
+**env.z.lpar1.create** | To have Ansible create an LPAR and install RHEL on it for the KVM
host, mark True. If using a pre-existing LPAR with RHEL already
installed, mark False. | True
+**env.z.lpar1.hostname** | The hostname of the KVM host. | kvm-host-01
+**env.z.lpar1.ip** | The IPv4 address of the KVM host. | 192.168.10.1
+**env.z.lpar1.user** | Username for Linux admin on KVM host 1. Recommended to run as a non-root user with sudo access. | admin
+**env.z.lpar1.pass** | The password for the user that will be created or exists on the KVM host. | ch4ngeMe!
+**env.z.lpar2.create** | To create a second LPAR and install RHEL on it to act as
another KVM host, mark True. If using pre-existing LPAR(s) with RHEL
already installed, mark False. | True
+**env.z.lpar2.hostname** | (Optional) The hostname of the second KVM host. | kvm-host-02
+**env.z.lpar2.ip** | (Optional) The IPv4 address of the second KVM host. | 192.168.10.2
+**env.z.lpar2.user** | Username for Linux admin on KVM host 2. Recommended to run as a non-root user with sudo access. | admin
+**env.z.lpar2.pass** | (Optional) The password for the admin user on the second KVM host. | ch4ngeMe!
+**env.z.lpar3.create** | To create a third LPAR and install RHEL on it to act as
another KVM host, mark True. If using pre-existing LPAR(s) with RHEL
already installed, mark False. | True
+**env.z.lpar3.hostname** | (Optional) The hostname of the third KVM host. | kvm-host-03
+**env.z.lpar3.ip** | (Optional) The IPv4 address of the third KVM host. | 192.168.10.3
+**env.z.lpar3.user** | Username for Linux admin on KVM host 3. Recommended to run as a non-root user with sudo access. | admin
+**env.z.lpar3.pass** | (Optional) The password for the admin user on the third KVM host. | ch4ngeMe!
+
+## 3 - File Server
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**env.file_server.ip** | IPv4 address for the file server that will be used to pass config files and
iso to KVM host LPAR(s) and bastion VM during their first boot. | 192.168.10.201
+**env.file_server.user** | Username to connect to the file server. Must have sudo and SSH access. | user1
+**env.file_server.pass** | Password to connect to the file server as above user. | user1pa$s!
+**env.file_server.protocol** | Protocol used to serve the files, either 'ftp' or 'http' | http
+**env.file_server.iso_mount_dir** | Directory path relative to the HTTP/FTP accessible directory where RHEL ISO is mounted. For example, if the FTP root is at /home/user1
and the ISO is mounted at /home/user1/RHEL/8.7 then this variable would be
RHEL/8.7 - no slash before or after. | RHEL/8.7
+**env.file_server.cfgs_dir** | Directory path relative to to the HTTP/FTP accessible directory where configuration files can be stored. For example, if FTP root is /home/user1
and you would like to store the configs at /home/user1/ocpz-config then this variable would be
ocpz-config. No slash before or after. | ocpz-config
+
+## 4 - Red Hat Info
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**env.redhat.username** | Red Hat username with a valid license or free trial to Red Hat
OpenShift Container Platform (RHOCP), which comes with
necessary licenses for Red Hat Enterprise Linux (RHEL) and
Red Hat CoreOS (RHCOS). | redhat.user
+**env.redhat.password** | Password to Red Hat above user's account. Used to auto-attach
necessary subscriptions to KVM Host, bastion VM, and pull live
images for OpenShift. | rEdHatPa$s!
+**env.redhat.attach_subscription** | True or False. Would you like to subscribe the server with Red Hat? | True
+**env.redhat.pull_secret** | Pull secret for OpenShift, comes from Red Hat's [Hybrid Cloud Console](https://console.redhat.com/openshift/install/ibmz/user-provisioned).
Make sure to enclose in 'single quotes'.
| '{"auths":{"cloud.openshift
.com":{"auth":"b3Blb
...
4yQQ==","email":"redhat.
user@gmail.com"}}}'
+
+## 5 - Bastion
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**env.bastion.create** | True or False. Would you like to create a bastion KVM guest to host essential infrastructure services like DNS,
load balancer, firewall, etc? Can de-select certain services with the env.bastion.options
variables below. | True
+**env.bastion.vm_name** | Name of the bastion VM. Arbitrary value. | bastion
+**env.bastion.resources.disk_size** | How much of the storage pool would you like to allocate to the bastion (in
Gigabytes)? Recommended 30 or more. | 30
+**env.bastion.resources.ram** | How much memory would you like to allocate the bastion (in
megabytes)? Recommended 4096 or more | 4096
+**env.bastion.resources.swap** | How much swap storage would you like to allocate the bastion (in
megabytes)? Recommended 4096 or more. | 4096
+**env.bastion.resources.vcpu** | How many virtual CPUs would you like to allocate to the bastion? Recommended 4 or more. | 4
+**env.bastion.networking.ip** | IPv4 address for the bastion. | 192.168.10.3
+**env.bastion.networking.hostname** | Hostname of the bastion. Will be combined with
env.bastion.networking.base_domain to create a Fully Qualified Domain Name (FQDN). | ocpz-bastion
+**env.bastion.networking.base_
domain** | Base domain that, when combined with the hostname, creates a fully-qualified
domain name (FQDN) for the bastion? | ihost.com
+**env.bastion.networking.
subnetmask** | Subnet of the bastion. | 255.255.255.0
+**env.bastion.networking.gateway** | IPv4 of the bastion's gateway server. | 192.168.10.0
+**env.bastion.networking.name
server1** | IPv4 address of the server that resolves the bastion's hostname. | 192.168.10.200
+**env.bastion.networking.name
server2** | (Optional) A second IPv4 address that resolves the bastion's hostname. | 192.168.10.201
+**env.bastion.networking.forwarder** | What IPv4 address will be used to make external DNS calls for the bastion? Can use 1.1.1.1 or 8.8.8.8 as defaults. | 8.8.8.8
+**env.bastion.networking.interface** | Name of the networking interface on the bastion from Linux's perspective. Most likely enc1. | enc1
+**env.bastion.access.user** | What would you like the admin's username to be on the bastion?
If root, make pass and root_pass vars the same. | admin
+**env.bastion.access.pass** | The password to the bastion's admin user. If using root, make
pass and root_pass vars the same. | cH4ngeM3!
+**env.bastion.access.root_pass** | The root password for the bastion. If using root, make
pass and root_pass vars the same. | R0OtPa$s!
+**env.bastion.options.dns** | Would you like the bastion to host the DNS information for the
cluster? True or False. If false, resolution must come from
elsewhere in your environment. Make sure to add IP addresses for
KVM hosts, bastion, bootstrap, control, compute nodes, AND api,
api-int and *.apps as described [here](https://docs.openshift.com/container-platform/4.8/installing/installing_bare_metal/installing-bare-metal-network-customizations.html) in section "User-provisioned
DNS Requirements" Table 5. If True this will be done for you in
the dns and check_dns roles. | True
+**env.bastion.options.load
balancer.on_bastion** | Would you like the bastion to host the load balancer (HAProxy) for the cluster?
True or False (boolean).
If false, this service must be provided elsewhere in your environment, and public and
private IP of the load balancer must be
provided in the following two variables. | True
+**env.bastion.options.load
balancer.public_ip** | (Only required if env.bastion.options.loadbalancer.on_bastion is True). The public IPv4
address for your environment's loadbalancer. api, apps, *.apps must use this. | 192.168.10.50
+**env.bastion.options.load
balancer.private_ip** | (Only required if env.bastion.options.loadbalancer.on_bastion is True). The private IPv4 address
for your environment's loadbalancer. api-int must use this. | 10.24.17.12
+
+## 6 - Cluster Networking
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**env.cluster.networking.metadata_name** | Name to describe the cluster as a whole, can be anything if DNS will be hosted on the bastion. If
DNS is not on the bastion, must match your DNS configuration. Will be combined with the base_domain
and hostnames to create Fully Qualified Domain Names (FQDN). | ocpz
+**env.cluster.networking.base_domain** | The site name, where is the cluster being hosted? This will be combined with the metadata_name
and hostnames to create FQDNs. | ihost.com
+**env.cluster.networking.nameserver1** | IPv4 address that the cluster gets its hostname resolution from. If env.bastion.options.dns
is True, this should be the IP address of the bastion. | 192.168.10.200
+**env.cluster.networking.nameserver2** | (Optional) A second IPv4 address that the cluster gets its hostname resolution from. If env.bastion.options.dns
is True, this should be left commented out. | 192.168.10.201
+**env.cluster.networking.forwarder** | What IPv4 address will be used to make external DNS calls for the cluster? Can use 1.1.1.1 or 8.8.8.8 as defaults. | 8.8.8.8
+
+## 7 - Bootstrap Node
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**env.cluster.nodes.bootstrap.disk_size** | How much disk space do you want to allocate to the bootstrap node (in Gigabytes)? Bootstrap node
is temporary and will be brought down automatically when its job completes. 120 or more recommended. | 120
+**env.cluster.nodes.bootstrap.ram** | How much memory would you like to allocate to the temporary bootstrap node (in
megabytes)? Recommended 16384 or more. | 16384
+**env.cluster.nodes.bootstrap.vcpu** | How many virtual CPUs would you like to allocate to the temporary bootstrap node?
Recommended 4 or more. | 4
+**env.cluster.nodes.bootstrap.vm_name** | Name of the temporary bootstrap node VM. Arbitrary value. | bootstrap
+**env.cluster.nodes.bootstrap.ip** | IPv4 address of the temporary bootstrap node. | 192.168.10.4
+**env.cluster.nodes.bootstrap.hostname** | Hostname of the temporary bootstrap node. If DNS is hosted on the bastion, this can be anything.
If DNS is hosted elsewhere, this must match DNS definition. This will be combined with the
metadata_name and base_domain to create a Fully Qualified Domain Name (FQDN). | bootstrap-ocpz
+
+## 8 - Control Nodes
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**env.cluster.nodes.control.disk_size** | How much disk space do you want to allocate to each control node (in Gigabytes)? 120 or more recommended. | 120
+**env.cluster.nodes.control.ram** | How much memory would you like to allocate to each control
node (in megabytes)? Recommended 16384 or more. | 16384
+**env.cluster.nodes.control.vcpu** | How many virtual CPUs would you like to allocate to each control node? Recommended 4 or more. | 4
+**env.cluster.nodes.control.vm_name** | Name of the control node VMs. Arbitrary values. Usually no more or less than 3 are used. Must match
the total number of IP addresses and hostnames for control nodes. Use provided list format. | control-1
control-2
control-3
+**env.cluster.nodes.control.ip** | IPv4 address of the control nodes. Use provided
list formatting. | 192.168.10.5
192.168.10.6
192.168.10.7
+**env.cluster.nodes.control.hostname** | Hostnames for control nodes. Must match the total number of IP addresses for control nodes
(usually 3). If DNS is hosted on the bastion, this can be anything. If DNS is hosted elsewhere,
this must match DNS definition. This will be combined with the metadata_name and
base_domain to create a Fully Qualified Domain Name (FQDN). | control-01
control-02
control-03
+
+## 9 - Compute Nodes
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**env.cluster.nodes.compute.disk_size** | How much disk space do you want to allocate to each compute
node (in Gigabytes)? 120 or more recommended. | 120
+**env.cluster.nodes.compute.ram** | How much memory would you like to allocate to each compute
node (in megabytes)? Recommended 16384 or more. | 16384
+**env.cluster.nodes.compute.vcpu** | How many virtual CPUs would you like to allocate to each compute node? Recommended 2 or more. | 2
+**env.cluster.nodes.compute.vm_name** | Name of the compute node VMs. Arbitrary values. This list can be expanded to any
number of nodes, minimum 2. Must match the total number of IP
addresses and hostnames for compute nodes. Use provided list format. | compute-1
compute-2
+**env.cluster.nodes.compute.ip** | IPv4 address of the compute nodes. Must match the total number of VM names and
hostnames for compute nodes. Use provided list formatting. | 192.168.10.8
192.168.10.9
+**env.cluster.nodes.compute.hostname** | Hostnames for compute nodes. Must match the total number of IP addresses and
VM names for compute nodes. If DNS is hosted on the bastion, this can be anything.
If DNS is hosted elsewhere, this must match DNS definition. This will be combined with the
metadata_name and base_domain to create a Fully Qualified Domain Name (FQDN). | compute-01
compute-02
+
+## 10 - Infra Nodes
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**env.cluster.nodes.infra.disk_size** | (Optional) Set up compute nodes that are made for infrastructure workloads (ingress,
monitoring, logging)? How much disk space do you want to allocate to each infra node (in Gigabytes)?
120 or more recommended. | 120
+**env.cluster.nodes.infra.ram** | (Optional) How much memory would you like to allocate to each infra node (in
megabytes)? Recommended 16384 or more. | 16384
+**env.cluster.nodes.infra.vcpu** | (Optional) How many virtual CPUs would you like to allocate to each infra node?
Recommended 2 or more. | 2
+**env.cluster.nodes.infra.vm_name** | (Optional) Name of additional infra node VMs. Arbitrary values. This list can be
expanded to any number of nodes, minimum 2. Must match the total
number of IP addresses and hostnames for infra nodes. Use provided list format. | infra-1
infra-2
+**env.cluster.nodes.infra.ip** | (Optional) IPv4 address of the infra nodes. This list can be expanded to any number of nodes,
minimum 2. Use provided list formatting. | 192.168.10.8
192.168.10.9
+**env.cluster.nodes.infra.hostname** | (Optional) Hostnames for infra nodes. Must match the total number of IP addresses for infra nodes.
If DNS is hosted on the bastion, this can be anything. If DNS is hosted elsewhere, this must match
DNS definition. This will be combined with the metadata_name and base_domain
to create a Fully Qualified Domain Name (FQDN). | infra-01
infra-02
+
+## 11 - (Optional) Packages
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**env.pkgs.galaxy** | A list of Ansible Galaxy collections that will be installed during the setup playbook. The
collections listed are required. Feel free to add more as needed, just make sure to follow the same list format. | community.general
+**env.pkgs.controller** | A list of packages that will be installed on the machine running Ansible during the setup
playbook. Feel free to add more as needed, just make sure to follow the same list format. | openssh
+**env.pkgs.kvm** | A list of packages that will be installed on the KVM Host during the setup_kvm_host playbook.
Feel free to add more as needed, just make sure to follow the same list format. | qemu-kvm
+**env.pkgs.bastion** | A list of packages that will be installed on the bastion during the setup_bastion playbook.
Feel free to add more as needed, just make sure to follow the same list format. | haproxy
+
+## 12 - OpenShift Settings
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**env.install_config.api_version** | Kubernetes API version for the cluster. These install_config variables will be passed to the OCP
install_config file. This file is templated in the get_ocp role during the setup_bastion playbook.
To make more fine-tuned adjustments to the install_config, you can find it at
roles/get_ocp/templates/install-config.yaml.j2 | v1
+**env.install_config.compute.architecture** | Computing architecture for the compute nodes. Must be s390x for clusters on IBM zSystems. | s390x
+**env.install_config.compute.hyperthreading** | Enable or disable hyperthreading on compute nodes. Recommended enabled. | Enabled
+**env.install_config.control.architecture** | Computing architecture for the control nodes. Must be s390x for clusters on IBM zSystems. | s390x
+**env.install_config.control.hyperthreading** | Enable or disable hyperthreading on control nodes. Recommended enabled. | Enabled
+**env.install_config.cluster_network.cidr** | IPv4 block in Internal cluster networking in Classless Inter-Domain
Routing (CIDR) notation. Recommended to keep as is. | 10.128.0.0/14
+**env.install_config.cluster_network.host_prefix** | The subnet prefix length to assign to each individual node. For example, if
hostPrefix is set to 23 then each node is assigned a /23 subnet out of the given cidr. A hostPrefix
value of 23 provides 510 (2^(32 - 23) - 2) pod IP addresses. | 23
+**env.install_config.cluster_network.type** | The cluster network provider Container Network Interface (CNI) plug-in to install.
Either OpenShiftSDN or OVNKubernetes (default). | OVNKubernetes
+**env.install_config.service_network** | The IP address block for services. The default value is 172.30.0.0/16. The OpenShift SDN
and OVN-Kubernetes network providers support only a single IP address block for the service
network. An array with an IP address block in CIDR format. | 172.30.0.0/16
+**env.install_config.fips** | True or False (boolean) for whether or not to use the United States' Federal Information Processing
Standards (FIPS). Not yet certified on IBM zSystems. Enclosed in 'single quotes'. | 'false'
+
+## 13 - (Optional) Proxy
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**env.proxy.http** | (Optional) A proxy URL to use for creating HTTP connections outside the cluster. Will be
used in the install-config and applied to other Ansible hosts unless set otherwise in
no_proxy below. Must follow this pattern: http://username:pswd@ip:port | http://ocp-admin:Pa$sw0rd@9.72.10.1:80
+**env.proxy.https** | (Optional) A proxy URL to use for creating HTTPS connections outside the cluster. Will be
used in the install-config and applied to other Ansible hosts unless set otherwise in
no_proxy below. Must follow this pattern: https://username:pswd@ip:port | https://ocp-admin:Pa$sw0rd@9.72.10.1:80
+**env.proxy.no** | (Optional) A comma-separated list (no spaces) of destination domain names, IP
addresses, or other network CIDRs to exclude from proxying. When using a
proxy, all necessary IPs and domains for your cluster will be added automatically. See
roles/get_ocp/templates/install-config.yaml.j2 for more details on the template.
Preface a domain with . to match subdomains only. For example, .y.com matches
x.y.com, but not y.com. Use * to bypass the proxy for all listed destinations. | example.com,192.168.10.1
+
+## 14 - (Optional) Misc
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**env.language** | What language would you like Red Hat Enterprise Linux to use? In UTF-8 language code.
Available languages and their corresponding codes can be found [here](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/5/html-single/international_language_support_guide/index), in the "Locale" column of Table 2.1. | en_US.UTF-8
+**env.timezone** | Which timezone would you like Red Hat Enterprise Linux to use? A list of available timezone
options can be found [here](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones). | America/New_York
+**env.keyboard** | Which keyboard layout would you like Red Hat Enterprise Linux to use? | us
+**env.ansible_key_name** | (Optional) Name of the SSH key that Ansible will use to connect to hosts. | ansible-ocpz
+**env.ocp_key_name** | Comment to describe the SSH key used for OCP. Arbitrary value. | OCPZ-01 key
+**env.bridge_name** | (Optional) Name of the macvtap bridge that will be created on the KVM host or in case of NAT the name of the NAT network definition (usually it is 'default'). If NAT is being used and a jumphost is needed, the parameters network_mode, jumphost.name, jumphost.user and jumphost.pass must be specified, too. In case of default (NAT) network verify that the configured IP ranges do not interfere with the IPs defined for the control and compute nodes. Modify the default network (dhcp range setting) to prevent issues with VMs using dhcp and OCP nodes having fixed IPs. | macvtap-net
+**env.network_mode** | (Optional) In case the network mode will be NAT and the installation will be executed from remote (e.g. your laptop), a jumphost needs to be defined to let the installation access the bastion host. If macvtap for networking is being used this variable should be empty. | NAT
+**env.jumphost.name** | (Optional) If env.network.mode is set to 'NAT' the name of the jumphost (e.g. the name of KVM host if used as jumphost) should be specified. | kvm-host-01
+**env.jumphost.ip** | (Optional) The ip of the jumphost. | 192.168.10.1
+**env.jumphost.user** | (Optional) The user name to login to the jumphost. | admin
+**env.jumphost.pass** | (Optional) The password for user to login to the jumphost. | ch4ngeMe!
+**env.jumphost.path_to_keypair** | (Optional) The absolute path to the public key file on the jumphost to be copied to the bastion. | /home/admin/.ssh/id_rsa.pub
+
+## 15 - OCP and RHCOS (CoreOS)
+
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**ocp_download_url** | Link to the mirror for the OpenShift client and installer from Red Hat. | https://mirror.openshift.com
/pub/openshift-v4/multi
/clients/ocp/4.13.1/s390x/
+**ocp_client_tgz** | OpenShift client filename (tar.gz). | openshift-client-linux.tar.gz
+**ocp_install_tgz** | OpenShift installer filename (tar.gz). | openshift-install-linux.tar.gz
+**rhcos_download_url** | Link to the CoreOS files to be used for the bootstrap, control and compute nodes.
Feel free to change to a different version. | https://mirror.openshift.com
/pub/openshift-v4/s390x
/dependencies/rhcos
/4.12/4.12.3/
+**rhcos_os_variant** | CoreOS base OS. Use the OS string as defined in 'osinfo-query os -f short-id' | rhel8.6
+**rhcos_live_kernel** | CoreOS kernel filename to be used for the bootstrap, control and compute nodes. | rhcos-4.12.3-s390x-live-kernel-s390x
+**rhcos_live_initrd** | CoreOS initramfs to be used for the bootstrap, control and compute nodes. | rhcos-4.12.3-s390x-live-initramfs.s390x.img
+**rhcos_live_rootfs** | CoreOS rootfs to be used for the bootstrap, control and compute nodes. | rhcos-4.12.3-s390x-live-rootfs.s390x.img
+
+## 16 - Hypershift ( Optional )
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**hypershift.kvm_host** | IPv4 address of KVM host for hypershift
(kvm host where you want to run all oc commands and create VMs)| 192.168.10.1
+**hypershift.kvm_host_user** | User for KVM host | root
+**hypershift.bastion_hypershift** | IPv4 address for bastion of Hosted Cluster | 192.168.10.1
+**hypershift.bastion_hypershift_user** | User for bastion of Hosted Cluster | root
+**hypershift.create_bastion** | true or false - create bastion with the provided IP (hypershift.bastion_hypershift) | true
+**hypershift.networking_device** | The network interface card from Linux's perspective.
Usually enc and then a number that comes from the dev_num of the network adapter. | enc1100
+**hypershift.gateway** | IPv4 Address for gateway from where the kvm_host and bastion are reachable
This for adding ip route from kvm_host to bastion through gateway | 192.168.10.1
+**hypershift.bastion_parms.interface** | Interface for bastion | enc1
+**hypershift.bastion_parms.hostname** | Hostname for bastion | bastion
+**hypershift.bastion_parms.base_domain** | DNS base domain for the bastion. | ihost.com
+**hypershift.bastion_parms.os_variant** | rhel os variant for creating bastion | 8.7
+**hypershift.bastion_parms.nameserver** | Nameserver for creating bastion | 192.168.10.1
+**hypershift.bastion_parms.gateway** | Gateway IP for creating bastion
This is how it will be used: ip=::: | 192.168.10.1
+**hypershift.bastion_parms.subnet_mask** | IPv4 address of subnetmask | 255.255.255.0
+**hypershift.mgmt_cluster_nameserver** | IP Address of Nameserver of Management Cluster | 192.168.10.1
+**hypershift.oc_url** | URL for OC Client that you want to install on the host | https://...
..openshift-client-linux-4.13.0-ec.4.tar.gz
+**hypershift.hcp.clusters_namespace** | Namespace for Creating Hosted Control Plane | clusters
+**hypershift.hcp.hosted_cluster_name** | Name for the Hosted Cluster | hosted0
+**hypershift.hcp.basedomain** | Base domain for Hosted Cluster | example.com
+**hypershift.hcp.pull_secret_file** | Path for the pull secret
No need to change this as we are copying the pullsecret to same file
/root/ansible_workdir/auth_file | /root/ansible_workdir/auth_file
+**hypershift.hcp.ocp_release** | OCP Release version for Hosted Control Cluster and Nodepool | 4.13.0-rc.4-multi
+**hypershift.hcp.machine_cidr** | Machines CIDR for Hosted Cluster | 192.168.122.0/24
+**hypershift.hcp.arch** | Architecture for InfraEnv and AgentServiceConfig | s390x
+**hypershift.hcp.pull_secret** | Pull Secret of Management Cluster
Make sure to enclose pull_secret in 'single quotes' | '{"auths":{"cloud.openshift
.com":{"auth":"b3Blb
...
4yQQ==","email":"redhat.
user@gmail.com"}}}'
+**hypershift.mce.version** | version for multicluster-engine Operator | 2.4
+**hypershift.mce.instance_name** | name of the MultiClusterEngine instance | engine
+**hypershift.mce.delete** | true or false - deletes mce and related resources while running deletion playbook | true
+**hypershift.asc.url_for_ocp_release_file** | Add URL for OCP release.txt File | https://...
..../release.txt
+**hypershift.asc.db_volume_size** | DatabaseStorage Volume Size | 10Gi
+**hypershift.asc.fs_volume_size** | FileSystem Storage Volume Size | 10Gi
+**hypershift.asc.ocp_version** | OCP Version for AgentServiceConfig | 4.13.0-ec.4
+**hypershift.asc.iso_url** | Give URL for ISO image | https://...
...s390x-live.s390x.iso
+**hypershift.asc.root_fs_url** | Give URL for rootfs image | https://...
... live-rootfs.s390x.img
+**hypershift.asc.mce_namespace** | Namespace where your Multicluster Engine Operator is installed.
Recommended Namespace for MCE is 'multicluster-engine'.
Change this only if MCE is installed in other namespace. | multicluster-engine
+**hypershift.agents_parms.static_ip_parms.static_ip** | true or false - use static IPs for agents using NMState | true
+**hypershift.agents_parms.static_ip_parms.ip** | List of IP addresses for agents | 192.168.10.1
+**hypershift.agents_parms.static_ip_parms.interface** | Interface for agents for configuring NMStateConfig | eth0
+**hypershift.agents_parms.agents_count** | Number of agents for the hosted cluster
The same number of compute nodes will be attached to Hosted Cotrol Plane | 2
+**hypershift.agents_parms.agent_mac_addr** | List of macaddresses for the agents.
Configure in DHCP if you are using dynamic IPs for Agents. | - 52:54:00:ba:d3:f7
+**hypershift.agents_parms.disk_size** | Disk size for agents | 100G
+**hypershift.agents_parms.ram** | RAM for agents | 16384
+**hypershift.agents_parms.vcpus** | vCPUs for agents | 4
+**hypershift.agents_parms.nameserver** | Nameserver to be used for agents | 192.168.10.1
+
+## 17 - (Optional) Create compute node in a day-2 operation
+
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**day2_compute_node.vm_name** | Name of the compute node VM. | compute-4
+**day2_compute_node.vm_hostname** | Hostnames for compute node. | compute-4
+**day2_compute_node.vm_vm_ip** | IPv4 address of the compute node. | 192.168.10.99
+**day2_compute_node.hostname** | The hostname of the KVM host | kvm-host-01
+**day2_compute_node.host_arch** | KVM host architecture. | s390x
diff --git a/docs/set-variables-host-vars.md b/docs/set-variables-host-vars.md
new file mode 100644
index 00000000..1c22f434
--- /dev/null
+++ b/docs/set-variables-host-vars.md
@@ -0,0 +1,84 @@
+# Step 3: Set Variables (host_vars)
+
+## Overview
+* Similar to the group_vars file, the host_vars files for each LPAR (KVM host) must be filled in.
+* For each KVM host to be acted upon with Ansible, you must have a corresponding host_vars file named `.yaml` (i.e. ocpz1.yaml, ocpz2.yaml, ocpz3.yaml), so you must copy and rename the templates found in the [host_vars folder](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/inventories/default/host_vars) accordingly.
+* The variables marked with an `X` are required to be filled in. Many values are pre-filled or are optional.
+* Optional values are commented out; in order to use them, remove the `#` and fill them in.
+* Many of the variables in these host_vars files are only required if you are NOT using pre-existing LPARs with RHEL installed. See the `Important Note` below this first section for more details.
+* This is the most important step in the process. Take the time to make sure everything here is correct.
+* Note on YAML syntax: Only the lowest value in each hierarchy needs to be filled in. For example, at the top of the variables file networking does not need to be filled in, but the hostname does. There are X's where input is required to help you with this.
+* Scroll the table to the right to see examples for each variable.
+
+## 1 - KVM Host
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**networking.hostname** | The hostname of the LPAR with RHEL installed natively (the KVM host). | kvm-host-01
+**networking.ip** | The IPv4 address of the LPAR with RHEL installed natively (the KVM host). | 192.168.10.2
+**networking.subnetmask** | The subnet that the LPAR resides in within your network. | 255.255.255.0
+**networking.gateway** | The IPv4 address of the gateway to the network where the KVM host resides. | 192.168.10.0
+**networking.nameserver1** | The IPv4 address from which the KVM host gets its hostname resolved. | 192.168.10.200
+**networking.nameserver2** | (Optional) A second IPv4 address from which the KVM host can get its hostname
resolved. Used for high availability. | 192.168.10.201
+**networking.device1** | The network interface card from Linux's perspective. Usually enc and then a number that comes
from the dev_num of the network adapter. | enc100
+**networking.device2** | (Optional) Another Linux network interface card. Usually enc and then a number that comes
from the dev_num of the second network adapter. | enc1
+**storage.pool_path** | The absolute path to a directory on your KVM host that will be used to store qcow2
images for the cluster and other installation artifacts. A sub-directory will be created here that matches your cluster's
metadata name that will act as the cluster's libvirt storage pool directory. Note: all directories present in this path will be made executable for the
'qemu' group, as is required. | /home/kvm_admin/VirtualMachines
+
+## Important Note
+* You can skip the rest of the variables on this page IF you are using existing LPAR(s) that have RHEL already installed.
+ * Since this is how most production deployments on-prem are done on IBM zSystems, these variables have been marked as optional.
+ * With pre-existing LPARs with RHEL installed, you can also skip [1_create_lpar.yaml](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/1_create_lpar.yaml) and [2_create_kvm_host.yaml](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/2_create_kvm_host.yaml) playbooks. Make sure to still do [0_setup.yaml](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/0_setup.yaml) first though, then skip to [3_setup_kvm_host.yaml](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/3_setup_kvm_host.yaml)
+
+## 2 - (Optional) CPC & HMC
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**cpc_name** | The name of the IBM zSystems / LinuxONE mainframe that you are creating a Red Hat OpenShift Container
Platform cluster on. Can be found under the "Systems Management" tab of the Hardware Management
Console (HMC). | SYS1
+**hmc.host** | The IPv4 address of the HMC you will be connecting to in order to create a Logical Partition (LPAR)
which will act as the Kernel-based Virtual Machine (KVM) host after installing and setting up
Red Hat Enterprise Linux (RHEL). | 192.168.10.1
+**hmc.user** | The username that the HMC API call will use to connect to the HMC. Must have access to create
LPARs, attach storage groups and networking cards. | hmc-user
+**hmc.pass** | The password that the HMC API call will use to connect to the HMC. Must have access to create
LPARs, attach storage groups and networking cards. | hmcPas$w0rd!
+
+## 3 - (Optional) LPAR
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**lpar.name** | The name of the Logical Partition (LPAR) that you would like to create/target for the creation of
your cluster. This LPAR will act as the KVM host, with RHEL installed natively. | OCPKVM1
+**lpar.description** | A short description of what this LPAR will be used for, will only be displayed in the HMC next to
the LPAR name for identification purposes. | KVM host LPAR for RHOCP cluster.
+**lpar.access.user** | The username that will be created in RHEL when it is installed on the LPAR (the KVM host). | kvm-admin
+**lpar.access.pass** | The password for the user that will be created in RHEL when it is installed on the LPAR (the KVM host). | ch4ngeMe!
+**lpar.root_pass** | The root password for RHEL installed on the LPAR (the KVM host). | $ecureP4ass!
+
+## 4 - (Optional) IFL & Memory
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**lpar.ifl.count** | Number of Integrated Facilities for Linux (IFL) processors will be assigned to this LPAR.
6 or more recommended. | 6
+**lpar.ifl.initial_memory** | Initial memory allocation for LPAR to have at start-up (in megabytes). | 55000
+**lpar.ifl.max_memory** | The most amount of memory this LPAR can be using at any one time (in megabytes). | 99000
+**lpar.ifl.initial_weight** | For LPAR load balancing purposes, the processing weight this LPAR will have at start-up (1-999). | 100
+**lpar.ifl.min_weight** | For LPAR load balancing purposes, the minimum weight that this LPAR can have at any one time (1-999). | 50
+**lpar.ifl.max_weight** | For LPAR load balancing purposes, the maximum weight that this LPAR can have at any one time (1-999). | 500
+
+## 5 - (Optional) Networking
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**lpar.networking.subnet_cidr** | The same value as the above variable but in Classless Inter-
Domain Routing (CIDR) notation. | 23
+**lpar.networking.nic.card1.name** | The logical name of the Network Interface Card (NIC) within the HMC.
An arbitrary value
that is human-readable that points to the NIC. | SYS-NIC-01
+**lpar.networking.nic.card1.adapter** | The physical adapter name reference to the logical adapter for the LPAR. | 10Gb-A
+**lpar.networking.nic.card1.port** | The port number for the NIC. | 0
+**lpar.networking.nic.card1.dev_num** | The logical device number for the NIC. In hex format. | 0x0100
+**lpar.networking.nic.card2.name** | (Optional) The logical name of a second Network Interface Card
(NIC) within the HMC. An arbitrary value that is human-readable
that points to the NIC. | SYS-NIC-02
+**lpar.networking.nic.card2.adapter** | (Optional) The physical adapter name of a second NIC. | 10Gb-B
+**lpar.networking.nic.card2.port** | (Optional) The port number for a second NIC. | 1
+**lpar.networking.nic.card2.dev_num** | (Optional) The logical device number for a second NIC. In hex format. | 0x0001
+
+## 6 - (Optional) Storage
+**Variable Name** | **Description** | **Example**
+:--- | :--- | :---
+**lpar.storage_group_1.name** | The name of the storage group that will be attached to the LPAR. | OCP-storage-01
+**lpar.storage_group_1.type** | Storage type. FCP is the only tested type as of now. | fcp
+**lpar.storage_group_1.storage_wwpn** | World-wide port numbers for storage group. Use provided list formatting. | 500708680235c3f0
500708680235c3f1
500708680235c3f2
500708680235c3f3
+**lpar.storage_group_1.dev_num** | The logical device number of the Host Bus Adapter (HBA) for the storage group. | C001
+**lpar.storage_group_1.lun_name** | The Logical Unit Numbers (LUN) that points to a specific virtual disk behind the WWPN. | 4200569309ahhd240000000000000c001
+**lpar.storage_group_2.name** | (Optional) The name of the storage group that will be attached to the LPAR. | OCP-storage-01
+**lpar.storage_group_2.auto_config** | (Optional) Attempt to automate the addition of the disk space to the existing logical
volume. Check out roles/configure_storage/tasks/main.yaml to ensure this will work
properly with your setup. | True
+**lpar.storage_group_2.type** | (Optional) Storage type. FCP is the only tested type as of now. | fcp
+**lpar.storage_group_2_.storage_wwpn** | (Optional) World-wide port numbers for storage group. Use provided list formatting. | 500708680235c3f0
500708680235c3f1
500708680235c3f2
500708680235c3f3
+**lpar.storage_group_2_.dev_num** | (Optional) The logical device number of the Host Bus Adapter (HBA) for the storage group. | C001
+**lpar.storage_group_2_.lun_name** | (Optional) The Logical Unit Numbers (LUN) that points to a specific virtual disk
behind the WWPN. | 4200569309ahhd240000000000000c001
\ No newline at end of file
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
new file mode 100644
index 00000000..804e9bb7
--- /dev/null
+++ b/docs/troubleshooting.md
@@ -0,0 +1,30 @@
+# Troubleshooting
+If you encounter errors while running the main playbook, there are a few things you can do:
+
+* Double check your variables.
+* Inspect the part that failed by opening the playbook or role at roles/role-name/tasks/main.yaml
+* Google the specific error message.
+* Re-run the role with the verbosity '-v' option to get more debugging information (more v's give more info). For example:
+```
+ansible-playbook playbooks/setup_bastion.yaml -vvv
+```
+* Use tags
+ * To be more selective with what parts of a playbook are run, use tags.
+ * To determine what part of a playbook you would like to run, open the playbook you'd like to run and find the roles parameter. Each [role](https://github.com/IBM/Ansible-OpenShift-Provisioning/tree/main/roles) has a corresponding tag.
+ * There are also occasionally tags for sections of a playbook or within the role themselves.
+ * This is especially helpful for troubleshooting. You can add in tags under the `name` parameter for individual tasks you'd like to run.
+ * Here's an example of using a tag:
+```
+ansible-playbook playbooks/setup_kvm_host.yaml --tags "section_2,section_3"
+```
+ * This runs only the parts of the [setup_kvm_host playbook](https://github.com/IBM/Ansible-OpenShift-Provisioning/blob/main/playbooks/3_setup_kvm_host.yaml) marked with tags section_2 and section_3. To use more than one tag, they must be quoted (single or double) and comma-separated (with or without spaces between).
+* E-mail Jacob Emery at jacob.emery@ibm.com
+* If it's a problem with an OpenShift verification step:
+ * Open the cockpit to monitor the VMs.
+ * In a web browser, go to https://kvm-host-IP-here:9090
+ * Sign-in with your credentials set in the variables file
+ * Enable administrative access in the top right.
+ * Open the 'Virtual Machines' tab from the left side toolbar.
+ * Sometimes it just takes a while, especially if it's lacking resources. Give it some time and then re-run the playbook/role with tags.
+ * If that doesn't work, SSH into the bastion as root ("ssh root@\") and then run, "export KUBECONFIG=/root/ocpinst/auth/kubeconfig" and then "oc whoami" and make sure it outputs "system:admin". Then run the shell command from the role you would like to check on manually: i.e. 'oc get nodes', 'oc get co', etc.
+ * Open the .openshift_install.log file for information on what happened and try to debug the issue.
\ No newline at end of file
diff --git a/dwnload-image-files.yml b/dwnload-image-files.yml
deleted file mode 100644
index c2ecaa80..00000000
--- a/dwnload-image-files.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-
-- hosts: kvm_host
- become: true
- tasks:
-
- - name: download RHCOS initramfs
- get_url:
- url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img
- dest: /var/lib/libvirt/images
- mode: 0755
-
- - name: download RHCOS kernel
- get_url:
- url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x
- dest: /var/lib/libvirt/images
- mode: 0755
-
- - name: download RHCOS rootfs
- get_url:
- url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img
- dest: /var/lib/libvirt/images
- mode: 0755
-
- - name: download QCOW2 image
- get_url:
- url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz
- dest: /var/lib/libvirt/images
- mode: 0755
-
-
-
diff --git a/files/rhel-guest-image.txt b/files/rhel-guest-image.txt
deleted file mode 100644
index 242bf2b8..00000000
--- a/files/rhel-guest-image.txt
+++ /dev/null
@@ -1 +0,0 @@
-# This file is a placeholder to identify the image file without uploading to github
diff --git a/files/shell_scripts/create_bootstrap.sh b/files/shell_scripts/create_bootstrap.sh
deleted file mode 100644
index 597d0c37..00000000
--- a/files/shell_scripts/create_bootstrap.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!bin/bash
-
-##create
-qemu-img create -f qcow2 -F qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G
-
-##boot
-virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1
-coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://:8080/bin/rhcos-rootfs.img
-coreos.inst.ignition_url=http://:8080/ignition/bootstrap.ign ip=::::::none
-nameserver=’ --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/
-images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive
-if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-
-blk,serial=ignition,drive=ignition"
-
-
diff --git a/files/shell_scripts/create_http.sh b/files/shell_scripts/create_http.sh
deleted file mode 100644
index 4978ee22..00000000
--- a/files/shell_scripts/create_http.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!bin/bash
-
-##install HTTP
-dnf install -y httpd
-
-##make folders
-mkdir /var/www/html/bin /var/www/html/bootstrap
-
-##get mirror 1
-wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-kernel-s390x
--O /var/www/html/bin/rhcos-kernel
-
-##get mirror 2
-wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-
-initramfs.s390x.img -O /var/www/html/bin/rhcos-initramfs.img
-
-##get mirror 3
-wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-
-rootfs.s390x.img -O rhcos-rootfs.img
-
-##enable http
-systemctl enable --now httpd; systemctl status httpd
diff --git a/files/shell_scripts/dl_rhel_iso.sh b/files/shell_scripts/dl_rhel_iso.sh
deleted file mode 100644
index 1377f15b..00000000
--- a/files/shell_scripts/dl_rhel_iso.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!bin/bash
-wget /URL/rhel-8.3-s390x-dvd.iso
-rhel-8.3-s390x-dvd.iso rhel83.iso
diff --git a/files/shell_scripts/get_ocp_installer.sh b/files/shell_scripts/get_ocp_installer.sh
deleted file mode 100644
index a19a0abb..00000000
--- a/files/shell_scripts/get_ocp_installer.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!bin/bash
-
-##get and extract mirror 1
-wget https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/latest/openshift-client-linux.tar.gz
-tar -xvzf openshift-client-linux.tar.gz
-
-##get and extract mirror 2
-wget https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/latest/openshift-install-linux.tar.gz
-tar -xvzf openshift-client-linux.tar.gz
-
-##Make executable
-chmod +x kubectl oc openshift_install
-
-##move installed to bin folder
-mv kubectl oc openshift_install /usr/local/bin/
diff --git a/files/shell_scripts/macvtap-net.sh b/files/shell_scripts/macvtap-net.sh
deleted file mode 100644
index 1d51d565..00000000
--- a/files/shell_scripts/macvtap-net.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!bin/bash
-virsh net-create macvtap.xml
-virsh net-start --network macvtap-net
-virsh net-autostart --network macvtap-net
-virsh net-list --all
diff --git a/files/shell_scripts/prep_kvm_guests.sh b/files/shell_scripts/prep_kvm_guests.sh
deleted file mode 100644
index 50b36294..00000000
--- a/files/shell_scripts/prep_kvm_guests.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!bin/bash
-wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz
-dnf install -y gzip
-gunzip rhcos-qemu.s390x.qcow2.gz /var/lib/libvirt/images/
diff --git a/files/shell_scripts/start_libvirtd.sh b/files/shell_scripts/start_libvirtd.sh
deleted file mode 100644
index 332d1b33..00000000
--- a/files/shell_scripts/start_libvirtd.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-systemctl enable --now libvirtd
-systemctl status libvirtd.service
-systemctl status libvirtd
diff --git a/files/shell_scripts/start_rhel_install.sh b/files/shell_scripts/start_rhel_install.sh
deleted file mode 100644
index 89a907a8..00000000
--- a/files/shell_scripts/start_rhel_install.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!bin/bash
-virt# virt-install --connect qemu:///system --name bastion --memory 4096 --vcpus 2 --disk size=20 --cdrom /var/lib/libvirt/images/rhel83.iso
---accelerate --import --network network=macvtap-net --extra-args "ip=172.16.10.212::172.16.10.1:255.255.255.0:bastion.ocp.home.local::none
-nameserver=172.16.10.38 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive
-if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --
-noautoconsole
diff --git a/files/shell_scripts/verify_bootstrap.sh b/files/shell_scripts/verify_bootstrap.sh
deleted file mode 100644
index fcb0845d..00000000
--- a/files/shell_scripts/verify_bootstrap.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!bin/bash
-virsh console bootstrap
-journalctl -u bootkube.service
diff --git a/files/sudoers_zcts b/files/sudoers_zcts
deleted file mode 100644
index 054d9c55..00000000
--- a/files/sudoers_zcts
+++ /dev/null
@@ -1,2 +0,0 @@
-zcts ALL=(ALL) NOPASSWD: ALL
-
diff --git a/files/test.txt b/files/test.txt
deleted file mode 100644
index 16b14f5d..00000000
--- a/files/test.txt
+++ /dev/null
@@ -1 +0,0 @@
-test file
diff --git a/host_vars b/host_vars
deleted file mode 100644
index 97570a25..00000000
--- a/host_vars
+++ /dev/null
@@ -1,12 +0,0 @@
-##placeholder until ready to simplify playbooks
-
-##list of needed variables:
-
-##in bastion main.yaml:
-##baseDomain
-##cluster_name
-##pullsecret
-##ssh-public-key
-##installation_directory
-
-
diff --git a/inventories/.gitignore b/inventories/.gitignore
new file mode 100644
index 00000000..e918a3bf
--- /dev/null
+++ b/inventories/.gitignore
@@ -0,0 +1,3 @@
+/*
+!.gitignore
+!default
diff --git a/inventories/default/.gitignore b/inventories/default/.gitignore
new file mode 100644
index 00000000..cd1a35c6
--- /dev/null
+++ b/inventories/default/.gitignore
@@ -0,0 +1,4 @@
+/*
+!group_vars
+!host_vars
+!.gitignore
\ No newline at end of file
diff --git a/inventories/default/group_vars/.gitignore b/inventories/default/group_vars/.gitignore
new file mode 100644
index 00000000..bb409d93
--- /dev/null
+++ b/inventories/default/group_vars/.gitignore
@@ -0,0 +1,3 @@
+/*
+!.gitignore
+!all.yaml.template
\ No newline at end of file
diff --git a/inventories/default/group_vars/all.yaml.template b/inventories/default/group_vars/all.yaml.template
new file mode 100644
index 00000000..7f7d88d2
--- /dev/null
+++ b/inventories/default/group_vars/all.yaml.template
@@ -0,0 +1,298 @@
+# Copy this file to 'all.yaml' in the same folder and add your required values there!
+#
+# For a comprehensive description of each variable, please see documentation here:
+# https://ibm.github.io/Ansible-OpenShift-Provisioning/set-variables-group-vars/
+
+# Section 1 - Ansible Controller
+env:
+ controller:
+ sudo_pass: #X
+
+# Section 2 - LPAR(s)
+ z:
+ high_availability: False
+ ip_forward: "{{ 1 if network_mode | upper == 'NAT' else 0 }}"
+ lpar1:
+ create: True
+ hostname: #X
+ ip: #X
+ user: #X
+ pass: #X
+ lpar2:
+ create: False
+# hostname:
+# ip:
+# user:
+# pass:
+ lpar3:
+ create: False
+# hostname:
+# ip:
+# user:
+# pass:
+
+# Section 3 - File Server
+ file_server:
+ ip: #X
+ user: #X
+ pass: #X
+ protocol: #X
+ iso_mount_dir: #X
+ cfgs_dir: #X
+
+# Section 4 - Red Hat
+ redhat:
+ username: #X
+ password: #X
+ attach_subscription: true
+ # Make sure to enclose pull_secret in 'single quotes'
+ pull_secret: #'X'
+
+# Section 5 - Bastion
+ bastion:
+ create: True
+ vm_name: #X
+ resources:
+ disk_size: 30
+ ram: 4096
+ swap: 4096
+ vcpu: 4
+ networking:
+ ip: #X
+ hostname: #X
+ base_domain: #X
+ subnetmask: #X
+ gateway: #X
+ nameserver1: #X
+# nameserver2:
+ forwarder: 1.1.1.1
+ interface: #X
+ access:
+ user: #X
+ pass: #X
+ root_pass: #X
+ options:
+ dns: True
+ loadbalancer:
+ on_bastion: True
+# public_ip:
+# private_ip:
+
+# Section 6 - Cluster Networking
+ cluster:
+ networking:
+ metadata_name: #X
+ base_domain: #X
+ subnetmask: #X
+ gateway: #X
+ nameserver1: #X
+# nameserver2:
+ forwarder: 1.1.1.1
+
+# Section 7 - Bootstrap Node
+ nodes:
+ bootstrap:
+ disk_size: 120
+ ram: 16384
+ vcpu: 4
+ vm_name: #X
+ ip: #X
+ hostname: #X
+
+# Section 8 - Control Nodes
+ control:
+ disk_size: 120
+ ram: 16384
+ vcpu: 4
+ vm_name:
+ - #X
+ - #X
+ - #X
+ ip:
+ - #X
+ - #X
+ - #X
+ hostname:
+ - #X
+ - #X
+ - #X
+
+# Section 9 - Compute Nodes
+ compute:
+ disk_size: 120
+ ram: 16384
+ vcpu: 4
+ vm_name:
+ - #X
+ - #X
+ ip:
+ - #X
+ - #X
+ hostname:
+ - #X
+ - #X
+
+# Section 10 - Infra Nodes
+# infra:
+# disk_size: 120
+# ram: 16384
+# vcpu: 4
+# vm_name:
+# - infra-1
+# - infra-2
+# ip:
+# - 1.1.1.1
+# - 1.1.1.2
+# hostname:
+# - infra1
+# - infra2
+
+#######################################################################################
+# All variables below this point do not need to be changed for a default installation #
+#######################################################################################
+
+# Section 11 - (Optional) Packages
+ pkgs:
+ galaxy: [ ibm.ibm_zhmc, community.general, community.crypto, ansible.posix, community.libvirt ]
+ controller: [ openssh, expect, sshuttle ]
+ kvm: [ libguestfs, libvirt-client, libvirt-daemon-config-network, libvirt-daemon-kvm, cockpit-machines, libvirt-devel, virt-top, qemu-kvm, python3-lxml, cockpit, lvm2 ]
+ bastion: [ haproxy, httpd, bind, bind-utils, expect, firewalld, mod_ssl, python3-policycoreutils, rsync ]
+ hypershift: [ make, jq, git, virt-install ]
+
+# Section 12 - OpenShift Settings
+ install_config:
+ api_version: v1
+ compute:
+ architecture: s390x
+ hyperthreading: Enabled
+ control:
+ architecture: s390x
+ hyperthreading: Enabled
+ cluster_network:
+ cidr: 10.128.0.0/14
+ host_prefix: 23
+ type: OVNKubernetes
+ service_network: 172.30.0.0/16
+ fips: 'false'
+
+# Section 13 - (Optional) Proxy
+# proxy:
+# http:
+# https:
+# no:
+
+# Section 14 - (Optional) Misc
+ language: en_US.UTF-8
+ timezone: America/New_York
+ keyboard: us
+ root_access: false
+ ansible_key_name: ansible-ocpz
+ ocp_ssh_key_comment: OpenShift key
+ bridge_name: macvtap
+ network_mode:
+
+#jumphost if network mode is NAT
+ jumphost:
+ name:
+ ip:
+ user:
+ pass:
+ path_to_keypair:
+
+# Section 15 - OCP and RHCOS (CoreOS)
+
+# ocp_download_url with '/' at the end !
+ocp_download_url: "https://mirror.openshift.com/pub/openshift-v4/multi/clients/ocp/4.13.1/s390x/"
+# ocp client and installer filenames
+ocp_client_tgz: "openshift-client-linux.tar.gz"
+ocp_install_tgz: "openshift-install-linux.tar.gz"
+
+# rhcos_download_url with '/' at the end !
+rhcos_download_url: "https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.12/4.12.3/"
+
+# For rhcos_os_variant use the OS string as defined in 'osinfo-query os -f short-id'
+rhcos_os_variant: rhel8.6
+
+# RHCOS live image filenames
+rhcos_live_kernel: "rhcos-4.12.3-s390x-live-kernel-s390x"
+rhcos_live_initrd: "rhcos-4.12.3-s390x-live-initramfs.s390x.img"
+rhcos_live_rootfs: "rhcos-4.12.3-s390x-live-rootfs.s390x.img"
+
+# Section 16 - Hypershift ( Optional )
+
+hypershift:
+ kvm_host:
+ kvm_host_user:
+ bastion_hypershift:
+ bastion_hypershift_user:
+
+ create_bastion: true
+ networking_device: enc1100
+ gateway:
+
+ bastion_parms:
+ interface:
+ hostname:
+ base_domain:
+ os_variant:
+ nameserver:
+ gateway:
+ subnet_mask:
+
+ mgmt_cluster_nameserver:
+ oc_url:
+
+ #Hosted Control Plane Parameters
+
+ hcp:
+ clusters_namespace:
+ hosted_cluster_name:
+ basedomain:
+ pull_secret_file: /root/ansible_workdir/auth_file
+ ocp_release:
+ machine_cidr: 192.168.122.0/24
+ arch:
+ # Make sure to enclose pull_secret in 'single quotes'
+ pull_secret:
+
+ # MultiClusterEngine Parameters
+ mce:
+ version:
+ instance_name: engine
+ delete: false
+
+ # AgentServiceConfig Parameters
+
+ asc:
+ url_for_ocp_release_file:
+ db_volume_size: "10Gi"
+ fs_volume_size: "10Gi"
+ ocp_version:
+ iso_url:
+ root_fs_url:
+ mce_namespace: multicluster-engine # This is the Recommended Namespace for Multicluster Engine operator
+
+ agents_parms:
+ static_ip_parms:
+ static_ip: true
+ ip: # Required only if static_ip is true
+ #-
+ #-
+ interface: eth0
+ agents_count:
+ # If you want to use specific mac addresses, provide them here
+ agent_mac_addr:
+ #-
+ disk_size: 100G
+ ram: 16384
+ vcpus: 4
+ nameserver:
+
+# Section 17 - (Optional) Create additional compute node in a day-2 operation
+
+day2_compute_node:
+ vm_name:
+ vm_hostname:
+ vm_ip:
+ hostname:
+ host_arch:
diff --git a/inventories/default/host_vars/.gitignore b/inventories/default/host_vars/.gitignore
new file mode 100644
index 00000000..627571ad
--- /dev/null
+++ b/inventories/default/host_vars/.gitignore
@@ -0,0 +1,3 @@
+/*
+!.gitignore
+!KVMhostname*-here.yaml.template
\ No newline at end of file
diff --git a/inventories/default/host_vars/KVMhostname1-here.yaml.template b/inventories/default/host_vars/KVMhostname1-here.yaml.template
new file mode 100644
index 00000000..c4aa3d1c
--- /dev/null
+++ b/inventories/default/host_vars/KVMhostname1-here.yaml.template
@@ -0,0 +1,84 @@
+# Section 1 - KVM Host
+networking:
+ hostname: #X
+ ip: #X
+ subnetmask: #X
+ gateway: #X
+ nameserver1: #X
+# nameserver2:
+ device1: #X
+# device2:
+
+storage:
+ pool_path: #X
+
+##############################################################
+# Variables below this point only need to be filled out if #
+# env.z.lpar1.create is True. Meaning, if the LPARs you will #
+# be using as KVM host(s) already exist and have RHEL #
+# installed, the variables below are not required. #
+##############################################################
+
+# Section 2 - CPC & HMC
+cpc_name: #X
+hmc:
+ host: #X
+ auth:
+ user: #X
+ pass: #X
+
+# Section 3 - LPAR
+lpar:
+ name: #X
+ description: #X
+ access:
+ user: #X
+ pass: #X
+ root_pass: #X
+
+# Section 4 - IFL & Memory
+ ifl:
+ count: #X
+ initial_memory: #X
+ max_memory: #X
+ initial_weight: #X
+ min_weight: #X
+ max_weight: #X
+
+# Section 5 - Networking
+ networking:
+ subnet_cidr: #X
+ nic:
+ card1:
+ name: #X
+ adapter: #X
+ port: #X
+ dev_num: #X
+# card2:
+# name:
+# adapter:
+# port:
+# dev_num:
+
+# Section 6 - Storage
+ storage_group_1:
+ name: #X
+ type: fcp
+ storage_wwpn:
+ - #X
+ - #X
+ - #X
+ - #X
+ dev_num: #X
+ lun_name: #X
+# storage_group_2:
+# auto_config: True
+# name:
+# type: fcp
+# storage_wwpn:
+# -
+# -
+# -
+# -
+# dev_num:
+# lun_name:
\ No newline at end of file
diff --git a/inventories/default/host_vars/KVMhostname2-here.yaml.template b/inventories/default/host_vars/KVMhostname2-here.yaml.template
new file mode 100644
index 00000000..89b801f5
--- /dev/null
+++ b/inventories/default/host_vars/KVMhostname2-here.yaml.template
@@ -0,0 +1,84 @@
+# Section 1 - KVM Host
+networking:
+ hostname: #X
+ ip: #X
+ subnetmask: #X
+ gateway: #X
+ nameserver1: #X
+# nameserver2:
+ device1: #X
+# device2:
+
+storage:
+ pool_path: #X
+
+##############################################################
+# Variables below this point only need to be filled out if #
+# env.z.lpar2.create is True. Meaning, if the LPARs you will #
+# be using as KVM host(s) already exist and have RHEL #
+# installed, the variables below are not required. #
+##############################################################
+
+# Section 2 - CPC & HMC
+cpc_name: #X
+hmc:
+ host: #X
+ auth:
+ user: #X
+ pass: #X
+
+# Section 3 - LPAR
+lpar:
+ name: #X
+ description: #X
+ access:
+ user: #X
+ pass: #X
+ root_pass: #X
+
+# Section 4 - IFL & Memory
+ ifl:
+ count: #X
+ initial_memory: #X
+ max_memory: #X
+ initial_weight: #X
+ min_weight: #X
+ max_weight: #X
+
+# Section 5 - Networking
+ networking:
+ subnet_cidr: #X
+ nic:
+ card1:
+ name: #X
+ adapter: #X
+ port: #X
+ dev_num: #X
+# card2:
+# name:
+# adapter:
+# port:
+# dev_num:
+
+# Section 6 - Storage
+ storage_group_1:
+ name: #X
+ type: fcp
+ storage_wwpn:
+ - #X
+ - #X
+ - #X
+ - #X
+ dev_num: #X
+ lun_name: #X
+# storage_group_2:
+# auto_config: True
+# name:
+# type: fcp
+# storage_wwpn:
+# -
+# -
+# -
+# -
+# dev_num:
+# lun_name:
\ No newline at end of file
diff --git a/inventories/default/host_vars/KVMhostname3-here.yaml.template b/inventories/default/host_vars/KVMhostname3-here.yaml.template
new file mode 100644
index 00000000..997b7f61
--- /dev/null
+++ b/inventories/default/host_vars/KVMhostname3-here.yaml.template
@@ -0,0 +1,84 @@
+# Section 1 - KVM Host
+networking:
+ hostname: #X
+ ip: #X
+ subnetmask: #X
+ gateway: #X
+ nameserver1: #X
+# nameserver2:
+ device1: #X
+# device2:
+
+storage:
+ pool_path: #X
+
+##############################################################
+# Variables below this point only need to be filled out if #
+# env.z.lpar3.create is True. Meaning, if the LPARs you will #
+# be using as KVM host(s) already exist and have RHEL #
+# installed, the variables below are not required. #
+##############################################################
+
+# Section 2 - CPC & HMC
+cpc_name: #X
+hmc:
+ host: #X
+ auth:
+ user: #X
+ pass: #X
+
+# Section 3 - LPAR
+lpar:
+ name: #X
+ description: #X
+ access:
+ user: #X
+ pass: #X
+ root_pass: #X
+
+# Section 4 - IFL & Memory
+ ifl:
+ count: #X
+ initial_memory: #X
+ max_memory: #X
+ initial_weight: #X
+ min_weight: #X
+ max_weight: #X
+
+# Section 5 - Networking
+ networking:
+ subnet_cidr: #X
+ nic:
+ card1:
+ name: #X
+ adapter: #X
+ port: #X
+ dev_num: #X
+# card2:
+# name:
+# adapter:
+# port:
+# dev_num:
+
+# Section 6 - Storage
+ storage_group_1:
+ name: #X
+ type: fcp
+ storage_wwpn:
+ - #X
+ - #X
+ - #X
+ - #X
+ dev_num: #X
+ lun_name: #X
+# storage_group_2:
+# auto_config: True
+# name:
+# type: fcp
+# storage_wwpn:
+# -
+# -
+# -
+# -
+# dev_num:
+# lun_name:
\ No newline at end of file
diff --git a/inventory b/inventory
deleted file mode 100644
index 521b9c44..00000000
--- a/inventory
+++ /dev/null
@@ -1,17 +0,0 @@
-[kvm_host]
-9.60.87.132
-
-[bootstrap_server]
-9.60.87.133
-
-[bastion_server]
-9.60.87.139
-
-[control_nodes]
-9.60.87.138
-9.60.87.137
-9.60.87.136
-
-[worker_nodes]
-9.60.87.135
-9.60.87.134
diff --git a/list_vms.yml b/list_vms.yml
deleted file mode 100644
index 951f9714..00000000
--- a/list_vms.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-
-- hosts: kvm_host
-
- tasks:
- - name: list all VMs
- community.libvirt.virt:
- command: list_vms
- register: running_vms
-
- - name: Print running vms
- ansible.builtin.debug:
- var: running_vms
- verbosity: 0
-
-
diff --git a/main.yml b/main.yml
deleted file mode 100644
index 555dd273..00000000
--- a/main.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-# tasks file for kvm-vm creation for OCP install
-- name: Check if operating system disk already exists
- stat:
- path: /var/lib/libvirt/images/{{ kvm_vm_os_disk_name }}.qcow2
- get_checksum: no
- get_md5: no
- get_mime: no
- get_attributes: no
- register: os_disk_file
\ No newline at end of file
diff --git a/mkdocs.yaml b/mkdocs.yaml
new file mode 100644
index 00000000..91cb7892
--- /dev/null
+++ b/mkdocs.yaml
@@ -0,0 +1,40 @@
+site_name: Ansible-Automated OpenShift Provisioning on KVM on IBM zSystems / LinuxONE
+repo_url: https://github.com/IBM/Ansible-OpenShift-Provisioning
+site_url: https://ibm.github.io/Ansible-OpenShift-Provisioning/
+edit_uri: edit/main/docs
+
+nav:
+ - Home: 'index.md'
+ - Read Me:
+ - Before You Begin: 'before-you-begin.md'
+ - Prerequisites: 'prerequisites.md'
+ - Installation Instructions:
+ - 1 Get Info: 'get-info.md'
+ - 2 Set Variables (group_vars): 'set-variables-group-vars.md'
+ - 3 Set Variables (host_vars): 'set-variables-host-vars.md'
+ - 4 Run the Playbooks: 'run-the-playbooks.md'
+ - Run the Playbooks (HyperShift): 'run-the-playbooks-for-hypershift.md'
+ - Misc:
+ - Troubleshooting: 'troubleshooting.md'
+ - Acknowledgements: 'acknowledgements.md'
+
+theme:
+ name: readthedocs
+ palette:
+ scheme: default
+ primary: black
+ font:
+ text: IBM Plex Sans
+ code: IBM Plex Mono
+ favicon: images/ansible-logo.png
+ logo: images/ansible-logo.png
+
+copyright: Copyright © 2022 IBM zSystems Washington Systems Center
+
+markdown_extensions:
+ - fenced_code
+ - codehilite
+ - toc:
+ permalink: "#"
+ - attr_list
+
diff --git a/playbooks/0_setup.yaml b/playbooks/0_setup.yaml
new file mode 100644
index 00000000..ea718146
--- /dev/null
+++ b/playbooks/0_setup.yaml
@@ -0,0 +1,68 @@
+---
+
+- hosts: localhost
+ tags: localhost
+ connection: local
+ become: false
+ gather_facts: true
+ roles:
+ - set_inventory
+
+ post_tasks:
+ - name: Ensure Ansible Galaxy collections have been installed.
+ tags: galaxy
+ command: ansible-galaxy collection install {{ item }}
+ loop: "{{ env.pkgs.galaxy }}"
+
+ - name: Find ibm_zhmc collection install location, if automated LPAR creation is to be used.
+ tags: galaxy
+ shell: ansible-galaxy collection list ibm.ibm_zhmc | grep -i ansible | cut -c 3-
+ register: zhmc_path
+ when: env.z.lpar1.create == True or env.z.lpar2.create == True or env.z.lpar3.create == True
+
+ - name: Ensure zhmcclient requirements are installed.
+ tags: galaxy
+ pip:
+ requirements: "{{ zhmc_path.stdout }}/ibm/ibm_zhmc/requirements.txt"
+ executable: pip3
+ extra_args: --upgrade
+ when: env.z.lpar1.create == True or env.z.lpar2.create == True or env.z.lpar3.create == True
+
+ - name: Check to make sure that the KVM host has a corresponding inventory host_vars file named with matching hostname and .yaml extension.
+ tags: lpar_check
+ stat:
+ path: "{{ inventory_dir }}/host_vars/{{ env.z.lpar1.hostname }}.yaml"
+ when: env.z.lpar1.hostname is defined
+ register: lpar_host_vars
+ failed_when: lpar_host_vars.stat.exists == False
+
+ - name: Check to make sure the second KVM hosts have a corresponding inventory host_vars file named with matching hostname and .yaml extension, if defined.
+ tags: lpar_check
+ stat:
+ path: "{{ inventory_dir }}/host_vars/{{ env.z.lpar2.hostname }}.yaml"
+ when: env.z.lpar2.hostname is defined
+ register: lpar_host_vars
+ failed_when: lpar_host_vars.stat.exists == False
+
+ - name: Check to make sure the third KVM hosts have a corresponding inventory host_vars file named with matching hostname and .yaml extension, if defined.
+ tags: lpar_check
+ stat:
+ path: "{{ inventory_dir }}/host_vars/{{ env.z.lpar3.hostname }}.yaml"
+ when: env.z.lpar3.hostname is defined
+ register: lpar_host_vars
+ failed_when: lpar_host_vars.stat.exists == False
+
+- hosts: localhost
+ connection: local
+ become: false
+ gather_facts: true
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ vars:
+ packages: "{{ env.pkgs.controller }}"
+ ssh_target: [ "{{ env.file_server.ip }}", "{{ env.file_server.user }}", "{{ env.file_server.pass }}", "{{ path_to_key_pair }}" ]
+ roles:
+ - install_packages
+ - ssh_key_gen
+ - ssh_agent
+ - ssh_copy_id #to file server
diff --git a/playbooks/1_create_lpar.yaml b/playbooks/1_create_lpar.yaml
new file mode 100644
index 00000000..ffe8df41
--- /dev/null
+++ b/playbooks/1_create_lpar.yaml
@@ -0,0 +1,55 @@
+---
+#Create logical partition
+- hosts: localhost
+ tags: create_lpar, create, kvm_host_1
+ connection: local
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ tasks:
+ - name: Install required python packages.
+ ansible.builtin.pip:
+ name: "{{ item }}"
+ loop:
+ - requests
+ - zhmcclient
+ - cryptography
+ - packaging
+ - PyYAML
+
+ - name: Include vars for first LPAR if it is to be created.
+ include_vars:
+ file: "{{ inventory_dir }}/host_vars/{{ env.z.lpar1.hostname }}.yaml"
+ when: env.z.lpar1.create == True
+
+ - name: Create an LPAR for the first KVM host.
+ import_role:
+ name: create_lpar
+ when: env.z.lpar1.create == True
+
+- hosts: localhost
+ tags: create_lpar, create, kvm_host_2
+ connection: local
+ tasks:
+ - name: Include vars for second LPAR if it is to be created.
+ include_vars:
+ file: "{{ inventory_dir }}/host_vars/{{ env.z.lpar2.hostname }}.yaml"
+ when: env.z.lpar2.create == True
+
+ - name: Create an LPAR for the second KVM host.
+ import_role:
+ name: create_lpar
+ when: env.z.lpar2.create == True
+
+- hosts: localhost
+ tags: create_lpar, create, kvm_host_3
+ connection: local
+ tasks:
+ - name: Include vars for third LPAR if it is to be created.
+ include_vars:
+ file: "{{ inventory_dir }}/host_vars/{{ env.z.lpar3.hostname }}.yaml"
+ when: env.z.lpar3.create == True
+
+ - name: Create an LPAR for a third KVM host.
+ import_role:
+ name: create_lpar
+ when: env.z.lpar3.create == True
\ No newline at end of file
diff --git a/playbooks/2_create_kvm_host.yaml b/playbooks/2_create_kvm_host.yaml
new file mode 100644
index 00000000..5f7dc704
--- /dev/null
+++ b/playbooks/2_create_kvm_host.yaml
@@ -0,0 +1,97 @@
+---
+#Template and update RHEL configuration files for KVM host to boot from
+- hosts: file_server
+ tags: update_cfgs, update, kvm_host_1
+ become: false
+ gather_facts: false
+ tasks:
+ - name: Include vars for the KVM host.
+ include_vars:
+ file: "{{ inventory_dir }}/host_vars/{{ env.z.lpar1.hostname }}.yaml"
+ when: env.z.lpar1.create == True
+
+ - name: Update configuration files for the KVM host.
+ import_role:
+ name: update_cfgs
+ when: env.z.lpar1.create == True
+
+#Template and update RHEL configuration files for KVM host to boot from
+- hosts: file_server
+ tags: update_cfgs, update, kvm_host_2
+ become: false
+ gather_facts: false
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ tasks:
+ - name: Include vars for a second KVM host if it is to be created.
+ include_vars:
+ file: "{{ inventory_dir }}/host_vars/{{ env.z.lpar2.hostname }}.yaml"
+ when: env.z.lpar2.create == True
+
+ - name: Update configuration files for a second KVM host, if cluster is to be highly available
+ import_role:
+ name: update_cfgs
+ when: env.z.lpar2.create == True
+
+#Template and update RHEL configuration files for KVM host to boot from
+- hosts: file_server
+ tags: update_cfgs, update, kvm_host_3
+ become: false
+ gather_facts: false
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ tasks:
+ - name: Include vars for third KVM host if it is to be created.
+ include_vars:
+ file: "{{ inventory_dir }}/host_vars/{{ env.z.lpar3.hostname }}.yaml"
+ when: env.z.lpar3.create == True
+
+ - name: Update configuration files for a third KVM host, if cluster is to be highly available
+ import_role:
+ name: update_cfgs
+ when: env.z.lpar3.create == True
+
+- name: Boot RHEL on LPAR 1
+ hosts: localhost
+ tags: create_kvm_host, boot, kvm_host_1
+ connection: local
+ tasks:
+ - name: Include vars for first LPAR if it is to be created.
+ include_vars:
+ file: "{{ inventory_dir }}/host_vars/{{ env.z.lpar1.hostname }}.yaml"
+ when: env.z.lpar1.create == True
+
+ - name: Boot and install RHEL for KVM host
+ import_role:
+ name: create_kvm_host
+ when: env.z.lpar1.create == True
+
+- name: Boot RHEL on LPAR 2
+ hosts: localhost
+ tags: create_kvm_host, boot, kvm_host_2
+ connection: local
+ tasks:
+ - name: Include vars for second KVM host.
+ include_vars:
+ file: "{{ inventory_dir }}/host_vars/{{ env.z.lpar2.hostname }}.yaml"
+ when: env.z.lpar2.create == True
+
+ - name: Boot and install RHEL for a second KVM host.
+ import_role:
+ name: create_kvm_host
+ when: env.z.lpar2.create == True
+
+- name: Boot RHEL on LPAR 3
+ hosts: localhost
+ tags: create_kvm_host, boot, kvm_host_3
+ connection: local
+ tasks:
+ - name: Include vars for third KVM host.
+ include_vars:
+ file: "{{ inventory_dir }}/host_vars/{{ env.z.lpar3.hostname }}.yaml"
+ when: env.z.lpar3.create == True
+
+ - name: Boot and install RHEL for a third KVM host.
+ import_role:
+ name: create_kvm_host
+ when: env.z.lpar3.create == True
diff --git a/playbooks/3_setup_kvm_host.yaml b/playbooks/3_setup_kvm_host.yaml
new file mode 100644
index 00000000..2b83b81a
--- /dev/null
+++ b/playbooks/3_setup_kvm_host.yaml
@@ -0,0 +1,190 @@
+---
+
+- name: Copy SSH key to access KVM host 1
+ hosts: localhost
+ tags: ssh, ssh_copy_id, section_1, kvm_host_1
+ connection: local
+ become: false
+ gather_facts: true
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ vars:
+ ssh_target: ["{{ env.z.lpar1.ip }}","{{ env.z.lpar1.user }}","{{ env.z.lpar1.pass }}","{{ path_to_key_pair }}"]
+ tasks:
+ - name: Include vars for the KVM host.
+ include_vars:
+ file: "{{ inventory_dir }}/host_vars/{{ env.z.lpar1.hostname }}.yaml"
+
+ - name: Copy SSH key to KVM host.
+ import_role:
+ name: ssh_copy_id
+
+- name: Copy SSH key to access KVM host 2
+ hosts: localhost
+ tags: ssh, ssh_copy_id, section_1, kvm_host_2
+ connection: local
+ become: false
+ gather_facts: false
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ vars:
+ ssh_target: ["{{ env.z.lpar2.ip }}","{{ env.z.lpar2.user }}","{{ env.z.lpar2.pass }}","{{ path_to_key_pair }}"]
+ tasks:
+ - name: Include vars for second KVM host.
+ include_vars:
+ file: "{{ inventory_dir }}/host_vars/{{ env.z.lpar2.hostname }}.yaml"
+ when: env.z.lpar2.hostname is defined
+
+ - name: copy SSH key to second KVM host, if cluster is to be highly available.
+ tags: ssh_copy_id, ssh
+ import_role:
+ name: ssh_copy_id
+ when: env.z.lpar2.hostname is defined
+
+- name: Copy SSH key to access KVM host 3
+ hosts: localhost
+ tags: ssh, ssh_copy_id, section_1, kvm_host_3
+ connection: local
+ become: false
+ gather_facts: false
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ vars:
+ ssh_target: ["{{ env.z.lpar3.ip }}","{{ env.z.lpar3.user }}","{{ env.z.lpar3.pass }}","{{ path_to_key_pair }}"]
+ tasks:
+ - name: Include vars for third KVM host.
+ include_vars:
+ file: "{{ inventory_dir }}/host_vars/{{ env.z.lpar3.hostname }}.yaml"
+ when: env.z.lpar3.hostname is defined
+
+ - name: copy SSH key to third KVM host, if cluster is to be highly available.
+ tags: ssh_copy_id, ssh
+ import_role:
+ name: ssh_copy_id
+ when: env.z.lpar3.hostname is defined
+
+- name: Prepare KVM host(s)
+ hosts: kvm_host
+ tags: setup, section_2
+ gather_facts: true
+ become: true
+ vars:
+ packages: "{{ env.pkgs.kvm }}"
+ roles:
+ - { role: attach_subscription, when: env.redhat.attach_subscription }
+ - install_packages
+ - httpd
+ post_tasks:
+ - name: Add ports to firewall
+ tags: firewall-libvirt, libvirt
+ ansible.posix.firewalld:
+ port: 80/tcp
+ permanent: yes
+ state: enabled
+
+ - name: Start and enable libvirt
+ tags: firewall-libvirt, libvirt
+ ansible.builtin.service:
+ name: libvirtd
+ enabled: yes
+ state: started
+
+ - name: Permit traffic in libvirt zone
+ tags: firewall-libvirt, libvirt
+ ansible.posix.firewalld:
+ service: http
+ permanent: yes
+ state: enabled
+ zone: libvirt
+ immediate: true
+
+ - name: Set libvirt management to libvirt group instead of root.
+ tags: libvirt
+ ansible.builtin.lineinfile:
+ path: /etc/libvirt/qemu.conf
+ regexp: '#group = "root"'
+ line: 'group = "libvirt"'
+ backup: true
+
+ - name: Get user home directory
+ tags: libvirt
+ shell: >
+ getent passwd {{ ansible_user }} | awk -F: '{ print $6 }'
+ changed_when: false
+ register: user_home
+
+ - name: Check if directory {{ user_home.stdout }}/.config/libvirt exists
+ tags: libvirt
+ ansible.builtin.stat:
+ path: "{{ user_home.stdout }}/.config/libvirt"
+ register: home_config_libvirt
+
+ - name: Create directory {{ user_home.stdout }}/.config/libvirt
+ tags: libvirt
+ file:
+ path: "{{ user_home.stdout }}/.config/libvirt"
+ state: directory
+ when: home_config_libvirt.stat.exists == false
+
+ - name: Create file for user's custom libvirt configurations.
+ tags: libvirt
+ ansible.builtin.file:
+ path: "{{ user_home.stdout }}/.config/libvirt/libvirt.conf"
+ state: touch
+ owner: "{{ ansible_user }}"
+ group: "{{ ansible_user }}"
+
+ - name: Set default uri connection to qemu:///system.
+ tags: libvirt
+ ansible.builtin.lineinfile:
+ path: "{{ item }}"
+ regexp: '#uri_default = "qemu:///system"'
+ line: 'uri_default = "qemu:///system"'
+ backup: true
+ loop:
+ - /etc/libvirt/qemu.conf
+ - "{{ user_home.stdout }}/.config/libvirt/libvirt.conf"
+ - /etc/libvirt/libvirt.conf
+
+ - name: Ensure KVM admin user is part of groups 'kvm', 'libvirt'.
+ tags: groups, group, libvirt
+ ansible.builtin.user:
+ name: "{{ ansible_user }}"
+ append: true
+ groups: kvm,libvirt
+
+ - name: Restart libvirt
+ tags: firewall-libvirt, libvirt
+ ansible.builtin.service:
+ name: libvirtd
+ enabled: yes
+ state: restarted
+
+ - name: Enable cockpit console
+ tags: cockpit
+ ansible.builtin.command: systemctl enable --now cockpit.socket
+
+- name: Configure ip_forward in case of NAT
+ hosts: kvm_host
+ tags: cfg_ip_forward, section_2
+ gather_facts: true
+ become: true
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ tasks:
+ - name: Configure ip_forward in case of network "NAT"
+ tags: cfg_ip_forward
+ ansible.posix.sysctl:
+ name: net.ipv4.ip_forward
+ value: "{{ env.z.ip_forward }}"
+ sysctl_set: true
+ state: present
+ reload: true
+ when: env.network_mode | upper == 'NAT'
+
+- hosts: kvm_host
+ tags: setup, section_3
+ become: true
+ roles:
+ - configure_storage
+ - { role: macvtap, when: env.network_mode | upper != 'NAT' }
diff --git a/playbooks/4_create_bastion.yaml b/playbooks/4_create_bastion.yaml
new file mode 100644
index 00000000..64c31d1b
--- /dev/null
+++ b/playbooks/4_create_bastion.yaml
@@ -0,0 +1,10 @@
+ # Assume we have an existing ftp/http server already
+ - name: 4 create bastion
+   hosts: kvm_host[0]
+   become: false
+   vars_files:
+     - "{{ inventory_dir }}/group_vars/all.yaml"
+     - "{{ inventory_dir }}/host_vars/{{ env.z.lpar1.hostname }}.yaml"
+   roles:
+     - common
+     - { role: create_bastion, when: env.bastion.create | bool }
diff --git a/playbooks/5_setup_bastion.yaml b/playbooks/5_setup_bastion.yaml
new file mode 100644
index 00000000..7871768c
--- /dev/null
+++ b/playbooks/5_setup_bastion.yaml
@@ -0,0 +1,179 @@
+---
+
+- name: Copy ssh key to jumphost if network is NAT and jumphost defined, and add jumphost section to ssh config.
+ hosts: localhost
+ tags: ssh, ssh_copy_id, section_1
+ connection: local
+ gather_facts: false
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ vars:
+ ssh_target: ["{{ env.jumphost.ip }}", "{{ env.jumphost.user }}", "{{ env.jumphost.pass }}", "{{ path_to_key_pair }}"]
+ roles:
+ - { role: ssh_copy_id, tags: ssh_copy_id, ssh, when: (env.network_mode | upper == "NAT") and ( env.jumphost.ip is not none ) }
+ - { role: ssh_add_config, tags: ssh_copy_id, ssh, when: (env.network_mode | upper == "NAT") and ( env.jumphost.ip is not none ) }
+
+- name: Configure jumphost if network mode == 'NAT'
+ hosts: jumphost
+ tags: ssh, ssh_copy_id, section_1
+ gather_facts: true
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ vars:
+ ssh_target: ["{{ env.bastion.networking.ip }}", "{{ env.bastion.access.user }}", "{{ env.bastion.access.pass }}","{{ env.jumphost.path_to_keypair }}"]
+ pre_tasks:
+ - name: Generate an OpenSSH keypair with the default values (4096 bits, RSA), if using jumphost for NAT.
+ tags: ssh_key_gen, ssh, section_1
+ community.crypto.openssh_keypair:
+ path: "{{ env.jumphost.path_to_keypair.split('.')[:-1] | join('.') }}"
+ passphrase: ""
+ regenerate: never
+ when: (env.network_mode | upper == "NAT") and ( env.jumphost.ip is not none )
+ - block:
+ - name: Check if 'expect' is installed on jumphost, for use in ssh-copy-id role for NAT.
+ package_facts:
+ failed_when: "'expect' not in ansible_facts.packages"
+ when: (env.network_mode | upper == "NAT") and ( env.jumphost.ip is not none )
+ rescue:
+ - name: Package 'expect' must be installed on the jumphost, attempting to install it. #Using 'block' and 'rescue' to avoid running the 'package' module (which requires 'sudo') unless necessary.
+ become: true
+ package:
+ name: expect
+ when: (env.network_mode | upper == "NAT") and ( env.jumphost.ip is not none )
+ roles:
+ - { role: ssh_copy_id, ssh, when: (env.network_mode | upper == "NAT") and ( env.jumphost.ip is not none ) }
+ post_tasks:
+ - meta: clear_facts
+
+- name: 5 setup bastion - copy SSH key from localhost to access bastion.
+ hosts: localhost
+ tags: ssh, ssh_copy_id, section_1
+ gather_facts: true
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ vars:
+ ssh_target: ["{{ env.bastion.networking.ip }}", "{{ env.bastion.access.user }}", "{{ env.bastion.access.pass }}","{{ path_to_key_pair }}"]
+ roles:
+ - ssh_copy_id
+
+ - name: 5 setup bastion - configure bastion node with essential services
+   hosts: bastion
+   tags: services, section_2
+   become: true
+   vars:
+     packages: "{{ env.pkgs.bastion }}"
+   vars_files:
+     - "{{ inventory_dir }}/group_vars/all.yaml"
+   pre_tasks:
+     - name: Import initial-resolv.yaml
+       ansible.builtin.import_role:
+         name: dns
+         tasks_from: initial-resolv.yaml
+       when: env.bastion.options.dns is defined and env.bastion.options.dns
+   roles:
+     - { role: attach_subscription, when: env.redhat.username is defined and env.redhat.password is defined }
+     - install_packages
+     - ssh_ocp_key_gen
+     - set_firewall
+     - { role: dns, when: env.bastion.options.dns is defined and env.bastion.options.dns }
+     - { role: check_dns, when: env.bastion.options.dns is defined and env.bastion.options.dns }
+     - { role: haproxy, when: env.bastion.options.loadbalancer.on_bastion }
+     - httpd
+
+ - hosts: bastion
+   tags: services, section_2, openvpn
+   become: true
+   vars:
+     openvpn_role: "server"
+   roles:
+     #- { role: robertdebock.bootstrap, tags: openvpn, when: env.z.high_availability | bool }
+     - { role: robertdebock.epel, tags: openvpn, when: env.z.high_availability | bool }
+     - { role: robertdebock.openvpn, tags: openvpn, when: env.z.high_availability | bool }
+
+ - hosts: localhost
+   tags: services, section_2, openvpn
+   gather_facts: false
+   tasks:
+     - name: Create landing directories on controller for certificates and keys.
+       tags: openvpn
+       ansible.builtin.file:
+         state: directory
+         path: tmp
+       when: env.z.high_availability | bool
+
+ - hosts: bastion
+   tags: services, section_2, openvpn
+   become: true
+   tasks:
+     - name: Fetch certificates and keys from bastion.
+       tags: openvpn
+       ansible.builtin.fetch:
+         src: /etc/openvpn/easy-rsa/pki/{{ item }}
+         dest: tmp/
+         flat: true
+       loop:
+         - ca.crt
+         - issued/client.crt
+         - private/client.key
+         - ta.key
+       when: env.z.high_availability | bool
+
+ - name: setup OpenVPN on KVM host(s).
+   hosts: kvm_host
+   tags: services, section_3, openvpn
+   become: true
+   gather_facts: false
+   vars:
+     openvpn_role: "client"
+     openvpn_client_server: "{{ env.bastion.networking.ip }}"
+   pre_tasks:
+     - name: Gather facts.
+       ansible.builtin.setup:
+       when: env.z.high_availability | bool
+
+     - name: Create landing directories for certificates and keys on KVM hosts.
+       tags: openvpn
+       ansible.builtin.file:
+         state: directory
+         path: /etc/openvpn/client/{{ item }}
+         mode: '700'
+       loop:
+         - issued
+         - private
+       when: env.z.high_availability | bool
+
+     - name: Copy certificates and keys from controller to KVM hosts.
+       tags: openvpn
+       ansible.builtin.copy:
+         src: tmp/{{ item }}
+         dest: /etc/openvpn/client/{{ item }}
+         mode: '600'
+       loop:
+         - ca.crt
+         - client.crt
+         - client.key
+         - ta.key
+       when: env.z.high_availability | bool
+   roles:
+     - { role: robertdebock.epel, tags: openvpn, when: env.z.high_availability | bool }
+     - { role: robertdebock.openvpn, tags: openvpn, when: env.z.high_availability | bool }
+
+ - hosts: localhost
+   tags: services, section_2, openvpn
+   gather_facts: false
+   tasks:
+     - name: Clean up tmp directories on controller for certificates and keys.
+       tags: openvpn
+       ansible.builtin.file:
+         state: absent
+         path: tmp
+       when: env.z.high_availability | bool
+
+- hosts: bastion
+ tags: get_ocp, section_3
+ become: true
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ roles:
+ - common
+ - get_ocp
diff --git a/playbooks/6_create_nodes.yaml b/playbooks/6_create_nodes.yaml
new file mode 100644
index 00000000..278a7272
--- /dev/null
+++ b/playbooks/6_create_nodes.yaml
@@ -0,0 +1,83 @@
+---
+
+# Prepare and then create the temporary bootstrap node and the control nodes
+- name: 6 create nodes - prepare KVM guests
+ hosts: kvm_host
+ gather_facts: false
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ roles:
+ - prep_kvm_guests
+ # Delete control, compute and infra nodes, if exists
+ - delete_nodes
+
+- name: 6 create nodes - create bootstrap
+ hosts: kvm_host[0]
+ gather_facts: false
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ roles:
+ - common
+ - create_bootstrap
+
+- name: 6 create nodes - create control nodes
+ hosts: kvm_host
+ gather_facts: false
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ roles:
+ - common
+ - create_control_nodes
+
+- name: 6 create nodes - wait for bootstrap to connect control plane (for non-root user)
+ hosts: bastion
+ become: true
+ environment:
+ KUBECONFIG: "/home/{{ env.bastion.access.user }}/.kube/config"
+ gather_facts: true
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ roles:
+ - {role: wait_for_bootstrap, when: env.bastion.access.user != "root"}
+
+- name: 6 create nodes - wait for bootstrap to connect to control plane (for root user)
+ hosts: bastion
+ become: true
+ environment:
+ KUBECONFIG: "/{{ env.bastion.access.user }}/.kube/config"
+ gather_facts: true
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ roles:
+ - {role: wait_for_bootstrap, when: env.bastion.access.user == "root"}
+
+- name: 6 create nodes - once bootstrapping is complete, tear down bootstrap.
+ hosts: kvm_host[0]
+ tags: create_nodes, teardown_bootstrap
+ gather_facts: false
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ tasks:
+ - name: Destroy bootstrap. Expect ignored errors if bootstrap is already destroyed.
+ tags: create_nodes, teardown_bootstrap
+ community.libvirt.virt:
+ name: "{{ env.cluster.nodes.bootstrap.vm_name }}"
+ command: destroy
+ ignore_errors: true
+
+ - name: Undefine bootstrap. Expect ignored errors if bootstrap is already undefined.
+ tags: create_nodes, teardown_bootstrap
+ community.libvirt.virt:
+ name: "{{ env.cluster.nodes.bootstrap.vm_name }}"
+ command: undefine
+ ignore_errors: true
+
+- name: 6 create nodes - once bootstrapping is complete, create compute nodes.
+ hosts: kvm_host
+ tags: create_compute_nodes
+ gather_facts: false
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ roles:
+ - common
+ - create_compute_nodes
diff --git a/playbooks/7_ocp_verification.yaml b/playbooks/7_ocp_verification.yaml
new file mode 100644
index 00000000..eb360dec
--- /dev/null
+++ b/playbooks/7_ocp_verification.yaml
@@ -0,0 +1,18 @@
+---
+
+# Complete OpenShift verification
+- name: 7 OCP verification
+ hosts: bastion
+ become: true
+ environment:
+ KUBECONFIG: "{{ '/home/' if (env.bastion.access.user != 'root') else '/'}}{{ env.bastion.access.user }}/.kube/config"
+ gather_facts: true
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ roles:
+ - approve_certs
+ - check_nodes
+ - role: approve_certs
+ approve_certs_cleanup: true
+ - wait_for_cluster_operators
+ - wait_for_install_complete
diff --git a/playbooks/create_agents_and_wait_for_install_complete.yaml b/playbooks/create_agents_and_wait_for_install_complete.yaml
new file mode 100644
index 00000000..c3a858ec
--- /dev/null
+++ b/playbooks/create_agents_and_wait_for_install_complete.yaml
@@ -0,0 +1,16 @@
+- name: Create Agents
+ hosts: kvm_host_hypershift
+ become: true
+ roles:
+ - boot_agents_hypershift
+
+- name: Scale Nodepool & Configure Haproxy on bastion for hosted workers
+ hosts: bastion_hypershift
+ roles:
+ - scale_nodepool_and_wait_for_workers_hypershift
+ - add_hc_workers_to_haproxy_hypershift
+
+- name: Wait for all Console operators to come up
+ hosts: bastion_hypershift
+ roles:
+ - wait_for_hc_to_complete_hypershift
diff --git a/playbooks/create_compute_node.yaml b/playbooks/create_compute_node.yaml
new file mode 100644
index 00000000..7ed874e8
--- /dev/null
+++ b/playbooks/create_compute_node.yaml
@@ -0,0 +1,99 @@
+---
+###################################################################################################
+ # To execute this playbook you need to create a node config yaml file with these parameters:
+# ---
+# day2_compute_node:
+# vm_name:
+# vm_hostname:
+# vm_ip:
+# hostname:
+# host_arch:
+#
+# Execute the playbook with '--extra-vars' option.
+# E.g.:
+# ansible-playbook playbooks/add_compute_node.yaml --extra-vars "@extra-cnode1.yml"
+
+- name: Add an additional compute node
+ # Select bastion host
+ hosts: bastion
+ gather_facts: false
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ pre_tasks:
+ - name: Check required variables
+ when: (day2_compute_node is not defined)
+ block:
+ - name: Check required variables
+ ansible.builtin.debug:
+ msg:
+ - "ERROR: Variable 'day2_compute_node' is not defined!"
+ - "Execute: 'ansible-playbook playbooks/add_compute_node.yaml --extra-vars \"@extra-cnode.yml\"'"
+ - name: Abort playbook
+ ansible.builtin.fail:
+ msg: "See above error!"
+
+ - name: Get RHCOS iso download url from machine-config-operator
+ ansible.builtin.shell: |
+ set -o pipefail
+ oc -n openshift-machine-config-operator get configmap/coreos-bootimages -o jsonpath='{.data.stream}' \
+ | jq -r '.architectures.{{ day2_compute_node.host_arch }}.artifacts.metal.formats.iso.disk.location'
+ register: _rhcos_iso_dl_url
+
+ - name: Check variable '_rhcos_iso_dl_url' is not 'null'
+ ansible.builtin.fail:
+ when: _rhcos_iso_dl_url.stdout == "null"
+
+ - name: Get RHCOS kernel download url from machine-config-operator
+ ansible.builtin.shell: |
+ set -o pipefail
+ oc -n openshift-machine-config-operator get configmap/coreos-bootimages -o jsonpath='{.data.stream}' \
+ | jq -r '.architectures.{{ day2_compute_node.host_arch }}.artifacts.metal.formats.pxe.kernel.location'
+ register: _rhcos_kernel_dl_url
+
+ - name: Get RHCOS initrd download url from machine-config-operator
+ ansible.builtin.shell: |
+ set -o pipefail
+ oc -n openshift-machine-config-operator get configmap/coreos-bootimages -o jsonpath='{.data.stream}' \
+ | jq -r '.architectures.{{ day2_compute_node.host_arch }}.artifacts.metal.formats.pxe.initramfs.location'
+ register: _rhcos_initrd_dl_url
+
+ - name: Get RHCOS rootfs download url from machine-config-operator
+ ansible.builtin.shell: |
+ set -o pipefail
+ oc -n openshift-machine-config-operator get configmap/coreos-bootimages -o jsonpath='{.data.stream}' \
+ | jq -r '.architectures.{{ day2_compute_node.host_arch }}.artifacts.metal.formats.pxe.rootfs.location'
+ register: _rhcos_rootfs_dl_url
+
+ - name: Redefine RHCOS variables
+ ansible.builtin.set_fact:
+ rhcos_live_kernel: "{{ (_rhcos_kernel_dl_url.stdout | split('/') | last) }}"
+ rhcos_live_initrd: "{{ (_rhcos_initrd_dl_url.stdout | split('/') | last) }}"
+ rhcos_live_rootfs: "{{ (_rhcos_rootfs_dl_url.stdout | split('/') | last) }}"
+ rhcos_live_iso: "{{ (_rhcos_iso_dl_url.stdout.split('/')[-1]) }}"
+
+ - name: Redefine RHCOS download url
+ ansible.builtin.set_fact:
+ # Assume all images have same download location
+ rhcos_download_url: "{{ (_rhcos_rootfs_dl_url.stdout | replace(rhcos_live_rootfs, '')) }}"
+
+ roles:
+ - role: common
+ - role: print_node_status
+ - role: update_ignition_files
+
+ - role: dns_update
+ when: env.bastion.options.dns is defined and env.bastion.options.dns
+ param_dns_cmd: add
+ param_dns_hostname: "{{ day2_compute_node.vm_hostname }}"
+ param_dns_ip: "{{ day2_compute_node.vm_ip }}"
+
+ - role: create_compute_node
+ # Some tasks will be delegated to the bastion host and target KVM host
+ param_compute_node: "{{ day2_compute_node }}"
+
+ - role: approve_certs
+ - role: wait_for_node
+ wait_for_node_name: "{{ day2_compute_node.vm_hostname }}"
+ - role: approve_certs
+ approve_certs_cleanup: true
+ - role: print_node_status
diff --git a/playbooks/create_hosted_cluster.yaml b/playbooks/create_hosted_cluster.yaml
new file mode 100644
index 00000000..a16b75ba
--- /dev/null
+++ b/playbooks/create_hosted_cluster.yaml
@@ -0,0 +1,79 @@
+---
+- name: Install Prerequisites on kvm_host
+ hosts: kvm_host_hypershift
+ become: true
+ vars_files:
+ - "{{playbook_dir}}/secrets.yaml"
+ tasks:
+ - name: Setting host
+ set_fact:
+ host: 'kvm_host_hypershift'
+ - name: Install Prereqs on host
+ import_role:
+ name: install_prerequisites_host_hypershift
+
+- name: Create macvtap network
+ hosts: kvm_host_hypershift
+ become: true
+ tasks:
+ - name: Setting interface name
+ set_fact:
+ networking:
+ device1: "{{ hypershift.networking_device }}"
+ - name: Creating macvtap network
+ import_role:
+ name: macvtap
+
+ - name: Create bastion for hypershift
+   hosts: kvm_host_hypershift
+   become: true
+   vars_files:
+     - "{{ playbook_dir }}/secrets.yaml"
+   tasks:
+     - name: Creating Bastion
+       ansible.builtin.include_role:
+         name: create_bastion_hypershift
+       when: hypershift.create_bastion | bool
+
+- name: Configuring Bastion
+ hosts: bastion_hypershift
+ become: true
+ vars_files:
+ - "{{playbook_dir}}/secrets.yaml"
+ tasks:
+ - name: Setting host
+ set_fact:
+ host: 'bastion_hypershift'
+
+ - name: Install Prereqs
+ import_role:
+ name: install_prerequisites_host_hypershift
+
+ - name: Configure Bastion
+ import_role:
+ name: install_prereqs_bastion_hypershift
+
+ - name: Add ansible SSH key to ssh-agent
+ import_role:
+ name: ssh_agent
+
+- name: Create AgentServiceConfig Hosted Control Plane and InfraEnv
+ hosts: bastion_hypershift
+ vars_files:
+ - "{{playbook_dir}}/secrets.yaml"
+ roles:
+ - install_mce_operator
+ - create_agentserviceconfig_hypershift
+ - create_hcp_InfraEnv_hypershift
+
+- name: Download Required images for booting Agents
+ hosts: kvm_host_hypershift
+ become: true
+ roles:
+ - setup_for_agents_hypershift
+
+- name: Configure httpd on bastion for hosting rootfs
+ hosts: bastion_hypershift
+ roles:
+ - download_rootfs_hypershift
+
diff --git a/playbooks/delete_compute_node.yaml b/playbooks/delete_compute_node.yaml
new file mode 100644
index 00000000..0780cbd3
--- /dev/null
+++ b/playbooks/delete_compute_node.yaml
@@ -0,0 +1,44 @@
+---
+###################################################################################################
+ # To execute this playbook you need to create a node config yaml file with these parameters:
+# ---
+# day2_compute_node:
+# vm_name:
+# vm_hostname:
+# vm_ip:
+# hostname:
+# host_arch:
+#
+# Execute the playbook with '--extra-vars' option.
+# E.g.:
+# ansible-playbook playbooks/delete_compute_node.yaml --extra-vars "@extra-cnode1.yml"
+
+- name: Delete compute node
+ hosts: bastion
+ gather_facts: false
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ pre_tasks:
+ - name: Check required variables
+ when: (day2_compute_node is not defined)
+ block:
+ - name: Check required variables
+ ansible.builtin.debug:
+ msg:
+ - "ERROR: Variable 'day2_compute_node' is not defined!"
+ - "Execute: 'ansible-playbook playbooks/delete_compute_node.yaml --extra-vars \"@extra-cnode.yml\"'"
+ - name: Abort playbook
+ ansible.builtin.fail:
+ msg: "See above error!"
+ roles:
+ - role: delete_compute_node
+ # Some tasks will be delegated to the bastion host and target KVM host
+ param_compute_node: "{{ day2_compute_node }}"
+
+ - role: dns_update
+ when: env.bastion.options.dns is defined and env.bastion.options.dns
+ param_dns_cmd: delete
+ param_dns_hostname: "{{ day2_compute_node.vm_hostname }}"
+ param_dns_ip: "{{ day2_compute_node.vm_ip }}"
+
+ - role: print_node_status
diff --git a/playbooks/destroy_cluster_hypershift.yaml b/playbooks/destroy_cluster_hypershift.yaml
new file mode 100644
index 00000000..01fd03a6
--- /dev/null
+++ b/playbooks/destroy_cluster_hypershift.yaml
@@ -0,0 +1,12 @@
+- name: Delete Cluster Resources
+ hosts: bastion_hypershift
+ vars_files:
+ - "{{playbook_dir}}/secrets.yaml"
+ roles:
+ - delete_resources_bastion_hypershift
+
+- name: Delete Resources on kvm host
+ hosts: kvm_host_hypershift
+ become: true
+ roles:
+ - delete_resources_kvm_host_hypershift
diff --git a/playbooks/hypershift.yaml b/playbooks/hypershift.yaml
new file mode 100644
index 00000000..a10f79ad
--- /dev/null
+++ b/playbooks/hypershift.yaml
@@ -0,0 +1,4 @@
+---
+
+- import_playbook: create_hosted_cluster.yaml
+- import_playbook: create_agents_and_wait_for_install_complete.yaml
diff --git a/playbooks/pre-existing_site.yaml b/playbooks/pre-existing_site.yaml
new file mode 100644
index 00000000..6456527c
--- /dev/null
+++ b/playbooks/pre-existing_site.yaml
@@ -0,0 +1,8 @@
+# If you are using a pre-existing LPAR with RHEL already installed, use this version of the master playbook.
+---
+
+- import_playbook: 0_setup.yaml
+- import_playbook: 4_create_bastion.yaml
+- import_playbook: 5_setup_bastion.yaml
+- import_playbook: 6_create_nodes.yaml
+- import_playbook: 7_ocp_verification.yaml
diff --git a/playbooks/reinstall_cluster.yaml b/playbooks/reinstall_cluster.yaml
new file mode 100644
index 00000000..9bdc19fb
--- /dev/null
+++ b/playbooks/reinstall_cluster.yaml
@@ -0,0 +1,86 @@
+# Use this if you want to re-install nodes with a new OCP version
+---
+
+- name: Re-Install cluster - Copy SSH key from localhost to access bastion
+ hosts: localhost
+ tags: ssh, ssh_copy_id, section_1
+ gather_facts: true
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ vars:
+ ssh_target: ["{{ env.bastion.networking.ip }}", "{{ env.bastion.access.user }}", "{{ env.bastion.access.pass }}","{{ path_to_key_pair }}"]
+ roles:
+ - ssh_copy_id
+
+- name: Re-Install cluster - Copy SSH key to access KVM host 1
+ hosts: localhost
+ tags: ssh, ssh_copy_id, section_1, kvm_host_1
+ connection: local
+ become: false
+ gather_facts: true
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ vars:
+ ssh_target: ["{{ env.z.lpar1.ip }}","{{ env.z.lpar1.user }}","{{ env.z.lpar1.pass }}","{{ path_to_key_pair }}"]
+ tasks:
+ - name: Include vars for the KVM host
+ ansible.builtin.include_vars:
+ file: "{{ inventory_dir }}/host_vars/{{ env.z.lpar1.hostname }}.yaml"
+
+ - name: Copy SSH key to KVM host
+ ansible.builtin.import_role:
+ name: ssh_copy_id
+
+- name: Re-Install cluster - Copy SSH key to access KVM host 2
+ hosts: localhost
+ tags: ssh, ssh_copy_id, section_1, kvm_host_2
+ connection: local
+ become: false
+ gather_facts: false
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ vars:
+ ssh_target: ["{{ env.z.lpar2.ip }}","{{ env.z.lpar2.user }}","{{ env.z.lpar2.pass }}","{{ path_to_key_pair }}"]
+ tasks:
+ - name: Include vars for second KVM host
+ ansible.builtin.include_vars:
+ file: "{{ inventory_dir }}/host_vars/{{ env.z.lpar2.hostname }}.yaml"
+ when: env.z.lpar2.hostname is defined
+
+ - name: copy SSH key to second KVM host, if cluster is to be highly available
+ ansible.builtin.import_role:
+ name: ssh_copy_id
+ when: env.z.lpar2.hostname is defined
+
+ - name: Re-Install cluster - Copy SSH key to access KVM host 3
+   hosts: localhost
+   tags: ssh, ssh_copy_id, section_1, kvm_host_3
+   connection: local
+   become: false
+   gather_facts: false
+   vars_files:
+     - "{{ inventory_dir }}/group_vars/all.yaml"
+   vars:
+     ssh_target: ["{{ env.z.lpar3.ip }}", "{{ env.z.lpar3.user }}", "{{ env.z.lpar3.pass }}", "{{ path_to_key_pair }}"]
+   tasks:
+     - name: Include vars for third KVM host
+       ansible.builtin.include_vars:
+         file: "{{ inventory_dir }}/host_vars/{{ env.z.lpar3.hostname }}.yaml"
+       when: env.z.lpar3.hostname is defined
+
+     - name: Copy SSH key to third KVM host, if cluster is to be highly available
+       ansible.builtin.import_role:
+         name: ssh_copy_id
+       when: env.z.lpar3.hostname is defined
+
+- name: Re-Install cluster - Update ignitions and other install files
+ hosts: bastion
+ become: true
+ vars_files:
+ - "{{ inventory_dir }}/group_vars/all.yaml"
+ roles:
+ - common
+ - get_ocp
+
+- import_playbook: 6_create_nodes.yaml
+- import_playbook: 7_ocp_verification.yaml
diff --git a/playbooks/setup_file_server.yaml b/playbooks/setup_file_server.yaml
new file mode 100644
index 00000000..2e9b42f4
--- /dev/null
+++ b/playbooks/setup_file_server.yaml
@@ -0,0 +1,157 @@
+# This playbook helps you setup the file server.
+# Run it after 0_setup.yaml and before 4_create_bastion.yaml
+# If you are using the KVM host as the file_server, run this playbook after 3_setup_kvm_host.yaml
+---
+- hosts: file_server
+ gather_facts: true
+ become: true
+ vars_prompt:
+ - name: iso_link
+ prompt: Paste in an active download link (expires after a few hours) for RHEL for IBM zSystems Binary DVD (iso) from Red Hat's Customer Portal ( https://access.redhat.com/downloads/content ) website
+ private: false
+ # This interactive prompt can be avoided by defining it with extra-vars when running ansible-playbook on the command-line, i.e:
+ # ansible-playbook playbooks/setup_file_server.yaml --extra-vars "iso_link=https://access.cdn.redhat.com/content/[...]"
+ vars:
+ protocol: "{{ env.file_server.protocol }}"
+ packages: "{{ ( 'httpd,firewalld,wget' if protocol == 'http' else 'vsftpd,firewalld,wget' if protocol == 'ftp' ).split(',') | list }}"
+ service_name: "{{ 'httpd' if protocol == 'http' else 'vsftpd' if protocol == 'ftp' }}"
+ iso_filename: "{{ iso_link.split('/')[-1].split('?')[0] }}"
+ iso_checksum: "{{ iso_link.split('/')[-2] }}"
+ tasks:
+ - name: Get user's home directory.
+ tags: always
+ become: false
+ command: 'echo ~'
+ register: echo_home
+ changed_when: false
+
+ - name: Set home_dir and mount_dir variables based on previous task for future use.
+ tags: always
+ set_fact:
+ home_dir: "{{ echo_home.stdout }}"
+ mount_dir: "{{ home_dir + '/' + env.file_server.iso_mount_dir if protocol == 'ftp' else '/var/www/html/' + env.file_server.iso_mount_dir if protocol == 'http' }}"
+
+ - block:
+ - name: Install packages.
+ tags: pkgs
+ include_role:
+ name: install_packages
+ rescue:
+ - name: Try to register system with Red Hat if unable to install packages.
+ tags: pkgs
+ include_role:
+ name: attach_subscription
+ - name: Try to install packages again, after registering.
+ tags: pkgs
+ include_role:
+ name: install_packages
+
+ - name: Start download of RHEL ISO from provided Customer Portal link to user's home. This task will continue to run even if the playbook run is terminated, and will pick back up where it left off if started again.
+ tags: iso
+ become: false
+ command: "wget -c '{{ iso_link }}' -O {{ home_dir }}/{{ iso_filename }}"
+ async: 6000
+ poll: -1
+ register: pull_iso
+
+ - name: Waiting for ISO download to complete - checking every 45 seconds. This may take 10 minutes or more, depending on server's bandwidth.
+ tags: iso
+ become: false
+ async_status:
+ jid: "{{ pull_iso.ansible_job_id }}"
+ register: iso_pull_check
+ until: iso_pull_check.finished
+ retries: 100
+ delay: 45
+
+ - name: Cleanup async job cache for ISO download.
+ tags: iso
+ become: false
+ async_status:
+ jid: "{{ pull_iso.ansible_job_id }}"
+ mode: cleanup
+
+     - name: Get SHA-256 sum of ISO.
+       tags: iso
+       ansible.builtin.stat:
+         path: "{{ home_dir }}/{{ iso_filename }}"
+         checksum_algorithm: sha256
+         get_checksum: true
+       register: iso_stat
+
+ - name: Display checksums that will be verified in next step.
+ tags: iso
+ debug:
+ msg:
+ - "Checksum of download: {{ iso_stat.stat.checksum }}"
+ - "Checksum of Red Hat : {{ iso_checksum }}"
+
+ - name: Throw error if checksum verification fails, otherwise skip.
+ tags: iso
+ fail:
+ msg: "ERROR! Downloaded ISO file's checksum does not match the checksum from the Red Hat website."
+ when: iso_stat.stat.checksum != iso_checksum
+
+ - name: Create directory for storing configuration files, when using FTP.
+ tags: dir, cfgs
+ become: false
+ file:
+ path: '{{ home_dir }}/{{ env.file_server.cfgs_dir }}'
+ state: directory
+ mode: '0644'
+ when: protocol == 'ftp'
+
+ - name: Create directory for storing configuration files, when using HTTP.
+ tags: dir, cfgs
+ file:
+ path: '/var/www/html/{{ env.file_server.cfgs_dir }}'
+ state: directory
+ mode: '0644'
+ when: protocol == 'http'
+
+ - name: Unmount ISO, if one is already mounted, for idempotency.
+ tags: dir, mount
+ ansible.posix.mount:
+ state: unmounted
+ path: "{{ mount_dir }}"
+
+ - name: Create mounting directory for ISO when using FTP.
+ tags: dir, mount
+ become: false
+ file:
+ path: '{{ mount_dir }}'
+ state: directory
+ mode: '0644'
+ when: protocol == 'ftp'
+
+ - name: Create mounting directory for ISO when using HTTP.
+ tags: dir, mount
+ file:
+ path: '{{ mount_dir }}'
+ state: directory
+ mode: '0644'
+ when: protocol == 'http'
+
+ - name: Mount ISO.
+ tags: dir, mount
+ ansible.posix.mount:
+ state: mounted
+ path: "{{ mount_dir }}"
+ src: "{{ home_dir }}/{{ iso_filename }}"
+ fstype: iso9660
+ opts: ro,noauto
+
+ - name: Start and enable http or ftp service.
+ tags: service
+ service:
+ name: "{{ service_name }}"
+ state: started
+ enabled: true
+
+     - name: Permit http or ftp traffic through firewall.
+       tags: service, firewall
+       ansible.posix.firewalld:
+         service: "{{ protocol }}"
+         permanent: true
+         state: enabled
+         immediate: true
\ No newline at end of file
diff --git a/playbooks/setup_for_hypershift.yaml b/playbooks/setup_for_hypershift.yaml
new file mode 100644
index 00000000..2e68fd99
--- /dev/null
+++ b/playbooks/setup_for_hypershift.yaml
@@ -0,0 +1,8 @@
+---
+
+- name: Setup on localhost
+ hosts: localhost
+ vars_files:
+ - "{{playbook_dir}}/secrets.yaml"
+ roles:
+ - create_inventory_setup_hypershift
diff --git a/playbooks/site.yaml b/playbooks/site.yaml
new file mode 100644
index 00000000..cda7fd23
--- /dev/null
+++ b/playbooks/site.yaml
@@ -0,0 +1,11 @@
+# Master playbook. If you want to do everything all in one, use this.
+---
+
+- import_playbook: 0_setup.yaml
+- import_playbook: 1_create_lpar.yaml
+- import_playbook: 2_create_kvm_host.yaml
+- import_playbook: 3_setup_kvm_host.yaml
+- import_playbook: 4_create_bastion.yaml
+- import_playbook: 5_setup_bastion.yaml
+- import_playbook: 6_create_nodes.yaml
+- import_playbook: 7_ocp_verification.yaml
\ No newline at end of file
diff --git a/playbooks/test.yaml b/playbooks/test.yaml
new file mode 100644
index 00000000..cf466ac3
--- /dev/null
+++ b/playbooks/test.yaml
@@ -0,0 +1,10 @@
+---
+ - hosts: localhost
+   gather_facts: false
+   vars:
+     test_var: test
+   tasks:
+     - name: Test variables or anything here
+       ansible.builtin.debug:
+         msg: "{{ test_var }}"
+
diff --git a/roles/add_hc_workers_to_haproxy_hypershift/tasks/main.yaml b/roles/add_hc_workers_to_haproxy_hypershift/tasks/main.yaml
new file mode 100644
index 00000000..152c26b9
--- /dev/null
+++ b/roles/add_hc_workers_to_haproxy_hypershift/tasks/main.yaml
@@ -0,0 +1,26 @@
+---
+
+- name: Get the IPs of Hosted Cluster Workers
+ shell: oc get no -o wide --kubeconfig=/root/ansible_workdir/{{ hypershift.hcp.hosted_cluster_name }}-kubeconfig --no-headers|grep -i worker| awk '{print $6}'
+ register: hosted_workers
+
+- name: Configuring HAproxy for Hosted Cluster
+ blockinfile:
+ path: /etc/haproxy/haproxy.cfg
+ block: |
+ listen {{ hypershift.hcp.hosted_cluster_name }}-console
+ mode tcp
+ bind {{ hypershift.bastion_hypershift }}:443
+ bind {{ hypershift.bastion_hypershift }}:80
+
+- name: Add Hosted Cluster Worker IPs to Haproxy
+ lineinfile:
+ path: /etc/haproxy/haproxy.cfg
+ line: " server {{ hypershift.hcp.hosted_cluster_name }}-worker-{{item}} {{ hosted_workers.stdout_lines[item]}}"
+ loop: "{{ range(hypershift.agents_parms.agents_count|int) | list }}"
+
+- name: restart haproxy
+ service:
+ name: haproxy.service
+ state: restarted
+ enabled: true
diff --git a/roles/approve_certs/defaults/main.yml b/roles/approve_certs/defaults/main.yml
new file mode 100644
index 00000000..6d4458a4
--- /dev/null
+++ b/roles/approve_certs/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+approve_certs_cleanup: false
diff --git a/roles/approve_certs/tasks/main.yaml b/roles/approve_certs/tasks/main.yaml
new file mode 100644
index 00000000..58cf28bd
--- /dev/null
+++ b/roles/approve_certs/tasks/main.yaml
@@ -0,0 +1,25 @@
+---
+
+- name: Cancel async 'approve_certs_task', if exists
+ tags: approve_certs
+ ansible.builtin.async_status:
+ jid: "{{ approve_certs_task.ansible_job_id }}"
+ mode: cleanup
+ failed_when: false
+ when: approve_certs_task is defined and approve_certs_cleanup
+
+- name: Approve all pending CSRs in the next 30 min (async task)
+ tags: approve_certs
+ ansible.builtin.shell: |
+ set -o pipefail
+ for i in {1..120} ; do
+ # Approve all pending requests
+ LIST=$(oc get csr 2> /dev/null | grep -i pending | awk '{print $1}')
+ [ ! -z "${LIST}" ] && echo "${LIST}" | xargs oc adm certificate approve || true
+ sleep 15
+ done
+ # Run for 30 min
+ async: 1800
+ poll: 0
+ register: approve_certs_task
+ when: not approve_certs_cleanup
diff --git a/roles/attach_subscription/tasks/main.yaml b/roles/attach_subscription/tasks/main.yaml
new file mode 100644
index 00000000..3f016e0a
--- /dev/null
+++ b/roles/attach_subscription/tasks/main.yaml
@@ -0,0 +1,13 @@
+---
+
+- name: Register server with Red Hat
+ tags: attach_subscription
+ community.general.redhat_subscription:
+ state: present
+ username: "{{ env.redhat.username }}"
+ password: "{{ env.redhat.password }}"
+ auto_attach: true
+ register: registration
+ retries: 2
+ delay: 30
+ until: registration is not failed
diff --git a/roles/bastion_server/.DS_Store b/roles/bastion_server/.DS_Store
deleted file mode 100644
index 1fbd6892..00000000
Binary files a/roles/bastion_server/.DS_Store and /dev/null differ
diff --git a/roles/bastion_server/main.yaml b/roles/bastion_server/main.yaml
deleted file mode 100644
index 4c031b4a..00000000
--- a/roles/bastion_server/main.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
----
-
-- hosts: bastion_server
- become: true
- tasks:
-
- - name: update repository index
- yum:
- update_cache: yes
-
- - name: Download RHEL ISO image to RHEL KVM
- script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/dl_rhel_iso.sh
-
- - name: start install process
- script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/start_rhel_install.sh
-
-#there has to be a way to do this through Ansible. Step 3 page 9
- - name: complete bastion install process
-
-#leaving this until I meet with Filipe
- - name: download software
-
-#leaving this until I meet with Filipe
- - name: DNS requirements and configuration
-
-#not sure what this instruction step is trying to say. Page 13
- - name: Load Balancer
-
-# Need to edit this script to automate changing port to 8080 and ensure latest versions of OpenShift mirrors
- - name: Create and configure the HTTP server
- script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/create_http.sh
-
- - name: Get installer and oc Client Tools
- script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/get_ocp_installer.sh
-
-##create install-config.yaml file
- - name: create install-config.yaml
- file:
- path: "~/files/install-config.yaml"
- state: touch
-
-##Needs variables from host_vars for baseDomain, cluster_name, pullsecret, and ssh-public-key.
-##I think it also needs cidr (pod's IP range) and service network IP range.
-
- - name: Fill contents of install-config.yaml file
- copy:
- dest: "~/files/macvtap.xml"
- content: |
- apiVersion: v1
- baseDomain:
- compute:
- - architecture: s390x
- hyperthreading: Enabled
- name: worker
- replicas: 0
- controlPlane:
- architecture: s390x
- hyperthreading: Enabled
- name: master
- replicas: 3
- metadata:
- name:
- networking:
- clusterNetwork:
- - cidr: 10.128.0.0/14
- hostPrefix: 23
- networkType: OpenShiftSDN
- serviceNetwork:
- - 172.30.0.0/16
- platform:
- none: {}
- fips: false
- pullSecret: ''
- sshKey: ''
-
-##need to use host_vars for
- - name: Generate the ignition files 1
- shell: ./openshift-install create manifests --dir=
-
-##also needs variable
- - name: Generate the ignition files 2
- shell: ./openshift-install create ignition-configs --dir=
-
-##also needs variable
- - name: Generate the ignition files 3
- shell: cp /*.ign /var/www/html/ignition
-
- - name: Generate the ignition files 4
- shell: chmod 775 /var/www/html/ignition/*.ign
-
- - name: Prepare the KVM OCP guests
- script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/prep_kvm_guests.sh
diff --git a/roles/boot_agents_hypershift/tasks/main.yaml b/roles/boot_agents_hypershift/tasks/main.yaml
new file mode 100644
index 00000000..4b9e9b99
--- /dev/null
+++ b/roles/boot_agents_hypershift/tasks/main.yaml
@@ -0,0 +1,30 @@
+---
+- name: Create qemu image for agents
+ command: "qemu-img create -f qcow2 /home/libvirt/images/{{ hypershift.hcp.hosted_cluster_name }}-agent{{ item }}.qcow2 {{ hypershift.agents_parms.disk_size }}"
+ loop: "{{ range(hypershift.agents_parms.agents_count|int) | list }}"
+
+- name: Boot Agents
+ shell: |
+ {% if hypershift.agents_parms.static_ip_parms.static_ip == true %}
+ mac_address=$(oc get NmStateConfig static-ip-nmstate-config-{{ hypershift.hcp.hosted_cluster_name }}-{{ item }} -n {{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }} -o json | jq -r '.spec.interfaces[] | .macAddress')
+ {% else %}
+ mac_address="{{ hypershift.agents_parms.agent_mac_addr[item] }}"
+ {% endif %}
+
+ virt-install \
+ --name "{{ hypershift.hcp.hosted_cluster_name }}-agent-{{ item }}" \
+ --autostart \
+ --ram="{{ hypershift.agents_parms.ram }}" \
+ --cpu host \
+ --vcpus="{{ hypershift.agents_parms.vcpus }}" \
+ --location "/var/lib/libvirt/images/pxeboot/,kernel=kernel.img,initrd=initrd.img" \
+ --disk /home/libvirt/images/{{ hypershift.hcp.hosted_cluster_name }}-agent{{ item }}.qcow2 \
+ --network network:{{ env.bridge_name }},mac=$mac_address \
+ --graphics none \
+ --noautoconsole \
+ --wait=-1 \
+ --extra-args "rd.neednet=1 nameserver={{ hypershift.agents_parms.nameserver }} coreos.live.rootfs_url=http://{{ hypershift.bastion_hypershift }}:8080/rootfs.img random.trust_cpu=on rd.luks.options=discard ignition.firstboot ignition.platform.id=metal console=tty1 console=ttyS1,115200n8 coreos.inst.persistent-kargs=console=tty1 console=ttyS1,115200n8"
+ async: 3600
+ poll: 0
+ loop: "{{ range(hypershift.agents_parms.agents_count|int) | list }}"
+
diff --git a/roles/bootstrap_server/.DS_Store b/roles/bootstrap_server/.DS_Store
deleted file mode 100644
index b50ed442..00000000
Binary files a/roles/bootstrap_server/.DS_Store and /dev/null differ
diff --git a/roles/bootstrap_server/tasks/main.yaml b/roles/bootstrap_server/tasks/main.yaml
deleted file mode 100644
index d0e92ea8..00000000
--- a/roles/bootstrap_server/tasks/main.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-
-- hosts: bootstrap_server
- become: true
- tasks:
-
-##need to implement wait for completion logic before starting the next one
- - name: create bootstrap
- script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/create_bootstrap.sh
-
-##needs to wait for previous to finish before starting
- - name: verify installation
- script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/verify_bootstrap.sh
diff --git a/roles/check_dns/tasks/main.yaml b/roles/check_dns/tasks/main.yaml
new file mode 100644
index 00000000..27a5280f
--- /dev/null
+++ b/roles/check_dns/tasks/main.yaml
@@ -0,0 +1,75 @@
+---
+
+- name: Check internal cluster DNS resolution for the bastion
+ tags: check_dns, dns
+ shell: "dig +short {{ env.bastion.networking.hostname }}.{{ env.bastion.networking.base_domain }} | tail -n1"
+ register: bastion_lookup
+ failed_when: env.bastion.networking.ip != bastion_lookup.stdout
+
+- name: Check internal cluster DNS resolution for external API and apps services
+ tags: check_dns, dns
+ shell: "dig +short {{ item }} | tail -n1"
+ loop:
+ - "api.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}"
+ - "apps.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}"
+ - "test.apps.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}"
+ register: services_lookup
+ failed_when: ((env.bastion.options.loadbalancer.on_bastion == True) and (env.bastion.networking.ip != services_lookup.stdout)) or ((env.bastion.options.loadbalancer.on_bastion == False) and (env.bastion.options.loadbalancer.public_ip != services_lookup.stdout) )
+
+- name: Check internal cluster DNS resolution for internal API services
+ tags: check_dns, dns
+ shell: "dig +short api-int.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }} | tail -n1"
+ register: api_int_lookup
+ failed_when: ((env.bastion.options.loadbalancer.on_bastion == True) and (env.bastion.networking.ip != api_int_lookup.stdout)) or ((env.bastion.options.loadbalancer.on_bastion == False) and (env.bastion.options.loadbalancer.private_ip != api_int_lookup.stdout) )
+
+- name: Check internal cluster DNS resolution for bootstrap
+ tags: check_dns, dns
+ shell: "dig +short {{ env.cluster.nodes.bootstrap.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }} | tail -n1"
+ register: bootstrap_lookup
+ failed_when: env.cluster.nodes.bootstrap.ip != bootstrap_lookup.stdout
+
+- name: Print results from bootstrap lookup
+ tags: check_dns, dns
+ debug:
+ var: bootstrap_lookup.stdout
+
+- name: Check control nodes DNS resolution
+ tags: check_dns, dns
+ shell: "dig +short {{ env.cluster.nodes.control.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }} | tail -n1"
+ register: control_lookup
+ failed_when: env.cluster.nodes.control.ip[i] != control_lookup.stdout
+ with_sequence: start=0 end={{(env.cluster.nodes.control.hostname | length) - 1}} stride=1
+ loop_control:
+ extended: true
+ index_var: i
+
+- name: Check compute nodes DNS resolution
+ tags: check_dns, dns
+ shell: "dig +short {{ env.cluster.nodes.compute.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }} | tail -n1"
+ register: compute_lookup
+ failed_when: env.cluster.nodes.compute.ip[i] != compute_lookup.stdout
+ with_sequence: start=0 end={{(env.cluster.nodes.compute.hostname | length) - 1}} stride=1
+ loop_control:
+ extended: true
+ index_var: i
+
+- name: Check infrastructure nodes DNS resolution
+ tags: check_dns, dns
+ shell: "dig +short {{ env.cluster.nodes.infra.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }} | tail -n1"
+ register: infra_lookup
+ failed_when: env.cluster.nodes.infra.ip[i] != infra_lookup.stdout
+ with_sequence: start=0 end={{(env.cluster.nodes.infra.hostname | length) - 1}} stride=1
+ loop_control:
+ extended: true
+ index_var: i
+ when: env.cluster.nodes.infra.hostname is defined
+
+- name: Check external DNS resolution from forwarder
+ tags: check_dns, dns
+ register: external_dns_check
+ failed_when: '"server can" in external_dns_check.stdout'
+ command: "nslookup {{ item }}"
+ loop:
+ - www.google.com
+ - www.ibm.com
+ - www.redhat.com
diff --git a/roles/check_nodes/tasks/main.yaml b/roles/check_nodes/tasks/main.yaml
new file mode 100644
index 00000000..b0891746
--- /dev/null
+++ b/roles/check_nodes/tasks/main.yaml
@@ -0,0 +1,16 @@
+---
+- name: Get and print nodes status
+ ansible.builtin.include_tasks: "{{ role_path }}/../common/tasks/print_ocp_node_status.yaml"
+
+- name: Make sure control and compute nodes are 'Ready' before continuing (retry every 20s)
+ tags: check_nodes
+ ansible.builtin.shell: |
+ set -o pipefail
+ oc get nodes | grep "^{{ node | lower }}" | awk '{print $2}'
+ loop: "{{ env.cluster.nodes.control.hostname + env.cluster.nodes.compute.hostname }}"
+ loop_control:
+ loop_var: node
+ register: cmd_output
+ until: ("Ready" == cmd_output.stdout)
+ retries: 90
+ delay: 20
diff --git a/roles/common/defaults/main.yaml b/roles/common/defaults/main.yaml
new file mode 100644
index 00000000..de7f681d
--- /dev/null
+++ b/roles/common/defaults/main.yaml
@@ -0,0 +1,2 @@
+---
+_vm_console: ""
diff --git a/roles/common/tasks/main.yaml b/roles/common/tasks/main.yaml
new file mode 100644
index 00000000..dcab7e17
--- /dev/null
+++ b/roles/common/tasks/main.yaml
@@ -0,0 +1,7 @@
+---
+- name: Gather facts
+ when: ansible_architecture is not defined
+ ansible.builtin.gather_facts:
+
+- name: Load variables based on architecture '{{ ansible_architecture }}'
+ ansible.builtin.include_vars: "../vars/{{ ansible_architecture }}/vars.yaml"
diff --git a/roles/common/tasks/print_ocp_node_status.yaml b/roles/common/tasks/print_ocp_node_status.yaml
new file mode 100644
index 00000000..d5e3a280
--- /dev/null
+++ b/roles/common/tasks/print_ocp_node_status.yaml
@@ -0,0 +1,19 @@
+---
+- name: Get and print OCP nodes status
+ block:
+ - name: Get OCP nodes status
+ # Print oc node status in such format:
+ # NAME STATUS ROLES AGE VERSION KERNEL-VERSION INTERNAL-IP
+ # master-1 Ready control-plane,master 35h v1.25.4+77bec7a 4.18.0-372.40.1.el8_6.s390x 172.23.232.131
+ ansible.builtin.shell: |
+ set -o pipefail
+ oc get nodes -o wide | awk -F ' +' '{ printf "%-24s %-26s %-22s %-7s %-17s %-29s %-15s\n", $1, $2, $3, $4, $5, $9, $6 }'
+ register: oc_get_nodes
+ until: oc_get_nodes.rc == 0
+ retries: 3
+ delay: 5
+ changed_when: false
+
+ - name: Print OCP nodes status
+ ansible.builtin.debug:
+ var: oc_get_nodes.stdout_lines
diff --git a/roles/common/vars/aarch64/vars.yaml b/roles/common/vars/aarch64/vars.yaml
new file mode 100644
index 00000000..c2f81e1d
--- /dev/null
+++ b/roles/common/vars/aarch64/vars.yaml
@@ -0,0 +1,2 @@
+---
+_vm_console: "console=ttyAMA0"
diff --git a/roles/common/vars/s390x/vars.yaml b/roles/common/vars/s390x/vars.yaml
new file mode 100644
index 00000000..f7695545
--- /dev/null
+++ b/roles/common/vars/s390x/vars.yaml
@@ -0,0 +1,2 @@
+---
+_vm_console: "console=ttysclp0"
diff --git a/roles/common/vars/x86_64/vars.yaml b/roles/common/vars/x86_64/vars.yaml
new file mode 100644
index 00000000..5ac858c9
--- /dev/null
+++ b/roles/common/vars/x86_64/vars.yaml
@@ -0,0 +1,2 @@
+---
+_vm_console: "console=ttyS0,115200n8"
diff --git a/roles/configure_storage/tasks/main.yaml b/roles/configure_storage/tasks/main.yaml
new file mode 100644
index 00000000..5bf8aab9
--- /dev/null
+++ b/roles/configure_storage/tasks/main.yaml
@@ -0,0 +1,90 @@
+---
+
+#- name: Create a new partition.
+# tags: configure_storage, storage_group_2, part
+# community.general.parted:
+# device: /dev/mapper/mpathb
+# number: 1
+# label: gpt
+# state: present
+# part_start: 2048MB
+# part_end: 100%
+# fs_type: xfs
+# register: partition_status
+# when: lpar.storage_group_2.name is defined and lpar.storage_group_2.auto_config == True
+#
+#- name: Print partition status.
+# tags: configure_storage, storage_group_2, part
+# debug:
+# var: partition_status
+# when: lpar.storage_group_2.name is defined and lpar.storage_group_2.auto_config == True
+#
+#- name: Reload partition table
+# tags: configure_storage, storage_group_2, part
+# command: partprobe
+# when: lpar.storage_group_2.name is defined and lpar.storage_group_2.auto_config == True
+#
+#- name: Add storage_group_2 disk to existing volume group.
+# tags: configure_storage, storage_group_2, pvcreate
+# command: pvcreate /dev/mapper/mpathb1
+# when: lpar.storage_group_2.name is defined and lpar.storage_group_2.auto_config == True
+#
+#- name: Extend logical volume group with new PV
+# tags: configure_storage, storage_group_2, vgextend
+# command: vgextend rhel_aoznode1 /dev/mapper/mpathb1 and lpar.storage_group_2.auto_config == True
+# when: lpar.storage_group_2.name is defined and lpar.storage_group_2.auto_config == True
+#
+#- name: Extend the root logical volume to consume all remaining space in the volume group.
+# tags: configure_storage, lvextend
+# community.general.lvol:
+# vg: "rhel_{{ networking.hostname | lower }}"
+# lv: root
+# size: +100%FREE
+# resizefs: true
+
+- name: Create directory for this cluster's storage pool.
+ tags: configure_storage, pool
+ ansible.builtin.file:
+ path: "{{ storage.pool_path }}/{{ env.cluster.networking.metadata_name }}"
+ state: directory
+ owner: "{{ ansible_user }}"
+ group: qemu
+ seuser: system_u
+ setype: virt_image_t
+ mode: '0770'
+ recurse: true
+
+- name: Ensure qemu has execute permissions for all parent directories of the directory that will be used as a storage pool for this cluster.
+ tags: configure_storage, pool
+ file:
+ path: "{{ storage.pool_path.split('/')[:i] | join('/') }}"
+ state: directory
+ group: qemu
+ mode: g+x
+ recurse: false
+ loop: "{{ range( 0, storage.pool_path.split('/') | length + 1 ) }}"
+ loop_control:
+ extended: true
+ index_var: i
+ extended_allitems: false
+ label: "{{ storage.pool_path.split('/')[:i] | join('/') }}"
+ when: "storage.pool_path.split('/')[:i] | join('/') != ''"
+
+- name: Define a new storage pool
+ tags: configure_storage, pool
+ community.libvirt.virt_pool:
+ command: define
+ name: "{{ env.cluster.networking.metadata_name }}-vdisk"
+ xml: '{{ lookup ( "template", "vdisk.xml.j2" ) }}'
+
+- name: Set storage pool to active.
+ tags: configure_storage, pool
+ community.libvirt.virt_pool:
+ name: "{{ env.cluster.networking.metadata_name }}-vdisk"
+ state: active
+
+- name: Set storage pool to autostart.
+ tags: configure_storage, pool
+ community.libvirt.virt_pool:
+ name: "{{ env.cluster.networking.metadata_name }}-vdisk"
+ autostart: true
diff --git a/roles/configure_storage/templates/vdisk.xml.j2 b/roles/configure_storage/templates/vdisk.xml.j2
new file mode 100644
index 00000000..334f5181
--- /dev/null
+++ b/roles/configure_storage/templates/vdisk.xml.j2
@@ -0,0 +1,11 @@
+
+ {{ env.cluster.networking.metadata_name }}-vdisk
+ 0
+ 0
+ 0
+
+
+
+ {{ storage.pool_path }}/{{ env.cluster.networking.metadata_name }}
+
+
diff --git a/roles/control_nodes/.DS_Store b/roles/control_nodes/.DS_Store
deleted file mode 100644
index 78fb1903..00000000
Binary files a/roles/control_nodes/.DS_Store and /dev/null differ
diff --git a/roles/create_agentserviceconfig_hypershift/tasks/main.yaml b/roles/create_agentserviceconfig_hypershift/tasks/main.yaml
new file mode 100644
index 00000000..edca4968
--- /dev/null
+++ b/roles/create_agentserviceconfig_hypershift/tasks/main.yaml
@@ -0,0 +1,34 @@
+---
+- name: Get OCP Release Version
+ shell: curl -s {{ hypershift.asc.url_for_ocp_release_file }} | awk '/machine-os / { print $2 }'
+ register: ocp_release_version
+
+- name: Create Config map mirror-config (for updating AgentServiceConfig with the brew mirror information)
+ template:
+ src: mirror-config.yml.j2
+ dest: /root/ansible_workdir/mirror-config.yaml
+
+- name: Deploy Config map - mirror config
+ shell: oc apply -f /root/ansible_workdir/mirror-config.yaml
+
+- name: Create agentserviceconfig.yaml
+ template:
+ src: agent_service_config.yaml.j2
+ dest: /root/ansible_workdir/agentserviceconfig.yaml
+
+- name: Deploy AgentServiceConfig
+ command: oc apply -f /root/ansible_workdir/agentserviceconfig.yaml
+
+- name: Wait for Agent Service Deployment to be Succeeded
+ shell: oc get AgentServiceConfig agent -o json | jq -r '.status|.conditions[]|.status' | grep False | wc -l
+ register: asc
+ until: asc.stdout == '0'
+ retries: 60
+ delay: 20
+
+- name: Wait for MCE to be available
+ shell: oc get mce --no-headers | awk '{print $2}'
+ register: mce_status
+ until: mce_status.stdout == "Available"
+ retries: 40
+ delay: 10
diff --git a/roles/create_agentserviceconfig_hypershift/templates/agent_service_config.yaml.j2 b/roles/create_agentserviceconfig_hypershift/templates/agent_service_config.yaml.j2
new file mode 100644
index 00000000..a8fe0ec6
--- /dev/null
+++ b/roles/create_agentserviceconfig_hypershift/templates/agent_service_config.yaml.j2
@@ -0,0 +1,25 @@
+apiVersion: agent-install.openshift.io/v1beta1
+kind: AgentServiceConfig
+metadata:
+ name: agent
+spec:
+ mirrorRegistryRef:
+ name: mirror-config
+ databaseStorage:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "{{ hypershift.asc.db_volume_size }}"
+ filesystemStorage:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "{{ hypershift.asc.fs_volume_size }}"
+ osImages:
+ - openshiftVersion: "{{ hypershift.asc.ocp_version }}"
+ version: "{{ ocp_release_version.stdout_lines[0] }}"
+ url: "{{ hypershift.asc.iso_url }}"
+ rootFSUrl: "{{ hypershift.asc.root_fs_url }}"
+ cpuArchitecture: "{{ hypershift.hcp.arch }}"
diff --git a/roles/create_agentserviceconfig_hypershift/templates/mirror-config.yml.j2 b/roles/create_agentserviceconfig_hypershift/templates/mirror-config.yml.j2
new file mode 100644
index 00000000..2e64ac6f
--- /dev/null
+++ b/roles/create_agentserviceconfig_hypershift/templates/mirror-config.yml.j2
@@ -0,0 +1,32 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: mirror-config
+ namespace: "{{ hypershift.asc.mce_namespace }}" # please verify that this namespace is where MCE is installed.
+ labels:
+ app: assisted-service
+data:
+ registries.conf: |
+ unqualified-search-registries = ["registry.access.redhat.com", "docker.io"]
+
+ [[registry]]
+ location = "registry.stage.redhat.io"
+ insecure = false
+ blocked = false
+ mirror-by-digest-only = true
+ prefix = ""
+
+ [[registry.mirror]]
+ location = "brew.registry.redhat.io"
+ insecure = false
+
+ [[registry]]
+ location = "registry.redhat.io/multicluster-engine"
+ insecure = false
+ blocked = false
+ mirror-by-digest-only = true
+ prefix = ""
+
+ [[registry.mirror]]
+ location = "brew.registry.redhat.io/multicluster-engine"
+ insecure = false
diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml
new file mode 100644
index 00000000..6a9c263b
--- /dev/null
+++ b/roles/create_bastion/tasks/main.yaml
@@ -0,0 +1,86 @@
+---
+- name: Create a directory on the KVM host server for bastion configuration files
+ become: false
+ tags: create_bastion
+ ansible.builtin.file:
+ path: "{{ env.file_server.cfgs_dir }}/{{ env.bastion.networking.hostname }}"
+ mode: "0755"
+ state: directory
+
+- name: Copy template kickstart file to KVM host server
+ tags: create_bastion
+ become: false
+ ansible.builtin.template:
+ src: "bastion-ks.cfg.j2"
+ dest: "{{ env.file_server.cfgs_dir }}/{{ env.bastion.networking.hostname }}/bastion-ks.cfg"
+ mode: "0644"
+ force: true
+
+- name: Create hash from bastion's root password to input in kickstart file
+ tags: create_bastion
+ ansible.builtin.shell: |
+ set -o pipefail
+ echo "{{ env.bastion.access.root_pass }}" | openssl passwd -6 -in -
+ register: root_pass_hash
+
+- name: Add hashed root password to bastion's RHEL kickstart config file
+ tags: create_bastion
+ become: false
+ ansible.builtin.lineinfile:
+ path: "{{ env.file_server.cfgs_dir }}/{{ env.bastion.networking.hostname }}/bastion-ks.cfg"
+ insertafter: "Root password"
+ line: "rootpw --iscrypted {{ root_pass_hash.stdout }}"
+
+- name: Create hash from bastion user password to input in kickstart file
+ tags: create_bastion
+ ansible.builtin.shell: |
+ set -o pipefail
+ echo "{{ env.bastion.access.pass }}" | openssl passwd -6 -in -
+ register: user_pass_hash
+
+- name: Add hashed user password to bastion's RHEL kickstart config file
+ tags: create_bastion
+ become: false
+ ansible.builtin.lineinfile:
+ path: "{{ env.file_server.cfgs_dir }}/{{ env.bastion.networking.hostname }}/bastion-ks.cfg"
+ insertafter: "Users and Groups Definitions"
+ line: "user --groups=wheel --name={{ env.bastion.access.user }} --password={{ user_pass_hash.stdout }} --iscrypted"
+
+- name: Get KVM host home for use in next step
+ tags: create_bastion, virt-install
+ become: false
+ ansible.builtin.command: pwd
+ register: kvm_host_home
+
+- name: Boot and kickstart bastion. To monitor, login to your KVM host and run 'virsh console <bastion-vm-name>'
+ tags: create_bastion, virt-install
+ ansible.builtin.shell: |
+ virsh destroy {{ env.bastion.vm_name }} || true
+ virsh undefine {{ env.bastion.vm_name }} --remove-all-storage || true
+ virt-install \
+ --name {{ env.bastion.vm_name }} \
+ --osinfo detect=on,name={{ ('rhel8.6') if rhcos_os_variant is not defined else (rhcos_os_variant) }} \
+ --autostart \
+ --memory={{ env.bastion.resources.ram }} \
+ --vcpus={{ env.bastion.resources.vcpu }} \
+ --location {{ env.file_server.protocol }}://{{ env.file_server.user + ':' + env.file_server.pass + '@' if env.file_server.protocol == 'ftp' else '' }}{{ env.file_server.ip }}/{{ env.file_server.iso_mount_dir }} \
+ --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.bastion.resources.disk_size }} \
+ --network network={{ env.bridge_name }} \
+ --graphics none \
+ --console pty,target_type=serial \
+ --noautoconsole --wait=-1 \
+ --initrd-inject "/{{ kvm_host_home.stdout }}/{{ env.file_server.cfgs_dir }}/{{ env.bastion.networking.hostname }}/bastion-ks.cfg" \
+ --extra-args "inst.ks=file:/bastion-ks.cfg ip={{ env.bastion.networking.ip }}::{{ env.bastion.networking.gateway }}\
+ :{{ env.bastion.networking.subnetmask }}:{{ env.bastion.networking.hostname }}::none {{ _vm_console }}"
+ timeout: 420
+ register: cmd_output
+
+- name: Debug, print above command output
+ tags: create_bastion, virt-install
+ ansible.builtin.debug:
+ var: cmd_output
+
+- name: Waiting 1 minute for automated bastion installation and configuration to complete
+ tags: create_bastion, virt-install
+ ansible.builtin.pause:
+ minutes: 1
diff --git a/roles/create_bastion/templates/bastion-ks.cfg.j2 b/roles/create_bastion/templates/bastion-ks.cfg.j2
new file mode 100644
index 00000000..64e265fa
--- /dev/null
+++ b/roles/create_bastion/templates/bastion-ks.cfg.j2
@@ -0,0 +1,94 @@
+# Template for bastion kickstart configuration file. Some parts come from the create_bastion role.
+# This kickstart file was tested with RHEL 8.7
+
+%pre --log=/root/pre.log
+%end
+
+# Reboot after installation
+reboot
+
+# Use text mode install
+text --non-interactive
+
+# Run the Setup Agent on first boot
+firstboot --enable
+
+# Use network installation
+url --url={{ env.file_server.protocol }}://{{ env.file_server.user + ':' + env.file_server.pass + '@' if env.file_server.protocol == 'ftp' else '' }}{{ env.file_server.ip }}/{{ env.file_server.iso_mount_dir }}
+
+# Add yum repositories
+repo --install --name="AppStream" --baseurl={{ env.file_server.protocol }}://{{ env.file_server.user + ':' + env.file_server.pass + '@' if env.file_server.protocol == 'ftp' else '' }}{{ env.file_server.ip }}/{{ env.file_server.iso_mount_dir }}/AppStream/
+repo --install --name="BaseOS" --baseurl={{ env.file_server.protocol }}://{{ env.file_server.user + ':' + env.file_server.pass + '@' if env.file_server.protocol == 'ftp' else '' }}{{ env.file_server.ip }}/{{ env.file_server.iso_mount_dir }}/BaseOS/
+
+# Keyboard layouts
+keyboard --vckeymap={{ env.keyboard }} --xlayouts='{{ env.keyboard }}'
+
+# System language
+lang {{ env.language }}
+
+# System timezone
+timezone {{ env.timezone }}
+
+# Mark the End-User License Agreement (EULA) as agreed
+eula --agreed
+
+# Network information
+network --bootproto=static --device={{ env.bastion.networking.interface }} --ip={{ env.bastion.networking.ip }} --gateway={{ env.bastion.networking.gateway }} --netmask={{ env.bastion.networking.subnetmask }} --noipv6 --nameserver={{ env.bastion.networking.nameserver1 }}{{ (',' + env.bastion.networking.nameserver2) if env.bastion.networking.nameserver2 is defined else '' }} --activate
+network --hostname={{ env.bastion.networking.hostname }}.{{ env.cluster.networking.base_domain }}
+
+# Firewall and SELinux
+firewall --enabled --http --ftp --smtp --ssh --port=443,9090
+selinux --enforcing
+
+# Root password (will fill in during create_bastion role)
+
+# Users and Groups Definitions (will fill in during create_bastion role)
+
+# The following is the partition information you requested
+ignoredisk --only-use=vda
+
+# System bootloader configuration
+bootloader --append="crashkernel=auto" --location=mbr --boot-drive=vda
+
+# Partition clearing information
+clearpart --all --initlabel --drives=vda
+
+# Disk partitioning information
+part /boot --fstype="xfs" --asprimary --ondisk=vda --size=1024
+part pv.01 --fstype="lvmpv" --grow --size=1 --ondisk=vda
+volgroup vgsystem --pesize=4096 pv.01
+logvol swap --fstype=swap --name=swap --vgname=vgsystem --size={{ env.bastion.resources.swap }}
+logvol / --fstype=xfs --name=root --vgname=vgsystem --size=1 --grow
+
+# Packages selection
+%packages --multilib --ignoremissing
+@^minimal-environment
+bind-utils
+curl
+jq
+mc
+net-tools
+# TODO: python3.6 is not supported anymore on RHEL8
+python3
+python3-pip
+rsync
+vim
+wget
+network-scripts
+%end
+
+%addon com_redhat_kdump --disable
+%end
+
+%post --log=/root/post.log
+#!/usr/bin/env bash
+#
+# Install and update pip packages
+pip3 install --upgrade pip setuptools wheel
+#
+# Yum repository configuration adjustments
+echo "gpgcheck=0" >> /etc/yum.repos.d/AppStream.repo
+echo "skip_if_unavailable=True" >> /etc/yum.repos.d/AppStream.repo
+echo "gpgcheck=0" >> /etc/yum.repos.d/BaseOS.repo
+echo "skip_if_unavailable=True" >> /etc/yum.repos.d/BaseOS.repo
+%end
diff --git a/roles/create_bastion_hypershift/tasks/main.yaml b/roles/create_bastion_hypershift/tasks/main.yaml
new file mode 100644
index 00000000..4062efe1
--- /dev/null
+++ b/roles/create_bastion_hypershift/tasks/main.yaml
@@ -0,0 +1,68 @@
+---
+- name: Get ssh key of local host
+ ansible.builtin.shell: cat {{ lookup('env', 'HOME') }}/.ssh/{{ env.ansible_key_name }}.pub
+ register: ssh_output
+ delegate_to: localhost
+
+- name: Load ssh_key into a variable
+ set_fact:
+ ssh_key: "{{ ssh_output.stdout_lines[0] }}"
+
+- name: Create Directory for images and bastion.ks
+ file:
+ path: /home/libvirt/images/
+ recurse: true
+ state: directory
+
+- name: Create bastion.ks file
+ template:
+ src: ../create_bastion/templates/bastion-ks.cfg.j2
+ dest: /home/libvirt/bastion.ks
+
+- name: Removing network configurations
+ lineinfile:
+ path: /home/libvirt/bastion.ks
+ state: absent
+ regexp: '^network.*'
+
+- name: Adding root password for bastion to bastion.ks
+ lineinfile:
+ path: /home/libvirt/bastion.ks
+ insertafter: '^lang.*'
+ line: "rootpw {{ bastion_root_pw }}"
+
+- name: Adding ssh key to bastion
+ blockinfile:
+ path: /home/libvirt/bastion.ks
+ insertafter: '^echo.*'
+ block: |
+ mkdir -p /root/.ssh
+ echo "{{ ssh_key }}" > /root/.ssh/authorized_keys
+ chmod 0700 /root/.ssh
+ chmod 0600 /root/.ssh/authorized_keys
+
+- name: Create qemu image for bastion
+ command: qemu-img create -f qcow2 /home/libvirt/images/{{ hypershift.hcp.hosted_cluster_name }}-bastion.qcow2 100G
+
+- name: Create bastion
+ shell: |
+ virt-install \
+ --name {{ hypershift.hcp.hosted_cluster_name }}-bastion \
+ --memory 4096 \
+ --vcpus sockets=1,cores=4,threads=1 \
+ --disk /home/libvirt/images/{{ hypershift.hcp.hosted_cluster_name }}-bastion.qcow2,format=qcow2,bus=virtio,cache=none \
+ --os-variant "rhel{{hypershift.bastion_parms.os_variant}}" \
+ --network network:{{ env.bridge_name }} \
+ --location '{{ env.file_server.protocol }}://{{ env.file_server.user + ':' + env.file_server.pass + '@' if env.file_server.protocol == 'ftp' else '' }}{{ env.file_server.ip }}/{{ env.file_server.iso_mount_dir }}/' \
+ --rng=/dev/urandom --initrd-inject /home/libvirt/bastion.ks \
+ --extra-args="ks=file:/bastion.ks ip={{ hypershift.bastion_hypershift }}::{{hypershift.bastion_parms.gateway}}:{{hypershift.bastion_parms.subnet_mask}}:{{ hypershift.bastion_parms.hostname }}.{{ hypershift.bastion_parms.base_domain }}:{{ hypershift.bastion_parms.interface }}:none console=ttysclp0 nameserver={{hypershift.bastion_parms.nameserver}}" \
+ --noautoconsole \
+ --wait -1
+
+- name: Waiting 1 minute for automated bastion installation and configuration to complete
+ ansible.builtin.pause:
+ minutes: 1
+
+- name: Add route to bastion from kvm_host
+ command: "ip route add {{ hypershift.bastion_hypershift }} via {{ hypershift.gateway }}"
+ ignore_errors: true
diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml
new file mode 100644
index 00000000..40ce8823
--- /dev/null
+++ b/roles/create_bootstrap/tasks/main.yaml
@@ -0,0 +1,34 @@
+---
+
+# Always creates a new bootstrap VM
+- name: Start bootstrap installation
+ tags: create_bootstrap
+ ansible.builtin.shell: |
+ virsh destroy {{ env.cluster.nodes.bootstrap.vm_name }} || true
+ virsh undefine {{ env.cluster.nodes.bootstrap.vm_name }} --remove-all-storage || true
+ virt-install \
+ --name {{ env.cluster.nodes.bootstrap.vm_name }} \
+ --osinfo detect=on,name={{ ('rhel8.6') if rhcos_os_variant is not defined else (rhcos_os_variant) }} \
+ --autostart \
+ --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.bootstrap.disk_size }} \
+ --ram {{ env.cluster.nodes.bootstrap.ram }} \
+ --cpu host \
+ --vcpus {{ env.cluster.nodes.bootstrap.vcpu }} \
+ --network network={{ env.bridge_name }} \
+ --location {{ rhcos_download_url }},kernel={{ rhcos_live_kernel }},initrd={{ rhcos_live_initrd }} \
+ --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda \
+ coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/{{ rhcos_live_rootfs }} \
+ ip={{ env.cluster.nodes.bootstrap.ip }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ env.cluster.nodes.bootstrap.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}::none:1500 \
+ nameserver={{ env.cluster.networking.nameserver1 }} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} \
+ coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/bootstrap.ign {{ _vm_console }}" \
+ --graphics none \
+ --console pty,target_type=serial \
+ --wait=-1 \
+ --noautoconsole
+ timeout: 360
+ register: cmd_output
+
+- name: Debug, print above command output
+ tags: create_bootstrap
+ ansible.builtin.debug:
+ var: cmd_output
diff --git a/roles/create_compute_node/tasks/main.yaml b/roles/create_compute_node/tasks/main.yaml
new file mode 100644
index 00000000..e45ee5bf
--- /dev/null
+++ b/roles/create_compute_node/tasks/main.yaml
@@ -0,0 +1,80 @@
+---
+
+# Paranoia check. This task should be executed only from ONE host
+- name: Check role/task parameters
+ tags: create_compute_node
+ ansible.builtin.fail:
+ msg: "Error: too many hosts defined in ansible play: {{ ansible_play_hosts }}"
+ when: ((ansible_play_hosts | length) > 1)
+
+- name: "If not available, download Red Hat CoreOS rootfs for {{ param_compute_node.host_arch }}"
+ tags: create_compute_node
+ delegate_to: "{{ groups['bastion'][0] }}"
+ # Need sudo or root access to write the file
+ become: true
+ block:
+ - name: Load variables based on target architecture
+ ansible.builtin.include_vars: "{{ role_path }}/../common/vars/{{ param_compute_node.host_arch }}/vars.yaml"
+
+ - name: "If not available, download Red Hat CoreOS rootfs file {{ rhcos_live_rootfs }}"
+ ansible.builtin.get_url:
+ url: "{{ item.baseurl }}{{ item.file }}"
+ dest: "/var/www/html/bin/{{ item.file }}"
+ mode: "0644"
+ loop:
+ - { baseurl: "{{ rhcos_download_url }}", file: "{{ rhcos_live_rootfs }}" }
+
+- name: "Create compute node on KVM host {{ param_compute_node.hostname }}"
+  tags: create_compute_node
+  delegate_to: "{{ param_compute_node.hostname }}"
+  block:
+    - name: Delete compute node VM, if already exists
+      tags: create_compute_node
+      ansible.builtin.shell: |
+        virsh destroy {{ param_compute_node.vm_name }} || true
+        virsh undefine {{ param_compute_node.vm_name }} --remove-all-storage --nvram || true
+
+    - name: Get and print virsh list
+      block:
+        - name: Get virsh list
+          community.libvirt.virt:
+            command: list_vms
+          register: cmd_virsh_list
+        - name: Print virsh list
+          ansible.builtin.debug:
+            var: cmd_virsh_list
+
+    - name: Create compute node VM and print results
+      block:
+        - name: Load variables based on architecture
+          ansible.builtin.include_vars: "{{ role_path }}/../common/vars/{{ param_compute_node.host_arch }}/vars.yaml"
+
+        - name: "Create compute node VM on {{ param_compute_node.host_arch }}"
+          ansible.builtin.shell: |
+            CPU_MODEL="--cpu host"
+            # CPU host model is not supported on arm64 yet, we need to disable it
+            {{ ("CPU_MODEL=''") if param_compute_node.host_arch == 'aarch64' else '' }}
+            virt-install \
+              --name {{ param_compute_node.vm_name }} \
+              --osinfo detect=on,name={{ ('rhel8.6') if rhcos_os_variant is not defined else (rhcos_os_variant) }} \
+              --autostart \
+              --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.compute.disk_size }} \
+              --ram {{ env.cluster.nodes.compute.ram }} \
+              ${CPU_MODEL} \
+              --vcpus {{ env.cluster.nodes.compute.vcpu }} \
+              --network network={{ env.bridge_name }} \
+              --graphics none \
+              --console pty,target_type=serial \
+              --wait -1 \
+              --noautoconsole \
+              --location {{ rhcos_download_url }},kernel={{ rhcos_live_kernel }},initrd={{ rhcos_live_initrd }} \
+              --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda \
+              ip={{ param_compute_node.vm_ip }}::{{ env.bastion.networking.gateway }}:{{ env.bastion.networking.subnetmask }}:{{ param_compute_node.vm_hostname }}::none:1500 \
+              nameserver={{ env.cluster.networking.nameserver1 }}{{ (' nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} \
+              coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/{{ rhcos_live_rootfs }} \
+              coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/worker.ign {{ _vm_console }}"
+          timeout: 360
+          register: cmd_output
+        - name: Debug, print above command output
+          ansible.builtin.debug:
+            var: cmd_output
diff --git a/roles/create_compute_node/vars/main.yaml b/roles/create_compute_node/vars/main.yaml
new file mode 100644
index 00000000..99f849d5
--- /dev/null
+++ b/roles/create_compute_node/vars/main.yaml
@@ -0,0 +1,7 @@
+---
+param_compute_node:
+ # vm_name:
+ # vm_hostname:
+ # vm_ip:
+ # hostname:
+ # host_arch:
diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml
new file mode 100644
index 00000000..9918b5db
--- /dev/null
+++ b/roles/create_compute_nodes/tasks/main.yaml
@@ -0,0 +1,110 @@
+---
+
+- name: Install CoreOS on compute nodes
+  tags: create_compute_nodes
+  ansible.builtin.shell: |
+    virsh destroy {{ env.cluster.nodes.compute.vm_name[i] }} || true
+    virsh undefine {{ env.cluster.nodes.compute.vm_name[i] }} --remove-all-storage || true
+    virt-install \
+      --name {{ env.cluster.nodes.compute.vm_name[i] }} \
+      --osinfo detect=on,name={{ ('rhel8.6') if rhcos_os_variant is not defined else (rhcos_os_variant) }} \
+      --autostart \
+      --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.compute.disk_size }} \
+      --ram {{ env.cluster.nodes.compute.ram }} \
+      --cpu host \
+      --vcpus {{ env.cluster.nodes.compute.vcpu }} \
+      --network network={{ env.bridge_name }} \
+      --location {{ rhcos_download_url }},kernel={{ rhcos_live_kernel }},initrd={{ rhcos_live_initrd }} \
+      --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/{{ rhcos_live_rootfs }} ip={{ env.cluster.nodes.compute.ip[i] }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ env.cluster.nodes.compute.hostname[i] }}::none:1500 nameserver={{ env.cluster.networking.nameserver1 }} {{ ('nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/worker.ign {{ _vm_console }}" \
+      --wait=-1 \
+      --noautoconsole
+  timeout: 360
+  with_sequence: start=0 end={{ (env.cluster.nodes.compute.hostname | length) - 1 }} stride=1
+  loop_control:
+    extended: true
+    index_var: i
+  when: not env.z.high_availability and inventory_hostname == env.z.lpar1.hostname
+
+- name: Install CoreOS on infra nodes
+  tags: create_compute_nodes
+  ansible.builtin.shell: |
+    virt-install \
+      --name {{ env.cluster.nodes.infra.vm_name[i] }} \
+      --osinfo detect=on,name={{ ('rhel8.6') if rhcos_os_variant is not defined else (rhcos_os_variant) }} \
+      --autostart \
+      --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.infra.disk_size }} \
+      --ram {{ env.cluster.nodes.infra.ram }} \
+      --cpu host \
+      --vcpus {{ env.cluster.nodes.infra.vcpu }} \
+      --network network={{ env.bridge_name }} \
+      --location {{ rhcos_download_url }},kernel={{ rhcos_live_kernel }},initrd={{ rhcos_live_initrd }} \
+      --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/{{ rhcos_live_rootfs }} ip={{ env.cluster.nodes.infra.ip[i] }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ env.cluster.nodes.infra.hostname[i] }}::none:1500 nameserver={{ env.cluster.networking.nameserver1 }} {{ ('nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/worker.ign {{ _vm_console }}" \
+      --wait=-1 \
+      --noautoconsole
+  with_sequence: start=0 end={{ (env.cluster.nodes.infra.hostname | length) - 1 }} stride=1
+  loop_control:
+    extended: true
+    index_var: i
+  when: env.cluster.nodes.infra.hostname is defined and not env.z.high_availability and inventory_hostname == env.z.lpar1.hostname
+
+#If cluster is to be highly available, split control and infra nodes into lists corresponding to their future KVM hosts.
+
+- name: Split information from compute nodes into groups. The number of groups being equal to the number of KVM hosts there are.
+  tags: create_compute_nodes
+  ansible.builtin.set_fact:
+    compute_name: "{{ env.cluster.nodes.compute.vm_name[groups['kvm_host'].index(inventory_hostname)::groups['kvm_host'] | length] }}"
+    compute_hostname: "{{ env.cluster.nodes.compute.hostname[groups['kvm_host'].index(inventory_hostname)::groups['kvm_host'] | length] }}"
+    compute_ip: "{{ env.cluster.nodes.compute.ip[groups['kvm_host'].index(inventory_hostname)::groups['kvm_host'] | length] }}"
+  when: env.z.high_availability
+
+- name: Split information for infra nodes into groups. The number of groups being equal to the number of KVM hosts there are.
+  tags: create_compute_nodes
+  ansible.builtin.set_fact:
+    infra_name: "{{ env.cluster.nodes.infra.vm_name[groups['kvm_host'].index(inventory_hostname)::groups['kvm_host'] | length] }}"
+    infra_hostname: "{{ env.cluster.nodes.infra.hostname[groups['kvm_host'].index(inventory_hostname)::groups['kvm_host'] | length] }}"
+    infra_ip: "{{ env.cluster.nodes.infra.ip[groups['kvm_host'].index(inventory_hostname)::groups['kvm_host'] | length] }}"
+  when: env.z.high_availability and env.cluster.nodes.infra.hostname is defined
+
+- name: Create CoreOS compute nodes on KVM hosts, if cluster is to be highly available.
+  tags: create_compute_nodes
+  ansible.builtin.shell: |
+    virt-install \
+      --name {{ compute_name[i] }} \
+      --osinfo detect=on,name={{ ('rhel8.6') if rhcos_os_variant is not defined else (rhcos_os_variant) }} \
+      --autostart \
+      --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.compute.disk_size }} \
+      --ram {{ env.cluster.nodes.compute.ram }} \
+      --cpu host \
+      --vcpus {{ env.cluster.nodes.compute.vcpu }} \
+      --network network={{ env.bridge_name }} \
+      --location {{ rhcos_download_url }},kernel={{ rhcos_live_kernel }},initrd={{ rhcos_live_initrd }} \
+      --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/{{ rhcos_live_rootfs }} ip={{ compute_ip[i] }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ compute_hostname[i] }}::none:1500 nameserver={{ env.cluster.networking.nameserver1 }} {{ ('nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/worker.ign {{ _vm_console }}" \
+      --wait=-1 \
+      --noautoconsole
+  loop: "{{ (compute_name | default([])) | zip(compute_hostname | default([]), compute_ip | default([])) | list }}"
+  loop_control:
+    extended: true
+    index_var: i
+  when: env.z.high_availability and compute_hostname[i] is defined
+
+- name: Create CoreOS infra nodes on KVM hosts, if cluster is to be highly available.
+  tags: create_compute_nodes
+  ansible.builtin.shell: |
+    virt-install \
+      --name {{ infra_name[i] }} \
+      --osinfo detect=on,name={{ ('rhel8.6') if rhcos_os_variant is not defined else (rhcos_os_variant) }} \
+      --autostart \
+      --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.infra.disk_size }} \
+      --ram {{ env.cluster.nodes.infra.ram }} \
+      --cpu host \
+      --vcpus {{ env.cluster.nodes.infra.vcpu }} \
+      --network network={{ env.bridge_name }} \
+      --location {{ rhcos_download_url }},kernel={{ rhcos_live_kernel }},initrd={{ rhcos_live_initrd }} \
+      --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/{{ rhcos_live_rootfs }} ip={{ infra_ip[i] }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ infra_hostname[i] }}::none:1500 nameserver={{ env.cluster.networking.nameserver1 }} {{ ('nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/worker.ign {{ _vm_console }}" \
+      --wait=-1 \
+      --noautoconsole
+  loop: "{{ (infra_name | default([])) | zip(infra_hostname | default([]), infra_ip | default([])) | list }}"
+  loop_control:
+    extended: true
+    index_var: i
+  when: env.z.high_availability and env.cluster.nodes.infra.hostname is defined and infra_hostname[i] is defined
diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml
new file mode 100644
index 00000000..03377d97
--- /dev/null
+++ b/roles/create_control_nodes/tasks/main.yaml
@@ -0,0 +1,83 @@
+---
+
+- name: Create CoreOS control nodes on the the KVM host.
+  tags: create_control_nodes
+  ansible.builtin.shell: |
+    virt-install \
+      --name {{ env.cluster.nodes.control.vm_name[i] }} \
+      --osinfo detect=on,name={{ ('rhel8.6') if rhcos_os_variant is not defined else (rhcos_os_variant) }} \
+      --autostart \
+      --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.control.disk_size }} \
+      --ram {{ env.cluster.nodes.control.ram }} \
+      --cpu host \
+      --vcpus {{ env.cluster.nodes.control.vcpu }} \
+      --network network={{ env.bridge_name }} \
+      --location {{ rhcos_download_url }},kernel={{ rhcos_live_kernel }},initrd={{ rhcos_live_initrd }} \
+      --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/{{ rhcos_live_rootfs }} ip={{ env.cluster.nodes.control.ip[i] }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ env.cluster.nodes.control.hostname[i] }}::none:1500 nameserver={{ env.cluster.networking.nameserver1 }} {{ ('nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/master.ign {{ _vm_console }}" \
+      --graphics none \
+      --console pty,target_type=serial \
+      --wait=-1 \
+      --noautoconsole
+  timeout: 360
+  with_sequence: start=0 end={{ (env.cluster.nodes.control.hostname | length) - 1 }} stride=1
+  loop_control:
+    extended: true
+    index_var: i
+  when: not env.z.high_availability and inventory_hostname == env.z.lpar1.hostname
+
+- name: Create the first CoreOS control node on the first KVM host, if cluster is to be highly available.
+  tags: create_control_nodes
+  ansible.builtin.shell: |
+    virt-install \
+      --name {{ env.cluster.nodes.control.vm_name[0] }} \
+      --osinfo detect=on,name={{ ('rhel8.6') if rhcos_os_variant is not defined else (rhcos_os_variant) }} \
+      --autostart \
+      --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.control.disk_size }} \
+      --ram {{ env.cluster.nodes.control.ram }} \
+      --cpu host \
+      --vcpus {{ env.cluster.nodes.control.vcpu }} \
+      --network network={{ env.bridge_name }} \
+      --location {{ rhcos_download_url }},kernel={{ rhcos_live_kernel }},initrd={{ rhcos_live_initrd }} \
+      --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/{{ rhcos_live_rootfs }} ip={{ env.cluster.nodes.control.ip[0] }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ env.cluster.nodes.control.hostname[0] }}::none:1500 nameserver={{ env.cluster.networking.nameserver1 }} {{ ('nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/master.ign {{ _vm_console }}" \
+      --graphics none \
+      --wait=-1 \
+      --noautoconsole
+  when: env.z.high_availability and inventory_hostname == env.z.lpar1.hostname
+
+- name: Create the second CoreOS control node on the second KVM host, if cluster is to be highly available.
+  tags: create_control_nodes
+  ansible.builtin.shell: |
+    virt-install \
+      --name {{ env.cluster.nodes.control.vm_name[1] }} \
+      --osinfo detect=on,name={{ ('rhel8.6') if rhcos_os_variant is not defined else (rhcos_os_variant) }} \
+      --autostart \
+      --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.control.disk_size }} \
+      --ram {{ env.cluster.nodes.control.ram }} \
+      --cpu host \
+      --vcpus {{ env.cluster.nodes.control.vcpu }} \
+      --network network={{ env.bridge_name }} \
+      --location {{ rhcos_download_url }},kernel={{ rhcos_live_kernel }},initrd={{ rhcos_live_initrd }} \
+      --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/{{ rhcos_live_rootfs }} ip={{ env.cluster.nodes.control.ip[1] }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ env.cluster.nodes.control.hostname[1] }}::none:1500 nameserver={{ env.cluster.networking.nameserver1 }} {{ ('nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/master.ign {{ _vm_console }}" \
+      --graphics none \
+      --wait=-1 \
+      --noautoconsole
+  when: env.z.high_availability and inventory_hostname == env.z.lpar2.hostname
+
+- name: Create the third CoreOS control node on the third KVM host, if cluster is to be highly available.
+  tags: create_control_nodes
+  ansible.builtin.shell: |
+    virt-install \
+      --name {{ env.cluster.nodes.control.vm_name[2] }} \
+      --osinfo detect=on,name={{ ('rhel8.6') if rhcos_os_variant is not defined else (rhcos_os_variant) }} \
+      --autostart \
+      --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.control.disk_size }} \
+      --ram {{ env.cluster.nodes.control.ram }} \
+      --cpu host \
+      --vcpus {{ env.cluster.nodes.control.vcpu }} \
+      --network network={{ env.bridge_name }} \
+      --location {{ rhcos_download_url }},kernel={{ rhcos_live_kernel }},initrd={{ rhcos_live_initrd }} \
+      --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/{{ rhcos_live_rootfs }} ip={{ env.cluster.nodes.control.ip[2] }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ env.cluster.nodes.control.hostname[2] }}::none:1500 nameserver={{ env.cluster.networking.nameserver1 }} {{ ('nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/master.ign {{ _vm_console }}" \
+      --graphics none \
+      --wait=-1 \
+      --noautoconsole
+  when: env.z.high_availability and inventory_hostname == env.z.lpar3.hostname
diff --git a/roles/create_hcp_InfraEnv_hypershift/tasks/main.yaml b/roles/create_hcp_InfraEnv_hypershift/tasks/main.yaml
new file mode 100644
index 00000000..b01b1898
--- /dev/null
+++ b/roles/create_hcp_InfraEnv_hypershift/tasks/main.yaml
@@ -0,0 +1,129 @@
+---
+- name: Getting Hosted Control Plane Namespace
+ set_fact:
+ hosted_control_plane_namespace: "{{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }}"
+
+- name: Check if Hosted Control Plane Namespace exists
+ k8s_info:
+ api_version: v1
+ kind: Namespace
+ name: "{{ hosted_control_plane_namespace }}"
+ register: namespace_check
+ ignore_errors: yes
+
+- name: Create Hosted Control Plane Namespace
+ k8s:
+ api_version: v1
+ kind: Namespace
+ name: "{{ hosted_control_plane_namespace }}"
+ state: present
+ when: namespace_check.resources | length == 0
+
+- name: Get ssh key
+ ansible.builtin.shell: cat ~/.ssh/{{ env.ansible_key_name }}.pub
+ register: ssh_output
+
+- name: Load ssh_key into a variable
+ set_fact:
+ ssh_key: "{{ ssh_output.stdout_lines[0] }}"
+
+- name: Get pod name for hypershift-cli-download
+  kubernetes.core.k8s_info:
+    api_version: v1
+    kind: Pod
+    namespace: "{{ hypershift.asc.mce_namespace }}"
+    label_selectors:
+      - app=hypershift-cli-download
+  register: hypershift_pod_name
+
+- name: Get hypershift.tar.gz file from pod
+ kubernetes.core.k8s_cp:
+ namespace: "{{ hypershift.asc.mce_namespace }}"
+ pod: "{{ hypershift_pod_name.resources[0].metadata.name }}"
+ remote_path: "/opt/app-root/src/linux/s390x/"
+ local_path: "/root/ansible_workdir"
+ state: from_pod
+
+- name: Extract binary from hypershift.tar.gz
+ unarchive:
+ src: /root/ansible_workdir/hypershift.tar.gz
+ dest: /usr/local/bin/
+ remote_src: true
+
+- name: Create a Hosted Cluster
+  ansible.builtin.shell: >
+    hypershift create cluster agent
+    --name={{ hypershift.hcp.hosted_cluster_name }}
+    --pull-secret={{ hypershift.hcp.pull_secret_file }}
+    --agent-namespace={{ hosted_control_plane_namespace }}
+    --namespace={{ hypershift.hcp.clusters_namespace }}
+    --base-domain={{ hypershift.hcp.basedomain }}
+    --api-server-address=api.{{ hypershift.hcp.hosted_cluster_name }}.{{ hypershift.hcp.basedomain }}
+    --ssh-key ~/.ssh/{{ env.ansible_key_name }}.pub
+    {# HCP_RELEASE_IMAGE env var overrides the default release image; pass the flag exactly once #}
+    {% set release_image = lookup('env', 'HCP_RELEASE_IMAGE') %}
+    {% if release_image != '' %}
+    --release-image={{ release_image }}
+    {% else %}
+    --release-image=quay.io/openshift-release-dev/ocp-release:{{ hypershift.hcp.ocp_release }}
+    {% endif %}
+
+- name: Waiting for Hosted Control Plane to be available
+ command: oc wait --timeout=30m --for=condition=Available --namespace={{ hypershift.hcp.clusters_namespace }} hostedcluster/{{ hypershift.hcp.hosted_cluster_name }}
+
+- name: Wait for pods to come up in Hosted Cluster Namespace
+ shell: oc get pods -n {{ hosted_control_plane_namespace }} | wc -l
+ register: pod_count
+ until: pod_count.stdout | int > 30
+ retries: 40
+ delay: 10
+
+- name: Wait for all pods to be in Running State in Hosted Cluster Namespace
+ shell: oc get pods -n {{ hosted_control_plane_namespace }} --no-headers | grep -v 'Running\|Completed\|Terminating' | wc -l
+ register: pod_status
+ until: pod_status.stdout == '0'
+ retries: 30
+ delay: 10
+
+- name: Create InfraEnv.yaml
+ template:
+ src: InfraEnv.yaml.j2
+ dest: /root/ansible_workdir/InfraEnv.yaml
+
+- name: Deploy InfraEnv Resource
+ command: oc apply -f /root/ansible_workdir/InfraEnv.yaml
+
+- name: Creating list of mac addresses
+  ansible.builtin.set_fact:
+    agent_mac_addr: []
+  when: hypershift.agents_parms.static_ip_parms.static_ip
+
+- name: Getting mac addresses for agents
+  ansible.builtin.set_fact:
+    agent_mac_addr: "{{ hypershift.agents_parms.agent_mac_addr }}"
+  when: hypershift.agents_parms.static_ip_parms.static_ip and hypershift.agents_parms.agent_mac_addr is not none
+
+- name: Generate mac addresses for agents
+  ansible.builtin.set_fact:
+    agent_mac_addr: "{{ agent_mac_addr + ['52:54:00' | community.general.random_mac] }}"
+  when: hypershift.agents_parms.static_ip_parms.static_ip and hypershift.agents_parms.agent_mac_addr is none
+  loop: "{{ range(hypershift.agents_parms.agents_count | int) | list }}"
+
+- name: Create NMState Configs
+ template:
+ src: nmStateConfig.yaml.j2
+ dest: /root/ansible_workdir/nmStateConfig-agent-{{ item }}.yaml
+ when: hypershift.agents_parms.static_ip_parms.static_ip == true
+ loop: "{{ range(hypershift.agents_parms.agents_count|int) | list }}"
+
+- name: Deploy NMState Configs
+ command: oc apply -f /root/ansible_workdir/nmStateConfig-agent-{{ item }}.yaml
+ when: hypershift.agents_parms.static_ip_parms.static_ip == true
+ loop: "{{ range(hypershift.agents_parms.agents_count|int) | list }}"
+
+- name: Wait for ISO to generate in InfraEnv
+ shell: oc get InfraEnv -n {{ hosted_control_plane_namespace }} --no-headers
+ register: infra
+ until: infra.stdout.split(' ')[-1] != ''
+ retries: 60
+ delay: 20
diff --git a/roles/create_hcp_InfraEnv_hypershift/templates/InfraEnv.yaml.j2 b/roles/create_hcp_InfraEnv_hypershift/templates/InfraEnv.yaml.j2
new file mode 100644
index 00000000..1f3b3952
--- /dev/null
+++ b/roles/create_hcp_InfraEnv_hypershift/templates/InfraEnv.yaml.j2
@@ -0,0 +1,15 @@
+apiVersion: agent-install.openshift.io/v1beta1
+kind: InfraEnv
+metadata:
+ name: "{{ hypershift.hcp.hosted_cluster_name }}"
+ namespace: "{{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }}"
+spec:
+{% if hypershift.agents_parms.static_ip_parms.static_ip == true %}
+ nmStateConfigLabelSelector:
+ matchLabels:
+ infraenv: "static-ip-{{ hypershift.hcp.hosted_cluster_name }}"
+{% endif %}
+ cpuArchitecture: "{{ hypershift.hcp.arch }}"
+ pullSecretRef:
+ name: pull-secret
+ sshAuthorizedKey: "{{ ssh_key }}"
diff --git a/roles/create_hcp_InfraEnv_hypershift/templates/nmStateConfig.yaml.j2 b/roles/create_hcp_InfraEnv_hypershift/templates/nmStateConfig.yaml.j2
new file mode 100644
index 00000000..b396dbff
--- /dev/null
+++ b/roles/create_hcp_InfraEnv_hypershift/templates/nmStateConfig.yaml.j2
@@ -0,0 +1,34 @@
+apiVersion: agent-install.openshift.io/v1beta1
+kind: NMStateConfig
+metadata:
+ name: "static-ip-nmstate-config-{{ hypershift.hcp.hosted_cluster_name }}-{{ item }}"
+ namespace: "{{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }}"
+ labels:
+ infraenv: "static-ip-{{ hypershift.hcp.hosted_cluster_name }}"
+spec:
+ config:
+ interfaces:
+ - name: "{{ hypershift.agents_parms.static_ip_parms.interface }}"
+ type: ethernet
+ state: up
+ mac-address: "{{ agent_mac_addr[item] }}"
+ ipv4:
+ enabled: true
+ address:
+ - ip: "{{ hypershift.agents_parms.static_ip_parms.ip[item] }}"
+ prefix-length: 16
+ dhcp: false
+ routes:
+ config:
+ - destination: 0.0.0.0/0
+ next-hop-address: "{{ hypershift.gateway }}"
+ next-hop-interface: "{{ hypershift.agents_parms.static_ip_parms.interface }}"
+ table-id: 254
+ dns-resolver:
+ config:
+ server:
+ - "{{ hypershift.agents_parms.nameserver }}"
+
+ interfaces:
+ - name: "{{ hypershift.agents_parms.static_ip_parms.interface }}"
+ macAddress: "{{ agent_mac_addr[item] }}"
diff --git a/roles/create_inventory_setup_hypershift/tasks/main.yaml b/roles/create_inventory_setup_hypershift/tasks/main.yaml
new file mode 100644
index 00000000..4d73ac94
--- /dev/null
+++ b/roles/create_inventory_setup_hypershift/tasks/main.yaml
@@ -0,0 +1,42 @@
+---
+
+- name: Find inventory directory from ansible.cfg
+ tags: set_inventory
+ shell: cat {{ ansible_config_file }} | grep 'inventory=' | cut -f2 -d"="
+ register: find_inventory
+
+- name: Find absolute path to project.
+ tags: set_inventory
+ shell: |
+ ansible_config="{{ ansible_config_file }}"
+ echo "${ansible_config%/*}/"
+ register: find_project
+
+- name: Create inventory
+ template:
+ src: inventory_template.j2
+ dest: "{{ find_project.stdout }}{{ find_inventory.stdout }}/inventory_hypershift"
+
+- name: Check if SSH key exists
+  ansible.builtin.stat:
+    path: "~/.ssh/{{ env.ansible_key_name }}.pub"
+  register: ssh_key
+
+- name: Generate SSH key
+  ansible.builtin.shell: ssh-keygen -t rsa -b 4096 -N "" -f ~/.ssh/{{ env.ansible_key_name }}
+  when: not ssh_key.stat.exists
+
+- name: Create expect file
+ template:
+ src: ssh-key.exp.j2
+ dest: "{{ find_project.stdout }}{{ find_inventory.stdout }}/ssh-key.exp.sh"
+ mode: "+rx"
+
+- name: Add ssh-key to kvm_host Authorised Keys
+ block:
+ - name: Adding ssh key
+ shell: "{{ find_project.stdout }}{{ find_inventory.stdout }}/ssh-key.exp.sh"
+ rescue:
+ - name: Key already added
+ debug:
+ msg: "Ignore the above error if ssh-key already added"
diff --git a/roles/create_inventory_setup_hypershift/templates/inventory_template.j2 b/roles/create_inventory_setup_hypershift/templates/inventory_template.j2
new file mode 100644
index 00000000..615034c4
--- /dev/null
+++ b/roles/create_inventory_setup_hypershift/templates/inventory_template.j2
@@ -0,0 +1,7 @@
+[kvm_host_hypershift]
+kvm_host_hypershift ansible_host={{ hypershift.kvm_host }} ansible_user={{ hypershift.kvm_host_user }} ansible_become_password={{ kvm_host_password }}
+
+
+
+[bastion_hypershift]
+bastion_hypershift ansible_host={{ hypershift.bastion_hypershift }} ansible_user={{ hypershift.bastion_hypershift_user }}
diff --git a/roles/create_inventory_setup_hypershift/templates/ssh-key.exp.j2 b/roles/create_inventory_setup_hypershift/templates/ssh-key.exp.j2
new file mode 100644
index 00000000..2d375510
--- /dev/null
+++ b/roles/create_inventory_setup_hypershift/templates/ssh-key.exp.j2
@@ -0,0 +1,6 @@
+#!/usr/bin/expect
+set password "{{ kvm_host_password }}"
+spawn ssh-copy-id -i {{ lookup('env', 'HOME') }}/.ssh/{{ env.ansible_key_name }} {{ hypershift.kvm_host_user }}@{{ hypershift.kvm_host }}
+expect "{{ hypershift.kvm_host_user }}@{{ hypershift.kvm_host }}'s password:"
+send "$password\r"
+expect eof
diff --git a/roles/create_kvm_host/files/hmccreds.yaml b/roles/create_kvm_host/files/hmccreds.yaml
new file mode 100644
index 00000000..6fca3669
--- /dev/null
+++ b/roles/create_kvm_host/files/hmccreds.yaml
@@ -0,0 +1,19 @@
+examples:
+  api_version:
+    hmc: 9.60.86.110
+    verify_cert: false
+  show_os_messages:
+    hmc: 9.60.86.110
+    cpcname: P0007DE8
+    partname: Distrib-KVM01
+    verify_cert: false
+"9.60.86.110":
+  userid: user@example.com # placeholder - never commit real HMC credentials
+  password: CHANGE_ME # placeholder - the previously committed credential must be rotated
+  verify_cert: false
+cpcs:
+  P0007DE8:
+    hmc_host: 9.60.86.110
+    hmc_userid: user@example.com # placeholder
+    hmc_password: CHANGE_ME # placeholder
+    verify_cert: false
diff --git a/roles/create_kvm_host/files/os_messages.py b/roles/create_kvm_host/files/os_messages.py
new file mode 100755
index 00000000..aa053a13
--- /dev/null
+++ b/roles/create_kvm_host/files/os_messages.py
@@ -0,0 +1,158 @@
+#!/usr/bin/env python
+# Copyright 2017-2021 IBM Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+#Example that shows the OS messages of the OS in a Partition or LPAR.
+"""
+
+import sys
+import logging
+import yaml
+import requests
+import zhmcclient
+
+# Print metadata for each OS message, before each message
+PRINT_METADATA = False
+
+requests.packages.urllib3.disable_warnings()
+
+if len(sys.argv) != 2:
+ print("Usage: %s hmccreds.yaml" % sys.argv[0])
+ sys.exit(2)
+hmccreds_file = sys.argv[1]
+
+with open(hmccreds_file, 'r') as fp:
+ hmccreds = yaml.safe_load(fp)
+
+examples = hmccreds.get("examples", None)
+if examples is None:
+ print("examples not found in credentials file %s" % \
+ (hmccreds_file))
+ sys.exit(1)
+
+show_os_messages = examples.get("show_os_messages", None)
+if show_os_messages is None:
+ print("show_os_messages not found in credentials file %s" % \
+ (hmccreds_file))
+ sys.exit(1)
+
+loglevel = show_os_messages.get("loglevel", None)
+if loglevel is not None:
+ level = getattr(logging, loglevel.upper(), None)
+ if level is None:
+ print("Invalid value for loglevel in credentials file %s: %s" % \
+ (hmccreds_file, loglevel))
+ sys.exit(1)
+ logmodule = show_os_messages.get("logmodule", None)
+ if logmodule is None:
+ logmodule = '' # root logger
+ print("Logging for module %s with level %s" % (logmodule, loglevel))
+ handler = logging.StreamHandler()
+ format_string = '%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s'
+ handler.setFormatter(logging.Formatter(format_string))
+ logger = logging.getLogger(logmodule)
+ logger.addHandler(handler)
+ logger.setLevel(level)
+
+hmc = show_os_messages["hmc"]
+cpcname = show_os_messages["cpcname"]
+partname = show_os_messages["partname"]
+
+cred = hmccreds.get(hmc, None)
+if cred is None:
+ print("Credentials for HMC %s not found in credentials file %s" % \
+ (hmc, hmccreds_file))
+ sys.exit(1)
+
+userid = cred['userid']
+password = cred['password']
+
+print(__doc__)
+
+print("Using HMC %s with userid %s ..." % (hmc, userid))
+session = zhmcclient.Session(hmc, userid, password, verify_cert=False)
+cl = zhmcclient.Client(session)
+
+timestats = show_os_messages.get("timestats", False)
+if timestats:
+ session.time_stats_keeper.enable()
+
+try:
+ cpc = cl.cpcs.find(name=cpcname)
+except zhmcclient.NotFound:
+ print("Could not find CPC %s on HMC %s" % (cpcname, hmc))
+ sys.exit(1)
+
+try:
+ if cpc.dpm_enabled:
+ partkind = "partition"
+ partition = cpc.partitions.find(name=partname)
+ else:
+ partkind = "LPAR"
+ partition = cpc.lpars.find(name=partname)
+except zhmcclient.NotFound:
+ print("Could not find %s %s on CPC %s" % (partkind, partname, cpcname))
+ sys.exit(1)
+
+#break_id = show_os_messages.get("breakid", None)
+#if break_id:
+# print("Breaking upon receipt of message with ID %s ..." % break_id)
+
+print("Opening OS message channel for %s %s on CPC %s ..." %
+ (partkind, partname, cpcname))
+topic = partition.open_os_message_channel(include_refresh_messages=True)
+print("OS message channel topic: %s" % topic)
+
+receiver = zhmcclient.NotificationReceiver(topic, hmc, userid, password)
+print("Showing OS messages (including refresh messages) ...")
+sys.stdout.flush()
+
+try:
+ for headers, message in receiver.notifications():
+ # print("# HMC notification #%s:" % headers['session-sequence-nr'])
+ # sys.stdout.flush()
+ os_msg_list = message['os-messages']
+ for os_msg in os_msg_list:
+ if PRINT_METADATA:
+ msg_id = os_msg['message-id']
+ held = os_msg['is-held']
+ priority = os_msg['is-priority']
+ prompt = os_msg.get('prompt-text', None)
+ print("# OS message %s (held: %s, priority: %s, prompt: %r):" %
+ (msg_id, held, priority, prompt))
+ msg_txt = os_msg['message-text'].strip('\n')
+ print(msg_txt)
+ sys.stdout.flush()
+# if msg_id == break_id:
+# raise NameError
+#except KeyboardInterrupt:
+# print("Keyboard interrupt - leaving receiver loop")
+# sys.stdout.flush()
+#except NameError:
+# print("Message with ID %s occurred - leaving receiver loop" % break_id)
+# sys.stdout.flush()
+finally:
+ print("Closing receiver...")
+ sys.stdout.flush()
+ receiver.close()
+
+print("Logging off...")
+sys.stdout.flush()
+session.logoff()
+
+if timestats:
+ print(session.time_stats_keeper)
+
+print("Done.")
diff --git a/roles/create_kvm_host/tasks/main.yaml b/roles/create_kvm_host/tasks/main.yaml
new file mode 100644
index 00000000..210298ad
--- /dev/null
+++ b/roles/create_kvm_host/tasks/main.yaml
@@ -0,0 +1,67 @@
+---
+
+- name: Start LPAR
+ tags: create_kvm_host
+ ibm.ibm_zhmc.zhmc_partition:
+ hmc_host: "{{ hmc.host }}"
+ hmc_auth:
+ userid: "{{ hmc.auth.user }}"
+ password: "{{ hmc.auth.pass }}"
+ verify: false
+ cpc_name: "{{ cpc_name }}"
+ name: "{{ lpar.name }}"
+ properties:
+ boot_ftp_host: "{{ env.file_server.ip }}"
+ boot_ftp_username: "{{ env.file_server.user }}"
+ boot_ftp_password: "{{ env.file_server.pass }}"
+ boot_ftp_insfile: "{{ env.file_server.cfgs_dir }}/{{ networking.hostname }}/kvm_host.ins"
+ boot_device: "ftp"
+ state: active
+ register: _create_instances
+ async: 600
+ poll: 0
+
+- name: Wait for creation to finish
+ tags: create_kvm_host
+ async_status:
+ jid: "{{ _create_instances.ansible_job_id }}"
+ register: _jobs
+ until: _jobs.finished
+ delay: 15 # Check every 15 seconds
+ retries: 40 # Retry up to 40 times
+
+#- name: Template hmccreds.yaml for use with os_messages.py
+# tags: create_kvm_host, test2
+# template:
+# src: hmccreds.yaml.j2
+# dest: "{{ role_path }}/files/hmccreds.yaml"
+
+#- name: Execute os_messages.py
+# tags: create_kvm_host, test2
+# command: "{{ role_path }}/files/os_messages.py {{ role_path }}/files/hmcclient.yaml"
+# register: os_messages_output
+
+#- name: Show LPAR OS messages from HMC.
+# tags: create_kvm_host, test2
+# debug:
+# msg: "{{ os_messages_output }}"
+# until: "'login:' in os_messages_output"
+
+- name: Wait 7 minutes for automated RHEL installation and configuration to complete.
+ tags: create_kvm_host
+ pause:
+ minutes: 7
+
+#- name: Change LPAR's boot source to storage adapter instead of FTP for future booting
+# tags: create_kvm_host
+# ibm.ibm_zhmc.zhmc_partition:
+# hmc_host: "{{ hmc.host }}"
+# hmc_auth:
+# userid: "{{ hmc.auth.user }}"
+# password: "{{ hmc.auth.pass }}"
+# verify: false
+# cpc_name: "{{ cpc_name }}"
+# name: "{{ lpar.name }}"
+# properties:
+# boot_device: "storage-adapter"
+# state: active
diff --git a/roles/create_kvm_host/templates/hmccreds.yaml.j2 b/roles/create_kvm_host/templates/hmccreds.yaml.j2
new file mode 100644
index 00000000..1f538866
--- /dev/null
+++ b/roles/create_kvm_host/templates/hmccreds.yaml.j2
@@ -0,0 +1,19 @@
+examples:
+ api_version:
+ hmc: {{ networking.ip }}
+ verify_cert: false
+ show_os_messages:
+ hmc: {{ networking.ip }}
+ cpcname: {{ cpc_name }}
+ partname: {{ lpar.name }}
+ verify_cert: false
+"{{ networking.ip }}":
+ userid: {{ hmc.auth.user }}
+ password: {{ hmc.auth.pass }}
+ verify_cert: false
+cpcs:
+ {{ cpc_name }}:
+ hmc_host: {{ networking.ip }}
+ hmc_userid: {{ hmc.auth.user }}
+ hmc_password: {{ hmc.auth.pass }}
+ verify_cert: false
diff --git a/roles/create_lpar/tasks/main.yaml b/roles/create_lpar/tasks/main.yaml
new file mode 100644
index 00000000..aa18813e
--- /dev/null
+++ b/roles/create_lpar/tasks/main.yaml
@@ -0,0 +1,121 @@
+---
+
+# Create LPAR Profile
+- name: Create logical partition.
+ tags: create_lpar, lpar
+ ibm.ibm_zhmc.zhmc_partition:
+ hmc_host: "{{ hmc.host }}"
+ hmc_auth:
+ userid: "{{ hmc.auth.user }}"
+ password: "{{ hmc.auth.pass }}"
+ verify: false
+ cpc_name: "{{ cpc_name }}"
+ name: "{{ lpar.name }}"
+ state: stopped
+ properties:
+ description: "{{ lpar.description }}"
+ ifl_processors: "{{ lpar.ifl.count }}"
+ initial_memory: "{{ lpar.ifl.initial_memory }}"
+ maximum_memory: "{{ lpar.ifl.max_memory }}"
+ minimum_ifl_processing_weight: "{{ lpar.ifl.min_weight }}"
+ maximum_ifl_processing_weight: "{{ lpar.ifl.max_weight }}"
+ initial_ifl_processing_weight: "{{ lpar.ifl.initial_weight }}"
+ register: create_lpar
+
+- name: Print the result
+ tags: create_lpar, lpar
+ debug:
+ var: create_lpar
+
+# Attach storage group
+- name: Ensure storage group is attached to partition.
+ tags: create_lpar, storage_group, storage_group_1
+ ibm.ibm_zhmc.zhmc_storage_group_attachment:
+ hmc_host: "{{ hmc.host }}"
+ hmc_auth:
+ userid: "{{ hmc.auth.user }}"
+ password: "{{ hmc.auth.pass }}"
+ verify: false
+ cpc_name: "{{ cpc_name }}"
+ storage_group_name: "{{ lpar.storage_group_1.name }}"
+ partition_name: "{{ lpar.name }}"
+ state: attached
+ register: sglparattach
+
+- name: Print the result.
+ tags: create_lpar, storage_group, storage_group_1
+ debug:
+ var: sglparattach
+
+# Attach second storage group, if defined
+- name: Ensure second storage group is attached to partition, if defined.
+ tags: create_lpar, storage_group, storage_group_2
+ ibm.ibm_zhmc.zhmc_storage_group_attachment:
+ hmc_host: "{{ hmc.host }}"
+ hmc_auth:
+ userid: "{{ hmc.auth.user }}"
+ password: "{{ hmc.auth.pass }}"
+ verify: false
+ cpc_name: "{{ cpc_name }}"
+ storage_group_name: "{{ lpar.storage_group_2.name }}"
+ partition_name: "{{ lpar.name }}"
+ state: attached
+ register: sglparattach
+ when: lpar.storage_group_2.name is defined
+
+- name: Print the result.
+ tags: create_lpar, storage_group, storage_group_2
+ debug:
+ var: sglparattach
+ when: lpar.storage_group_2.name is defined
+
+# Attach Network Adapter
+- name: Ensure NIC1 exists in the partition.
+ tags: create_lpar, nic, nic1
+ ibm.ibm_zhmc.zhmc_nic:
+ hmc_host: "{{ hmc.host }}"
+ hmc_auth:
+ userid: "{{ hmc.auth.user }}"
+ password: "{{ hmc.auth.pass }}"
+ verify: false
+ cpc_name: "{{ cpc_name }}"
+ partition_name: "{{ lpar.name }}"
+ name: "{{ lpar.networking.nic.card1.name }}"
+ state: present
+ properties:
+ adapter_name: "{{ lpar.networking.nic.card1.adapter }}"
+ adapter_port: "{{ lpar.networking.nic.card1.port }}"
+ description: "NIC1"
+ device_number: "{{ '%04x' % ( lpar.networking.nic.card1.dev_num | int ) }}"
+ register: nic1
+
+- name: Print the result.
+ tags: create_lpar, nic, nic1
+ debug:
+ var: nic1
+
+- name: Ensure NIC2 exists in the partition, if defined.
+ tags: create_lpar, nic, nic2
+ ibm.ibm_zhmc.zhmc_nic:
+ hmc_host: "{{ hmc.host }}"
+ hmc_auth:
+ userid: "{{ hmc.auth.user }}"
+ password: "{{ hmc.auth.pass }}"
+ verify: false
+ cpc_name: "{{ cpc_name }}"
+ partition_name: "{{ lpar.name }}"
+ name: "{{ lpar.networking.nic.card2.name }}"
+ state: present
+ properties:
+ adapter_name: "{{ lpar.networking.nic.card2.adapter }}"
+ adapter_port: "{{ lpar.networking.nic.card2.port }}"
+ description: "NIC2"
+ device_number: "{{ '%04x' % ( lpar.networking.nic.card2.dev_num | int ) }}"
+ register: nic2
+ when: lpar.networking.nic.card2 is defined
+
+- name: Print the result.
+ tags: create_lpar, nic, nic2
+ debug:
+ var: nic2
+ when: lpar.networking.nic.card2 is defined
diff --git a/roles/defaults/main.yml b/roles/defaults/main.yml
deleted file mode 100644
index 06bce909..00000000
--- a/roles/defaults/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-# defaults file for playbooks/roles/kvm-vm
-kvm_vm_hostname: []
-kvm_vm_public_ip: []
-kvm_vm_root_pwd: []
-kvm_vm_base_img: [] #NOTE: This should be the name of a base image in /var/lib/libvirt/images on your KVM host
-kvm_vm_vcpus: "1"
-kvm_vm_ram: "8196"
-# kvm_vm_ram: "16384"
-kvm_vm_os_disk_name: "{{ kvm_vm_hostname }}"
-kvm_vm_os_disk_size: "70G"
-kvm_vm_nics: [] #NOTE: see example playbook for structure
diff --git a/roles/delete_compute_node/tasks/main.yaml b/roles/delete_compute_node/tasks/main.yaml
new file mode 100644
index 00000000..db39dbc8
--- /dev/null
+++ b/roles/delete_compute_node/tasks/main.yaml
@@ -0,0 +1,34 @@
+---
+
+- name: Delete compute node from OCP cluster and print command output
+ tags: delete_compute_node
+ delegate_to: "{{ groups['bastion'][0] }}"
+ block:
+ - name: Delete compute node from OCP cluster
+ ansible.builtin.shell: |
+ set -o pipefail
+ if ! oc get nodes --no-headers=true | grep "^{{ param_compute_node.vm_hostname | lower }}" ; then
+ echo "INFO: Node '{{ param_compute_node.vm_hostname }}' not found or is already deleted"
+ exit 0
+ fi
+ oc adm cordon {{ param_compute_node.vm_hostname | lower }}
+ oc adm drain {{ param_compute_node.vm_hostname | lower }} --force --delete-emptydir-data --ignore-daemonsets --timeout=30s
+ oc delete nodes {{ param_compute_node.vm_hostname | lower }}
+ register: cmd_output
+ - name: Print cmd output
+ ansible.builtin.debug:
+ var: cmd_output.stdout_lines
+
+- name: Delete compute node VM on KVM host and print command output
+ tags: delete_compute_node
+ delegate_to: "{{ param_compute_node.hostname }}"
+ block:
+ - name: Delete compute node VM on KVM host
+ ansible.builtin.shell: |
+ set -o pipefail
+ virsh destroy {{ param_compute_node.vm_name }} || true
+ virsh undefine {{ param_compute_node.vm_name }} --remove-all-storage || true
+ register: cmd_output
+ - name: Print cmd output
+ ansible.builtin.debug:
+ var: cmd_output.stdout_lines
diff --git a/roles/delete_compute_node/vars/main.yaml b/roles/delete_compute_node/vars/main.yaml
new file mode 100644
index 00000000..99f849d5
--- /dev/null
+++ b/roles/delete_compute_node/vars/main.yaml
@@ -0,0 +1,7 @@
+---
+param_compute_node:
+ # vm_name:
+ # vm_hostname:
+ # vm_ip:
+ # hostname:
+ # host_arch:
diff --git a/roles/delete_nodes/tasks/main.yaml b/roles/delete_nodes/tasks/main.yaml
new file mode 100644
index 00000000..df4defa2
--- /dev/null
+++ b/roles/delete_nodes/tasks/main.yaml
@@ -0,0 +1,33 @@
+---
+
+- name: Delete bootstrap node, if exists
+ tags: delete_nodes
+ ansible.builtin.shell: |
+ set -o pipefail
+ virsh destroy "{{ env.cluster.nodes.bootstrap.vm_name }}" || true
+ virsh undefine "{{ env.cluster.nodes.bootstrap.vm_name }}" --remove-all-storage || true
+ register: delete_bootstrap
+ changed_when: "('destroyed' in delete_bootstrap.stdout) or ('undefined' in delete_bootstrap.stdout)"
+
+- name: Delete control, compute and infra nodes, if exists
+ tags: delete_nodes
+ ansible.builtin.shell: |
+ set -o pipefail
+ virsh destroy {{ item }} || true
+ virsh undefine {{ item }} --remove-all-storage || true
+ loop: "{{ env.cluster.nodes.control.vm_name + env.cluster.nodes.compute.vm_name \
+ if env.cluster.nodes.infra.vm_name is not defined \
+ else env.cluster.nodes.control.vm_name + env.cluster.nodes.compute.vm_name + env.cluster.nodes.infra.vm_name }}"
+ register: delete_nodes
+ changed_when: "('destroyed' in delete_nodes.stdout) or ('undefined' in delete_nodes.stdout)"
+
+- name: Get and print virsh list
+ tags: delete_nodes
+ block:
+ - name: Get virsh list
+ ansible.builtin.command: virsh list
+ register: cmd_virsh_list
+
+ - name: Print virsh list
+ ansible.builtin.debug:
+ var: cmd_virsh_list.stdout_lines
diff --git a/roles/delete_resources_bastion_hypershift/tasks/main.yaml b/roles/delete_resources_bastion_hypershift/tasks/main.yaml
new file mode 100644
index 00000000..b9ff8e47
--- /dev/null
+++ b/roles/delete_resources_bastion_hypershift/tasks/main.yaml
@@ -0,0 +1,136 @@
+---
+
+- name: Login to Management Cluster
+ command: oc login {{ api_server }} -u {{ user_name }} -p {{ password }} --insecure-skip-tls-verify=true
+
+- name: Scale in Nodepool
+ command: oc -n {{ hypershift.hcp.clusters_namespace }} scale nodepool {{ hypershift.hcp.hosted_cluster_name }} --replicas 0
+
+- name: Wait for Worker Nodes to Detach
+ k8s_info:
+ api_version: v1
+ kind: Node
+ kubeconfig: "/root/ansible_workdir/{{ hypershift.hcp.hosted_cluster_name }}-kubeconfig"
+ register: nodes
+ until: nodes.resources | length == 0
+ retries: 30
+ delay: 10
+
+- name: Wait for Agentmachines to delete
+ k8s_info:
+ api_version: capi-provider.agent-install.openshift.io/v1alpha1
+ kind: AgentMachine
+ register: agent_machines
+ until: agent_machines.resources | length == 0
+ retries: 30
+ delay: 10
+
+- name: Wait for Machines to delete
+ k8s_info:
+ api_version: cluster.x-k8s.io/v1beta1
+ kind: Machine
+ register: machines
+ until: machines.resources | length == 0
+ retries: 30
+ delay: 10
+
+- name: Get agent names
+ command: oc get agents -n {{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }} --no-headers
+ register: agents_info
+
+- name: Create List for agents
+ set_fact:
+ agents: []
+
+- name: Get a List of agents
+ set_fact:
+ agents: "{{ agents + [agents_info.stdout.split('\n')[item].split(' ')[0]] }}"
+ loop: "{{ range(hypershift.agents_parms.agents_count|int) | list }}"
+
+- name: Delete Agents
+ command: oc delete agent {{ agents[item] }} -n {{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }}
+ loop: "{{ range(hypershift.agents_parms.agents_count|int) | list }}"
+
+- name: Remove workdir
+ file:
+ path: /root/ansible_workdir
+ state: absent
+
+- name: Delete InfraEnv resource
+ k8s:
+ state: absent
+ api_version: agent-install.openshift.io/v1beta1
+ kind: InfraEnv
+ name: "{{ hypershift.hcp.hosted_cluster_name }}"
+ namespace: "{{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }}"
+
+- name: Destroy Hosted Control Plane
+ command: hypershift destroy cluster agent --name {{ hypershift.hcp.hosted_cluster_name }} --namespace {{ hypershift.hcp.clusters_namespace }}
+
+- name: Delete Clusters Namespace
+ k8s:
+ api_version: v1
+ kind: Namespace
+ name: "{{ hypershift.hcp.clusters_namespace }}"
+ state: absent
+
+- name: Delete AgentServiceConfig
+ k8s:
+ api_version: agent-install.openshift.io/v1beta1
+ kind: AgentServiceConfig
+ name: agent
+ state: absent
+ when: hypershift.mce.delete | bool
+
+- name: Delete Provisioning
+ k8s:
+ name: provisioning-configuration
+ api_version: metal3.io/v1alpha1
+ kind: Provisioning
+ state: absent
+ when: hypershift.mce.delete | bool
+
+- name: Delete ClusterImageSet
+ k8s:
+ name: "img{{ hypershift.hcp.hosted_cluster_name }}-appsub"
+ api_version: hive.openshift.io/v1
+ kind: ClusterImageSet
+ state: absent
+ when: hypershift.mce.delete | bool
+
+- name: Delete MCE Instance
+ k8s:
+ name: "{{ hypershift.mce.instance_name }}"
+ namespace: "{{ hypershift.asc.mce_namespace }}"
+ api_version: multicluster.openshift.io/v1
+ kind: MultiClusterEngine
+ state: absent
+ wait: true
+ wait_timeout: 400
+ when: hypershift.mce.delete | bool
+
+- name: Delete MCE Subscription
+ k8s:
+ name: multicluster-engine
+ namespace: "{{ hypershift.asc.mce_namespace }}"
+ api_version: operators.coreos.com/v1alpha1
+ kind: Subscription
+ state: absent
+ when: hypershift.mce.delete | bool
+
+- name: Delete Operator Group - MCE
+ k8s:
+ name: multicluster-engine
+ namespace: "{{ hypershift.asc.mce_namespace }}"
+ api_version: operators.coreos.com/v1
+ kind: OperatorGroup
+ state: absent
+ when: hypershift.mce.delete | bool
+
+- name: Delete MCE Namespace
+ k8s:
+ api_version: v1
+ kind: Namespace
+ name: "{{ hypershift.asc.mce_namespace }}"
+ state: absent
+ when: hypershift.mce.delete | bool
diff --git a/roles/delete_resources_kvm_host_hypershift/tasks/main.yaml b/roles/delete_resources_kvm_host_hypershift/tasks/main.yaml
new file mode 100644
index 00000000..abe809dd
--- /dev/null
+++ b/roles/delete_resources_kvm_host_hypershift/tasks/main.yaml
@@ -0,0 +1,25 @@
+---
+
+- name: Destroy Agent VMs
+ command: virsh destroy {{ hypershift.hcp.hosted_cluster_name }}-agent-{{ item }}
+ loop: "{{ range(hypershift.agents_parms.agents_count|int) | list }}"
+
+- name: Undefine Agents
+ command: virsh undefine {{ hypershift.hcp.hosted_cluster_name }}-agent-{{ item }} --remove-all-storage
+ loop: "{{ range(hypershift.agents_parms.agents_count|int) | list }}"
+
+- name: Delete initrd.img
+ file:
+ path: /var/lib/libvirt/images/pxeboot/initrd.img
+ state: absent
+
+- name: Delete kernel.img
+ file:
+ path: /var/lib/libvirt/images/pxeboot/kernel.img
+ state: absent
+
+- name: Destroy bastion
+ command: virsh destroy {{ hypershift.hcp.hosted_cluster_name }}-bastion
+
+- name: Undefine bastion
+ command: virsh undefine {{ hypershift.hcp.hosted_cluster_name }}-bastion --remove-all-storage
diff --git a/roles/dns/files/90-dns-none.conf b/roles/dns/files/90-dns-none.conf
new file mode 100644
index 00000000..d435aba9
--- /dev/null
+++ b/roles/dns/files/90-dns-none.conf
@@ -0,0 +1,2 @@
+[main]
+dns=none
diff --git a/roles/dns/tasks/initial-resolv.yaml b/roles/dns/tasks/initial-resolv.yaml
new file mode 100644
index 00000000..bb5ab1b5
--- /dev/null
+++ b/roles/dns/tasks/initial-resolv.yaml
@@ -0,0 +1,24 @@
+- name: Template out bastion's resolv.conf file for initial installation.
+ tags: resolv
+ ansible.builtin.template:
+ src: initial-resolv.conf.j2
+ dest: /etc/resolv.conf
+ owner: root
+ group: root
+ mode: "0644"
+
+# NetworkManager modifies our /etc/resolv.conf file on next restart or reboot, we need to disable it
+- name: Disable management of /etc/resolv.conf by NetworkManager
+ tags: resolv
+ ansible.builtin.copy:
+ src: 90-dns-none.conf
+ dest: /etc/NetworkManager/conf.d/90-dns-none.conf
+ group: root
+ owner: root
+ mode: "0644"
+
+- name: Restart network to update changes made to /etc/resolv.conf
+ tags: resolv
+ ansible.builtin.service:
+ name: network
+ state: restarted
\ No newline at end of file
diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml
new file mode 100644
index 00000000..bb48cf62
--- /dev/null
+++ b/roles/dns/tasks/main.yaml
@@ -0,0 +1,138 @@
+---
+
+- name: Enable named
+ tags: dns
+ ansible.builtin.systemd:
+ name: named
+ enabled: true
+
+- name: Start named
+ tags: dns
+ ansible.builtin.systemd:
+ name: named
+ state: started
+
+- name: Split IP addresses for use in templates
+ tags: dns
+ ansible.builtin.set_fact:
+ bastion_split_ip: "{{ env.bastion.networking.ip.split('.') }}"
+ bootstrap_split_ip: "{{ env.cluster.nodes.bootstrap.ip.split('.') }}"
+
+- name: Template named.conf file to bastion
+ tags: dns
+ ansible.builtin.template:
+ src: dns-named.conf.j2
+ dest: /etc/named.conf
+ owner: root
+ group: root
+ mode: "0644"
+ backup: true
+
+- name: Template DNS forwarding file to bastion
+ tags: dns
+ ansible.builtin.template:
+ src: dns.db.j2
+ dest: /var/named/{{ env.cluster.networking.metadata_name }}.db
+ owner: named
+ group: named
+ mode: "0644"
+ backup: true
+
+- name: Add control nodes to DNS forwarding file on bastion
+ tags: dns
+ ansible.builtin.lineinfile:
+ path: /var/named/{{ env.cluster.networking.metadata_name }}.db
+ insertafter: "entries for the control nodes"
+ line: "{{ env.cluster.nodes.control.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}. IN A {{ env.cluster.nodes.control.ip[i] }}"
+ with_sequence: start=0 end={{ (env.cluster.nodes.control.hostname | length) - 1 }} stride=1
+ loop_control:
+ extended: true
+ index_var: i
+
+- name: Add compute nodes to DNS forwarding file on bastion
+ tags: dns
+ ansible.builtin.lineinfile:
+ path: /var/named/{{ env.cluster.networking.metadata_name }}.db
+ insertafter: "entries for the compute nodes"
+ line: "{{ env.cluster.nodes.compute.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}. IN A {{ env.cluster.nodes.compute.ip[i] }}"
+ with_sequence: start=0 end={{ (env.cluster.nodes.compute.hostname | length) - 1 }} stride=1
+ loop_control:
+ extended: true
+ index_var: i
+
+- name: Add infrastructure nodes to DNS forwarding file on bastion if requested
+ tags: dns
+ ansible.builtin.lineinfile:
+ path: /var/named/{{ env.cluster.networking.metadata_name }}.db
+ insertafter: "entries for extra RHEL VMs"
+ line: "{{ env.cluster.nodes.infra.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}. IN A {{ env.cluster.nodes.infra.ip[i] }}"
+ with_sequence: start=0 end={{ (env.cluster.nodes.infra.hostname | length) - 1 }} stride=1
+ loop_control:
+ extended: true
+ index_var: i
+ when: env.cluster.nodes.infra.hostname is defined
+
+- name: Template DNS reverse lookup file to bastion
+ tags: dns
+ ansible.builtin.template:
+ src: dns.rev.j2
+ dest: /var/named/{{ env.cluster.networking.metadata_name }}.rev
+ owner: named
+ group: named
+ mode: "0644"
+ backup: true
+
+- name: Add control nodes to DNS reverse lookup file on bastion
+ tags: dns
+ ansible.builtin.lineinfile:
+ path: /var/named/{{ env.cluster.networking.metadata_name }}.rev
+ insertafter: "PTR Record IP address to Hostname"
+ line: "{{ env.cluster.nodes.control.ip[i].split('.').3 }} IN PTR {{ env.cluster.nodes.control.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}."
+ with_sequence: start=0 end={{ (env.cluster.nodes.control.hostname | length) - 1 }} stride=1
+ loop_control:
+ extended: true
+ index_var: i
+
+- name: Add compute nodes to DNS reverse lookup file on bastion
+ tags: dns
+ ansible.builtin.lineinfile:
+ path: /var/named/{{ env.cluster.networking.metadata_name }}.rev
+ insertafter: "PTR Record IP address to Hostname"
+ line: "{{ env.cluster.nodes.compute.ip[i].split('.').3 }} IN PTR {{ env.cluster.nodes.compute.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}."
+ with_sequence: start=0 end={{ (env.cluster.nodes.compute.hostname | length) - 1 }} stride=1
+ loop_control:
+ extended: true
+ index_var: i
+
+- name: Add infrastructure nodes to DNS reverse lookup file on bastion
+ tags: dns
+ ansible.builtin.lineinfile:
+ path: /var/named/{{ env.cluster.networking.metadata_name }}.rev
+ insertafter: "PTR Record IP address to Hostname"
+ line: "{{ env.cluster.nodes.infra.ip[i].split('.').3 }} IN PTR {{ env.cluster.nodes.infra.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}."
+ with_sequence: start=0 end={{ (env.cluster.nodes.infra.hostname | length) - 1 }} stride=1
+ loop_control:
+ extended: true
+ index_var: i
+ when: env.cluster.nodes.infra.hostname is defined
+
+- name: Restart named to update changes made to DNS
+ tags: dns, resolv
+ ansible.builtin.systemd:
+ name: named
+ state: restarted
+
+- name: Template out bastion's resolv.conf file, replacing initial resolv.conf
+ tags: dns, resolv
+ ansible.builtin.template:
+ src: resolv.conf.j2
+ dest: /etc/resolv.conf
+ owner: root
+ group: root
+ mode: "0644"
+
+- name: Restart network to update changes made to /etc/resolv.conf
+ tags: dns, resolv
+ ansible.builtin.service:
+ name: network
+ state: restarted
diff --git a/roles/dns/templates/dns-named.conf.j2 b/roles/dns/templates/dns-named.conf.j2
new file mode 100644
index 00000000..da793c75
--- /dev/null
+++ b/roles/dns/templates/dns-named.conf.j2
@@ -0,0 +1,77 @@
+//
+// named.conf
+//
+// Provided by Red Hat bind package to configure the ISC BIND named(8) DNS
+// server as a caching only nameserver (as a localhost DNS resolver only).
+//
+// See /usr/share/doc/bind*/sample/ for example named configuration files.
+//
+
+options {
+// listen-on port 53 { 127.0.0.1; };
+ listen-on port 53 { any; };
+ listen-on-v6 port 53 { ::1; };
+ directory "/var/named";
+ dump-file "/var/named/data/cache_dump.db";
+ statistics-file "/var/named/data/named_stats.txt";
+ memstatistics-file "/var/named/data/named_mem_stats.txt";
+ secroots-file "/var/named/data/named.secroots";
+ recursing-file "/var/named/data/named.recursing";
+ allow-query { any; };
+ forwarders { {{ env.cluster.networking.forwarder }}; };
+
+ /*
+ - If you are building an AUTHORITATIVE DNS server, do NOT enable recursion.
+ - If you are building a RECURSIVE (caching) DNS server, you need to enable
+ recursion.
+ - If your recursive DNS server has a public IP address, you MUST enable access
+ control to limit queries to your legitimate users. Failing to do so will
+ cause your server to become part of large scale DNS amplification
+ attacks. Implementing BCP38 within your network would greatly
+ reduce such attack surface
+ */
+ recursion yes;
+
+ dnssec-enable no;
+ dnssec-validation no;
+
+ managed-keys-directory "/var/named/dynamic";
+
+ pid-file "/run/named/named.pid";
+ session-keyfile "/run/named/session.key";
+
+ /* https://fedoraproject.org/wiki/Changes/CryptoPolicy */
+ include "/etc/crypto-policies/back-ends/bind.config";
+};
+
+logging {
+ channel default_debug {
+ file "data/named.run";
+ severity dynamic;
+ };
+};
+
+zone "." IN {
+ type hint;
+ file "named.ca";
+};
+
+include "/etc/named.rfc1912.zones";
+include "/etc/named.root.key";
+
+//forward zone
+zone "{{ env.cluster.networking.base_domain }}" IN {
+ type master;
+ file "/var/named/{{ env.cluster.networking.metadata_name }}.db";
+ allow-update { any; };
+ allow-query { any; };
+};
+
+//backward zone
+zone "{{ bastion_split_ip.2 }}.{{ bastion_split_ip.1 }}.{{ bastion_split_ip.0 }}.in-addr.arpa" IN {
+ type master;
+ file "/var/named/{{ env.cluster.networking.metadata_name }}.rev";
+ allow-update { any; };
+ allow-query { any; };
+};
+
diff --git a/roles/dns/templates/dns.db.j2 b/roles/dns/templates/dns.db.j2
new file mode 100644
index 00000000..29835e00
--- /dev/null
+++ b/roles/dns/templates/dns.db.j2
@@ -0,0 +1,31 @@
+$TTL 86400
+@ IN SOA {{ env.bastion.networking.hostname }}.{{ env.bastion.networking.base_domain }}. admin.{{ env.bastion.networking.base_domain }}.(
+ 2020021821 ;Serial
+ 3600 ;Refresh
+ 1800 ;Retry
+ 604800 ;Expire
+ 86400 ;Minimum TTL
+)
+
+;Name Server / Bastion Information
+@ IN NS {{ env.bastion.networking.hostname }}.{{ env.bastion.networking.base_domain }}.
+
+;IP Address for Name Server
+{{ env.bastion.networking.hostname }} IN A {{ env.bastion.networking.ip }}
+
+;entry for bootstrap host.
+{{ env.cluster.nodes.bootstrap.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}. IN A {{ env.cluster.nodes.bootstrap.ip }}
+
+;entries for the control nodes
+
+;entries for the compute nodes
+
+;The api identifies the IP of your load balancer.
+api.{{ env.cluster.networking.metadata_name }} IN CNAME {{ env.bastion.networking.hostname }}.{{ env.cluster.networking.base_domain }}.
+api-int.{{ env.cluster.networking.metadata_name }} IN CNAME {{ env.bastion.networking.hostname }}.{{ env.cluster.networking.base_domain }}.
+
+;The wildcard also identifies the load balancer.
+apps.{{ env.cluster.networking.metadata_name }} IN CNAME {{ env.bastion.networking.hostname }}.{{ env.cluster.networking.base_domain }}.
+*.apps.{{ env.cluster.networking.metadata_name }} IN CNAME {{ env.bastion.networking.hostname }}.{{ env.cluster.networking.base_domain }}.
+
+;EOF
diff --git a/roles/dns/templates/dns.rev.j2 b/roles/dns/templates/dns.rev.j2
new file mode 100644
index 00000000..f0f1aa19
--- /dev/null
+++ b/roles/dns/templates/dns.rev.j2
@@ -0,0 +1,19 @@
+$TTL 86400
+@ IN SOA {{ env.bastion.networking.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}. admin.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }} (
+ 2020011800 ;Serial
+ 3600 ;Refresh
+ 1800 ;Retry
+ 604800 ;Expire
+ 86400 ;Minimum TTL
+)
+;Name Server Information
+@ IN NS {{ env.bastion.networking.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}.
+{{ env.bastion.networking.hostname }} IN A {{ env.bastion.networking.ip }}
+
+;Reverse lookup for Name Server
+{{ bastion_split_ip.3 }} IN PTR {{ env.bastion.networking.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}.
+
+;PTR Record IP address to Hostname
+{{ bootstrap_split_ip.3 }} IN PTR {{ env.cluster.nodes.bootstrap.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}.
+{{ bastion_split_ip.3 }} IN PTR api-int.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}.
+{{ bastion_split_ip.3 }} IN PTR api.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}.
diff --git a/roles/dns/templates/initial-resolv.conf.j2 b/roles/dns/templates/initial-resolv.conf.j2
new file mode 100644
index 00000000..5ef84020
--- /dev/null
+++ b/roles/dns/templates/initial-resolv.conf.j2
@@ -0,0 +1,2 @@
+search {{ env.bastion.networking.base_domain }}
+nameserver {{ env.bastion.networking.forwarder }}
\ No newline at end of file
diff --git a/roles/dns/templates/resolv.conf.j2 b/roles/dns/templates/resolv.conf.j2
new file mode 100644
index 00000000..e640d670
--- /dev/null
+++ b/roles/dns/templates/resolv.conf.j2
@@ -0,0 +1,3 @@
+search {{ env.cluster.networking.base_domain }}
+nameserver {{ env.bastion.networking.nameserver1 }}
+{{ ('nameserver ' + env.bastion.networking.nameserver2) if env.bastion.networking.nameserver2 is defined else '' }}
diff --git a/roles/dns_update/tasks/add.yaml b/roles/dns_update/tasks/add.yaml
new file mode 100644
index 00000000..1b2b1048
--- /dev/null
+++ b/roles/dns_update/tasks/add.yaml
@@ -0,0 +1,17 @@
+---
+- name: Add forward and reverse DNS entry
+ become: true
+ block:
+ - name: Add forward DNS
+ ansible.builtin.lineinfile:
+ path: /var/named/{{ env.cluster.networking.metadata_name }}.db
+ insertafter: ";entries for the compute nodes"
+ line: "{{ param_dns_hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}. IN A {{ param_dns_ip }}"
+ state: present
+
+ - name: Add reverse DNS
+ ansible.builtin.lineinfile:
+ path: /var/named/{{ env.cluster.networking.metadata_name }}.rev
+ insertafter: "PTR Record IP address to Hostname"
+ line: "{{ param_dns_ip.split('.').3 }} IN PTR {{ param_dns_hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}."
+ state: present
diff --git a/roles/dns_update/tasks/delete.yaml b/roles/dns_update/tasks/delete.yaml
new file mode 100644
index 00000000..46713725
--- /dev/null
+++ b/roles/dns_update/tasks/delete.yaml
@@ -0,0 +1,28 @@
+---
+
+- name: Delete forward and reverse DNS entry
+ become: true
+ block:
+ - name: Delete forward DNS using hostname
+ ansible.builtin.lineinfile:
+ path: /var/named/{{ env.cluster.networking.metadata_name }}.db
+ state: absent
+ regexp: "^{{ param_dns_hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}"
+
+ - name: Delete forward DNS using ip
+ ansible.builtin.lineinfile:
+ path: /var/named/{{ env.cluster.networking.metadata_name }}.db
+ state: absent
+ regex: "A {{ param_dns_ip }}"
+
+ - name: Delete reverse DNS using hostname
+ ansible.builtin.lineinfile:
+ path: /var/named/{{ env.cluster.networking.metadata_name }}.rev
+ state: absent
+ regexp: "{{ param_dns_hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}."
+
+ - name: Delete reverse DNS using IP
+ ansible.builtin.lineinfile:
+ path: /var/named/{{ env.cluster.networking.metadata_name }}.rev
+ state: absent
+ regexp: "^{{ param_dns_ip.split('.').3 }} "
diff --git a/roles/dns_update/tasks/main.yaml b/roles/dns_update/tasks/main.yaml
new file mode 100644
index 00000000..a7dab68e
--- /dev/null
+++ b/roles/dns_update/tasks/main.yaml
@@ -0,0 +1,10 @@
+---
+
+- name: Load DNS update task
+ ansible.builtin.include_tasks: "{{ param_dns_cmd }}.yaml"
+
+- name: Restart 'named' service to update changes
+ become: true
+ ansible.builtin.systemd:
+ name: named
+ state: restarted
diff --git a/roles/dns_update/vars/main.yaml b/roles/dns_update/vars/main.yaml
new file mode 100644
index 00000000..7165d434
--- /dev/null
+++ b/roles/dns_update/vars/main.yaml
@@ -0,0 +1,4 @@
+---
+param_dns_cmd: add
+param_dns_ip:
+param_dns_hostname:
diff --git a/roles/download_rootfs_hypershift/tasks/main.yaml b/roles/download_rootfs_hypershift/tasks/main.yaml
new file mode 100644
index 00000000..858bd664
--- /dev/null
+++ b/roles/download_rootfs_hypershift/tasks/main.yaml
@@ -0,0 +1,64 @@
+---
+- name: install apache httpd
+ package:
+ name: httpd
+ state: latest
+
+- name: bind httpd to port 8080
+ lineinfile:
+ dest: /etc/httpd/conf/httpd.conf
+ regexp: ^Listen 80
+ line: Listen 8080
+
+- name: bind https to port 8443
+ lineinfile:
+ dest: /etc/httpd/conf.d/ssl.conf
+ regexp: ^Listen 443 https
+ line: Listen 8443 https
+
+- name: allow traffic at 8080 for apache
+ tags: firewall
+ firewalld:
+ port: 8080/tcp
+ zone: "{{ item }}"
+ state: enabled
+ permanent: true
+ with_items:
+ - internal
+ - public
+
+- name: allow traffic at 8443 for apache
+ tags: firewall
+ firewalld:
+ port: 8443/tcp
+ zone: "{{ item }}"
+ state: enabled
+ permanent: true
+ with_items:
+ - internal
+ - public
+
+- name: Download ipxe script
+ shell: curl -k -L $(oc -n {{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }} get InfraEnv {{ hypershift.hcp.hosted_cluster_name }} -ojsonpath="{.status.bootArtifacts.ipxeScript}")
+ register: ipxe_script
+
+- name: Get URL for rootfs
+ set_fact:
+ rootfs_url: "{{ ipxe_script.stdout_lines[2].split(' ')[3].split('url=')[1] }}"
+
+- name: Download rootfs.img
+ get_url:
+ url: "{{ rootfs_url }}"
+ dest: /var/www/html/rootfs.img
+ validate_certs: false
+
+- name: restart httpd
+ service:
+ name: httpd.service
+ state: started
+ enabled: true
+
+- name: Restart firewalld.service
+ service:
+ name: firewalld.service
+ state: restarted
diff --git a/roles/get_ocp/tasks/main.yaml b/roles/get_ocp/tasks/main.yaml
new file mode 100644
index 00000000..3c8f2ad4
--- /dev/null
+++ b/roles/get_ocp/tasks/main.yaml
@@ -0,0 +1,207 @@
+---
+- name: Delete ignition folder for idempotency
+ tags: get_ocp
+ file:
+ path: /var/www/html/ignition
+ state: absent
+
+- name: Create directory bin for mirrors
+ tags: get_ocp
+ become: true
+ file:
+ path: /var/www/html/bin
+ state: directory
+ mode: "0755"
+ owner: "root"
+ group: "root"
+
+- name: Delete OCP download directory for idempotency, because ignition files deprecate after 24 hours.
+ tags: get_ocp
+ become: true
+ file:
+ path: /root/ocpinst
+ state: absent
+
+- name: Create OCP download directory
+ tags: get_ocp
+ file:
+ path: /root/ocpinst
+ state: directory
+
+- name: Get Red Hat CoreOS rootfs file if it's not there already.
+ tags: get_ocp
+ get_url:
+ url: "{{ rhcos_download_url }}{{ rhcos_live_rootfs }}"
+ dest: "/var/www/html/bin/{{ rhcos_live_rootfs }}"
+ mode: "0644"
+
+- name: Unzip OCP client and installer
+ tags: get_ocp
+ ansible.builtin.unarchive:
+ src: "{{ item }}"
+ dest: /root/ocpinst/
+ remote_src: yes
+ loop:
+ - "{{ ocp_download_url }}{{ ocp_client_tgz }}"
+ - "{{ ocp_download_url }}{{ ocp_install_tgz }}"
+
+- name: Copy kubectl, oc, and openshift-install binaries to /usr/sbin
+  tags: get_ocp
+  become: true
+  ansible.builtin.copy:
+    src: /root/ocpinst/{{ item }}
+    dest: /usr/sbin/{{ item }}
+    owner: root
+    group: root
+    mode: "0755"
+    remote_src: yes
+  loop:
+    - kubectl
+    - oc
+    - openshift-install
+
+- name: Use template file to create install-config and backup.
+ tags: get_ocp
+ vars:
+ use_proxy: "{{ 'True' if (proxy_env.http_proxy is defined or proxy_env.https_proxy is defined or proxy_env.no_proxy is defined) else 'False' }}"
+ template:
+ src: install-config.yaml.j2
+ dest: "{{ item }}"
+ force: yes
+ loop:
+ - /root/ocpinst/install-config.yaml
+ - /root/ocpinst/install-config-backup.yaml
+
+- name: Capture OCP public key
+ tags: get_ocp
+ command: cat /root/.ssh/id_rsa.pub
+ register: ocp_pub_key
+
+- name: Place SSH key in install-config
+ tags: get_ocp
+ lineinfile:
+ line: "sshKey: '{{ ocp_pub_key.stdout }}'"
+ path: "{{ item }}"
+ loop:
+ - /root/ocpinst/install-config.yaml
+ - /root/ocpinst/install-config-backup.yaml
+
+- name: Create manifests
+ tags: get_ocp
+ command: /root/ocpinst/openshift-install create manifests --dir=/root/ocpinst/
+ become: true
+
+- name: Set masters schedulable parameter to false
+ tags: get_ocp
+ become: true
+ replace:
+ path: /root/ocpinst/manifests/cluster-scheduler-02-config.yml
+ regexp: ": true"
+ replace: ": false"
+
+- name: Set permissions for ocpinst directory contents to root
+ tags: get_ocp
+ become: true
+ command: chmod 0755 /root/ocpinst/{{item}}
+ loop:
+ - manifests
+ - openshift
+ - .openshift_install.log
+ - .openshift_install_state.json
+
+- name: Set ownership of ocpinst directory contents to root
+ tags: get_ocp
+ become: true
+ command: chown root:root /root/ocpinst/{{item}}
+ loop:
+ - manifests
+ - openshift
+ - .openshift_install.log
+ - .openshift_install_state.json
+
+- name: Create ignition files
+ tags: get_ocp
+ become: true
+ command: /root/ocpinst/openshift-install create ignition-configs --dir=/root/ocpinst/
+
+- name: Set ownership to root and permissions of ignitions and related files.
+ tags: get_ocp
+ file:
+ state: "{{ item.state }}"
+ path: /root/ocpinst/{{ item.path }}
+ owner: root
+ group: root
+ mode: "{{ item.mode }}"
+ loop:
+ - { state: file, path: bootstrap.ign, mode: "755" }
+ - { state: file, path: master.ign, mode: "755" }
+ - { state: file, path: worker.ign, mode: "755" }
+ - { state: directory, path: auth, mode: "755" }
+ - { state: file, path: metadata.json, mode: "755" }
+ - { state: file, path: auth/kubeconfig, mode: "644" }
+ - { state: file, path: auth/kubeadmin-password, mode: "644" }
+
+- name: Create directory in admin user's home for default kubeconfig.
+ tags: get_ocp, config
+ become: false
+ file:
+ state: directory
+ path: ~/.kube
+
+- name: Create directory in root's home for default kubeconfig.
+ tags: get_ocp, config
+ become: true
+ file:
+ state: directory
+ path: ~/.kube
+
+- name: Make kubeconfig admin user's default (for non-root user).
+ tags: get_ocp, config
+ copy:
+ src: /root/ocpinst/auth/kubeconfig
+ dest: /home/{{ env.bastion.access.user }}/.kube/config
+ owner: "{{ env.bastion.access.user }}"
+ group: "{{ env.bastion.access.user }}"
+ remote_src: yes
+ when: env.bastion.access.user != "root"
+
+- name: Make kubeconfig admin user's default (for root user).
+ tags: get_ocp, config
+ copy:
+ src: /root/ocpinst/auth/kubeconfig
+ dest: /{{ env.bastion.access.user }}/.kube/config
+ owner: "{{ env.bastion.access.user }}"
+ group: "{{ env.bastion.access.user }}"
+ remote_src: yes
+ when: env.bastion.access.user == "root"
+
+- name: Make kubeconfig root user's default.
+ tags: get_ocp, config
+ copy:
+ src: /root/ocpinst/auth/kubeconfig
+ dest: /root/.kube/config
+ owner: root
+ group: root
+ remote_src: yes
+
+- name: Create ignition directory in HTTP-accessible directory.
+ tags: get_ocp
+ become: true
+ file:
+ path: /var/www/html/ignition
+ state: directory
+
+- name: Copy ignition files to HTTP-accessible directory.
+ tags: get_ocp
+ become: true
+ copy:
+ src: /root/ocpinst/{{ item }}.ign
+ dest: /var/www/html/ignition
+ remote_src: yes
+ mode: "775"
+ group: root
+ owner: root
+ loop:
+ - bootstrap
+ - master
+ - worker
diff --git a/roles/get_ocp/templates/install-config.yaml.j2 b/roles/get_ocp/templates/install-config.yaml.j2
new file mode 100644
index 00000000..f36de9c6
--- /dev/null
+++ b/roles/get_ocp/templates/install-config.yaml.j2
@@ -0,0 +1,42 @@
+apiVersion: {{ env.install_config.api_version }}
+baseDomain: {{ env.cluster.networking.base_domain }}
+{% if use_proxy | bool %}{# use_proxy arrives as the string 'True'/'False'; '== true' never matched #}
+{{ 'proxy: ' }}
+{{ ' httpProxy: ' + env.proxy.http }}
+{{ ' httpsProxy: ' + env.proxy.https }}
+{{ ' noProxy: ' + env.proxy.no + ',' + '127.0.0.1,' + 'localhost,' +
+env.bastion.networking.ip + ',' +
+env.cluster.nodes.bootstrap.ip + ',' +
+env.cluster.nodes.control.ip|join(',') + ',' +
+env.cluster.nodes.compute.ip|join(',') + ',' +
+(env.cluster.nodes.infra.ip|join(',') + ',' if env.cluster.nodes.infra.ip is defined else '') +
+'etcd-0.' + env.cluster.networking.metadata_name + '.' + env.cluster.networking.base_domain + ',' +
+('etcd-1.' + env.cluster.networking.metadata_name + '.' + env.cluster.networking.base_domain + ',' if env.cluster.nodes.control.ip[1] is defined else '' ) +
+('etcd-2.' + env.cluster.networking.metadata_name + '.' + env.cluster.networking.base_domain + ',' if env.cluster.nodes.control.ip[2] is defined else '' ) +
+'api-int.' + env.cluster.networking.metadata_name + '.' + env.cluster.networking.base_domain + ',' +
+'api.' + env.cluster.networking.metadata_name + '.' + env.cluster.networking.base_domain + ',' +
+'.' + env.cluster.networking.metadata_name + '.' + env.cluster.networking.base_domain }}
+{% endif %}
+compute:
+- hyperthreading: {{ env.install_config.compute.hyperthreading }}
+ name: worker
+ replicas: {{(env.cluster.nodes.compute.ip | length)}}
+ architecture: {{ env.install_config.compute.architecture }}
+controlPlane:
+ hyperthreading: {{ env.install_config.control.hyperthreading }}
+ name: master
+ replicas: {{(env.cluster.nodes.control.ip | length)}}
+ architecture: {{ env.install_config.control.architecture }}
+metadata:
+ name: {{ env.cluster.networking.metadata_name }}
+networking:
+ clusterNetwork:
+ - cidr: {{ env.install_config.cluster_network.cidr }}
+ hostPrefix: {{ env.install_config.cluster_network.host_prefix }}
+ networkType: {{ env.install_config.cluster_network.type }}
+ serviceNetwork:
+ - {{ env.install_config.service_network }}
+platform:
+ none: {}
+fips: {{ env.install_config.fips }}
+pullSecret: '{{ env.redhat.pull_secret }}'
diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml
new file mode 100644
index 00000000..972c79d0
--- /dev/null
+++ b/roles/haproxy/tasks/main.yaml
@@ -0,0 +1,97 @@
+---
+
+- name: Change permissive domain for haproxy
+ tags: selinux,haproxy
+ selinux_permissive:
+ name: haproxy_t
+ permissive: true
+
+- name: Use template to create haproxy config file
+ tags: haproxy
+ template:
+ src: haproxy.cfg.j2
+ dest: /etc/haproxy/haproxy.cfg
+ backup: yes
+ force: yes
+
+- name: Add control node information to 6443 section in haproxy config
+ tags: haproxy
+ lineinfile:
+ line: " server {{ env.cluster.nodes.control.hostname[i] }} {{ env.cluster.nodes.control.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}:6443 check inter 1s"
+ path: /etc/haproxy/haproxy.cfg
+ insertafter: "6443 section"
+ with_sequence: start=0 end={{(env.cluster.nodes.control.hostname | length) - 1}} stride=1
+ loop_control:
+ extended: yes
+ index_var: i
+
+- name: Add control node information to 22623 section in haproxy config
+ tags: haproxy
+ lineinfile:
+ line: " server {{ env.cluster.nodes.control.hostname[i] }} {{ env.cluster.nodes.control.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}:22623 check inter 1s"
+ path: /etc/haproxy/haproxy.cfg
+ insertafter: "22623 section"
+ with_sequence: start=0 end={{(env.cluster.nodes.control.hostname | length) - 1}} stride=1
+ loop_control:
+ extended: yes
+ index_var: i
+
+- name: Add compute node information to 443 section in haproxy config
+ tags: haproxy
+ lineinfile:
+ line: " server {{ env.cluster.nodes.compute.hostname[i] }} {{ env.cluster.nodes.compute.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}:443 check inter 1s"
+ path: /etc/haproxy/haproxy.cfg
+ insertafter: "443 section"
+ with_sequence: start=0 end={{(env.cluster.nodes.compute.hostname | length) - 1}} stride=1
+ loop_control:
+ extended: yes
+ index_var: i
+
+- name: Add infrastructure node information to 443 section in haproxy config
+ tags: haproxy
+ lineinfile:
+ line: " server {{ env.cluster.nodes.infra.hostname[i] }} {{ env.cluster.nodes.infra.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}:443 check inter 1s"
+ path: /etc/haproxy/haproxy.cfg
+ insertafter: "443 section"
+ with_sequence: start=0 end={{(env.cluster.nodes.infra.hostname | length) - 1}} stride=1
+ loop_control:
+ extended: yes
+ index_var: i
+ when: env.cluster.nodes.infra.hostname is defined
+
+- name: Add compute node information to 80 section in haproxy config
+ tags: haproxy
+ lineinfile:
+ line: " server {{ env.cluster.nodes.compute.hostname[i] }} {{ env.cluster.nodes.compute.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}:80 check inter 1s"
+ path: /etc/haproxy/haproxy.cfg
+ with_sequence: start=0 end={{(env.cluster.nodes.compute.hostname | length) - 1}} stride=1
+ loop_control:
+ extended: yes
+ index_var: i
+
+- name: Add infrastructure node information to 80 section in haproxy config
+ tags: haproxy
+ lineinfile:
+ line: " server {{ env.cluster.nodes.infra.hostname[i] }} {{ env.cluster.nodes.infra.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}:80 check inter 1s"
+ path: /etc/haproxy/haproxy.cfg
+ with_sequence: start=0 end={{(env.cluster.nodes.infra.hostname | length) - 1}} stride=1
+ loop_control:
+ extended: yes
+ index_var: i
+ when: env.cluster.nodes.infra.hostname is defined
+
+- name: Set haproxy boolean to enable connections
+ tags: haproxy
+ command: setsebool -P haproxy_connect_any 1
+
+- name: Enable haproxy
+ tags: haproxy
+ ansible.builtin.systemd:
+ name: haproxy
+ enabled: yes
+
+- name: Restart haproxy
+ tags: haproxy
+ systemd:
+ state: restarted
+ name: haproxy
diff --git a/roles/haproxy/templates/haproxy.cfg.j2 b/roles/haproxy/templates/haproxy.cfg.j2
new file mode 100644
index 00000000..a7b82fc0
--- /dev/null
+++ b/roles/haproxy/templates/haproxy.cfg.j2
@@ -0,0 +1,52 @@
+global
+ log 127.0.0.1 local2
+ pidfile /var/run/haproxy.pid
+ maxconn 4000
+ daemon
+defaults
+ mode http
+ log global
+ option dontlognull
+ option http-server-close
+ option redispatch
+ retries 3
+ timeout http-request 10s
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 1m
+ timeout server 1m
+ timeout http-keep-alive 10s
+ timeout check 10s
+ maxconn 3000
+frontend stats
+ bind *:1936
+ mode http
+ log global
+ maxconn 10
+ stats enable
+ stats hide-version
+ stats refresh 30s
+ stats show-node
+ stats show-desc Stats for {{env.cluster.networking.metadata_name}} cluster
+ stats auth admin:{{env.cluster.networking.metadata_name}}
+ stats uri /stats
+listen api-server-6443
+ bind *:6443
+ mode tcp
+ #6443 section
+ server {{ env.cluster.nodes.bootstrap.hostname }} {{ env.cluster.nodes.bootstrap.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}:6443 check inter 1s backup
+listen machine-config-server-22623
+ bind *:22623
+ mode tcp
+ #22623 section
+ server {{ env.cluster.nodes.bootstrap.hostname }} {{ env.cluster.nodes.bootstrap.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}:22623 check inter 1s backup
+listen ingress-router-443
+ bind *:443
+ mode tcp
+ balance source
+ #443 section
+listen ingress-router-80
+ bind *:80
+ mode tcp
+ balance source
+ #80 section
diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml
new file mode 100644
index 00000000..188bf31f
--- /dev/null
+++ b/roles/httpd/tasks/main.yaml
@@ -0,0 +1,28 @@
+---
+
+- name: Change permissive domain for httpd
+ tags: httpd
+ selinux_permissive:
+ name: httpd_t
+ permissive: true
+
+- name: Allow httpd to listen on tcp port 4443
+ tags: httpd
+ community.general.seport:
+ ports: 4443
+ proto: tcp
+ setype: http_port_t
+ state: present
+ reload: yes
+
+- name: enable httpd
+ tags: httpd
+ systemd:
+ name: httpd
+ enabled: yes
+
+- name: restart httpd
+ tags: httpd
+ service:
+ name: httpd
+ state: restarted
diff --git a/roles/install_mce_operator/tasks/main.yaml b/roles/install_mce_operator/tasks/main.yaml
new file mode 100644
index 00000000..9d227d0c
--- /dev/null
+++ b/roles/install_mce_operator/tasks/main.yaml
@@ -0,0 +1,80 @@
+---
+- name: Check if multicluster-engine Namespace exists
+ k8s_info:
+ api_version: v1
+ kind: Namespace
+ name: "{{ hypershift.asc.mce_namespace }}"
+ register: namespace_check
+ ignore_errors: yes
+
+- name: Create multicluster-engine Namespace
+ k8s:
+ api_version: v1
+ kind: Namespace
+ name: "{{ hypershift.asc.mce_namespace }}"
+ state: present
+ when: namespace_check.resources | length == 0
+
+- name: Create OperatorGroup.yaml
+ template:
+ src: OperatorGroup.yaml.j2
+ dest: /root/ansible_workdir/OperatorGroup.yaml
+
+- name: Deploy OperatorGroup
+ command: oc apply -f /root/ansible_workdir/OperatorGroup.yaml
+
+- name: Create Subscription.yaml
+ template:
+ src: Subscription.yaml.j2
+ dest: /root/ansible_workdir/Subscription.yaml
+
+- name: Deploy Subscription for MCE
+ command: oc apply -f /root/ansible_workdir/Subscription.yaml
+
+- name: Wait for MCE deployment to be created
+ shell: oc get all -n {{ hypershift.asc.mce_namespace }} | grep -i deployment | grep -i multicluster-engine | wc -l
+ register: mce_deploy
+ until: mce_deploy.stdout == '1'
+ retries: 20
+ delay: 5
+
+- name: Wait for MCE deployment to be available
+ shell: oc get deployment multicluster-engine-operator -n {{ hypershift.asc.mce_namespace }} -o=jsonpath='{.status.replicas}{" "}{.status.availableReplicas}'
+ register: mce_pod_status
+ until: mce_pod_status.stdout.split(' ')[0] == mce_pod_status.stdout.split(' ')[1]
+ retries: 20
+ delay: 5
+
+- name: Create MultiClusterEngine.yaml
+ template:
+ src: MultiClusterEngine.yaml.j2
+ dest: /root/ansible_workdir/MultiClusterEngine.yaml
+
+- name: Deploy MCE Instance
+ command: oc apply -f /root/ansible_workdir/MultiClusterEngine.yaml
+
+- name: Wait for MCE to be Available
+ shell: oc get mce --no-headers | awk '{print $2}'
+ register: mce_status
+ until: mce_status.stdout == "Available"
+ retries: 40
+ delay: 10
+
+- name: Enable hypershift-preview component in MCE
+ command: oc patch mce {{ hypershift.mce.instance_name }} -p '{"spec":{"overrides":{"components":[{"name":"hypershift-preview","enabled":true}]}}}' --type merge
+
+- name: Create ClusterImageSet.yaml
+ template:
+ src: ClusterImageSet.yaml.j2
+ dest: /root/ansible_workdir/ClusterImageSet.yaml
+
+- name: Deploy ClusterImageSet
+ command: oc apply -f /root/ansible_workdir/ClusterImageSet.yaml
+
+- name: Create Provisioning.yaml
+ template:
+ src: Provisioning.yaml.j2
+ dest: /root/ansible_workdir/Provisioning.yaml
+
+- name: Deploy Provisioning
+ command: oc apply -f /root/ansible_workdir/Provisioning.yaml
diff --git a/roles/install_mce_operator/templates/ClusterImageSet.yaml.j2 b/roles/install_mce_operator/templates/ClusterImageSet.yaml.j2
new file mode 100644
index 00000000..1157edcb
--- /dev/null
+++ b/roles/install_mce_operator/templates/ClusterImageSet.yaml.j2
@@ -0,0 +1,11 @@
+apiVersion: hive.openshift.io/v1
+kind: ClusterImageSet
+metadata:
+ name: img{{ hypershift.hcp.hosted_cluster_name }}-appsub
+spec:
+ {% set release_img = lookup('env', 'HCP_RELEASE_IMAGE') %}
+ {% if release_img is defined and release_img != '' %}
+ releaseImage: {{ release_img }}
+ {% else %}
+ releaseImage: quay.io/openshift-release-dev/ocp-release:{{ hypershift.hcp.ocp_release }}
+ {% endif %}
diff --git a/roles/install_mce_operator/templates/MultiClusterEngine.yaml.j2 b/roles/install_mce_operator/templates/MultiClusterEngine.yaml.j2
new file mode 100644
index 00000000..0e38b792
--- /dev/null
+++ b/roles/install_mce_operator/templates/MultiClusterEngine.yaml.j2
@@ -0,0 +1,6 @@
+apiVersion: multicluster.openshift.io/v1
+kind: MultiClusterEngine
+metadata:
+ name: {{ hypershift.mce.instance_name }}
+ namespace: "{{ hypershift.asc.mce_namespace }}"
+spec: {}
diff --git a/roles/install_mce_operator/templates/OperatorGroup.yaml.j2 b/roles/install_mce_operator/templates/OperatorGroup.yaml.j2
new file mode 100644
index 00000000..b4fae7d8
--- /dev/null
+++ b/roles/install_mce_operator/templates/OperatorGroup.yaml.j2
@@ -0,0 +1,8 @@
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+ name: multicluster-engine
+ namespace: "{{ hypershift.asc.mce_namespace }}"
+spec:
+ targetNamespaces:
+ - "{{ hypershift.asc.mce_namespace }}"
diff --git a/roles/install_mce_operator/templates/Provisioning.yaml.j2 b/roles/install_mce_operator/templates/Provisioning.yaml.j2
new file mode 100644
index 00000000..d23ddc41
--- /dev/null
+++ b/roles/install_mce_operator/templates/Provisioning.yaml.j2
@@ -0,0 +1,7 @@
+apiVersion: metal3.io/v1alpha1
+kind: Provisioning
+metadata:
+ name: provisioning-configuration
+spec:
+ provisioningNetwork: Disabled
+ watchAllNamespaces: true
diff --git a/roles/install_mce_operator/templates/Subscription.yaml.j2 b/roles/install_mce_operator/templates/Subscription.yaml.j2
new file mode 100644
index 00000000..e1e1250f
--- /dev/null
+++ b/roles/install_mce_operator/templates/Subscription.yaml.j2
@@ -0,0 +1,11 @@
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ name: multicluster-engine
+ namespace: "{{ hypershift.asc.mce_namespace }}"
+spec:
+ sourceNamespace: openshift-marketplace
+ source: redhat-operators
+ channel: stable-{{ hypershift.mce.version }}
+ installPlanApproval: Automatic
+ name: multicluster-engine
diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml
new file mode 100644
index 00000000..f6c9ad0d
--- /dev/null
+++ b/roles/install_packages/tasks/main.yaml
@@ -0,0 +1,24 @@
+---
+- name: Print the list of packages to be installed and updated.
+ tags: install_packages
+ debug:
+ var: packages
+
+- name: Installing required packages for Linux machines.
+ tags: install_packages
+ become: true
+ ansible.builtin.package:
+ name: "{{ item }}"
+ state: latest
+ update_cache: yes
+ loop: "{{ packages }}"
+ when: ansible_os_family != 'Darwin'
+
+- name: Installing required packages for Mac machines.
+ tags: install_packages
+ become: false
+ community.general.homebrew:
+ name: "{{ item }}"
+ state: latest
+ loop: "{{ packages }}"
+ when: ansible_os_family == 'Darwin'
diff --git a/roles/install_prereqs_bastion_hypershift/tasks/main.yaml b/roles/install_prereqs_bastion_hypershift/tasks/main.yaml
new file mode 100644
index 00000000..a2e67a6a
--- /dev/null
+++ b/roles/install_prereqs_bastion_hypershift/tasks/main.yaml
@@ -0,0 +1,128 @@
+---
+
+- name: Install ansible-kubernetes module
+ pip:
+ name:
+ - kubernetes
+ - openshift
+ extra_args: --ignore-installed PyYAML
+
+- name: Install Packages on bastion
+ package:
+ name: "{{ env.pkgs.bastion }}"
+ state: present
+
+# Creating one directory for Storing Files
+- name: Create Work Directory
+ file:
+ path: /root/ansible_workdir
+ state: directory
+
+- name: Copy pull secret to ansible_workdir
+ copy:
+ content: "{{ hypershift.hcp.pull_secret }}"
+ dest: /root/ansible_workdir/auth_file
+
+- name: create /etc/haproxy
+ file:
+ path: /etc/haproxy
+ state: directory
+
+- name: create /etc/haproxy/haproxy.cfg
+ template:
+ src: haproxy.cfg.j2
+ dest: /etc/haproxy/haproxy.cfg
+
+- name: Get the number of Management Cluster Worker Nodes
+ shell: oc get no -o wide --no-headers|grep -i worker| awk '{print $6}' | wc -l
+ register: mgmt_workers_count
+ changed_when: false
+
+- name: Get the IPs of Management Cluster Workers
+ shell: oc get no -o wide --no-headers|grep -i worker| awk '{print $6}'
+ register: mgmt_workers
+ changed_when: false
+
+- name: Add Management Cluster Worker IPs to Haproxy
+ lineinfile:
+ path: /etc/haproxy/haproxy.cfg
+ line: " server worker-{{item}} {{ mgmt_workers.stdout_lines[item]}}"
+ loop: "{{ range(mgmt_workers_count.stdout|int) | list }}"
+
+- name: allow http traffic
+ firewalld:
+ service: http
+ permanent: yes
+ zone: "{{ item }}"
+ state: enabled
+ with_items:
+ - internal
+ - public
+
+- name: allow https traffic
+ firewalld:
+ service: https
+ permanent: yes
+ zone: "{{ item }}"
+ state: enabled
+ with_items:
+ - internal
+ - public
+
+- name: allow traffic at port 443
+ firewalld:
+ port: 443/tcp
+ permanent: yes
+ zone: "{{ item }}"
+ state: enabled
+ with_items:
+ - internal
+ - public
+
+- name: allow traffic at port 80
+ firewalld:
+ port: 80/tcp
+ permanent: yes
+ zone: "{{ item }}"
+ state: enabled
+ with_items:
+ - internal
+ - public
+
+- name: allow traffic at port 6443
+ firewalld:
+ port: 6443/tcp
+ permanent: yes
+ zone: "{{ item }}"
+ state: enabled
+ with_items:
+ - internal
+ - public
+
+- name: allow traffic at ports 30000-33000
+ firewalld:
+ port: 30000-33000/tcp
+ permanent: yes
+ zone: "{{ item }}"
+ state: enabled
+ with_items:
+ - internal
+ - public
+
+- name: turn on haproxy_connect_any
+ ansible.posix.seboolean:
+ name: haproxy_connect_any
+ persistent: true
+ state: true
+
+- name: restart haproxy
+ service:
+ name: haproxy.service
+ state: restarted
+ enabled: true
+
+- name: Restart firewalld.service
+ service:
+ name: firewalld.service
+ state: restarted
+ enabled: true
diff --git a/roles/install_prereqs_bastion_hypershift/templates/haproxy.cfg.j2 b/roles/install_prereqs_bastion_hypershift/templates/haproxy.cfg.j2
new file mode 100644
index 00000000..0dff411b
--- /dev/null
+++ b/roles/install_prereqs_bastion_hypershift/templates/haproxy.cfg.j2
@@ -0,0 +1,39 @@
+global
+ log 127.0.0.1 local2
+ chroot /var/lib/haproxy
+ pidfile /var/run/haproxy.pid
+ maxconn 4000
+ user haproxy
+ group haproxy
+ daemon
+ stats socket /var/lib/haproxy/stats
+ ssl-default-bind-ciphers PROFILE=SYSTEM
+ ssl-default-server-ciphers PROFILE=SYSTEM
+
+defaults
+ mode http
+ log global
+ option httplog
+ option dontlognull
+ option http-server-close
+ option forwardfor except 127.0.0.0/8
+ option redispatch
+ retries 3
+ timeout http-request 10s
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 30m
+ timeout server 30m
+ timeout http-keep-alive 10s
+ timeout check 10s
+ maxconn 3000
+
+frontend {{ hypershift.hcp.hosted_cluster_name }}-api-server
+ mode tcp
+ option tcplog
+ bind {{hypershift.bastion_hypershift}}:30000-33000
+ default_backend {{hypershift.hcp.hosted_cluster_name}}-api-server
+
+backend {{ hypershift.hcp.hosted_cluster_name }}-api-server
+ mode tcp
+ balance source
diff --git a/roles/install_prerequisites_host_hypershift/tasks/main.yaml b/roles/install_prerequisites_host_hypershift/tasks/main.yaml
new file mode 100644
index 00000000..22c330ca
--- /dev/null
+++ b/roles/install_prerequisites_host_hypershift/tasks/main.yaml
@@ -0,0 +1,66 @@
+---
+- name: Check if SSH key exists
+ stat:
+ path: "~/.ssh/{{ env.ansible_key_name }}.pub"
+ register: ssh_key
+
+- name: Generate an OpenSSH keypair with the default values (4096 bits, RSA)
+  community.crypto.openssh_keypair:
+    path: "~/.ssh/{{ env.ansible_key_name }}"
+    passphrase: ""
+    comment: "Ansible-OpenShift-Provisioning SSH key"
+    regenerate: full_idempotence
+  register: ssh_key_creation
+  when: not ssh_key.stat.exists
+
+- name: Create Work Directory
+ file:
+ path: /root/ansible_workdir
+ state: directory
+
+- name: Install Packages on kvm_host
+ yum:
+ name:
+ - "{{ item }}"
+ state: present
+ loop: "{{ env.pkgs.kvm }}"
+ when: host != 'bastion_hypershift'
+
+- name: Install Packages for Hypershift
+ package:
+ name:
+ - "{{ item }}"
+ state: present
+ loop: "{{ env.pkgs.hypershift }}"
+
+- name: Check if OC installed
+ command: oc
+ register: oc_installed
+ ignore_errors: yes
+
+- name: Download OC Client
+ get_url:
+ url: "{{ hypershift.oc_url }}"
+ dest: /root/ansible_workdir/
+ when: oc_installed.rc != 0
+
+- name: tar oc
+ command: tar -vxzf /root/ansible_workdir/{{ hypershift.oc_url.split('/')[-1] }}
+ when: oc_installed.rc != 0
+
+- name: Copy oc to /usr/local/bin/
+ shell: cp oc /usr/local/bin/oc
+ when: oc_installed.rc != 0
+
+- name: Copy oc to /usr/bin/
+ shell: cp oc /usr/bin/oc
+ when: oc_installed.rc != 0
+
+- name: Add Management Cluster Nameserver to /etc/resolv.conf
+ lineinfile:
+ dest: /etc/resolv.conf
+ insertbefore: BOF
+ line: nameserver {{ hypershift.mgmt_cluster_nameserver }}
+
+- name: Login to Management Cluster
+ command: oc login {{ api_server }} -u {{ user_name }} -p {{ password }} --insecure-skip-tls-verify=true
diff --git a/roles/kvm_host/tasks/main.yml b/roles/kvm_host/tasks/main.yml
deleted file mode 100644
index f35d18d5..00000000
--- a/roles/kvm_host/tasks/main.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-
-- hosts: kvm_host
- become: true
- tasks:
-
- - name: update repository index
- yum:
- update_cache: yes
-
- - name: Ensure pre-requisite packages are installed
- yum:
- name:
- - libvirt
- - libvirt-devel
- - libvirt-daemon-kvm
- - qemu-kvm
- - virt-manager
- - libvirt-daemon-config-network
- - libvirt-client
- - qemu-img
- state: latest
- update_cache: yes
-
- - name: Ensure libvirtd is started
- script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/start_libvirtd.sh
-
- - name: create macvtap xml file
- file:
- path: "~/files/macvtap.xml"
- state: touch
-
- - name: Fill contents of macvtap xml file
- copy:
- dest: "~/files/macvtap.xml"
- content: |
-
- macvtap-net
-
-
-
-
- - name: Set up macvtap bridge
- script: ~/files/shell_scripts/macvtap-net.sh
diff --git a/roles/macvtap/tasks/main.yaml b/roles/macvtap/tasks/main.yaml
new file mode 100644
index 00000000..98f1cac9
--- /dev/null
+++ b/roles/macvtap/tasks/main.yaml
@@ -0,0 +1,20 @@
+---
+
+- name: Set up macvtap bridge configuration xml from template to KVM host
+ tags: macvtap
+ community.libvirt.virt_net:
+ command: define
+ name: "{{ env.bridge_name }}"
+ xml: "{{ lookup ('template', 'macvtap.xml.j2') }}"
+
+- name: Start macvtap bridge
+ tags: macvtap
+ community.libvirt.virt_net:
+ command: start
+ name: "{{ env.bridge_name }}"
+
+- name: Set macvtap bridge to autostart
+ tags: macvtap
+ community.libvirt.virt_net:
+ autostart: yes
+ name: "{{ env.bridge_name }}"
diff --git a/roles/macvtap/templates/macvtap.xml.j2 b/roles/macvtap/templates/macvtap.xml.j2
new file mode 100644
index 00000000..a4857e24
--- /dev/null
+++ b/roles/macvtap/templates/macvtap.xml.j2
@@ -0,0 +1,6 @@
+
+ {{ env.bridge_name }}
+
+
+
+
diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml
new file mode 100644
index 00000000..a6baea35
--- /dev/null
+++ b/roles/prep_kvm_guests/tasks/main.yaml
@@ -0,0 +1,16 @@
+---
+
+# - name: Get Red Hat CoreOS kernel and initramfs from URL to
+# tags: prep_kvm_guests
+# vars:
+# major_minor_patch: "{{ env.openshift.version }}"
+# major_minor: "{{ ( env.openshift.version | string | split('.') )[:-1] | join('.') }}"
+# major: "{{ env.openshift.version | string | split('.') | first }}"
+# arch: "{{ env.install_config.control.architecture }}"
+# get_url:
+# url: "{{ item.url }}"
+# dest: "{{ item.dest }}"
+# mode: '0755'
+# loop:
+# - { url: "https://mirror.openshift.com/pub/openshift-v{{ major }}/{{ arch }}/dependencies/rhcos/{{ major_minor }}/{{ major_minor_patch }}/rhcos-live-kernel-{{ arch }}", dest: "{{ storage.pool_path }}/rhcos-live-kernel-{{ major_minor_patch }}-{{ arch }}" }
+# - { url: "https://mirror.openshift.com/pub/openshift-v{{ major }}/{{ arch }}/dependencies/rhcos/{{ major_minor }}/{{ major_minor_patch }}/rhcos-live-initramfs.{{ arch }}.img", dest: "{{ storage.pool_path }}/rhcos-live-initramfs-{{ major_minor_patch }}-{{ arch }}.img" }
diff --git a/roles/print_node_status/tasks/main.yaml b/roles/print_node_status/tasks/main.yaml
new file mode 100644
index 00000000..0779649a
--- /dev/null
+++ b/roles/print_node_status/tasks/main.yaml
@@ -0,0 +1,9 @@
+---
+# Paranoia check. This task should be executed only from one host
+- name: Check parameters
+ ansible.builtin.fail:
+ msg: "Too many hosts defined in ansible play: {{ ansible_play_hosts }}"
+ when: ((ansible_play_hosts | length) > 1)
+
+- name: Get and print nodes status
+ ansible.builtin.include_tasks: "{{ role_path }}/../common/tasks/print_ocp_node_status.yaml"
diff --git a/roles/robertdebock.epel/.ansible-lint b/roles/robertdebock.epel/.ansible-lint
new file mode 100644
index 00000000..cbd9e6cf
--- /dev/null
+++ b/roles/robertdebock.epel/.ansible-lint
@@ -0,0 +1,12 @@
+---
+#
+# Ansible managed
+#
+exclude_paths:
+ - ./meta/preferences.yml
+ - ./molecule/default/prepare.yml
+ - ./molecule/default/converge.yml
+ - ./molecule/default/verify.yml
+ - ./molecule/default/collections.yml
+ - ./.tox
+ - ./.cache
diff --git a/roles/robertdebock.epel/.github/FUNDING.yml b/roles/robertdebock.epel/.github/FUNDING.yml
new file mode 100644
index 00000000..67320f05
--- /dev/null
+++ b/roles/robertdebock.epel/.github/FUNDING.yml
@@ -0,0 +1,2 @@
+---
+github: robertdebock
diff --git a/roles/robertdebock.epel/.github/ISSUE_TEMPLATE/bug_report.md b/roles/robertdebock.epel/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 00000000..4bb9d98c
--- /dev/null
+++ b/roles/robertdebock.epel/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,32 @@
+---
+name: Bug report
+about: Create a report to help me improve
+
+---
+
+## Describe the bug
+
+A clear and concise description of what the bug is.
+
+## Playbook
+
+Please paste the playbook you are using. (Consider `requirements.yml` and
+optionally the command you've invoked.)
+
+
+```yaml
+---
+YOUR PLAYBOOK HERE
+```
+
+## Output
+
+Show at least the error, possible related output, maybe just all the output.
+
+## Environment
+
+- Control node OS: [e.g. Debian 9] (`cat /etc/os-release`)
+- Control node Ansible version: [e.g. 2.9.1] (`ansible --version`)
+- Managed node OS: [e.g. CentOS 7] (`cat /etc/os-release`)
+
+Please consider [sponsoring me](https://github.com/sponsors/robertdebock).
diff --git a/roles/robertdebock.epel/.github/ISSUE_TEMPLATE/feature_request.md b/roles/robertdebock.epel/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000..55a93c40
--- /dev/null
+++ b/roles/robertdebock.epel/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,19 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+
+---
+
+## Proposed feature
+
+A clear and concise description of what you want to happen.
+
+## Rationale
+
+Why is this feature required?
+
+## Additional context
+
+Add any other context about the feature request here.
+
+Please consider [sponsoring me](https://github.com/sponsors/robertdebock).
diff --git a/roles/robertdebock.epel/.github/pull_request_template.md b/roles/robertdebock.epel/.github/pull_request_template.md
new file mode 100644
index 00000000..b1578c0c
--- /dev/null
+++ b/roles/robertdebock.epel/.github/pull_request_template.md
@@ -0,0 +1,11 @@
+---
+name: Pull request
+about: Describe the proposed change
+
+---
+
+**Describe the change**
+A clear and concise description of what the pull request is.
+
+**Testing**
+In case a feature was added, how were tests performed?
diff --git a/roles/robertdebock.epel/.github/settings.yml b/roles/robertdebock.epel/.github/settings.yml
new file mode 100644
index 00000000..aa2a8a36
--- /dev/null
+++ b/roles/robertdebock.epel/.github/settings.yml
@@ -0,0 +1,8 @@
+---
+#
+# Ansible managed
+#
+repository:
+ description: Install epel on your system.
+ homepage: https://robertdebock.nl/
+ topics: epel, repository, installer, packages, ansible, molecule, tox, playbook, hacktoberfest
diff --git a/roles/robertdebock.epel/.github/workflows/galaxy.yml b/roles/robertdebock.epel/.github/workflows/galaxy.yml
new file mode 100644
index 00000000..092e5449
--- /dev/null
+++ b/roles/robertdebock.epel/.github/workflows/galaxy.yml
@@ -0,0 +1,18 @@
+---
+#
+# Ansible managed
+#
+
+name: Release to Ansible Galaxy
+
+on:
+ release:
+ types: [created, edited, published, released]
+jobs:
+ release:
+ runs-on: ubuntu-20.04
+ steps:
+ - name: galaxy
+ uses: robertdebock/galaxy-action@1.2.0
+ with:
+ galaxy_api_key: ${{ secrets.galaxy_api_key }}
diff --git a/roles/robertdebock.epel/.github/workflows/molecule.yml b/roles/robertdebock.epel/.github/workflows/molecule.yml
new file mode 100644
index 00000000..e3087f9d
--- /dev/null
+++ b/roles/robertdebock.epel/.github/workflows/molecule.yml
@@ -0,0 +1,55 @@
+---
+#
+# Ansible managed
+#
+
+name: Ansible Molecule
+
+on:
+  push:
+    # Correct key is "tags-ignore"; "tags_ignore" is silently ignored by
+    # GitHub Actions, so tag pushes would still trigger this workflow.
+    tags-ignore:
+      - '*'
+  pull_request:
+  schedule:
+    - cron: '9 5 5 * *'
+
+jobs:
+ lint:
+ runs-on: ubuntu-20.04
+ steps:
+ - name: checkout
+ uses: actions/checkout@v3
+ with:
+ path: "${{ github.repository }}"
+ - name: molecule
+ uses: robertdebock/molecule-action@4.0.7
+ with:
+ command: lint
+ test:
+ needs:
+ - lint
+ runs-on: ubuntu-20.04
+ strategy:
+ fail-fast: false
+ matrix:
+ config:
+ - image: "amazonlinux"
+ tag: "latest"
+ - image: "enterpriselinux"
+ tag: "7"
+ - image: "enterpriselinux"
+ tag: "latest"
+ steps:
+      - name: checkout
+        # Pinned to v3 for consistency with the lint job above; checkout@v2
+        # runs on a deprecated Node.js runtime.
+        uses: actions/checkout@v3
+        with:
+          path: "${{ github.repository }}"
+ - name: disable apparmor for mysql
+ run: sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
+ - name: parse apparmor for mysql
+ run: sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
+ - name: molecule
+ uses: robertdebock/molecule-action@4.0.7
+ with:
+ image: ${{ matrix.config.image }}
+ tag: ${{ matrix.config.tag }}
diff --git a/roles/robertdebock.epel/.github/workflows/requirements2png.yml b/roles/robertdebock.epel/.github/workflows/requirements2png.yml
new file mode 100644
index 00000000..8386a0ac
--- /dev/null
+++ b/roles/robertdebock.epel/.github/workflows/requirements2png.yml
@@ -0,0 +1,34 @@
+---
+#
+# Ansible managed
+#
+
+on:
+ - push
+
+name: Ansible Graphviz
+
+jobs:
+ build:
+ runs-on: ubuntu-20.04
+ steps:
+ - name: checkout
+ uses: actions/checkout@v3
+ with:
+ path: ${{ github.repository }}
+ - name: create png
+ uses: robertdebock/graphviz-action@1.0.7
+ - name: Commit files
+ run: |
+ cd ${{ github.repository }}
+ git config --local user.email "github-actions[bot]@users.noreply.github.com"
+ git config --local user.name "github-actions[bot]"
+ git add requirements.dot requirements.png
+ git commit -m "Add generated files"
+ - name: save to png branch
+ uses: ad-m/github-push-action@master
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ directory: ${{ github.repository }}
+ force: true
+ branch: png
diff --git a/roles/robertdebock.epel/.github/workflows/todo.yml b/roles/robertdebock.epel/.github/workflows/todo.yml
new file mode 100644
index 00000000..3e6e4177
--- /dev/null
+++ b/roles/robertdebock.epel/.github/workflows/todo.yml
@@ -0,0 +1,20 @@
+---
+#
+# Ansible managed
+#
+
+name: "TODO 2 Issue"
+
+on:
+ push:
+
+jobs:
+ build:
+ runs-on: "ubuntu-20.04"
+ steps:
+ - uses: "actions/checkout@master"
+ - name: "TODO to Issue"
+ uses: "alstr/todo-to-issue-action@v2.3"
+ id: "todo"
+ with:
+ TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/roles/robertdebock.epel/.gitignore b/roles/robertdebock.epel/.gitignore
new file mode 100644
index 00000000..ad73ff64
--- /dev/null
+++ b/roles/robertdebock.epel/.gitignore
@@ -0,0 +1,6 @@
+.molecule
+*.log
+*.swp
+.tox
+.cache
+.DS_Store
diff --git a/roles/robertdebock.epel/.gitlab-ci.yml b/roles/robertdebock.epel/.gitlab-ci.yml
new file mode 100644
index 00000000..c873802e
--- /dev/null
+++ b/roles/robertdebock.epel/.gitlab-ci.yml
@@ -0,0 +1,30 @@
+---
+image: "robertdebock/github-action-molecule:4.0.6"
+
+services:
+ - docker:dind
+
+variables:
+ DOCKER_HOST: "tcp://docker:2375"
+ PY_COLORS: 1
+
+molecule:
+ script:
+ - image=${image} tag=${tag} molecule test
+ rules:
+ - if: $CI_COMMIT_REF_NAME == "master"
+ retry: 1
+ parallel:
+ matrix:
+ - image: "amazonlinux"
+ tag: "latest"
+ - image: "enterpriselinux"
+ tag: "7"
+ - image: "enterpriselinux"
+ tag: "latest"
+
+galaxy:
+ script:
+ - ansible-galaxy role import --api-key ${GALAXY_API_KEY} ${CI_PROJECT_NAMESPACE} ${CI_PROJECT_NAME}
+ rules:
+ - if: $CI_COMMIT_TAG != null
diff --git a/roles/robertdebock.epel/.pre-commit-config.yaml b/roles/robertdebock.epel/.pre-commit-config.yaml
new file mode 100644
index 00000000..0ccd530b
--- /dev/null
+++ b/roles/robertdebock.epel/.pre-commit-config.yaml
@@ -0,0 +1,25 @@
+---
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.3.0
+ hooks:
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: check-added-large-files
+
+ - repo: https://github.com/adrienverge/yamllint
+ rev: v1.26.3
+ hooks:
+ - id: yamllint
+ args: [-c=.yamllint]
+
+ - repo: https://github.com/robertdebock/pre-commit
+ rev: 1.5.1
+ hooks:
+ - id: ansible_role_find_unused_variable
+ - id: ansible_role_find_empty_files
+ - id: ansible_role_find_empty_directories
+ - id: ansible_role_fix_readability
+ - id: ansible_role_find_undefined_handlers
+ - id: ansible_role_find_unquoted_values
+ - id: ansible_role_find_horizontal_when
diff --git a/roles/robertdebock.epel/.yamllint b/roles/robertdebock.epel/.yamllint
new file mode 100644
index 00000000..a7ff0986
--- /dev/null
+++ b/roles/robertdebock.epel/.yamllint
@@ -0,0 +1,16 @@
+---
+extends: default
+
+rules:
+ braces:
+ max-spaces-inside: 1
+ level: error
+ brackets:
+ max-spaces-inside: 1
+ level: error
+ line-length: disable
+ truthy: disable
+
+ignore: |
+ .tox/
+ .cache/
diff --git a/roles/robertdebock.epel/CODE_OF_CONDUCT.md b/roles/robertdebock.epel/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..0d97a6fb
--- /dev/null
+++ b/roles/robertdebock.epel/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behaviour that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behaviour by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behaviour and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behaviour.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviours that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behaviour may be reported by contacting the project team at robert@meinit.nl. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/roles/robertdebock.epel/CONTRIBUTING.md b/roles/robertdebock.epel/CONTRIBUTING.md
new file mode 100644
index 00000000..8e97819e
--- /dev/null
+++ b/roles/robertdebock.epel/CONTRIBUTING.md
@@ -0,0 +1,76 @@
+# [Please contribute](#please-contribute)
+
+You can really make a difference by:
+
+- [Making an issue](https://help.github.com/articles/creating-an-issue/). A well described issue helps a lot. (Have a look at the [known issues](https://github.com/search?q=user%3Arobertdebock+is%3Aissue+state%3Aopen).)
+- [Making a pull request](https://services.github.com/on-demand/github-cli/open-pull-request-github) when you see the error in code.
+
+I'll try to help and take every contribution seriously.
+
+It's a great opportunity for me to learn how you use the role and also an opportunity to get into the habit of contributing to open source software.
+
+## [Step by step](#step-by-step)
+
+Here is how you can help, a lot of steps are related to GitHub, not specifically my roles.
+
+### [1. Make an issue.](#1-make-an-issue)
+
+When you spot an issue, [create an issue](https://github.com/robertdebock/ansible-role-epel/issues).
+
+Making the issue helps me and others find similar problems in the future.
+
+### [2. Fork the project.](#2-fork-the-project)
+
+On the top right side of [the repository on GitHub](https://github.com/robertdebock/ansible-role-epel), click `fork`. This copies everything to your GitHub namespace.
+
+### [3. Make the changes](#3-make-the-changes)
+
+In your own GitHub namespace, make the required changes.
+
+I typically do that by cloning the repository (in your namespace) locally:
+
+```
+git clone git@github.com:YOURNAMESPACE/ansible-role-epel.git
+```
+
+Now you can start to edit on your laptop.
+
+### [4. Optionally: test your changes](#4-optionally-test-your-changes)
+
+Install [molecule](https://molecule.readthedocs.io/en/stable/) and [Tox](https://tox.readthedocs.io/):
+
+```
+pip install molecule tox ansible-lint docker
+```
+
+And run `molecule test`. If you want to test a specific distribution, set `image` and optionally `tag`:
+
+```
+image=centos tag=7 molecule test
+```
+
+Once it starts to work, you can test multiple versions of Ansible:
+
+```
+image=centos tag=7 tox
+```
+
+### [5. Optionally: Regenerate all dynamic content](#5-optionally-regenerate-all-dynamic-content)
+
+You can use [Ansible Generator](https://github.com/robertdebock/ansible-generator) to regenerate all dynamic content.
+
+If you don't do it, I'll do it later for you.
+
+### [6. Make a pull request](#6-make-a-pull-request)
+
+See the [GitHub documentation](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork) on pull requests.
+
+In the comment-box, you can [refer to the issue number](https://help.github.com/en/github/writing-on-github/autolinked-references-and-urls) by using #123, where 123 is the issue number.
+
+### [7. Wait](#7-wait)
+
+Now I'll get a message that you've added some code. Thank you, really.
+
+CI starts to test your changes. You can follow the progress on Travis.
+
+Please consider [sponsoring me](https://github.com/sponsors/robertdebock).
diff --git a/roles/robertdebock.epel/LICENSE b/roles/robertdebock.epel/LICENSE
new file mode 100644
index 00000000..5c7d4d53
--- /dev/null
+++ b/roles/robertdebock.epel/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2022 Robert de Bock (robert@meinit.nl)
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/roles/robertdebock.epel/README.md b/roles/robertdebock.epel/README.md
new file mode 100644
index 00000000..afe58789
--- /dev/null
+++ b/roles/robertdebock.epel/README.md
@@ -0,0 +1,83 @@
+# [epel](#epel)
+
+Install epel on your system.
+
+|GitHub|GitLab|Quality|Downloads|Version|
+|------|------|-------|---------|-------|
+|[](https://github.com/robertdebock/ansible-role-epel/actions)|[](https://gitlab.com/robertdebock/ansible-role-epel)|[](https://galaxy.ansible.com/robertdebock/epel)|[](https://galaxy.ansible.com/robertdebock/epel)|[](https://github.com/robertdebock/ansible-role-epel/releases/)|
+
+## [Example Playbook](#example-playbook)
+
+This example is taken from `molecule/default/converge.yml` and is tested on each push, pull request and release.
+```yaml
+---
+- name: Converge
+ hosts: all
+ become: yes
+ gather_facts: yes
+
+ roles:
+ - role: robertdebock.epel
+```
+
+The machine needs to be prepared. In CI this is done using `molecule/default/prepare.yml`:
+```yaml
+---
+- name: Prepare
+ hosts: all
+ gather_facts: no
+ become: yes
+
+ roles:
+ - role: robertdebock.bootstrap
+```
+
+Also see a [full explanation and example](https://robertdebock.nl/how-to-use-these-roles.html) on how to use these roles.
+
+
+## [Requirements](#requirements)
+
+- pip packages listed in [requirements.txt](https://github.com/robertdebock/ansible-role-epel/blob/master/requirements.txt).
+
+## [Status of used roles](#status-of-used-roles)
+
+The following roles are used to prepare a system. You can prepare your system in another way.
+
+| Requirement | GitHub | GitLab |
+|-------------|--------|--------|
+|[robertdebock.bootstrap](https://galaxy.ansible.com/robertdebock/bootstrap)|[](https://github.com/robertdebock/ansible-role-bootstrap/actions)|[](https://gitlab.com/robertdebock/ansible-role-bootstrap)|
+
+## [Context](#context)
+
+This role is a part of many compatible roles. Have a look at [the documentation of these roles](https://robertdebock.nl/) for further information.
+
+Here is an overview of related roles:
+
+
+## [Compatibility](#compatibility)
+
+This role has been tested on these [container images](https://hub.docker.com/u/robertdebock):
+
+|container|tags|
+|---------|----|
+|amazon|Candidate|
+|el|7, 8|
+
+The minimum version of Ansible required is 2.10, tests have been done to:
+
+- The previous version.
+- The current version.
+- The development version.
+
+
+If you find issues, please register them in [GitHub](https://github.com/robertdebock/ansible-role-epel/issues)
+
+## [License](#license)
+
+Apache-2.0
+
+## [Author Information](#author-information)
+
+[Robert de Bock](https://robertdebock.nl/)
+
+Please consider [sponsoring me](https://github.com/sponsors/robertdebock).
diff --git a/roles/robertdebock.epel/SECURITY.md b/roles/robertdebock.epel/SECURITY.md
new file mode 100644
index 00000000..45375dc9
--- /dev/null
+++ b/roles/robertdebock.epel/SECURITY.md
@@ -0,0 +1,25 @@
+# [Security Policy](#security-policy)
+
+This software implements other software, it's not very likely that this software introduces new vulnerabilities.
+
+## [Supported Versions](#supported-versions)
+
+The current major version is supported. For example if the current version is 3.4.1:
+
+| Version | Supported |
+| ------- | ------------------ |
+| 3.4.1 | :white_check_mark: |
+| 3.4.x | :white_check_mark: |
+| 3.x.x | :white_check_mark: |
+| 2.0.0 | :x: |
+| 1.0.0 | :x: |
+
+## [Reporting a Vulnerability](#reporting-a-vulnerability)
+
+Please [open an issue](https://github.com/robertdebock/ansible-role-epel/issues) describing the vulnerability.
+
+Tell them where to go, how often they can expect to get an update on a
+reported vulnerability, what to expect if the vulnerability is accepted or
+declined, etc.
+
+Please consider [sponsoring me](https://github.com/sponsors/robertdebock).
diff --git a/roles/robertdebock.epel/handlers/main.yml b/roles/robertdebock.epel/handlers/main.yml
new file mode 100644
index 00000000..4acf3dbf
--- /dev/null
+++ b/roles/robertdebock.epel/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+# handlers file for epel
+
+- name: update package cache
+ ansible.builtin.package:
+ update_cache: yes
diff --git a/roles/robertdebock.epel/meta/.galaxy_install_info b/roles/robertdebock.epel/meta/.galaxy_install_info
new file mode 100644
index 00000000..9f655a94
--- /dev/null
+++ b/roles/robertdebock.epel/meta/.galaxy_install_info
@@ -0,0 +1,2 @@
+install_date: Wed Jul 6 04:15:35 2022
+version: 4.0.0
diff --git a/roles/robertdebock.epel/meta/main.yml b/roles/robertdebock.epel/meta/main.yml
new file mode 100644
index 00000000..756c754a
--- /dev/null
+++ b/roles/robertdebock.epel/meta/main.yml
@@ -0,0 +1,26 @@
+---
+galaxy_info:
+ author: Robert de Bock
+ namespace: robertdebock
+ role_name: epel
+ description: Install epel on your system.
+ license: Apache-2.0
+ company: none
+ min_ansible_version: "2.10"
+
+ platforms:
+ - name: Amazon
+ versions:
+ - Candidate
+ - name: EL
+ versions:
+ - 7
+ - 8
+
+ galaxy_tags:
+ - epel
+ - repository
+ - installer
+ - packages
+
+dependencies: []
diff --git a/roles/robertdebock.epel/meta/preferences.yml b/roles/robertdebock.epel/meta/preferences.yml
new file mode 100644
index 00000000..e7fdebfd
--- /dev/null
+++ b/roles/robertdebock.epel/meta/preferences.yml
@@ -0,0 +1,2 @@
+---
+tox_parallel: yes
diff --git a/roles/robertdebock.epel/molecule/default/converge.yml b/roles/robertdebock.epel/molecule/default/converge.yml
new file mode 100644
index 00000000..edaa8ef5
--- /dev/null
+++ b/roles/robertdebock.epel/molecule/default/converge.yml
@@ -0,0 +1,8 @@
+---
+- name: Converge
+ hosts: all
+ become: yes
+ gather_facts: yes
+
+ roles:
+ - role: ansible-role-epel
diff --git a/roles/robertdebock.epel/molecule/default/molecule.yml b/roles/robertdebock.epel/molecule/default/molecule.yml
new file mode 100644
index 00000000..e9ecdd27
--- /dev/null
+++ b/roles/robertdebock.epel/molecule/default/molecule.yml
@@ -0,0 +1,27 @@
+---
+#
+# Ansible managed
+#
+dependency:
+ name: galaxy
+ options:
+ role-file: requirements.yml
+ requirements-file: requirements.yml
+lint: |
+ set -e
+ yamllint .
+ ansible-lint
+driver:
+ name: docker
+platforms:
+ - name: "epel-${image:-fedora}-${tag:-latest}${TOX_ENVNAME}"
+ image: "${namespace:-robertdebock}/${image:-fedora}:${tag:-latest}"
+ command: /sbin/init
+ volumes:
+ - /sys/fs/cgroup:/sys/fs/cgroup:ro
+ privileged: yes
+ pre_build_image: yes
+provisioner:
+ name: ansible
+verifier:
+ name: ansible
diff --git a/roles/robertdebock.epel/molecule/default/prepare.yml b/roles/robertdebock.epel/molecule/default/prepare.yml
new file mode 100644
index 00000000..feb7d811
--- /dev/null
+++ b/roles/robertdebock.epel/molecule/default/prepare.yml
@@ -0,0 +1,8 @@
+---
+- name: Prepare
+ hosts: all
+ gather_facts: no
+ become: yes
+
+ roles:
+ - role: robertdebock.bootstrap
diff --git a/roles/robertdebock.epel/molecule/default/verify.yml b/roles/robertdebock.epel/molecule/default/verify.yml
new file mode 100644
index 00000000..12997851
--- /dev/null
+++ b/roles/robertdebock.epel/molecule/default/verify.yml
@@ -0,0 +1,11 @@
+---
+- name: Verify
+ hosts: all
+ become: yes
+ gather_facts: no
+
+ tasks:
+ - name: install a package from epel
+ ansible.builtin.package:
+ name: aalib
+ state: present
diff --git a/roles/robertdebock.epel/requirements.txt b/roles/robertdebock.epel/requirements.txt
new file mode 100644
index 00000000..ba1d384f
--- /dev/null
+++ b/roles/robertdebock.epel/requirements.txt
@@ -0,0 +1,10 @@
+# This role has been tested with these pip components.
+# To install the required version yourself, use a command as:
+# `python -m pip --user install -r requirements.txt`
+# See the pip requirements file documentation for details:
+# https://pip.pypa.io/en/stable/user_guide/#requirements-files
+#
+# Tests run on the previous and current (latest) version of Ansible.
+ansible>=2.10
+# Some Jinja2 filters are used that are available in the newer releases.
+jinja2>=2.11.2
diff --git a/roles/robertdebock.epel/requirements.yml b/roles/robertdebock.epel/requirements.yml
new file mode 100644
index 00000000..4aca3813
--- /dev/null
+++ b/roles/robertdebock.epel/requirements.yml
@@ -0,0 +1,4 @@
+---
+roles:
+ - name: robertdebock.bootstrap
+collections: []
diff --git a/roles/robertdebock.epel/tasks/assert.yml b/roles/robertdebock.epel/tasks/assert.yml
new file mode 100644
index 00000000..23ee52c9
--- /dev/null
+++ b/roles/robertdebock.epel/tasks/assert.yml
@@ -0,0 +1,7 @@
+---
+- name: Ensure that `epel_next` is set correctly.
+ ansible.builtin.assert:
+ quiet: yes
+ that:
+ - epel_next is defined
+ - epel_next is bool
diff --git a/roles/robertdebock.epel/tasks/main.yml b/roles/robertdebock.epel/tasks/main.yml
new file mode 100644
index 00000000..aa1ff7d4
--- /dev/null
+++ b/roles/robertdebock.epel/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+# tasks file for epel
+
+- name: install epel
+ block:
+ - name: install epel gpg key
+ ansible.builtin.rpm_key:
+ key: "{{ epel_gpg_key }}"
+ state: present
+
+ - name: install epel-release
+ ansible.builtin.package:
+ name: "{{ epel_url }}"
+ state: present
+ notify:
+ - update package cache
+
+ - name: install epel-next-release
+ ansible.builtin.package:
+ name: "{{ epel_next_url }}"
+ state: present
+ when:
+ - epel_next
+ notify:
+ - update package cache
+ when:
+ - (ansible_distribution == "Amazon" and
+ ansible_distribution_major_version == "2") or
+ (ansible_os_family == "RedHat" and
+ ansible_distribution_major_version in [ "7", "8" ])
diff --git a/roles/robertdebock.epel/tox.ini b/roles/robertdebock.epel/tox.ini
new file mode 100644
index 00000000..88f85456
--- /dev/null
+++ b/roles/robertdebock.epel/tox.ini
@@ -0,0 +1,24 @@
+#
+# Ansible managed
+#
+[tox]
+minversion = 3.21.4
+envlist = py{310}-ansible-{4,5}
+
+skipsdist = true
+
+[testenv]
+deps =
+ 4: ansible == 4.*
+ 5: ansible == 5.*
+ molecule[docker]
+ docker == 5.*
+ ansible-lint == 5.*
+commands = molecule test
+setenv =
+ TOX_ENVNAME={envname}
+ PY_COLORS=1
+ ANSIBLE_FORCE_COLOR=1
+ ANSIBLE_ROLES_PATH=../
+
+passenv = namespace image tag DOCKER_HOST
diff --git a/roles/robertdebock.epel/vars/main.yml b/roles/robertdebock.epel/vars/main.yml
new file mode 100644
index 00000000..71d4d505
--- /dev/null
+++ b/roles/robertdebock.epel/vars/main.yml
@@ -0,0 +1,23 @@
+---
+# vars file for epel
+
+_epel_version:
+ default: "{{ ansible_distribution_major_version }}"
+ Amazon-2018: 6
+ Amazon-2: 7
+
+epel_version: "{{ _epel_version[ansible_distribution ~ '-' ~ ansible_distribution_major_version] | default(_epel_version['default'] ) }}"
+
+epel_url: "https://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ epel_version }}.noarch.rpm"
+
+epel_next_url: "https://dl.fedoraproject.org/pub/epel/epel-next-release-latest-{{ epel_version }}.noarch.rpm"
+
+epel_gpg_key: "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ epel_version }}"
+
+# Whether to install the new `epel-next` repository. This is only installed by default
+# on CentOS Stream, as per https://docs.fedoraproject.org/en-US/epel/#_quickstart.
+_epel_next:
+ default: no
+ Stream: yes
+
+epel_next: "{{ _epel_next[ansible_distribution_release] | default(_epel_next['default'] ) }}"
diff --git a/roles/robertdebock.openvpn/.ansible-lint b/roles/robertdebock.openvpn/.ansible-lint
new file mode 100644
index 00000000..cbd9e6cf
--- /dev/null
+++ b/roles/robertdebock.openvpn/.ansible-lint
@@ -0,0 +1,12 @@
+---
+#
+# Ansible managed
+#
+exclude_paths:
+ - ./meta/preferences.yml
+ - ./molecule/default/prepare.yml
+ - ./molecule/default/converge.yml
+ - ./molecule/default/verify.yml
+ - ./molecule/default/collections.yml
+ - ./.tox
+ - ./.cache
diff --git a/roles/robertdebock.openvpn/.github/FUNDING.yml b/roles/robertdebock.openvpn/.github/FUNDING.yml
new file mode 100644
index 00000000..67320f05
--- /dev/null
+++ b/roles/robertdebock.openvpn/.github/FUNDING.yml
@@ -0,0 +1,2 @@
+---
+github: robertdebock
diff --git a/roles/robertdebock.openvpn/.github/ISSUE_TEMPLATE/bug_report.md b/roles/robertdebock.openvpn/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 00000000..4bb9d98c
--- /dev/null
+++ b/roles/robertdebock.openvpn/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,32 @@
+---
+name: Bug report
+about: Create a report to help me improve
+
+---
+
+## Describe the bug
+
+A clear and concise description of what the bug is.
+
+## Playbook
+
+Please paste the playbook you are using. (Consider `requirements.yml` and
+optionally the command you've invoked.)
+
+
+```yaml
+---
+YOUR PLAYBOOK HERE
+```
+
+## Output
+
+Show at least the error, possible related output, maybe just all the output.
+
+## Environment
+
+- Control node OS: [e.g. Debian 9] (`cat /etc/os-release`)
+- Control node Ansible version: [e.g. 2.9.1] (`ansible --version`)
+- Managed node OS: [e.g. CentOS 7] (`cat /etc/os-release`)
+
+Please consider [sponsoring me](https://github.com/sponsors/robertdebock).
diff --git a/roles/robertdebock.openvpn/.github/ISSUE_TEMPLATE/feature_request.md b/roles/robertdebock.openvpn/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000..55a93c40
--- /dev/null
+++ b/roles/robertdebock.openvpn/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,19 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+
+---
+
+## Proposed feature
+
+A clear and concise description of what you want to happen.
+
+## Rationale
+
+Why is this feature required?
+
+## Additional context
+
+Add any other context about the feature request here.
+
+Please consider [sponsoring me](https://github.com/sponsors/robertdebock).
diff --git a/roles/robertdebock.openvpn/.github/pull_request_template.md b/roles/robertdebock.openvpn/.github/pull_request_template.md
new file mode 100644
index 00000000..b1578c0c
--- /dev/null
+++ b/roles/robertdebock.openvpn/.github/pull_request_template.md
@@ -0,0 +1,11 @@
+---
+name: Pull request
+about: Describe the proposed change
+
+---
+
+**Describe the change**
+A clear and concise description of what the pull request is.
+
+**Testing**
+In case a feature was added, how were tests performed?
diff --git a/roles/robertdebock.openvpn/.github/settings.yml b/roles/robertdebock.openvpn/.github/settings.yml
new file mode 100644
index 00000000..058c3ba4
--- /dev/null
+++ b/roles/robertdebock.openvpn/.github/settings.yml
@@ -0,0 +1,8 @@
+---
+#
+# Ansible managed
+#
+repository:
+ description: Install and configure openvpn server or client on your system.
+ homepage: https://robertdebock.nl/
+ topics: openvpn, ansible, molecule, tox, playbook, hacktoberfest
diff --git a/roles/robertdebock.openvpn/.github/workflows/galaxy.yml b/roles/robertdebock.openvpn/.github/workflows/galaxy.yml
new file mode 100644
index 00000000..092e5449
--- /dev/null
+++ b/roles/robertdebock.openvpn/.github/workflows/galaxy.yml
@@ -0,0 +1,18 @@
+---
+#
+# Ansible managed
+#
+
+name: Release to Ansible Galaxy
+
+on:
+ release:
+ types: [created, edited, published, released]
+jobs:
+ release:
+ runs-on: ubuntu-20.04
+ steps:
+ - name: galaxy
+ uses: robertdebock/galaxy-action@1.2.0
+ with:
+ galaxy_api_key: ${{ secrets.galaxy_api_key }}
diff --git a/roles/robertdebock.openvpn/.github/workflows/molecule.yml b/roles/robertdebock.openvpn/.github/workflows/molecule.yml
new file mode 100644
index 00000000..b4bad8fd
--- /dev/null
+++ b/roles/robertdebock.openvpn/.github/workflows/molecule.yml
@@ -0,0 +1,65 @@
+---
+#
+# Ansible managed
+#
+
+name: Ansible Molecule
+
+on:
+ push:
+    tags-ignore:
+ - '*'
+ pull_request:
+ schedule:
+ - cron: '29 15 15 * *'
+
+jobs:
+ lint:
+ runs-on: ubuntu-20.04
+ steps:
+ - name: checkout
+ uses: actions/checkout@v2
+ with:
+ path: "${{ github.repository }}"
+ - name: molecule
+ uses: robertdebock/molecule-action@4.0.7
+ with:
+ command: lint
+ test:
+ needs:
+ - lint
+ runs-on: ubuntu-20.04
+ strategy:
+ fail-fast: false
+ matrix:
+ config:
+ - image: "amazonlinux"
+ tag: "latest"
+ - image: "debian"
+ tag: "latest"
+ - image: "debian"
+ tag: "bookworm"
+ - image: "enterpriselinux"
+ tag: "latest"
+ - image: "fedora"
+ tag: "35"
+ - image: "debian"
+ tag: "latest"
+ - image: "debian"
+ tag: "bookworm"
+ - image: "ubuntu"
+ tag: "focal"
+ steps:
+ - name: checkout
+ uses: actions/checkout@v2
+ with:
+ path: "${{ github.repository }}"
+ - name: disable apparmor for mysql
+ run: sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
+ - name: parse apparmor for mysql
+ run: sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
+ - name: molecule
+ uses: robertdebock/molecule-action@4.0.7
+ with:
+ image: ${{ matrix.config.image }}
+ tag: ${{ matrix.config.tag }}
diff --git a/roles/robertdebock.openvpn/.github/workflows/requirements2png.yml b/roles/robertdebock.openvpn/.github/workflows/requirements2png.yml
new file mode 100644
index 00000000..e94938d4
--- /dev/null
+++ b/roles/robertdebock.openvpn/.github/workflows/requirements2png.yml
@@ -0,0 +1,34 @@
+---
+#
+# Ansible managed
+#
+
+on:
+ - push
+
+name: Ansible Graphviz
+
+jobs:
+ build:
+ runs-on: ubuntu-20.04
+ steps:
+ - name: checkout
+ uses: actions/checkout@v2
+ with:
+ path: ${{ github.repository }}
+ - name: create png
+ uses: robertdebock/graphviz-action@1.0.7
+ - name: Commit files
+ run: |
+ cd ${{ github.repository }}
+ git config --local user.email "github-actions[bot]@users.noreply.github.com"
+ git config --local user.name "github-actions[bot]"
+ git add requirements.dot requirements.png
+ git commit -m "Add generated files"
+ - name: save to png branch
+ uses: ad-m/github-push-action@master
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ directory: ${{ github.repository }}
+ force: true
+ branch: png
diff --git a/roles/robertdebock.openvpn/.github/workflows/todo.yml b/roles/robertdebock.openvpn/.github/workflows/todo.yml
new file mode 100644
index 00000000..3e6e4177
--- /dev/null
+++ b/roles/robertdebock.openvpn/.github/workflows/todo.yml
@@ -0,0 +1,20 @@
+---
+#
+# Ansible managed
+#
+
+name: "TODO 2 Issue"
+
+on:
+ push:
+
+jobs:
+ build:
+ runs-on: "ubuntu-20.04"
+ steps:
+ - uses: "actions/checkout@master"
+ - name: "TODO to Issue"
+ uses: "alstr/todo-to-issue-action@v2.3"
+ id: "todo"
+ with:
+ TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/roles/robertdebock.openvpn/.gitignore b/roles/robertdebock.openvpn/.gitignore
new file mode 100644
index 00000000..ad73ff64
--- /dev/null
+++ b/roles/robertdebock.openvpn/.gitignore
@@ -0,0 +1,6 @@
+.molecule
+*.log
+*.swp
+.tox
+.cache
+.DS_Store
diff --git a/roles/robertdebock.openvpn/.gitlab-ci.yml b/roles/robertdebock.openvpn/.gitlab-ci.yml
new file mode 100644
index 00000000..f0a5c328
--- /dev/null
+++ b/roles/robertdebock.openvpn/.gitlab-ci.yml
@@ -0,0 +1,40 @@
+---
+image: "robertdebock/github-action-molecule:4.0.6"
+
+services:
+ - docker:dind
+
+variables:
+ DOCKER_HOST: "tcp://docker:2375"
+ PY_COLORS: 1
+
+molecule:
+ script:
+ - image=${image} tag=${tag} molecule test
+ rules:
+ - if: $CI_COMMIT_REF_NAME == "master"
+ retry: 1
+ parallel:
+ matrix:
+ - image: "amazonlinux"
+ tag: "latest"
+ - image: "debian"
+ tag: "latest"
+ - image: "debian"
+ tag: "bookworm"
+ - image: "enterpriselinux"
+ tag: "latest"
+ - image: "fedora"
+ tag: "35"
+ - image: "debian"
+ tag: "latest"
+ - image: "debian"
+ tag: "bookworm"
+ - image: "ubuntu"
+ tag: "focal"
+
+galaxy:
+ script:
+ - ansible-galaxy role import --api-key ${GALAXY_API_KEY} ${CI_PROJECT_NAMESPACE} ${CI_PROJECT_NAME}
+ rules:
+ - if: $CI_COMMIT_TAG != null
diff --git a/roles/robertdebock.openvpn/.pre-commit-config.yaml b/roles/robertdebock.openvpn/.pre-commit-config.yaml
new file mode 100644
index 00000000..9adf4a92
--- /dev/null
+++ b/roles/robertdebock.openvpn/.pre-commit-config.yaml
@@ -0,0 +1,24 @@
+---
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.0.1
+ hooks:
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: check-added-large-files
+
+ - repo: https://github.com/adrienverge/yamllint
+ rev: v1.26.3
+ hooks:
+ - id: yamllint
+ args: [-c=.yamllint]
+
+ - repo: https://github.com/robertdebock/pre-commit
+ rev: v1.4.4
+ hooks:
+ - id: ansible_role_find_unused_variable
+ - id: ansible_role_find_empty_files
+ - id: ansible_role_find_empty_directories
+ - id: ansible_role_fix_readability
+ - id: ansible_role_find_undefined_handlers
+ - id: ansible_role_find_unquoted_values
diff --git a/roles/robertdebock.openvpn/.yamllint b/roles/robertdebock.openvpn/.yamllint
new file mode 100644
index 00000000..a7ff0986
--- /dev/null
+++ b/roles/robertdebock.openvpn/.yamllint
@@ -0,0 +1,16 @@
+---
+extends: default
+
+rules:
+ braces:
+ max-spaces-inside: 1
+ level: error
+ brackets:
+ max-spaces-inside: 1
+ level: error
+ line-length: disable
+ truthy: disable
+
+ignore: |
+ .tox/
+ .cache/
diff --git a/roles/robertdebock.openvpn/CODE_OF_CONDUCT.md b/roles/robertdebock.openvpn/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..0d97a6fb
--- /dev/null
+++ b/roles/robertdebock.openvpn/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behaviour that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behaviour by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behaviour and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behaviour.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviours that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behaviour may be reported by contacting the project team at robert@meinit.nl. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/roles/robertdebock.openvpn/CONTRIBUTING.md b/roles/robertdebock.openvpn/CONTRIBUTING.md
new file mode 100644
index 00000000..d82fd66c
--- /dev/null
+++ b/roles/robertdebock.openvpn/CONTRIBUTING.md
@@ -0,0 +1,76 @@
+# [Please contribute](#please-contribute)
+
+You can really make a difference by:
+
+- [Making an issue](https://help.github.com/articles/creating-an-issue/). A well described issue helps a lot. (Have a look at the [known issues](https://github.com/search?q=user%3Arobertdebock+is%3Aissue+state%3Aopen).)
+- [Making a pull request](https://services.github.com/on-demand/github-cli/open-pull-request-github) when you see the error in code.
+
+I'll try to help and take every contribution seriously.
+
+It's a great opportunity for me to learn how you use the role and also an opportunity to get into the habit of contributing to open source software.
+
+## [Step by step](#step-by-step)
+
+Here is how you can help, a lot of steps are related to GitHub, not specifically my roles.
+
+### [1. Make an issue.](#1-make-an-issue)
+
+When you spot an issue, [create an issue](https://github.com/robertdebock/ansible-role-openvpn/issues).
+
+Making the issue helps me and others to find similar problems in the future.
+
+### [2. Fork the project.](#2-fork-the-project)
+
+On the top right side of [the repository on GitHub](https://github.com/robertdebock/ansible-role-openvpn), click `fork`. This copies everything to your GitHub namespace.
+
+### [3. Make the changes](#3-make-the-changes)
+
+In your own GitHub namespace, make the required changes.
+
+I typically do that by cloning the repository (in your namespace) locally:
+
+```
+git clone git@github.com:YOURNAMESPACE/ansible-role-openvpn.git
+```
+
+Now you can start to edit on your laptop.
+
+### [4. Optionally: test your changes](#4-optionally-test-your-changes)
+
+Install [molecule](https://molecule.readthedocs.io/en/stable/) and [Tox](https://tox.readthedocs.io/):
+
+```
+pip install molecule tox ansible-lint docker
+```
+
+And run `molecule test`. If you want to test a specific distribution, set `image` and optionally `tag`:
+
+```
+image=centos tag=7 molecule test
+```
+
+Once it starts to work, you can test multiple versions of Ansible:
+
+```
+image=centos tag=7 tox
+```
+
+### [5. Optionally: Regenerate all dynamic content](#5-optionally-regenerate-all-dynamic-content)
+
+You can use [Ansible Generator](https://github.com/robertdebock/ansible-generator) to regenerate all dynamic content.
+
+If you don't do it, I'll do it later for you.
+
+### [6. Make a pull request](#6-make-a-pull-request)
+
+See the [GitHub documentation](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork) on creating a pull request from a fork.
+
+In the comment-box, you can [refer to the issue number](https://help.github.com/en/github/writing-on-github/autolinked-references-and-urls) by using #123, where 123 is the issue number.
+
+### [7. Wait](#7-wait)
+
+Now I'll get a message that you've added some code. Thank you, really.
+
+CI starts to test your changes. You can follow the progress on Travis.
+
+Please consider [sponsoring me](https://github.com/sponsors/robertdebock).
diff --git a/roles/robertdebock.openvpn/LICENSE b/roles/robertdebock.openvpn/LICENSE
new file mode 100644
index 00000000..5c7d4d53
--- /dev/null
+++ b/roles/robertdebock.openvpn/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2022 Robert de Bock (robert@meinit.nl)
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/roles/robertdebock.openvpn/README.md b/roles/robertdebock.openvpn/README.md
new file mode 100644
index 00000000..751a194c
--- /dev/null
+++ b/roles/robertdebock.openvpn/README.md
@@ -0,0 +1,131 @@
+# [openvpn](#openvpn)
+
+Install and configure openvpn server or client on your system.
+
+|GitHub|GitLab|Quality|Downloads|Version|
+|------|------|-------|---------|-------|
+|[](https://github.com/robertdebock/ansible-role-openvpn/actions)|[](https://gitlab.com/robertdebock/ansible-role-openvpn)|[](https://galaxy.ansible.com/robertdebock/openvpn)|[](https://galaxy.ansible.com/robertdebock/openvpn)|[](https://github.com/robertdebock/ansible-role-openvpn/releases/)|
+
+## [Example Playbook](#example-playbook)
+
+This example is taken from `molecule/default/converge.yml` and is tested on each push, pull request and release.
+```yaml
+---
+- name: Converge
+ hosts: all
+ become: yes
+ gather_facts: yes
+
+ tasks:
+ - name: create openvpn server
+ ansible.builtin.include_role:
+ name: robertdebock.openvpn
+ vars:
+ openvpn_role: "server"
+
+ - name: copy certificates and keys from the server to the client
+ ansible.builtin.copy:
+ src: /etc/openvpn/easy-rsa/pki/{{ item }}
+ dest: /etc/openvpn/client/{{ item | basename }}
+ mode: "0640"
+ remote_src: yes
+ loop:
+ - ca.crt
+ - issued/client.crt
+ - private/client.key
+ - ta.key
+
+ - name: create openvpn client
+ ansible.builtin.include_role:
+ name: robertdebock.openvpn
+ vars:
+ openvpn_role: "client"
+ openvpn_client_server: "127.0.0.1"
+```
+
+The machine needs to be prepared. In CI this is done using `molecule/default/prepare.yml`:
+```yaml
+---
+- name: Prepare server
+ hosts: all
+ gather_facts: no
+ become: yes
+
+ roles:
+ - role: robertdebock.bootstrap
+ # - role: robertdebock.buildtools
+ - role: robertdebock.epel
+ # - role: robertdebock.python_pip
+ # - role: robertdebock.openssl
+```
+
+Also see a [full explanation and example](https://robertdebock.nl/how-to-use-these-roles.html) on how to use these roles.
+
+## [Role Variables](#role-variables)
+
+The default values for the variables are set in `defaults/main.yml`:
+```yaml
+---
+# defaults file for openvpn
+
+# You can setup both a client and a server using this role.
+# Use `server` or `client` for `openvpn_role`.
+
+openvpn_role: server
+
+# If you are configuring a client, setup these variables:
+# openvpn_role: client
+# openvpn_client_server: vpn.example.com
+```
+
+## [Requirements](#requirements)
+
+- pip packages listed in [requirements.txt](https://github.com/robertdebock/ansible-role-openvpn/blob/master/requirements.txt).
+
+## [Status of used roles](#status-of-used-roles)
+
+The following roles are used to prepare a system. You can prepare your system in another way.
+
+| Requirement | GitHub | GitLab |
+|-------------|--------|--------|
+|[robertdebock.bootstrap](https://galaxy.ansible.com/robertdebock/bootstrap)|[](https://github.com/robertdebock/ansible-role-bootstrap/actions)|[](https://gitlab.com/robertdebock/ansible-role-bootstrap)|
+|[robertdebock.epel](https://galaxy.ansible.com/robertdebock/epel)|[](https://github.com/robertdebock/ansible-role-epel/actions)|[](https://gitlab.com/robertdebock/ansible-role-epel)|
+
+## [Context](#context)
+
+This role is a part of many compatible roles. Have a look at [the documentation of these roles](https://robertdebock.nl/) for further information.
+
+Here is an overview of related roles:
+
+
+## [Compatibility](#compatibility)
+
+This role has been tested on these [container images](https://hub.docker.com/u/robertdebock):
+
+|container|tags|
+|---------|----|
+|amazon|Candidate|
+|debian|all|
+|el|8|
+|fedora|35|
+|debian|bullseye, bookworm|
+|ubuntu|focal|
+
+The minimum version of Ansible required is 2.10, tests have been done to:
+
+- The previous version.
+- The current version.
+- The development version.
+
+
+If you find issues, please register them in [GitHub](https://github.com/robertdebock/ansible-role-openvpn/issues)
+
+## [License](#license)
+
+Apache-2.0
+
+## [Author Information](#author-information)
+
+[Robert de Bock](https://robertdebock.nl/)
+
+Please consider [sponsoring me](https://github.com/sponsors/robertdebock).
diff --git a/roles/robertdebock.openvpn/SECURITY.md b/roles/robertdebock.openvpn/SECURITY.md
new file mode 100644
index 00000000..46bfe49e
--- /dev/null
+++ b/roles/robertdebock.openvpn/SECURITY.md
@@ -0,0 +1,25 @@
+# [Security Policy](#security-policy)
+
+This software implements other software, it's not very likely that this software introduces new vulnerabilities.
+
+## [Supported Versions](#supported-versions)
+
+The current major version is supported. For example if the current version is 3.4.1:
+
+| Version | Supported |
+| ------- | ------------------ |
+| 3.4.1 | :white_check_mark: |
+| 3.4.x | :white_check_mark: |
+| 3.x.x | :white_check_mark: |
+| 2.0.0 | :x: |
+| 1.0.0 | :x: |
+
+## [Reporting a Vulnerability](#reporting-a-vulnerability)
+
+Please [open an issue](https://github.com/robertdebock/ansible-role-openvpn/issues) describing the vulnerability.
+
+Reported vulnerabilities are triaged in the issue itself: expect updates
+there on whether the report is accepted or declined, and on the progress
+of any fix.
+
+Please consider [sponsoring me](https://github.com/sponsors/robertdebock).
diff --git a/roles/robertdebock.openvpn/defaults/main.yml b/roles/robertdebock.openvpn/defaults/main.yml
new file mode 100644
index 00000000..6f0ac074
--- /dev/null
+++ b/roles/robertdebock.openvpn/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+# defaults file for openvpn
+
+# You can setup both a client and a server using this role.
+# Use `server` or `client` for `openvpn_role`.
+
+openvpn_role: server
+
+# If you are configuring a client, setup these variables:
+# openvpn_role: client
+# openvpn_client_server: vpn.example.com
diff --git a/roles/robertdebock.openvpn/handlers/main.yml b/roles/robertdebock.openvpn/handlers/main.yml
new file mode 100644
index 00000000..1368c2c7
--- /dev/null
+++ b/roles/robertdebock.openvpn/handlers/main.yml
@@ -0,0 +1,9 @@
+---
+# handlers file for openvpn
+
+- name: restart openvpn
+ ansible.builtin.service:
+ name: "{{ openvpn_service }}"
+ state: restarted
+ when:
+ - not ansible_check_mode | bool
diff --git a/roles/robertdebock.openvpn/meta/.galaxy_install_info b/roles/robertdebock.openvpn/meta/.galaxy_install_info
new file mode 100644
index 00000000..0194dd56
--- /dev/null
+++ b/roles/robertdebock.openvpn/meta/.galaxy_install_info
@@ -0,0 +1,2 @@
+install_date: Wed Jul 6 04:02:59 2022
+version: 4.0.6
diff --git a/roles/robertdebock.openvpn/meta/main.yml b/roles/robertdebock.openvpn/meta/main.yml
new file mode 100644
index 00000000..4bae9d2b
--- /dev/null
+++ b/roles/robertdebock.openvpn/meta/main.yml
@@ -0,0 +1,35 @@
+---
+galaxy_info:
+ author: Robert de Bock
+ namespace: robertdebock
+ role_name: openvpn
+ description: Install and configure openvpn server or client on your system.
+ license: Apache-2.0
+ company: none
+ min_ansible_version: "2.10"
+
+ platforms:
+ - name: Amazon
+ versions:
+ - Candidate
+ - name: Debian
+ versions:
+ - all
+ - name: EL
+ versions:
+ - 8
+ - name: Fedora
+ versions:
+ - 35
+ - name: Debian
+ versions:
+ - bullseye
+ - bookworm
+ - name: Ubuntu
+ versions:
+ - focal
+
+ galaxy_tags:
+ - openvpn
+
+dependencies: []
diff --git a/roles/robertdebock.openvpn/meta/preferences.yml b/roles/robertdebock.openvpn/meta/preferences.yml
new file mode 100644
index 00000000..e7fdebfd
--- /dev/null
+++ b/roles/robertdebock.openvpn/meta/preferences.yml
@@ -0,0 +1,2 @@
+---
+tox_parallel: yes
diff --git a/roles/robertdebock.openvpn/molecule/default/converge.yml b/roles/robertdebock.openvpn/molecule/default/converge.yml
new file mode 100644
index 00000000..7b840019
--- /dev/null
+++ b/roles/robertdebock.openvpn/molecule/default/converge.yml
@@ -0,0 +1,31 @@
+---
+- name: Converge
+ hosts: all
+ become: yes
+ gather_facts: yes
+
+ tasks:
+ - name: create openvpn server
+ ansible.builtin.include_role:
+ name: ansible-role-openvpn
+ vars:
+ openvpn_role: "server"
+
+ - name: copy certificates and keys from the server to the client
+ ansible.builtin.copy:
+ src: /etc/openvpn/easy-rsa/pki/{{ item }}
+ dest: /etc/openvpn/client/{{ item | basename }}
+ mode: "0640"
+ remote_src: yes
+ loop:
+ - ca.crt
+ - issued/client.crt
+ - private/client.key
+ - ta.key
+
+ - name: create openvpn client
+ ansible.builtin.include_role:
+ name: ansible-role-openvpn
+ vars:
+ openvpn_role: "client"
+ openvpn_client_server: "127.0.0.1"
diff --git a/roles/robertdebock.openvpn/molecule/default/molecule.yml b/roles/robertdebock.openvpn/molecule/default/molecule.yml
new file mode 100644
index 00000000..3be33618
--- /dev/null
+++ b/roles/robertdebock.openvpn/molecule/default/molecule.yml
@@ -0,0 +1,27 @@
+---
+#
+# Ansible managed
+#
+dependency:
+ name: galaxy
+ options:
+ role-file: requirements.yml
+ requirements-file: requirements.yml
+lint: |
+ set -e
+ yamllint .
+ ansible-lint
+driver:
+ name: docker
+platforms:
+ - name: "openvpn-${image:-fedora}-${tag:-latest}${TOX_ENVNAME}"
+ image: "${namespace:-robertdebock}/${image:-fedora}:${tag:-latest}"
+ command: /sbin/init
+ volumes:
+ - /sys/fs/cgroup:/sys/fs/cgroup:ro
+ privileged: yes
+ pre_build_image: yes
+provisioner:
+ name: ansible
+verifier:
+ name: ansible
diff --git a/roles/robertdebock.openvpn/molecule/default/myvpn.tlsauth b/roles/robertdebock.openvpn/molecule/default/myvpn.tlsauth
new file mode 100644
index 00000000..43c2ae0b
--- /dev/null
+++ b/roles/robertdebock.openvpn/molecule/default/myvpn.tlsauth
@@ -0,0 +1,21 @@
+#
+# 2048 bit OpenVPN static key
+#
+-----BEGIN OpenVPN Static key V1-----
+0cb9611a6de0c09cf1eb0dce01d1c611
+d5edef055cf90b034b5d68650bb528e1
+47805c7775030a65bdaca605fb2363ea
+d1a8cdb2869a7b45edc98d2da999a9ca
+32dc0c43c50af70efc617c58f758811a
+9561be4632531ebbe843b85ec5bf9398
+c3aadb4d8472b563e09ac57208be6a2d
+e204cf78f088fbe51a9739f782602627
+f3b7c1db88fdd4c6352614dce97c396b
+a368ec81b0db515fe6b231fa4f02d62c
+310d21c366c929e2075c66958f116153
+b50d216ba13736cbcd0bdcda1f48cc58
+06ade232b5797753c5ec5178582cf57f
+84075eaa5de354c0cdf6fc03f2801d45
+de14d95905ecd652b4c4f4d8829a3124
+a6e44e782077324dba055848e26f40bc
+-----END OpenVPN Static key V1-----
diff --git a/roles/robertdebock.openvpn/molecule/default/prepare.yml b/roles/robertdebock.openvpn/molecule/default/prepare.yml
new file mode 100644
index 00000000..c8ee2042
--- /dev/null
+++ b/roles/robertdebock.openvpn/molecule/default/prepare.yml
@@ -0,0 +1,12 @@
+---
+- name: Prepare server
+ hosts: all
+ gather_facts: no
+ become: yes
+
+ roles:
+ - role: robertdebock.bootstrap
+ # - role: robertdebock.buildtools
+ - role: robertdebock.epel
+ # - role: robertdebock.python_pip
+ # - role: robertdebock.openssl
diff --git a/roles/robertdebock.openvpn/molecule/default/verify.yml b/roles/robertdebock.openvpn/molecule/default/verify.yml
new file mode 100644
index 00000000..37f1090a
--- /dev/null
+++ b/roles/robertdebock.openvpn/molecule/default/verify.yml
@@ -0,0 +1,9 @@
+---
+- name: Verify
+ hosts: all
+ become: yes
+ gather_facts: no
+
+ tasks:
+ - name: check if connection still works
+ ansible.builtin.ping:
diff --git a/roles/robertdebock.openvpn/requirements.txt b/roles/robertdebock.openvpn/requirements.txt
new file mode 100644
index 00000000..ba1d384f
--- /dev/null
+++ b/roles/robertdebock.openvpn/requirements.txt
@@ -0,0 +1,10 @@
+# This role has been tested with these pip components.
+# To install the required version yourself, use a command as:
+# `python -m pip install --user -r requirements.txt`
+# See the pip requirements file documentation for details:
+# https://pip.pypa.io/en/stable/user_guide/#requirements-files
+#
+# Tests run on the previous and current (latest) version of Ansible.
+ansible>=2.10
+# Some Jinja2 filters are used that are available in the newer releases.
+jinja2>=2.11.2
diff --git a/roles/robertdebock.openvpn/requirements.yml b/roles/robertdebock.openvpn/requirements.yml
new file mode 100644
index 00000000..25dabda2
--- /dev/null
+++ b/roles/robertdebock.openvpn/requirements.yml
@@ -0,0 +1,5 @@
+---
+roles:
+ - name: robertdebock.bootstrap
+ - name: robertdebock.epel
+collections: []
diff --git a/roles/robertdebock.openvpn/tasks/assert.yml b/roles/robertdebock.openvpn/tasks/assert.yml
new file mode 100644
index 00000000..8df17b6a
--- /dev/null
+++ b/roles/robertdebock.openvpn/tasks/assert.yml
@@ -0,0 +1,18 @@
+---
+
+- name: test if openvpn_role is set correctly
+ ansible.builtin.assert:
+ that:
+ - openvpn_role is defined
+ - openvpn_role is string
+ - openvpn_role in [ "client", "server" ]
+ quiet: yes
+
+- name: test if openvpn_client_server is set correctly
+ ansible.builtin.assert:
+ that:
+ - openvpn_client_server is defined
+ - openvpn_client_server is string
+ quiet: yes
+ when:
+ - openvpn_role == "client"
diff --git a/roles/robertdebock.openvpn/tasks/client.yml b/roles/robertdebock.openvpn/tasks/client.yml
new file mode 100644
index 00000000..fd008f8c
--- /dev/null
+++ b/roles/robertdebock.openvpn/tasks/client.yml
@@ -0,0 +1,19 @@
+---
+
+- name: ensure /etc/openvpn/client exists
+ ansible.builtin.file:
+ path: /etc/openvpn/client
+ state: directory
+ owner: root
+ group: "{{ openvpn_group }}"
+ mode: "0750"
+
+- name: place client.conf
+ ansible.builtin.template:
+ src: client.conf.j2
+ dest: "{{ openvpn_configuration_directory }}/client.conf"
+ owner: root
+ group: "{{ openvpn_group }}"
+ mode: "0640"
+ notify:
+ - restart openvpn
diff --git a/roles/robertdebock.openvpn/tasks/main.yml b/roles/robertdebock.openvpn/tasks/main.yml
new file mode 100644
index 00000000..83408c7b
--- /dev/null
+++ b/roles/robertdebock.openvpn/tasks/main.yml
@@ -0,0 +1,22 @@
+---
+# tasks file for openvpn
+
+- name: import assert.yml
+ ansible.builtin.import_tasks: assert.yml
+ run_once: True
+ delegate_to: localhost
+
+- name: install openvpn packages
+ ansible.builtin.package:
+ name: "{{ openvpn_packages }}"
+ state: present
+
+- name: setup openvpn server or client
+ ansible.builtin.include_tasks:
+ file: "{{ openvpn_role }}.yml"
+
+- name: start and enable openvpn
+ ansible.builtin.service:
+ name: "{{ openvpn_service }}"
+ state: started
+ enabled: yes
diff --git a/roles/robertdebock.openvpn/tasks/server.yml b/roles/robertdebock.openvpn/tasks/server.yml
new file mode 100644
index 00000000..86e2c72d
--- /dev/null
+++ b/roles/robertdebock.openvpn/tasks/server.yml
@@ -0,0 +1,86 @@
+---
+
+- name: ensure /etc/openvpn/easy-rsa exists
+ ansible.builtin.file:
+ path: /etc/openvpn/easy-rsa
+ state: directory
+ mode: "0755"
+
+- name: easyrsa init-pki
+ ansible.builtin.command:
+ cmd: "{{ openvpn_easyrsa_path }}/easyrsa init-pki"
+ chdir: /etc/openvpn/easy-rsa
+ creates: /etc/openvpn/easy-rsa/pki
+
+- name: easyrsa build-ca
+ ansible.builtin.command:
+ cmd: "{{ openvpn_easyrsa_path }}/easyrsa build-ca nopass"
+ chdir: /etc/openvpn/easy-rsa
+ creates: /etc/openvpn/easy-rsa/pki/ca.crt
+ environment:
+ EASYRSA_BATCH: "yes"
+
+- name: easyrsa gen-dh
+ ansible.builtin.command:
+ cmd: "{{ openvpn_easyrsa_path }}/easyrsa gen-dh"
+ chdir: /etc/openvpn/easy-rsa
+ creates: /etc/openvpn/easy-rsa/pki/dh.pem
+
+- name: easyrsa build-server-full server nopass
+ ansible.builtin.command:
+ cmd: "{{ openvpn_easyrsa_path }}/easyrsa build-server-full server nopass"
+ chdir: /etc/openvpn/easy-rsa
+ creates: /etc/openvpn/easy-rsa/pki/issued/server.crt
+
+- name: easyrsa build-client-full client nopass
+ ansible.builtin.command:
+ cmd: "{{ openvpn_easyrsa_path }}/easyrsa build-client-full client nopass"
+ chdir: /etc/openvpn/easy-rsa
+ creates: /etc/openvpn/easy-rsa/pki/issued/client.crt
+
+- name: easyrsa gen-crl
+ ansible.builtin.command:
+ cmd: "{{ openvpn_easyrsa_path }}/easyrsa gen-crl"
+ chdir: /etc/openvpn/easy-rsa
+ creates: /etc/openvpn/easy-rsa/pki/crl.pem
+
+- name: openvpn --genkey --secret /etc/openvpn/easy-rsa/pki/ta.key
+ ansible.builtin.command:
+ cmd: openvpn --genkey --secret /etc/openvpn/easy-rsa/pki/ta.key
+ creates: /etc/openvpn/easy-rsa/pki/ta.key
+
+- name: copy files to /etc/openvpn/server
+ ansible.builtin.copy:
+ src: /etc/openvpn/easy-rsa/pki/{{ item }}
+ dest: /etc/openvpn/server/{{ item | basename }}
+ mode: "0640"
+ remote_src: yes
+ loop:
+ - ca.crt
+ - dh.pem
+ - ta.key
+ - issued/client.crt
+ - issued/server.crt
+ - private/ca.key
+ - private/client.key
+ - private/server.key
+
+- name: copy files to /etc/openvpn
+ ansible.builtin.copy:
+ src: /etc/openvpn/easy-rsa/pki/{{ item }}
+ dest: /etc/openvpn/{{ item | basename }}
+ mode: "0640"
+ remote_src: yes
+ loop:
+ - ca.crt
+ - ta.key
+
+- name: place server.conf
+ ansible.builtin.template:
+ src: server.conf.j2
+ dest: "{{ openvpn_configuration_directory }}/server.conf"
+ owner: root
+ group: "{{ openvpn_group }}"
+ mode: "0640"
+ notify:
+ - restart openvpn
diff --git a/roles/robertdebock.openvpn/templates/client.conf.j2 b/roles/robertdebock.openvpn/templates/client.conf.j2
new file mode 100644
index 00000000..afcc1099
--- /dev/null
+++ b/roles/robertdebock.openvpn/templates/client.conf.j2
@@ -0,0 +1,23 @@
+{{ ansible_managed | comment }}
+
+client
+tls-client
+pull
+dev tun
+proto udp
+remote {{ openvpn_client_server }} 1194 udp
+resolv-retry infinite
+nobind
+dhcp-option DNS 8.8.8.8
+user nobody
+group {{ openvpn_group }}
+persist-key
+persist-tun
+key-direction 1
+tls-auth /etc/openvpn/client/ta.key 1
+comp-lzo
+verb 3
+ca /etc/openvpn/client/ca.crt
+cert /etc/openvpn/client/client.crt
+key /etc/openvpn/client/client.key
+auth SHA512
diff --git a/roles/robertdebock.openvpn/templates/server.conf.j2 b/roles/robertdebock.openvpn/templates/server.conf.j2
new file mode 100644
index 00000000..33cf2bf1
--- /dev/null
+++ b/roles/robertdebock.openvpn/templates/server.conf.j2
@@ -0,0 +1,26 @@
+{{ ansible_managed | comment }}
+
+port 1194
+proto udp
+dev tun
+ca /etc/openvpn/server/ca.crt
+cert /etc/openvpn/server/server.crt
+key /etc/openvpn/server/server.key
+dh /etc/openvpn/server/dh.pem
+topology subnet
+server 10.8.0.0 255.255.255.0
+ifconfig-pool-persist ipp.txt
+push "redirect-gateway def1 bypass-dhcp"
+push "dhcp-option DNS 1.1.1.1"
+push "dhcp-option DNS 1.0.0.1"
+keepalive 10 120
+# tls-crypt /etc/openvpn/server/myvpn.tlsauth 0
+cipher AES-256-CBC
+user nobody
+group {{ openvpn_group }}
+persist-key
+persist-tun
+status openvpn-status.log
+verb 3
+explicit-exit-notify 1
+remote-cert-eku "TLS Web Client Authentication"
diff --git a/roles/robertdebock.openvpn/tox.ini b/roles/robertdebock.openvpn/tox.ini
new file mode 100644
index 00000000..88f85456
--- /dev/null
+++ b/roles/robertdebock.openvpn/tox.ini
@@ -0,0 +1,24 @@
+#
+# Ansible managed
+#
+[tox]
+minversion = 3.21.4
+envlist = py{310}-ansible-{4,5}
+
+skipsdist = true
+
+[testenv]
+deps =
+ 4: ansible == 4.*
+ 5: ansible == 5.*
+ molecule[docker]
+ docker == 5.*
+ ansible-lint == 5.*
+commands = molecule test
+setenv =
+ TOX_ENVNAME={envname}
+ PY_COLORS=1
+ ANSIBLE_FORCE_COLOR=1
+ ANSIBLE_ROLES_PATH=../
+
+passenv = namespace image tag DOCKER_HOST
diff --git a/roles/robertdebock.openvpn/vars/main.yml b/roles/robertdebock.openvpn/vars/main.yml
new file mode 100644
index 00000000..bb6a34ad
--- /dev/null
+++ b/roles/robertdebock.openvpn/vars/main.yml
@@ -0,0 +1,51 @@
+---
+# vars file for openvpn
+
+# The packages differ for "server" and "client".
+_openvpn_packages:
+ server:
+ - openvpn
+ - easy-rsa
+ client:
+ - openvpn
+
+openvpn_packages: "{{ _openvpn_packages[openvpn_role] }}"
+
+_openvpn_easyrsa_path:
+ default: /usr/share/easy-rsa/3
+ Debian: /usr/share/easy-rsa
+
+openvpn_easyrsa_path: "{{ _openvpn_easyrsa_path[ansible_os_family] | default(_openvpn_easyrsa_path['default'] ) }}"
+
+_openvpn_group:
+ default: nobody
+ Debian: nogroup
+ RedHat: openvpn
+
+_openvpn_configuration_directory:
+ client:
+ default: /etc/openvpn/client
+ Debian: /etc/openvpn
+ RedHat-7: /etc/openvpn
+ server:
+ default: /etc/openvpn/server
+ Debian: /etc/openvpn
+ RedHat-7: /etc/openvpn
+
+openvpn_configuration_directory: "{{ _openvpn_configuration_directory[openvpn_role][ansible_os_family ~ '-' ~ ansible_distribution_major_version] | default(_openvpn_configuration_directory[openvpn_role][ansible_os_family] | default(_openvpn_configuration_directory[openvpn_role]['default'] )) }}"
+
+openvpn_group: "{{ _openvpn_group[ansible_os_family] | default(_openvpn_group['default'] ) }}"
+
+_openvpn_service:
+ server:
+ default: openvpn@server
+ RedHat-7: openvpn@server
+ RedHat: openvpn-server@server
+ Ubuntu: openvpn
+ client:
+ default: openvpn@client
+ RedHat-7: openvpn@client
+ RedHat: openvpn-client@client
+ Ubuntu: openvpn
+
+openvpn_service: "{{ _openvpn_service[openvpn_role][ansible_os_family ~ '-' ~ ansible_distribution_major_version] | default(_openvpn_service[openvpn_role][ansible_os_family] | default(_openvpn_service[openvpn_role]['default'] )) }}"
diff --git a/roles/scale_nodepool_and_wait_for_workers_hypershift/tasks/main.yaml b/roles/scale_nodepool_and_wait_for_workers_hypershift/tasks/main.yaml
new file mode 100644
index 00000000..fbca3d32
--- /dev/null
+++ b/roles/scale_nodepool_and_wait_for_workers_hypershift/tasks/main.yaml
@@ -0,0 +1,68 @@
+---
+
+- name: Wait for agents to join the cluster
+ k8s_info:
+ api_version: agent-install.openshift.io/v1beta1
+ kind: Agent
+ register: agents
+ until: agents.resources | length == hypershift.agents_parms.agents_count | int
+ retries: 30
+ delay: 10
+
+- name: Get agent names
+ command: oc get agents -n {{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }} --no-headers
+ register: agents_info
+
+- name: Create List for agents
+ set_fact:
+ agents: []
+
+- name: Get a List of agents
+ set_fact:
+ agents: "{{ agents + [agents_info.stdout.split('\n')[item].split(' ')[0]] }}"
+ loop: "{{ range(hypershift.agents_parms.agents_count|int) | list }}"
+
+- name: Patch Agents
+ shell: oc -n {{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }} patch agent {{ agents[item] }} -p '{"spec":{"installation_disk_id":"/dev/vda","approved":true,"hostname":"worker-{{item}}.{{ hypershift.hcp.hosted_cluster_name }}.{{ hypershift.hcp.basedomain }}"}}' --type merge
+ loop: "{{ range(hypershift.agents_parms.agents_count|int) | list }}"
+
+- name: Scale Nodepool
+ command: oc -n {{ hypershift.hcp.clusters_namespace }} scale nodepool {{ hypershift.hcp.hosted_cluster_name }} --replicas {{ hypershift.agents_parms.agents_count }}
+
+- name: Wait for Agentmachines to create
+ k8s_info:
+ api_version: capi-provider.agent-install.openshift.io/v1alpha1
+ kind: AgentMachine
+ register: agent_machines
+ until: agent_machines.resources | length == hypershift.agents_parms.agents_count | int
+ retries: 30
+ delay: 10
+
+- name: Wait for Machines to create
+ k8s_info:
+ api_version: cluster.x-k8s.io/v1beta1
+ kind: Machine
+ register: machines
+ until: machines.resources | length == hypershift.agents_parms.agents_count | int
+ retries: 30
+ delay: 10
+
+- name: Create Kubeconfig for Hosted Cluster
+ shell: hypershift create kubeconfig --namespace {{ hypershift.hcp.clusters_namespace }} --name {{ hypershift.hcp.hosted_cluster_name }} > /root/ansible_workdir/{{ hypershift.hcp.hosted_cluster_name }}-kubeconfig
+
+- name: Wait for Worker Nodes to Join
+ k8s_info:
+ api_version: v1
+ kind: Node
+ kubeconfig: "/root/ansible_workdir/{{ hypershift.hcp.hosted_cluster_name }}-kubeconfig"
+ register: nodes
+ until: nodes.resources | length == hypershift.agents_parms.agents_count | int
+ retries: 300
+ delay: 10
+
+- name: Wait for Worker nodes to be Ready
+ shell: oc get no --kubeconfig=/root/ansible_workdir/{{ hypershift.hcp.hosted_cluster_name }}-kubeconfig --no-headers | grep -i 'NotReady' | wc -l
+ register: node_status
+ until: node_status.stdout == '0'
+ retries: 50
+ delay: 15
diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml
new file mode 100644
index 00000000..b0b4d161
--- /dev/null
+++ b/roles/set_firewall/tasks/main.yaml
@@ -0,0 +1,55 @@
+---
+
+- name: start firewalld service
+ service:
+ name: firewalld
+ state: started
+ enabled: true
+
+- name: Add ports to firewall
+ tags: set_firewall
+ firewalld:
+ port: "{{ item }}"
+ permanent: yes
+ state: enabled
+ loop:
+ - 8080/tcp
+ - 80/tcp
+ - 443/tcp
+ - 4443/tcp
+ - 6443/tcp
+ - 22623/tcp
+ - 53/tcp
+ - 53/udp
+
+- name: Permit traffic in default zone for http and https
+ tags: set_firewall
+ ansible.posix.firewalld:
+ service: "{{ item }}"
+ permanent: yes
+ state: enabled
+ loop:
+ - http
+ - https
+
+- name: Ensure the default Apache port is 8080
+ tags: set_firewall
+ replace:
+ path: /etc/httpd/conf/httpd.conf
+ regexp: '^Listen 80$'
+ replace: 'Listen 8080'
+ backup: yes
+
+- name: Ensure the SSL default port is 4443
+ tags: set_firewall
+ replace:
+ path: /etc/httpd/conf.d/ssl.conf
+ regexp: '^Listen 443 https'
+ replace: 'Listen 4443 https'
+ backup: yes
+
+- name: reload firewalld to reflect changes
+ tags: set_firewall
+ systemd:
+ name: firewalld
+ state: reloaded
diff --git a/roles/set_inventory/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml
new file mode 100644
index 00000000..2c52b7d8
--- /dev/null
+++ b/roles/set_inventory/tasks/main.yaml
@@ -0,0 +1,50 @@
+---
+
+- name: Find inventory directory from ansible.cfg
+ tags: set_inventory
+ shell: cat {{ ansible_config_file }} | grep 'inventory=' | cut -f2 -d"="
+ register: find_inventory
+
+- name: Find absolute path to project.
+ tags: set_inventory
+ shell: |
+ ansible_config="{{ ansible_config_file }}"
+ echo "${ansible_config%/*}/"
+ register: find_project
+
+- name: Fail if network_mode is NAT and jumphost vars are undefined.
+ tags: set_inventory
+ fail:
+ msg: "Error jumphost vars undefined: when env.network_mode is NAT, you must set all env.jumphost variables."
+ when: ( env.network_mode | upper == 'NAT' ) and (env.jumphost.name is none or env.jumphost.ip is none or env.jumphost.user is none or env.jumphost.pass is none or env.jumphost.path_to_keypair is none)
+
+- name: Template out inventory with localhost, file server, KVM host, jumphost(optional) and bastion information
+ tags: set_inventory
+ template:
+ src: hosts.j2
+ dest: "{{ find_project.stdout }}{{ find_inventory.stdout }}/hosts"
+ force: yes
+
+- meta: refresh_inventory
+
+- name: Add path to Ansible private key in ansible.cfg
+ tags: set_inventory
+ lineinfile:
+ line: "private_key_file=~/.ssh/{{ env.ansible_key_name }}"
+ path: "{{ ansible_config_file }}"
+ regexp: "private_key_file"
+ state: present
+
+- name: check inventory setup
+ tags: set_inventory
+ command: ansible-inventory --list
+ register: inv_check
+ failed_when: inv_check.rc != 0
+
+- name: Gather facts to re-read inventory after changes made to inventory
+ tags: set_inventory
+ ansible.builtin.gather_facts:
+
+- name: Refresh inventory
+ tags: set_inventory
+ meta: refresh_inventory
diff --git a/roles/set_inventory/templates/hosts.j2 b/roles/set_inventory/templates/hosts.j2
new file mode 100644
index 00000000..7c4a1f9a
--- /dev/null
+++ b/roles/set_inventory/templates/hosts.j2
@@ -0,0 +1,18 @@
+[localhost]
+127.0.0.1 ansible_connection=local ansible_become_password={{ env.controller.sudo_pass }}
+
+[file_server]
+{{ env.file_server.ip }} ansible_user={{ env.file_server.user }} ansible_become_password={{ env.file_server.pass }}
+
+[kvm_host]
+{{ env.z.lpar1.hostname }} ansible_host={{ env.z.lpar1.ip }} ansible_user={{ env.z.lpar1.user }} ansible_become_password={{ env.z.lpar1.pass }}
+{{ (env.z.lpar2.hostname + ' ansible_host=' + env.z.lpar2.ip + ' ansible_user=' + env.z.lpar2.user + ' ansible_become_password=' + env.z.lpar2.pass ) if env.z.lpar2.hostname is defined else '' }}
+{{ (env.z.lpar3.hostname + ' ansible_host=' + env.z.lpar3.ip + ' ansible_user=' + env.z.lpar3.user + ' ansible_become_password=' + env.z.lpar3.pass ) if env.z.lpar3.hostname is defined else '' }}
+
+[bastion]
+{{ env.bastion.networking.hostname }} ansible_host={{ env.bastion.networking.ip }} ansible_user={{ env.bastion.access.user }} ansible_become_password={{ env.bastion.access.pass }}
+
+{% if ( env.network_mode | upper == 'NAT' ) and ( env.jumphost.name is not none ) and ( env.jumphost.ip is not none ) and ( env.jumphost.user is not none ) and ( env.jumphost.pass is not none ) -%}
+{{ '[jumphost]' }}
+{{ env.jumphost.name | string + ' ansible_host=' + env.jumphost.ip | string + ' ansible_user=' + env.jumphost.user | string + ' ansible_become_password=' + env.jumphost.pass | string }}
+{% endif -%}
diff --git a/roles/setup_for_agents_hypershift/tasks/main.yaml b/roles/setup_for_agents_hypershift/tasks/main.yaml
new file mode 100644
index 00000000..a2c65200
--- /dev/null
+++ b/roles/setup_for_agents_hypershift/tasks/main.yaml
@@ -0,0 +1,34 @@
+---
+
+- name: Download ipxe script
+ shell: curl -k -L $(oc -n {{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }} get InfraEnv {{ hypershift.hcp.hosted_cluster_name }} -ojsonpath="{.status.bootArtifacts.ipxeScript}")
+ register: ipxe_script
+
+- name: Create Installation directory
+ file:
+ path: /var/lib/libvirt/images/pxeboot
+ state: directory
+ mode: '0755'
+
+- name: Get URL for initrd
+ set_fact:
+ initrd_url: "{{ ipxe_script.stdout_lines[1].split(' ')[3] }}"
+
+- name: Download initrd.img
+ get_url:
+ url: "{{ initrd_url }}"
+ dest: /var/lib/libvirt/images/pxeboot/initrd.img
+ validate_certs: false
+
+- name: Get URL for kernel.img
+ set_fact:
+ kernel_url: "{{ ipxe_script.stdout_lines[2].split(' ')[1] }}"
+
+- name: Download kernel.img
+ get_url:
+ url: "{{ kernel_url }}"
+ dest: /var/lib/libvirt/images/pxeboot/kernel.img
+ validate_certs: false
+
+
+
diff --git a/roles/ssh_add_config/tasks/main.yaml b/roles/ssh_add_config/tasks/main.yaml
new file mode 100644
index 00000000..a68b0a6d
--- /dev/null
+++ b/roles/ssh_add_config/tasks/main.yaml
@@ -0,0 +1,19 @@
+---
+
+- name: Create ssh config file (or add to an existing file) if network mode is NAT
+ tags: ssh_copy_id, ssh
+ ansible.builtin.blockinfile:
+ path: ~/.ssh/config
+ backup: true
+ create: true
+ mode: '0644'
+ block: |
+ Host {{ env.jumphost.name }}
+ HostName {{ env.jumphost.ip }}
+ User {{ env.jumphost.user }}
+ IdentityFile {{ path_to_key_pair.split('.')[:-1] | join('.') }}
+ Host {{ env.bastion.networking.ip }}
+ HostName {{ env.bastion.networking.ip }}
+ User {{ env.bastion.access.user }}
+ IdentityFile {{ path_to_key_pair.split('.')[:-1] | join('.') }}
+ ProxyJump {{ env.jumphost.name }}
diff --git a/roles/ssh_agent/tasks/main.yaml b/roles/ssh_agent/tasks/main.yaml
new file mode 100644
index 00000000..bb17edb3
--- /dev/null
+++ b/roles/ssh_agent/tasks/main.yaml
@@ -0,0 +1,13 @@
+---
+
+- name: Add ansible SSH key to ssh-agent
+ tags: ssh_agent, ssh
+ lineinfile:
+ line: "eval $(ssh-agent) && ssh-add ~/.ssh/{{ env.ansible_key_name }}"
+ path: ~/.bash_profile
+ register: ssh_agent_setup
+
+- name: Print results from setting up SSH agent
+ tags: ssh_agent, ssh
+ debug:
+ var: ssh_agent_setup
diff --git a/roles/ssh_copy_id/files/.gitkeep b/roles/ssh_copy_id/files/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml
new file mode 100644
index 00000000..e2012ca8
--- /dev/null
+++ b/roles/ssh_copy_id/tasks/main.yaml
@@ -0,0 +1,72 @@
+---
+
+- name: Load in variables
+  tags: ssh_copy_id, ssh
+  include_vars: "{{ inventory_dir }}/group_vars/all.yaml"
+
+- name: Delete SSH key from known hosts if it already exists for idempotency
+  tags: ssh_copy_id, ssh
+  lineinfile:
+    path: "~/.ssh/known_hosts"
+    search_string: "{{ ssh_target[0] }}"
+    state: absent
+
+- name: Use template file to create expect script
+  tags: ssh_copy_id, ssh
+  template:
+    src: ssh-copy-id.exp.j2
+    dest: "{{ role_path }}/files/ssh-copy-id-expect-pass.exp"
+    force: true
+  delegate_to: 127.0.0.1
+
+- name: Copy expect file to jumphost first, if not running on localhost.
+  tags: ssh_copy_id, ssh
+  copy:
+    src: "{{ role_path }}/files/ssh-copy-id-expect-pass.exp"
+    dest: "~/.ssh/ssh-copy-id-expect-pass.exp"
+  when: "inventory_hostname != '127.0.0.1'"
+
+- name: Copy SSH ID from controller to remote host with pre-provided password.
+  tags: ssh_copy_id, ssh
+  command: "expect {{ role_path }}/files/ssh-copy-id-expect-pass.exp"
+  register: ssh_copy
+  when: "inventory_hostname == '127.0.0.1'"
+
+- name: Print results of copying ssh id to remote host
+  tags: ssh_copy_id, ssh
+  debug:
+    var: ssh_copy
+  when: "inventory_hostname == '127.0.0.1'"
+
+- name: Copy SSH ID from jumphost to remote host with pre-provided password.
+  tags: ssh_copy_id, ssh
+  command: "expect ~/.ssh/ssh-copy-id-expect-pass.exp"
+  register: ssh_copy
+  when: "inventory_hostname != '127.0.0.1'"
+
+- name: Print results of copying ssh id to remote host
+  tags: ssh_copy_id, ssh
+  debug:
+    var: ssh_copy
+  when: "inventory_hostname != '127.0.0.1'"
+
+- name: Delete templated expect script on controller.
+  tags: ssh_copy_id, ssh
+  file:
+    path: "{{ role_path }}/files/ssh-copy-id-expect-pass.exp"
+    state: absent
+  delegate_to: 127.0.0.1
+
+- name: Delete templated expect script on jumphost.
+  tags: ssh_copy_id, ssh
+  file:
+    path: "~/.ssh/ssh-copy-id-expect-pass.exp"
+    state: absent
+  when: "inventory_hostname != '127.0.0.1'"
+
+- name: Ensure ssh-copy-id files folder exists for future runs.
+  tags: ssh_copy_id, ssh
+  file:
+    path: "{{ role_path }}/files/"
+    state: directory
+  delegate_to: 127.0.0.1
diff --git a/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 b/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2
new file mode 100644
index 00000000..a80dfff1
--- /dev/null
+++ b/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2
@@ -0,0 +1,20 @@
+#!/usr/local/bin/expect -f
+
+set force_conservative 0
+if {$force_conservative} {
+ set send_slow {1 .1}
+ proc send {ignore arg} {
+ sleep .1
+ exp_send -s -- $arg
+ }
+}
+
+set timeout 20
+spawn ssh-copy-id -f -o StrictHostKeyChecking=no -i {{ ssh_target[3] }} {{ ssh_target[1] }}@{{ ssh_target[0] }}
+expect {
+ "password: " {
+ send -- "{{ ssh_target[2] }}\r"
+ expect eof
+ }
+ "Number of key(s) added:" {}
+}
diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml
new file mode 100644
index 00000000..c1a115ce
--- /dev/null
+++ b/roles/ssh_key_gen/tasks/main.yaml
@@ -0,0 +1,37 @@
+---
+
+- name: Check to see if local SSH directory exists
+ tags: ssh_key_gen, ssh
+ stat:
+ path: "~/.ssh"
+ register: ssh_directory_exists_check
+
+- name: Create SSH local directory if it doesn't already exist
+  tags: ssh_key_gen, ssh
+  file:
+    path: "~/.ssh"
+    state: directory
+    mode: '0700'
+  register: ssh_directory_creation
+  when: not ssh_directory_exists_check.stat.exists
+
+- name: Generate an OpenSSH keypair with the default values (4096 bits, RSA)
+ tags: ssh_key_gen, ssh
+ community.crypto.openssh_keypair:
+ path: "~/.ssh/{{ env.ansible_key_name }}"
+ passphrase: ""
+ comment: "Ansible-OpenShift-Provisioning SSH key"
+ regenerate: full_idempotence
+ register: ssh_key_creation
+
+- name: Print results of ssh key pair creation
+ tags: ssh_key_gen, ssh
+ debug:
+ var: ssh_key_creation
+
+- name: Save path to key pair for use in ssh-copy-id role
+ tags: ssh_key_gen, ssh
+ lineinfile:
+ search_string: "path_to_key_pair:"
+ line: "path_to_key_pair: {{ ssh_key_creation.filename }}.pub"
+ path: "{{ inventory_dir }}/group_vars/all.yaml"
diff --git a/roles/ssh_ocp_key_gen/tasks/main.yaml b/roles/ssh_ocp_key_gen/tasks/main.yaml
new file mode 100644
index 00000000..0e97ae73
--- /dev/null
+++ b/roles/ssh_ocp_key_gen/tasks/main.yaml
@@ -0,0 +1,47 @@
+---
+
+- name: Check to see if local SSH directory exists
+ tags: ssh_ocp_key_gen
+ stat:
+ path: /root/.ssh
+ register: ssh_directory_exists_check
+
+- name: Create SSH local directory if it doesn't already exist
+  tags: ssh_ocp_key_gen
+  file:
+    path: /root/.ssh
+    state: directory
+    mode: "0700"
+  register: ssh_directory_creation
+  when: not ssh_directory_exists_check.stat.exists
+
+- name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key.
+ tags: ssh_ocp_key_gen
+ community.crypto.openssh_keypair:
+ path: "/root/.ssh/id_rsa"
+ backend: opensshbin
+ owner: root
+ passphrase: ""
+ comment: "{{ env.ocp_ssh_key_comment }}"
+ regenerate: full_idempotence
+ register: ssh_ocp
+
+- name: Print results of SSH key generation
+  tags: ssh_ocp_key_gen
+  debug:
+    var: ssh_ocp.public_key
+  when: ssh_ocp.changed
+
+- name: Set SSH key permissions
+ tags: ssh_ocp_key_gen
+ command: chmod 600 /root/.ssh/{{ item }}
+ loop:
+ - id_rsa
+ - id_rsa.pub
+
+- name: Set SSH key ownership
+ tags: ssh_ocp_key_gen
+ command: chown root:root /root/.ssh/{{ item }}
+ loop:
+ - id_rsa
+ - id_rsa.pub
diff --git a/roles/update_cfgs/tasks/main.yaml b/roles/update_cfgs/tasks/main.yaml
new file mode 100644
index 00000000..359637f1
--- /dev/null
+++ b/roles/update_cfgs/tasks/main.yaml
@@ -0,0 +1,96 @@
+---
+
+### Setup
+
+- name: Create directory for KVM host's RHEL configuration files for installation.
+ tags: update_cfgs
+ file:
+ path: '{{ env.file_server.cfgs_dir }}/{{ networking.hostname }}'
+ state: directory
+
+- name: Clean-up old cfg files.
+ tags: update_cfgs
+ shell: rm -rf {{ env.file_server.cfgs_dir }}/{{ networking.hostname }}/kvm_host.*
+
+### Templating out RHEL configuration files for the KVM host to pull from the FTP server
+
+- name: Split iso_mount_dir variable on / for use in template
+ tags: update_cfgs
+ set_fact:
+ ins_dir: "{{ env.file_server.iso_mount_dir.split('/') }}"
+
+- name: Template RHEL configuration files out to FTP server.
+ tags: update_cfgs
+ template:
+ src: "{{ item }}.j2"
+ dest: "{{ env.file_server.cfgs_dir }}/{{ networking.hostname }}/{{ item }}"
+ loop:
+ - kvm_host.prm
+ - kvm_host.ins
+ - kvm_host.cfg
+
+### Updating additional parameters in RHEL configuration files that are more variable.
+
+- name: Add FCP storage boot drive triplet info to KVM host's RHEL prm configuration file.
+  tags: update_cfgs
+  lineinfile:
+    path: '{{ env.file_server.cfgs_dir }}/{{ networking.hostname }}/kvm_host.prm'
+    insertafter: 'inst.repo'
+    line: rd.zfcp=0.0.{{ lpar.storage_group_1.dev_num }},0x{{ lpar.storage_group_1.storage_wwpn[i] }},0x0000000000000000
+  with_sequence: start=0 end={{ (lpar.storage_group_1.storage_wwpn | length) - 1 }} stride=1
+  loop_control:
+    extended: true
+    index_var: i
+
+- name: Create list from nic1 device number, incremented twice for input in KVM host prm file for boot
+ set_fact:
+ nic_child_list:
+ - "{{ '%04x' % ( lpar.networking.nic.card1.dev_num | int ) }}"
+ - "{{ '%04x' % ( lpar.networking.nic.card1.dev_num | int + 1 ) }}"
+ - "{{ '%04x' % ( lpar.networking.nic.card1.dev_num | int + 2 ) }}"
+
+- name: Add network device information to KVM host's RHEL prm file for boot
+ lineinfile:
+ path: "{{env.file_server.cfgs_dir}}/{{ networking.hostname }}/kvm_host.prm"
+ insertafter: "ro ramdisk_size"
+ line: "rd.znet=qeth,0.0.{{nic_child_list[0]}},0.0.{{nic_child_list[1]}},0.0.{{nic_child_list[2]}},layer2=1,portno={{ lpar.networking.nic.card1.port }}"
+
+- name: Create hash from KVM host root password to input in kickstart file
+ tags: update_cfgs
+ shell: echo "{{ lpar.access.root_pass }}" | openssl passwd -6 -in -
+ register: root_pass_hash
+
+- name: Add hashed root password to KVM host's RHEL kickstart config file
+ tags: update_cfgs
+ lineinfile:
+ path: "{{ env.file_server.cfgs_dir }}/{{ networking.hostname }}/kvm_host.cfg"
+ insertafter: "Root password"
+ line: "rootpw --iscrypted {{ root_pass_hash.stdout }}"
+
+- name: Create hash from KVM user password to input in kickstart file
+ tags: update_cfgs
+ shell: echo "{{ lpar.access.pass }}" | openssl passwd -6 -in -
+ register: user_pass_hash
+
+- name: Add hashed user password to KVM host's RHEL kickstart config file
+ tags: update_cfgs
+ lineinfile:
+ path: "{{ env.file_server.cfgs_dir }}/{{ networking.hostname }}/kvm_host.cfg"
+ insertafter: "Users and Groups Definitions"
+ line: "user --groups=wheel,kvm,libvirt --name={{ ansible_user }} --password={{ user_pass_hash.stdout }} --iscrypted"
+
+- name: Add network information in KVM hosts's RHEL kickstart file when there is only one network card defined.
+ tags: update_cfgs
+ lineinfile:
+ path: "{{ env.file_server.cfgs_dir }}/{{ networking.hostname }}/kvm_host.cfg"
+ insertafter: "Network information"
+ line: network --bootproto=static --device={{ networking.device1 }} --gateway={{ networking.gateway }} --ip={{ networking.ip }} --nameserver={{ networking.nameserver1 }} {{ ('--nameserver=' + networking.nameserver2) if networking.nameserver2 is defined else '' }} --netmask={{ networking.subnetmask }} --noipv6 --activate --hostname={{ networking.hostname }}
+ when: lpar.networking.nic.card2 is not defined
+
+- name: Add network information in KVM hosts's RHEL kickstart file when there are two network cards defined.
+ tags: update_cfgs
+ lineinfile:
+ path: "{{ env.file_server.cfgs_dir }}/{{ networking.hostname }}/kvm_host.cfg"
+ insertafter: "Network information"
+ line: "network --bootproto=static --device={{ networking.device1 }} --bondslaves={{ lpar.networking.nic.card1.name }},{{ lpar.networking.nic.card2.name }} --bondopts=mode=active-backup;primary={{ lpar.networking.nic.card1 }} --gateway={{ networking.gateway }} --ip={{ networking.ip }} --nameserver={{ networking.nameserver1 }} {{ ('--nameserver=' + networking.nameserver2) if networking.nameserver2 is defined else '' }} --netmask={{ networking.subnetmask }} --noipv6 --activate --hostname={{ networking.hostname }}"
+ when: lpar.networking.nic.card2.name is defined
diff --git a/roles/update_cfgs/templates/kvm_host.cfg.j2 b/roles/update_cfgs/templates/kvm_host.cfg.j2
new file mode 100644
index 00000000..903d7ba4
--- /dev/null
+++ b/roles/update_cfgs/templates/kvm_host.cfg.j2
@@ -0,0 +1,68 @@
+# Template for KVM host kickstart config file. Some parts come from the update_cfgs role.
+
+%pre --log=/root/pre.log
+dd if=/dev/zero of=/dev/mapper/mpatha bs=512 count=10
+dd if=/dev/zero of=/dev/mapper/mpathb bs=512 count=10
+dd if=/dev/zero of=/dev/mapper/mpathc bs=512 count=10
+dd if=/dev/zero of=/dev/mapper/mpathd bs=512 count=10
+dd if=/dev/zero of=/dev/mapper/mpathe bs=512 count=10
+dd if=/dev/zero of=/dev/mapper/mpathf bs=512 count=10
+dd if=/dev/zero of=/dev/mapper/mpathg bs=512 count=10
+dd if=/dev/zero of=/dev/mapper/mpathh bs=512 count=10
+%end
+
+# Reboot after installation
+reboot
+
+# Use network installation
+url --url=ftp://{{env.file_server.user}}:{{env.file_server.pass}}@{{env.file_server.ip}}/{{ env.file_server.iso_mount_dir }}
+
+# Use text mode install
+text
+
+# Run the Setup Agent on first boot
+firstboot --enable
+
+# Keyboard layouts
+keyboard --vckeymap=us --xlayouts='us'
+
+# System language
+lang {{ env.language }}
+
+# Network information (will fill in during update_cfgs role)
+
+# Firewall and SELinux
+firewall --enabled --http --ftp --smtp --ssh --port=443,9090,123
+selinux --enforcing
+
+# Root password (will fill in during update_cfgs role)
+
+# System timezone
+timezone {{ env.timezone }}
+
+#Users and Groups Definitions (will fill in during update_cfgs role)
+
+# The following is the partition information you requested
+ignoredisk --only-use=mpatha
+
+# System bootloader configuration
+bootloader --append="crashkernel=auto" --location=mbr --boot-drive=mpatha
+
+# Partition clearing information
+zerombr
+clearpart --all --initlabel --drives=mpatha
+
+# Disk partitioning information
+autopart --type=lvm --fstype=xfs --nohome
+# if modifying partitioning, double-check roles/configure_storage to make sure it will work smoothly
+
+# packages selection
+%packages --multilib --ignoremissing
+@^minimal
+%end
+
+%addon com_redhat_kdump --disable
+%end
+
+%post --log=/root/post.log
+%end
diff --git a/roles/update_cfgs/templates/kvm_host.ins.j2 b/roles/update_cfgs/templates/kvm_host.ins.j2
new file mode 100644
index 00000000..ea66d718
--- /dev/null
+++ b/roles/update_cfgs/templates/kvm_host.ins.j2
@@ -0,0 +1,4 @@
+../{{ ins_dir[-1] }}/images/kernel.img 0x00000000
+../{{ ins_dir[-1] }}/images/initrd.img 0x02000000
+kvm_host.prm 0x00010480
+../{{ ins_dir[-1] }}/images/initrd.addrsize 0x00010408
diff --git a/roles/update_cfgs/templates/kvm_host.prm.j2 b/roles/update_cfgs/templates/kvm_host.prm.j2
new file mode 100644
index 00000000..06a6f7e9
--- /dev/null
+++ b/roles/update_cfgs/templates/kvm_host.prm.j2
@@ -0,0 +1,6 @@
+ro ramdisk_size=40000
+ip={{ networking.ip }}::{{ networking.gateway }}:{{ lpar.networking.subnet_cidr }}:{{ networking.hostname}}:{{ networking.device1 }}:none
+nameserver={{ networking.nameserver1 }} {{ ( 'nameserver=' + networking.nameserver2 ) if networking.nameserver2 is defined else '' }}
+inst.repo=ftp://{{ env.file_server.user }}:{{ env.file_server.pass }}@{{ env.file_server.ip }}/{{ env.file_server.iso_mount_dir }}
+inst.ks=ftp://{{ env.file_server.user }}:{{ env.file_server.pass }}@{{ env.file_server.ip }}/{{ env.file_server.cfgs_dir }}/{{ networking.hostname }}/kvm_host.cfg
+inst.cmdline
diff --git a/roles/update_ignition_files/defaults/main.yml b/roles/update_ignition_files/defaults/main.yml
new file mode 100644
index 00000000..10e5b723
--- /dev/null
+++ b/roles/update_ignition_files/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# Assume we have only one bastion host defined
+#ignition_update_host: "{{ groups['bastion'][0] }}"
diff --git a/roles/update_ignition_files/tasks/main.yml b/roles/update_ignition_files/tasks/main.yml
new file mode 100644
index 00000000..27e9b6e4
--- /dev/null
+++ b/roles/update_ignition_files/tasks/main.yml
@@ -0,0 +1,19 @@
+---
+
+# Ignition file update
+- name: Update master and worker ignition files on bastion host
+ # Requires sudo access
+ become: true
+ block:
+ - name: Update master and worker ignition files on bastion
+ ansible.builtin.shell: |
+ echo "Print machineconfigs:"
+ oc get mc
+ echo "Update worker ignition file ..."
+ oc extract -n openshift-machine-api secret/worker-user-data --keys=userData --to=- > /var/www/html/ignition/worker.ign
+ echo "Update master ignition file ..."
+ oc extract -n openshift-machine-api secret/master-user-data --keys=userData --to=- > /var/www/html/ignition/master.ign
+ register: cmd_output
+ - name: Print above command output
+ ansible.builtin.debug:
+ var: cmd_output.stdout_lines
diff --git a/roles/wait_for_bootstrap/tasks/main.yaml b/roles/wait_for_bootstrap/tasks/main.yaml
new file mode 100644
index 00000000..1bdecde9
--- /dev/null
+++ b/roles/wait_for_bootstrap/tasks/main.yaml
@@ -0,0 +1,49 @@
+---
+
+- name: "Wait for first node-bootstrapper request. Takes < 10 min with good network connection (retry every 30s)...To watch progress, \
+SSH to root@bastion, SSH to core@bootstrap-ip and run 'journalctl -b -f -u release-image.service -u bootkube.service'"
+ tags: wait_for_bootstrap
+ ansible.builtin.shell: |
+ set -o pipefail
+ oc get csr | grep ":node-bootstrapper"
+ register: csr_check
+ until: (":node-bootstrapper" in csr_check.stdout)
+ retries: 60
+ delay: 30
+
+- name: Print first node-bootstrapper requests
+ tags: wait_for_bootstrap
+ ansible.builtin.debug:
+ var: csr_check.stdout_lines
+
+- name: Start openshift-installer with 'wait-for bootstrap-complete' (async task)
+ tags: wait_for_bootstrap
+ ansible.builtin.command: openshift-install wait-for bootstrap-complete --dir=/root/ocpinst
+ # Installer will wait up to ~50 min
+ async: 3060
+ poll: 0
+ register: watch_bootstrap
+
+- name: "Retry wait-for bootstrap-complete job ID check until it's finished. This may take some time... To watch progress, \
+SSH to bastion, switch to root, from there, SSH to core@bootstrap-ip and run 'journalctl -b -f -u release-image.service -u bootkube.service'"
+ tags: wait_for_bootstrap
+ ansible.builtin.async_status:
+ jid: "{{ watch_bootstrap.ansible_job_id }}"
+ register: bootstrapping
+ until: bootstrapping.finished
+ # Set wait time to 60 min, because it depends highly on system performance and network speed
+ retries: 120
+ delay: 30
+
+- name: Make sure kubeconfig works properly
+ tags: wait_for_bootstrap
+ ansible.builtin.command: oc whoami
+ register: oc_whoami
+ until: (oc_whoami.stdout == "system:admin")
+ retries: 30
+ delay: 10
+
+- name: Print output of oc whoami, should be "system:admin" if previous task worked
+ tags: wait_for_bootstrap
+ ansible.builtin.debug:
+ var: oc_whoami.stdout
diff --git a/roles/wait_for_cluster_operators/defaults/main.yml b/roles/wait_for_cluster_operators/defaults/main.yml
new file mode 100644
index 00000000..bedba01a
--- /dev/null
+++ b/roles/wait_for_cluster_operators/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+cluster_operators_ok: false
diff --git a/roles/wait_for_cluster_operators/tasks/check_co.yaml b/roles/wait_for_cluster_operators/tasks/check_co.yaml
new file mode 100644
index 00000000..17ae2540
--- /dev/null
+++ b/roles/wait_for_cluster_operators/tasks/check_co.yaml
@@ -0,0 +1,36 @@
+---
+- name: "{{ loop_count }} round of checking cluster operators"
+ tags: wait_for_cluster_operators
+ ansible.builtin.shell: |
+ set -o pipefail
+ # Get and print only cluster operators which are only in 'PROGRESSING' state
+ oc get co 2> /dev/null | grep ' True' || true
+ register: oc_get_co
+ when: not cluster_operators_ok
+
+- name: Print cluster operators which are only in 'PROGRESSING' state
+ tags: wait_for_cluster_operators
+ ansible.builtin.debug:
+ var: oc_get_co.stdout_lines
+ when: not cluster_operators_ok
+
+- name: "{{ loop_count }} round of waiting for cluster operators. Trying 10 times before printing status again"
+ tags: wait_for_cluster_operators
+ ansible.builtin.shell: |
+ set -o pipefail
+ # Check for 'PROGRESSING' state
+ oc get co 2> /dev/null | awk '{print $4}'
+ register: co_check
+ # Check for "True" and "False", in case output was empty for any reason
+ until: ("True" not in co_check.stdout) and ("False" in co_check.stdout)
+ retries: 10
+ delay: 30
+ ignore_errors: true
+ when: not cluster_operators_ok
+
+- name: Update local variable, if required
+ tags: wait_for_cluster_operators
+ ansible.builtin.set_fact:
+ cluster_operators_ok: true
+ # Check for "True" and "False", in case output was empty for any reason
+ when: not cluster_operators_ok and ("True" not in co_check.stdout) and ("False" in co_check.stdout)
diff --git a/roles/wait_for_cluster_operators/tasks/main.yaml b/roles/wait_for_cluster_operators/tasks/main.yaml
new file mode 100644
index 00000000..6ec601f4
--- /dev/null
+++ b/roles/wait_for_cluster_operators/tasks/main.yaml
@@ -0,0 +1,25 @@
+---
+
+- name: Wait for cluster operators
+ tags: wait_for_cluster_operators
+ ansible.builtin.include_tasks: check_co.yaml
+ with_items:
+ - "First"
+ - "Second"
+ - "Third"
+ - "Fourth"
+ - "Fifth and last"
+ loop_control:
+ loop_var: loop_count
+
+- name: Get and print final cluster operator status
+ tags: wait_for_cluster_operators
+ block:
+ - name: Get final cluster operators
+ ansible.builtin.command: oc get co
+ register: oc_get_co
+ changed_when: false
+
+ - name: Print final cluster operators
+ ansible.builtin.debug:
+ var: oc_get_co.stdout_lines
diff --git a/roles/wait_for_hc_to_complete_hypershift/tasks/main.yaml b/roles/wait_for_hc_to_complete_hypershift/tasks/main.yaml
new file mode 100644
index 00000000..8f51cb17
--- /dev/null
+++ b/roles/wait_for_hc_to_complete_hypershift/tasks/main.yaml
@@ -0,0 +1,39 @@
+---
+
+- name: Wait for All Cluster Operators to be available
+ shell: oc get co --kubeconfig=/root/ansible_workdir/{{ hypershift.hcp.hosted_cluster_name }}-kubeconfig --no-headers| awk '$3 != "True" {print $1}' | wc -l
+ register: co
+ until: co.stdout == '0'
+ retries: 60
+ delay: 20
+
+- name: Wait for Hosted Control Plane to Complete
+ shell: oc get hc -n {{ hypershift.hcp.clusters_namespace }} --no-headers | awk '{print $4}'
+ register: hc_status
+ until: hc_status.stdout == "Completed"
+ retries: 40
+ delay: 15
+
+- name: Get URL for Webconsole of Hosted Cluster
+ shell: oc whoami --show-console --kubeconfig=/root/ansible_workdir/{{ hypershift.hcp.hosted_cluster_name }}-kubeconfig
+ register: console_url
+
+- name: Get Password for Hosted Cluster
+ shell: oc get secret kubeadmin-password -n "{{ hypershift.hcp.clusters_namespace }}-{{ hypershift.hcp.hosted_cluster_name }}" -o yaml | grep -i 'password:'
+ register: cluster_password_encoded
+
+- name: Decode the Password
+ shell: echo "{{cluster_password_encoded.stdout_lines[0].split(' ')[-1]}}" | base64 --decode
+ register: cluster_password_decoded
+
+- name: Get api server of Hosted Cluster
+  shell: "grep -i 'server:' /root/ansible_workdir/{{ hypershift.hcp.hosted_cluster_name }}-kubeconfig"
+  register: api_server
+
+- name: Display Login Credentials
+ debug:
+ msg: " You can access webconsole of Hosted Cluster here : {{ console_url.stdout }} | Username : 'kubeadmin' Password : {{ cluster_password_decoded.stdout_lines[0] }} "
+
+- name: Display oc login command for CLI
+ debug:
+ msg: " You can access the Hosted Cluster using CLI : oc login {{ api_server.stdout_lines[0].split(': ')[-1] }} -u kubeadmin -p {{ cluster_password_decoded.stdout_lines[0] }} "
diff --git a/roles/wait_for_install_complete/tasks/main.yaml b/roles/wait_for_install_complete/tasks/main.yaml
new file mode 100644
index 00000000..fe59ca68
--- /dev/null
+++ b/roles/wait_for_install_complete/tasks/main.yaml
@@ -0,0 +1,41 @@
+---
+- name: Almost there! Add host info to /etc/hosts so you can login to the cluster via web browser. Ansible Controller sudo password required
+ tags: wait_for_install_complete
+ become: true
+ blockinfile:
+ create: true
+ backup: true
+ marker: "# {mark} ANSIBLE MANAGED BLOCK FOR OCP CLUSTER: {{ env.cluster.networking.metadata_name }}"
+ path: /etc/hosts
+ block: |
+ {{ env.bastion.networking.ip }} oauth-openshift.apps.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}
+ {{ env.bastion.networking.ip }} console-openshift-console.apps.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}
+ {{ env.bastion.networking.ip }} api.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}
+ delegate_to: 127.0.0.1
+
+- name: Get OCP URL
+ tags: wait_for_install_complete
+ set_fact:
+ ocp_url: https://console-openshift-console.apps.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}
+
+- name: Get OCP temporary password
+ tags: wait_for_install_complete
+ command: "cat /root/ocpinst/auth/kubeadmin-password"
+ register: ocp_passwd
+ changed_when: false
+
+- name: "Additional step, if using NAT"
+ tags: wait_for_install_complete
+ debug:
+ msg: "NAT USERS ONLY: Create SSH tunnel to cluster, i.e run command in terminal window from controller: 'sshuttle -r {{ env.bastion.access.user }}@{{ env.bastion.networking.ip }} 192.168.122.0/15 --dns'"
+ when: ( env.network_mode | upper == "NAT" )
+ changed_when: false
+
+- name: Congratulations! OpenShift installation complete. Use the information below for first-time login via web browser.
+ tags: wait_for_install_complete
+ command: "echo {{ item }}"
+ loop:
+ - " URL: {{ ocp_url }} "
+ - " Username: kubeadmin "
+ - " Password: {{ ocp_passwd.stdout }} "
+ changed_when: false
diff --git a/roles/wait_for_node/tasks/main.yaml b/roles/wait_for_node/tasks/main.yaml
new file mode 100644
index 00000000..6583f72a
--- /dev/null
+++ b/roles/wait_for_node/tasks/main.yaml
@@ -0,0 +1,16 @@
+---
+
+- name: Get and print nodes status
+ ansible.builtin.include_tasks: "{{ role_path }}/../common/tasks/print_ocp_node_status.yaml"
+
+- name: "Make sure node is 'Ready' (retry every 20s, wait for '{{ wait_for_node_name }}')"
+ ansible.builtin.shell: |
+ set -o pipefail
+ oc get nodes --no-headers=true | grep "^{{ wait_for_node_name | lower }}" | awk '{print $2}'
+ register: cmd_output
+ until: ("Ready" == cmd_output.stdout)
+ retries: 30
+ delay: 20
+
+- name: Get and print nodes status
+ ansible.builtin.include_tasks: "{{ role_path }}/../common/tasks/print_ocp_node_status.yaml"
diff --git a/roles/wait_for_node/vars/main.yaml b/roles/wait_for_node/vars/main.yaml
new file mode 100644
index 00000000..193af81e
--- /dev/null
+++ b/roles/wait_for_node/vars/main.yaml
@@ -0,0 +1,2 @@
+---
+wait_for_node_name:
diff --git a/roles/worker_nodes/.DS_Store b/roles/worker_nodes/.DS_Store
deleted file mode 100644
index 1fbd6892..00000000
Binary files a/roles/worker_nodes/.DS_Store and /dev/null differ
diff --git a/setup-mgmt-user.yml b/setup-mgmt-user.yml
deleted file mode 100644
index 54dcba13..00000000
--- a/setup-mgmt-user.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-
-
-- hosts: all
- become: true
- tasks:
-
- - name: create zcts user
- tags: always
- user:
- name: zcts
- groups: root
-
- - name: add ssh key for zcts user
- tags: always
- authorized_key:
- user: zcts
- key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKTf6OEBNCzusceF3/dTWK9rIACxOw009HMkH//AuE8h zcts default"
-
- - name: add sudoers file for zcts user
- tags: always
- copy:
- src: sudoers_zcts
- dest: /etc/sudoers.d/zcts
- owner: root
- group: root
- mode: 0440
-
diff --git a/test02-joe.yml b/test02-joe.yml
deleted file mode 100644
index 8b689fd1..00000000
--- a/test02-joe.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-
-- hosts: kvm_hosts
- become: true
- tasks:
-
- - name: Ensure pre-requisite packages are installed
- yum:
- names:
- - libvirt
- - libvirt-devel
- - libvirt-daemon-kvm
- - qemu-kvm
- - virt-manager
- - libvirt-daemon-config-network
- - libvirt-client
- - qemu-img
-
- - name: Ensure libvirtd is started
- ansible.builtin.shell:
- - systemctl enable --now libvirtd
- - systemctl status libvirtd.service
- - systemctl status libvirtd