From 3a9f6245ca614a78892bf9962a87eb65c381ab08 Mon Sep 17 00:00:00 2001 From: Philip Laine Date: Wed, 8 Jun 2022 10:42:06 +0200 Subject: [PATCH] Rename all occurences of XKS to XKF --- Makefile | 4 +- .../developer-guide/developer-flow.drawio | 0 .../developer-guide/ingress-overview.drawio | 0 .../developer-guide/linkerd-overview.drawio | 0 .../network-policy-default-deny.drawio | 0 .../pod-resource-request.drawio | 0 .../developer-guide/pod-scheduling.drawio | 0 .../~$pod-resource-request.drawio.bkp | 0 .../operator-guide/aks-overview.drawio | 0 .../{xks => xkf}/operator-guide/aks-rg.drawio | 0 .../simple-network-design.drawio | 0 docs/{xks => xkf}/architecture-and-design.md | 12 ++--- .../developer-guide/best-practices.md | 0 docs/{xks => xkf}/developer-guide/ci-cd/cd.md | 4 +- docs/{xks => xkf}/developer-guide/ci-cd/ci.md | 0 .../developer-guide/ci-cd/flux.md | 0 .../developer-guide/ci-cd/gitops.md | 14 ++--- .../{xks => xkf}/developer-guide/cloud-iam.md | 8 +-- .../developer-guide/container-security.md | 0 .../developer-guide/introduction.md | 0 .../developer-guide/networking.md | 26 +++++----- .../developer-guide/observability.md | 0 docs/{xks => xkf}/developer-guide/report.md | 0 .../developer-guide/scheduling-scaling.md | 14 ++--- .../developer-guide/secrets-management.md | 6 +-- docs/{xks => xkf}/developer-guide/wsl2.md | 6 +-- docs/{xks => xkf}/index.md | 8 +-- docs/{xks => xkf}/operator-guide/agents.md | 2 +- docs/{xks => xkf}/operator-guide/aws-azdo.md | 0 .../operator-guide/blast-radius.md | 2 +- .../{xks => xkf}/operator-guide/blue-green.md | 0 .../operator-guide/getting-started.md | 24 ++++----- docs/{xks => xkf}/operator-guide/github.md | 0 docs/{xks => xkf}/operator-guide/index.md | 10 ++-- .../operator-guide/kubernetes/aks.md | 2 +- .../operator-guide/kubernetes/eks.md | 0 .../{xks => xkf}/operator-guide/networking.md | 0 sidebars.js | 52 +++++++++---------- 38 files changed, 97 insertions(+), 97 deletions(-) rename assets/{xks => 
xkf}/developer-guide/developer-flow.drawio (100%) rename assets/{xks => xkf}/developer-guide/ingress-overview.drawio (100%) rename assets/{xks => xkf}/developer-guide/linkerd-overview.drawio (100%) rename assets/{xks => xkf}/developer-guide/network-policy-default-deny.drawio (100%) rename assets/{xks => xkf}/developer-guide/pod-resource-request.drawio (100%) rename assets/{xks => xkf}/developer-guide/pod-scheduling.drawio (100%) rename assets/{xks => xkf}/developer-guide/~$pod-resource-request.drawio.bkp (100%) rename assets/{xks => xkf}/operator-guide/aks-overview.drawio (100%) rename assets/{xks => xkf}/operator-guide/aks-rg.drawio (100%) rename assets/{xks => xkf}/operator-guide/simple-network-design.drawio (100%) rename docs/{xks => xkf}/architecture-and-design.md (96%) rename docs/{xks => xkf}/developer-guide/best-practices.md (100%) rename docs/{xks => xkf}/developer-guide/ci-cd/cd.md (95%) rename docs/{xks => xkf}/developer-guide/ci-cd/ci.md (100%) rename docs/{xks => xkf}/developer-guide/ci-cd/flux.md (100%) rename docs/{xks => xkf}/developer-guide/ci-cd/gitops.md (93%) rename docs/{xks => xkf}/developer-guide/cloud-iam.md (97%) rename docs/{xks => xkf}/developer-guide/container-security.md (100%) rename docs/{xks => xkf}/developer-guide/introduction.md (100%) rename docs/{xks => xkf}/developer-guide/networking.md (95%) rename docs/{xks => xkf}/developer-guide/observability.md (100%) rename docs/{xks => xkf}/developer-guide/report.md (100%) rename docs/{xks => xkf}/developer-guide/scheduling-scaling.md (96%) rename docs/{xks => xkf}/developer-guide/secrets-management.md (98%) rename docs/{xks => xkf}/developer-guide/wsl2.md (97%) rename docs/{xks => xkf}/index.md (76%) rename docs/{xks => xkf}/operator-guide/agents.md (99%) rename docs/{xks => xkf}/operator-guide/aws-azdo.md (100%) rename docs/{xks => xkf}/operator-guide/blast-radius.md (97%) rename docs/{xks => xkf}/operator-guide/blue-green.md (100%) rename docs/{xks => 
xkf}/operator-guide/getting-started.md (96%) rename docs/{xks => xkf}/operator-guide/github.md (100%) rename docs/{xks => xkf}/operator-guide/index.md (86%) rename docs/{xks => xkf}/operator-guide/kubernetes/aks.md (99%) rename docs/{xks => xkf}/operator-guide/kubernetes/eks.md (100%) rename docs/{xks => xkf}/operator-guide/networking.md (100%) diff --git a/Makefile b/Makefile index 48722e5f546..bdb2ab90ff6 100644 --- a/Makefile +++ b/Makefile @@ -6,8 +6,8 @@ all: install-site build .PHONY: assets assets: - docker run --rm -it -v $(PWD):/data rlespinasse/drawio-export:4.1.0 -s 3 -b 10 -f jpg --remove-page-suffix -o /data/static/img/assets/xks/operator-guide/ /data/assets/xks/operator-guide/ - docker run --rm -it -v $(PWD):/data rlespinasse/drawio-export:4.1.0 -s 3 -b 10 -f jpg --remove-page-suffix -o /data/static/img/assets/xks/developer-guide/ /data/assets/xks/developer-guide/ + docker run --rm -it -v $(PWD):/data rlespinasse/drawio-export:4.1.0 -s 3 -b 10 -f jpg --remove-page-suffix -o /data/static/img/assets/xkf/operator-guide/ /data/assets/xkf/operator-guide/ + docker run --rm -it -v $(PWD):/data rlespinasse/drawio-export:4.1.0 -s 3 -b 10 -f jpg --remove-page-suffix -o /data/static/img/assets/xkf/developer-guide/ /data/assets/xkf/developer-guide/ .SILENT: serve: all diff --git a/assets/xks/developer-guide/developer-flow.drawio b/assets/xkf/developer-guide/developer-flow.drawio similarity index 100% rename from assets/xks/developer-guide/developer-flow.drawio rename to assets/xkf/developer-guide/developer-flow.drawio diff --git a/assets/xks/developer-guide/ingress-overview.drawio b/assets/xkf/developer-guide/ingress-overview.drawio similarity index 100% rename from assets/xks/developer-guide/ingress-overview.drawio rename to assets/xkf/developer-guide/ingress-overview.drawio diff --git a/assets/xks/developer-guide/linkerd-overview.drawio b/assets/xkf/developer-guide/linkerd-overview.drawio similarity index 100% rename from 
assets/xks/developer-guide/linkerd-overview.drawio rename to assets/xkf/developer-guide/linkerd-overview.drawio diff --git a/assets/xks/developer-guide/network-policy-default-deny.drawio b/assets/xkf/developer-guide/network-policy-default-deny.drawio similarity index 100% rename from assets/xks/developer-guide/network-policy-default-deny.drawio rename to assets/xkf/developer-guide/network-policy-default-deny.drawio diff --git a/assets/xks/developer-guide/pod-resource-request.drawio b/assets/xkf/developer-guide/pod-resource-request.drawio similarity index 100% rename from assets/xks/developer-guide/pod-resource-request.drawio rename to assets/xkf/developer-guide/pod-resource-request.drawio diff --git a/assets/xks/developer-guide/pod-scheduling.drawio b/assets/xkf/developer-guide/pod-scheduling.drawio similarity index 100% rename from assets/xks/developer-guide/pod-scheduling.drawio rename to assets/xkf/developer-guide/pod-scheduling.drawio diff --git a/assets/xks/developer-guide/~$pod-resource-request.drawio.bkp b/assets/xkf/developer-guide/~$pod-resource-request.drawio.bkp similarity index 100% rename from assets/xks/developer-guide/~$pod-resource-request.drawio.bkp rename to assets/xkf/developer-guide/~$pod-resource-request.drawio.bkp diff --git a/assets/xks/operator-guide/aks-overview.drawio b/assets/xkf/operator-guide/aks-overview.drawio similarity index 100% rename from assets/xks/operator-guide/aks-overview.drawio rename to assets/xkf/operator-guide/aks-overview.drawio diff --git a/assets/xks/operator-guide/aks-rg.drawio b/assets/xkf/operator-guide/aks-rg.drawio similarity index 100% rename from assets/xks/operator-guide/aks-rg.drawio rename to assets/xkf/operator-guide/aks-rg.drawio diff --git a/assets/xks/operator-guide/simple-network-design.drawio b/assets/xkf/operator-guide/simple-network-design.drawio similarity index 100% rename from assets/xks/operator-guide/simple-network-design.drawio rename to assets/xkf/operator-guide/simple-network-design.drawio 
diff --git a/docs/xks/architecture-and-design.md b/docs/xkf/architecture-and-design.md similarity index 96% rename from docs/xks/architecture-and-design.md rename to docs/xkf/architecture-and-design.md index 0590eba55a6..08331afcf5d 100644 --- a/docs/xks/architecture-and-design.md +++ b/docs/xkf/architecture-and-design.md @@ -11,12 +11,12 @@ import useBaseUrl from '@docusaurus/useBaseUrl'; In the terminology of [Microsoft Cloud Adoption Framework](https://docs.microsoft.com/en-us/azure/cloud-adoption-framework/ready/enterprise-scale/architecture) (CAF), Xenit Kubernetes Service is an enterprise-scale landing zone. Additionally, the workload supports multiple cloud providers and AWS is also supported at the moment (but still requires the governance part in Azure). -XKS Overview +XKF Overview ### Glossary - Platform team: the team managing the platform (XKF) -- Tenant: A group of people (team/project/product) at the company using XKS +- Tenant: A group of people (team/project/product) at the company using XKF ## Role-based access management @@ -76,7 +76,7 @@ Other than that, most of the access and work with the tenant resources are done By default, the network setup is expected to be quite autonomous and usually considered to be an external service compared to everything else in the organization using it. It is possible to setup peering with internal networks, but usually it begins with a much simpler setup and then grows organically when required. -XKS Simple Network Design +XKF Simple Network Design The cluster environments are completely separated from each other, but a hub in the production subscription has a peering with them to provide static IP-addresses for CI/CD like terraform to access resources. @@ -104,10 +104,10 @@ Most of the management of the workloads that the tenants deploy are handled thro ## Xenit Kubernetes Framework -XKF is set up from a set of Terraform modules that when combined creates the full XKS service. 
There are multiple individual states that all fulfill their own purpose and build +XKF is set up from a set of Terraform modules that when combined creates the full XKF service. There are multiple individual states that all fulfill their own purpose and build upon each other in a hierarchical manner. The first setup requires applying the Terraform in the correct order, but after that ordering should not matter. Separate states are used as it allows for a more flexible architecture that could be changed in parallel. -XKS Overview +XKF Overview The AKS Terraform contains three modules that are used to setup a Kubernetes cluster. To allow for blue/green deployments of AKS clusters resources have to be split up into global resources that can be shared between the clusters, and cluster-specific resources. @@ -117,4 +117,4 @@ The aks-global module contains the global resources like ACR, DNS and Azure AD c The aks and aks-core module creates an AKS cluster and configures it. This cluster will have a suffix, normally a number to allow for temporarily creating multiple clusters when performing a blue/green deployment of the clusters. Namespaces will be created in the cluster for each of the configured tenants. Each namespace is linked to a resource group in Azure where namespace resources are expected to be created. 
-AKS Resource Groups +AKS Resource Groups diff --git a/docs/xks/developer-guide/best-practices.md b/docs/xkf/developer-guide/best-practices.md similarity index 100% rename from docs/xks/developer-guide/best-practices.md rename to docs/xkf/developer-guide/best-practices.md diff --git a/docs/xks/developer-guide/ci-cd/cd.md b/docs/xkf/developer-guide/ci-cd/cd.md similarity index 95% rename from docs/xks/developer-guide/ci-cd/cd.md rename to docs/xkf/developer-guide/ci-cd/cd.md index 82f27d23e73..3a76505dd2a 100644 --- a/docs/xks/developer-guide/ci-cd/cd.md +++ b/docs/xkf/developer-guide/ci-cd/cd.md @@ -5,7 +5,7 @@ title: Continuous Delivery import useBaseUrl from '@docusaurus/useBaseUrl'; -Continuous Delivery (CD) should be the only way to make changes to running applications in the XKS service. +Continuous Delivery (CD) should be the only way to make changes to running applications in the XKF service. This is to ensure that changes are consistent and tracked in a centralized manner that can be observed by all. ## GitOps @@ -27,7 +27,7 @@ The core feature of the gitops repo is that one of the pipelines automatically u We have to grant it permissions to do this, sadly manually... 
-CI access +CI access ### Service connections diff --git a/docs/xks/developer-guide/ci-cd/ci.md b/docs/xkf/developer-guide/ci-cd/ci.md similarity index 100% rename from docs/xks/developer-guide/ci-cd/ci.md rename to docs/xkf/developer-guide/ci-cd/ci.md diff --git a/docs/xks/developer-guide/ci-cd/flux.md b/docs/xkf/developer-guide/ci-cd/flux.md similarity index 100% rename from docs/xks/developer-guide/ci-cd/flux.md rename to docs/xkf/developer-guide/ci-cd/flux.md diff --git a/docs/xks/developer-guide/ci-cd/gitops.md b/docs/xkf/developer-guide/ci-cd/gitops.md similarity index 93% rename from docs/xks/developer-guide/ci-cd/gitops.md rename to docs/xkf/developer-guide/ci-cd/gitops.md index bcfc79711e7..fbc65600914 100644 --- a/docs/xks/developer-guide/ci-cd/gitops.md +++ b/docs/xkf/developer-guide/ci-cd/gitops.md @@ -1,6 +1,6 @@ --- id: gitops -title: GitOps a la XKS +title: GitOps a la XKF --- import useBaseUrl from '@docusaurus/useBaseUrl'; @@ -9,9 +9,9 @@ import useBaseUrl from '@docusaurus/useBaseUrl'; > GitOps works by using Git as a single source of truth for declarative infrastructure and applications. With GitOps, the use of software agents can alert on any divergence between Git and what is running in [an environment]. If there is a difference, Kubernetes reconcilers automatically update or rollback the cluster depending on what is appropriate. ‐ _[Weave Works - Guide To GitOps](https://www.weave.works/technologies/gitops/)_ -XKS supports GitHub and Azure DevOps with almost identical workflows. XKF refers to these as Git providers. For simplicity, we refer to their CI/CD automation as "pipelines". If you are using GitHub, whenever this text refers to "pipeline", think "GitHub Actions workflow". As you saw in the previous section, XKS comes with a set of pipelines that automatically detects app releases and promotes them through a series of environments. The allows both rapid iteration and strong validation of apps. 
+XKF supports GitHub and Azure DevOps with almost identical workflows. XKF refers to these as Git providers. For simplicity, we refer to their CI/CD automation as "pipelines". If you are using GitHub, whenever this text refers to "pipeline", think "GitHub Actions workflow". As you saw in the previous section, XKF comes with a set of pipelines that automatically detects app releases and promotes them through a series of environments. The allows both rapid iteration and strong validation of apps. -XKS is built around [trunk-based development](https://trunkbaseddevelopment.com/). +XKF is built around [trunk-based development](https://trunkbaseddevelopment.com/). ## User story: Emilia updates an app @@ -36,7 +36,7 @@ The `dev` and `qa` environments have `auto: true` which means that new releases The flow is fully automatic and is triggered by the container image upload. -Apply to dev +Apply to dev 1. The / container image upload triggers a pipeline in the GitOps repository that runs the / [gitops-promotion new](https://github.com/XenitAB/gitops-promotion#gitops-promotion-new) command. It pushes a new branch and updates the `dev` environment manifest for the app with the new tag. It then opens an "auto-merging" pull request to integrate the new tag into the main branch. 1. The pull request triggers another pipeline that runs / [gitops-promotion status](https://github.com/XenitAB/gitops-promotion#gitops-promotion-new) command. Since `dev` is the first environment in the list, it does nothing and reports success. @@ -46,7 +46,7 @@ The flow is fully automatic and is triggered by the container image upload. ### Applying to qa -Apply to qa +Apply to qa 1. Merging a promotion to the main branch triggers a pipeline in the GitOps repository that runs the [gitops-promotion promote](https://github.com/XenitAB/gitops-promotion#gitops-promotion-promote) command. Like `new`, it creates a branch and updates the `qa` environment manifest for the app with the new tag. 
Because the configuration for this environment says `auto: true` it creates an auto-merging pull request. 1. As before, this new pull request triggers another pipeline that runs the `status` command. This time there is a previous environment and the status command reads the Flux commit status for that environment. Since Flux managed to apply the change in `dev` the `status` command reports success. @@ -58,7 +58,7 @@ Emilia's team has configured Flux to notify them when updates fail and so Emilia ### Application to prod is blocked -Apply to prod +Apply to prod The workflow for applying to `prod` is similar to that of `qa` above, but since Flux reported failure when applying the update to `qa`, the pipeline running the `status` command will fail and the Git provider will block merging of the pull request. @@ -66,6 +66,6 @@ Seeing that the rollout failed, Emilia investigates and realizes that the releas ### Second attempt applying to prod -Apply to prod +Apply to prod Emilia's updated app with database migration is successfully applied, first to the `dev` environment and then to the `qa` environment. The `status` check for the pull request against `prod` turns green and the pull request can be merged. Since the configuration says `auto: false`, the pull request is not automatically merged. Emilia can now verify the update in the `qa` environment and then merge the pull request through the Git provider's user interface. diff --git a/docs/xks/developer-guide/cloud-iam.md b/docs/xkf/developer-guide/cloud-iam.md similarity index 97% rename from docs/xks/developer-guide/cloud-iam.md rename to docs/xkf/developer-guide/cloud-iam.md index 45e904b305e..40b888d963c 100644 --- a/docs/xks/developer-guide/cloud-iam.md +++ b/docs/xkf/developer-guide/cloud-iam.md @@ -3,16 +3,16 @@ id: cloud-iam title: Cloud IAM --- -Sometimes applications will need to integrate with other cloud resources as they require things like persistent data storage. 
When working with XKS each namespace is accompanied by an Azure resource +Sometimes applications will need to integrate with other cloud resources as they require things like persistent data storage. When working with XKF each namespace is accompanied by an Azure resource group or an AWS account. This is where cloud resources can be created by each tenant. To keep things simple it may be a good idea to not share these resources across multiple tenants, as one of the -tenants has to own each resource. Instead look at other options like exposing an API inside the cluster instead. As one may expect the authentication methods differ when running XKS in Azure and AWS, +tenants has to own each resource. Instead look at other options like exposing an API inside the cluster instead. As one may expect the authentication methods differ when running XKF in Azure and AWS, this is because the APIs and underlying authentication methods differ greatly. It is important to take this into consideration when reading this documentation. ## Cloud Providers ### Azure -The recommended way to authenticate towards Azure in XKS is to make use of [AAD Pod Identity](https://github.com/Azure/aad-pod-identity) which runs inside the cluster. AAD Pod Identity allows Pods +The recommended way to authenticate towards Azure in XKF is to make use of [AAD Pod Identity](https://github.com/Azure/aad-pod-identity) which runs inside the cluster. AAD Pod Identity allows Pods within the cluster to use [managed identities](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview) to authenticate towards Azure. This removes the need for static credentials that have to be passed to the Pods. It works by intercepting API requests before they leave the cluster and will attach the correct credential based on the source Pod of the request. 
@@ -121,7 +121,7 @@ TBD ### AWS -When authenticating towards AWS in XKS we recommend using [IAM Roles for Service Accounts](https://docs.aws.amazon.com/emr/latest/EMR-on-EKS-DevelopmentGuide/setting-up-enable-IAM.html) (IRSA). IRSA +When authenticating towards AWS in XKF we recommend using [IAM Roles for Service Accounts](https://docs.aws.amazon.com/emr/latest/EMR-on-EKS-DevelopmentGuide/setting-up-enable-IAM.html) (IRSA). IRSA works by intercepting AWS API calls before leaving the cluster and appending the correct authentication token to the request. This removes the need for static security credentials as it is handled outside the app. IRSA works by annotating a Service Account with a reference to a specfic AWS IAM role. When that Service Account is attached to a Pod, the Pod will be able to assume the IAM role. The reason IRSA works in a multi-tenant cluster is because the reference is multi-directional. The Service Account has to specify the full role ARN it wants to assume and the IAM role has to specify diff --git a/docs/xks/developer-guide/container-security.md b/docs/xkf/developer-guide/container-security.md similarity index 100% rename from docs/xks/developer-guide/container-security.md rename to docs/xkf/developer-guide/container-security.md diff --git a/docs/xks/developer-guide/introduction.md b/docs/xkf/developer-guide/introduction.md similarity index 100% rename from docs/xks/developer-guide/introduction.md rename to docs/xkf/developer-guide/introduction.md diff --git a/docs/xks/developer-guide/networking.md b/docs/xkf/developer-guide/networking.md similarity index 95% rename from docs/xks/developer-guide/networking.md rename to docs/xkf/developer-guide/networking.md index 26f86ee36a8..c58ca9e33b1 100644 --- a/docs/xks/developer-guide/networking.md +++ b/docs/xkf/developer-guide/networking.md @@ -8,12 +8,12 @@ import useBaseUrl from '@docusaurus/useBaseUrl'; ## Network Policies [Network 
Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) in Kubernetes add the ability to allow and deny network traffic from specific pods and namespaces. Both -egress traffic from a Pod and ingress traffic to a Pod can be controlled. In a vanilla Kubernetes cluster all traffic between all namespaces is allowed by default. This is not the case in XKS. Out of -the box in XKS all tenant namespaces have a default deny rule added to them. This default deny rule will block any traffic going between namespaces. It will deny both ingress traffic from other +egress traffic from a Pod and ingress traffic to a Pod can be controlled. In a vanilla Kubernetes cluster all traffic between all namespaces is allowed by default. This is not the case in XKF. Out of +the box in XKF all tenant namespaces have a default deny rule added to them. This default deny rule will block any traffic going between namespaces. It will deny both ingress traffic from other namespaces and egress traffic to other namespaces. All traffic within the namespace between Pods is allowed. The reasoning behind this setup is that Pods should not have more network access than they require to function, as it reduces the blast radius in case of an exploit. -Default Deny Network Policy +Default Deny Network Policy The default deny Network Policy contains an exception for traffic destined to the cluster's DNS service. Without this exception DNS resolution would not work. The Pod selector in the Network Policy is empty, this means that the Network Policy will apply for all Pods in the namespace. @@ -160,13 +160,13 @@ TBD as a layer on top of Kubernetes Services by exposing the Service with a hostname. All Ingress traffic is Layer 7 routed, meaning that traffic is routed based on the host header in the HTTP request. This also means that Ingress only works with HTTP traffic. 
Doing it this way means that only a single load balancer is required reducing cost compared to running multiple load balancers, one per Ingress. -Ingress Overview +Ingress Overview -XKS comes with everything pre-configured for Ingress to work. The cluster will either have a single [Nginx Ingress Controller](https://kubernetes.github.io/ingress-nginx/) which is exposed to the +XKF comes with everything pre-configured for Ingress to work. The cluster will either have a single [Nginx Ingress Controller](https://kubernetes.github.io/ingress-nginx/) which is exposed to the public Internet or two controllers where one is public and one is private. On top of that the cluster is configured with [External DNS](https://github.com/kubernetes-sigs/external-dns)(which creates DNS records) and [Cert Manager](https://cert-manager.io/docs/) (which deals with certificate creation and renewal). Together these three tools offer an automated solution where the complexity of DNS and certificates are not handled by the application. The recommendation is to always enable TLS for all Ingress resources no matter how small the service is. Updating a certificate is -quick and easy so there is no reason not to do this. Every XKS cluster comes with a preconfigured Cluster Issuer which will provision certificates from [Let's Encrypt](https://letsencrypt.org/). +quick and easy so there is no reason not to do this. Every XKF cluster comes with a preconfigured Cluster Issuer which will provision certificates from [Let's Encrypt](https://letsencrypt.org/). Start off by creating a Certificate resource for your Ingress. It is possible to have Cert Manager automatically create a Certificate when an Ingress resource is created. This however has the downside that every Ingress resource will receive its own Certificate. 
Lets Encrypt has [rate limits](https://letsencrypt.org/docs/rate-limits/) for the same domain, if one were to create a @@ -219,7 +219,7 @@ spec: ### Public and Private Ingress -By default an XKS cluster will deploy a single public Ingress controller. All Ingress resources will be routed with a public IP and therefore exposed to the public Internet. It is however also possible to +By default an XKF cluster will deploy a single public Ingress controller. All Ingress resources will be routed with a public IP and therefore exposed to the public Internet. It is however also possible to create private Ingress resources which are only exposed through an IP that is private to the virtual network in which the Kubernetes cluster is deployed. This is an opt in feature as two load balancing services are needed. Making an Ingress private is simple when the private Ingress feature is enabled. All that is required is that the Ingress class has to be set to `nginx-private`, this makes sure that the resource is only served through the private IP. @@ -251,9 +251,9 @@ spec: ### External Routing There is no requirement that the destination for an Ingress resource has to be served from within the cluster. It is possible to route Ingress traffic either to endpoints outside of the -cloud provider or to another service that is only accessible from within the private network. Using the XKS Ingress instead of a separate solution has it's benefits in these situations, -as DNS record creation and certificate management is already setup to work. A typical use case may be during a migration period when XKS and another solution may exist in parallel. -All traffic can enter through XKS but then be forwarded to the external destination. The service endpoints can be updated as applications are migrated to run inside XKS instead of outside. +cloud provider or to another service that is only accessible from within the private network. 
Using the XKF Ingress instead of a separate solution has it's benefits in these situations, +as DNS record creation and certificate management is already setup to work. A typical use case may be during a migration period when XKF and another solution may exist in parallel. +All traffic can enter through XKF but then be forwarded to the external destination. The service endpoints can be updated as applications are migrated to run inside XKF instead of outside. A Service resource is required to configure the destination of the traffic. There are two options available in Kubernetes when directing traffic outside of the cluster. The first option is to create a Service of type ExternalName which specifies a host name which the Service should write to. When a request is made to the Service the given external name IP will be resolved and the @@ -399,8 +399,8 @@ Remember that you can inspect your network policies with `kubectl get networkpol ## Linkerd -[Linkerd](https://linkerd.io/) is an optional service mesh that can be added to XKS. The component is opt-in as it adds a certain amount of overhead, -so unless it has been requested Linkerd will not be present in XKS. A service mesh extends the networking functionality in a Kubernetes cluster. It is +[Linkerd](https://linkerd.io/) is an optional service mesh that can be added to XKF. The component is opt-in as it adds a certain amount of overhead, +so unless it has been requested Linkerd will not be present in XKF. A service mesh extends the networking functionality in a Kubernetes cluster. It is useful when features such as end-to-end encryption or GRPC load balancing is required. Linkerd will automatically handle TCP loadbalancing so when GRPC is used Linkerd will detect this and loadbalance between instances of GRPC servers. @@ -409,7 +409,7 @@ Refer to the [official documentation](https://linkerd.io/2.10/overview/) for doc Linkerd works by injecting a sidecar into every Pod which uses Linkerd. 
All network requests have to be sent through the sidecar which will then be responsible for forwarding it. The sidecar will handle things like traffic encryption before sending the packets outside of the node. -Linkerd Overview +Linkerd Overview ### Get Started diff --git a/docs/xks/developer-guide/observability.md b/docs/xkf/developer-guide/observability.md similarity index 100% rename from docs/xks/developer-guide/observability.md rename to docs/xkf/developer-guide/observability.md diff --git a/docs/xks/developer-guide/report.md b/docs/xkf/developer-guide/report.md similarity index 100% rename from docs/xks/developer-guide/report.md rename to docs/xkf/developer-guide/report.md diff --git a/docs/xks/developer-guide/scheduling-scaling.md b/docs/xkf/developer-guide/scheduling-scaling.md similarity index 96% rename from docs/xks/developer-guide/scheduling-scaling.md rename to docs/xkf/developer-guide/scheduling-scaling.md index f6e5a9914f4..169655d9cd5 100644 --- a/docs/xks/developer-guide/scheduling-scaling.md +++ b/docs/xkf/developer-guide/scheduling-scaling.md @@ -35,14 +35,14 @@ The scheduler will look at the cumulative resource requests across all container resource requests of all Pods currently scheduled to the Node. A Pod may at times request more resource than any Node has capacity for, there are two possible outcomes for this situation. If the Pods resource request is less than a Nodes total available resources, a new Node will be added to the cluster. The Pod will however be considered unschedulable if the resource request exceeds the total resources available on a single node. In these cases either the resource request has to change or a new Node type has to be added to the cluster to cater to these needs. -Pod Scheduling +Pod Scheduling It is possible to overprovision Node resources in cases where the resource request for each container is much larger that the actual resource consumption. 
Efficient resource allocation is a constant battle between requesting enough resources to avoid under allocation while not requesting too much which would result in overallocation. The easiest way to think about resources consumption and availability is to imaging the capacity as a glass, as more resources are consumed water is added to the glass. If the consumption increase does not stop the glass will eventually overfill. -Pod Scheduling +Pod Scheduling The resource limit defined for a Pod has no affect on the scheduling of a Pod. Limits instead comes into play for a Pod during runtime. Exceeding the resource limit for CPU and memory will have different affects. A Pod which exceeds the memory limit will be terminated with an out of memory error (OOM Error). The Pod will after termination be started again, it may start to exceed the limit again which will result in another OOM error. These types of errors can either be resolved by having the application @@ -180,7 +180,7 @@ antiAffinity rule: the pod should not be scheduled onto a node if that node is i - key: prometheus operator: In values: - - xks + - xkf topologyKey: kubernetes.io/hostname weight: 100 - podAffinityTerm: @@ -189,12 +189,12 @@ antiAffinity rule: the pod should not be scheduled onto a node if that node is i - key: prometheus operator: In values: - - xks + - xkf topologyKey: topology.kubernetes.io/zone weight: 100 ``` -This is an example configuration of podAntiAffinity for Prometheus. Spreading the pod deployment based on `topology.kubernetes.io/zone` and `topology.kubernetes.io/hostname` to only allow 1 pod on each node and to mitigate downtime in case an entire zone goes down, e.g: if a pod runs in zone A with key `prometheus` and value `xks` do not schedule it in zone A - choose zone B or C. Note that these settings are "preferred" and not required. +This is an example configuration of podAntiAffinity for Prometheus. 
Spreading the pod deployment based on `topology.kubernetes.io/zone` and `topology.kubernetes.io/hostname` to only allow 1 pod on each node and to mitigate downtime in case an entire zone goes down, e.g: if a pod runs in zone A with key `prometheus` and value `xkf` do not schedule it in zone A - choose zone B or C. Note that these settings are "preferred" and not required. We recommend using this configuration, as critical services should be distributed to multiple zones to minimize downtime. @@ -203,7 +203,7 @@ You can read more about this [in the official documentation](https://kubernetes. ## Pod Disruption Budget [Pod Disruption Budgets](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) are critical for any production deployment of an application. It enforces so that there are always a set amount of replicas of an application running. There is a risk that an application will during a short period of time have zero replicas -running without if a Pod Disruption Budget has not been defined. XKS depends heavily on the existence of Pod Disruption Budgets to make sure that a cluster node pool can be scaled safely and upgrades can be applied to node pools without involving individual developers. During these types of event multiple Nodes will be drained. +running if a Pod Disruption Budget has not been defined. XKF depends heavily on the existence of Pod Disruption Budgets to make sure that a cluster node pool can be scaled safely and upgrades can be applied to node pools without involving individual developers. During these types of event multiple Nodes will be drained.
With a Pod Disruption Budget a limited amount of Pods will be stopped, and then started on a new Node. Eviction will continue with the remaining Pods after the new Pods are running and passed their readiness probe. This documentation is only relevant for applications that are deployed with multiple replicas. It is not possible to create a Pod Disruption Budget for a single replica application, one has to assume that downtime will most likely happen and an application is deployed as a single replica. @@ -258,7 +258,7 @@ The Kubernetes scheduler will out of the box treat each Pod with the same priori scheduling duration may increase if multiple Horizontal Pod Autoscalers were to increase the replica count at the same time, as new Nodes would have to be provisioned first. In this case the queue would grow while waiting for more capacity in the cluster. Some applications may be more critical than others for the survival of a product. Waiting for the applications turn may not be the optimal solution if other applications have no problem waiting a bit longer to start running. -Setting Priority Class to a Pod can help the scheduler decide which Pods are more important and should be assigned to a Node first. In XKS there are three Priority Classes available. +Setting Priority Class to a Pod can help the scheduler decide which Pods are more important and should be assigned to a Node first. In XKF there are three Priority Classes available. * `tenant-low` * `tenant-medium` diff --git a/docs/xks/developer-guide/secrets-management.md b/docs/xkf/developer-guide/secrets-management.md similarity index 98% rename from docs/xks/developer-guide/secrets-management.md rename to docs/xkf/developer-guide/secrets-management.md index cec5bd92052..7f03b66d049 100644 --- a/docs/xks/developer-guide/secrets-management.md +++ b/docs/xkf/developer-guide/secrets-management.md @@ -5,7 +5,7 @@ title: Secrets Management Secrets management is an important feature when building secure products. 
Access to secrets should be limited, and it should be easy to rotate them when required. It becomes a requirement when working with GitOps as secrets can and should not be committed to a git repository. This means that secrets have to be loaded from another source separate from the manifests, but before the application is started. -To solve this problem XKS makes use of the [Secret Store CSI Driver](https://secrets-store-csi-driver.sigs.k8s.io/providers.html) project when running in both Azure and AWS. The CSI driver creates an +To solve this problem XKF makes use of the [Secret Store CSI Driver](https://secrets-store-csi-driver.sigs.k8s.io/providers.html) project when running in both Azure and AWS. The CSI driver creates an entrypoint so that secrets store services in cloud providers can be read as Kubernetes volumes. The project works in a similar way in both Azure and AWS but there are some configuration differences as the service that stores the secrets is different. @@ -372,8 +372,8 @@ A Pod will get the latest version of the Secret Provider Class when started. The updated as this would require the application to be able to restart the process and read the file instead. The Pod will not receive the new value until a new instance of the Pod is created. This could become annoying for situations where the secret value may change often or there are a lot of secrets being read. -The solution in XKS is to configure the Secret Provider Class to annotate the Pod to be recreated when the Secret value is updated. The Pod recreation is done with the -[Reloader](https://github.com/stakater/Reloader) project which is present in all XKS clusters. Reloader works by adding an annotation with the key `secret.reloader.stakater.com/reload`, where the value +The solution in XKF is to configure the Secret Provider Class to annotate the Pod to be recreated when the Secret value is updated. 
The Pod recreation is done with the +[Reloader](https://github.com/stakater/Reloader) project which is present in all XKF clusters. Reloader works by adding an annotation with the key `secret.reloader.stakater.com/reload`, where the value is the name of the secret. If you need to recreate your Pod when any of multiple secrets are changed, use comma-separated values: ```yaml diff --git a/docs/xks/developer-guide/wsl2.md b/docs/xkf/developer-guide/wsl2.md similarity index 97% rename from docs/xks/developer-guide/wsl2.md rename to docs/xkf/developer-guide/wsl2.md index f79c9382d39..1f6bb361d2b 100644 --- a/docs/xks/developer-guide/wsl2.md +++ b/docs/xkf/developer-guide/wsl2.md @@ -20,7 +20,7 @@ In Windows: Go to Control Panel → Programs and Features. In the left-hand menu, select “Turn Windows features on or off” -wsl-enable +wsl-enable ## Install Docker-Desktop @@ -33,11 +33,11 @@ Now sign out from Windows and sign back in, and the Docker application should wo Verify in settings that the WSL2-based engine is used. -docker1 +docker1 Also under settings, go to Resources → WSL Integration and verify that you have access to the WSL integration with your installed WSL (in this case Ubuntu), and make sure it is checked. -docker2 +docker2 To verify functionality: diff --git a/docs/xks/index.md b/docs/xkf/index.md similarity index 76% rename from docs/xks/index.md rename to docs/xkf/index.md index 8386bd1b7d3..5ac0cb81184 100644 --- a/docs/xks/index.md +++ b/docs/xkf/index.md @@ -6,7 +6,7 @@ title: Overview import useBaseUrl from '@docusaurus/useBaseUrl'; [Xenit Kubernetes Service](https://xenit.se/it-tjanster/kubernetes-framework-2/) is an opinionated Kubernetes deployment on top of a cloud provider's managed Kubernetes service. -XKS currently supports Azure Kubernetes Service (AKS) and AWS Elastic Kubernetes service (EKS). Xenit Kubernetes Service: +XKF currently supports Azure Kubernetes Service (AKS) and AWS Elastic Kubernetes service (EKS). 
Xenit Kubernetes Service: - is secure by default - is DevOps-oriented @@ -15,11 +15,11 @@ XKS currently supports Azure Kubernetes Service (AKS) and AWS Elastic Kubernetes This documentation consists of two main sections: -[**Developer Guide**](./developer-guide/introduction): This documentation is targeted towards developers using XKS. It covers the basics of Kubernetes and the custom features that are offered by XKS. +[**Developer Guide**](./developer-guide/introduction): This documentation is targeted towards developers using XKF. It covers the basics of Kubernetes and the custom features that are offered by XKF. -[**Operator Guide**](./operator-guide): This section is meant primarily for Xenit's operations staff. It collects Xenit's internal documentation for operating XKS clusters. It is public and part of this documentation because we believe in transparency. It serves as a reference to how the various services included in XKS are set up. It also describes various recurring procedures, such as replacing an existing Kubernetes cluster. +[**Operator Guide**](./operator-guide): This section is meant primarily for Xenit's operations staff. It collects Xenit's internal documentation for operating XKF clusters. It is public and part of this documentation because we believe in transparency. It serves as a reference to how the various services included in XKF are set up. It also describes various recurring procedures, such as replacing an existing Kubernetes cluster. -XKS is assembled from Open Source services, some of which are provided to XKS customers. This assembly is itself Open Source and the important components are documented under the Projects section in the menu bar. For more information about the services, please refer to their respective documentation. Some of the more prominent projects: +XKF is assembled from Open Source services, some of which are provided to XKF customers. 
This assembly is itself Open Source and the important components are documented under the Projects section in the menu bar. For more information about the services, please refer to their respective documentation. Some of the more prominent projects: - [Kubernetes](https://kubernetes.io/) - [Flux](https://fluxcd.io/) diff --git a/docs/xks/operator-guide/agents.md b/docs/xkf/operator-guide/agents.md similarity index 99% rename from docs/xks/operator-guide/agents.md rename to docs/xkf/operator-guide/agents.md index c88ba104fe0..e9a3e0a4321 100644 --- a/docs/xks/operator-guide/agents.md +++ b/docs/xkf/operator-guide/agents.md @@ -264,7 +264,7 @@ In Azure DevOps under project settings. Agent pools -> Add Pool -> Pick VMSS from dropdown -Agent image +Agent image #### Billing diff --git a/docs/xks/operator-guide/aws-azdo.md b/docs/xkf/operator-guide/aws-azdo.md similarity index 100% rename from docs/xks/operator-guide/aws-azdo.md rename to docs/xkf/operator-guide/aws-azdo.md diff --git a/docs/xks/operator-guide/blast-radius.md b/docs/xkf/operator-guide/blast-radius.md similarity index 97% rename from docs/xks/operator-guide/blast-radius.md rename to docs/xkf/operator-guide/blast-radius.md index 06d983fc96c..080661187ce 100644 --- a/docs/xks/operator-guide/blast-radius.md +++ b/docs/xkf/operator-guide/blast-radius.md @@ -49,7 +49,7 @@ Pipelines -> "pipeline-you-want-to-run" -> Run pipeline -> Variables -> Add vari Add `opaBlastRadius=51`, it should look something like this: -Blast Radius +Blast Radius To start the job you have to push "<-" and Run. 
diff --git a/docs/xks/operator-guide/blue-green.md b/docs/xkf/operator-guide/blue-green.md similarity index 100% rename from docs/xks/operator-guide/blue-green.md rename to docs/xkf/operator-guide/blue-green.md diff --git a/docs/xks/operator-guide/getting-started.md b/docs/xkf/operator-guide/getting-started.md similarity index 96% rename from docs/xks/operator-guide/getting-started.md rename to docs/xkf/operator-guide/getting-started.md index 4c6e09c699c..1548d429165 100644 --- a/docs/xks/operator-guide/getting-started.md +++ b/docs/xkf/operator-guide/getting-started.md @@ -24,13 +24,13 @@ Go to pipelines -> New pipeline -> Azure Repos Git -> azure-devops-templates -> Import the pipeline from the following path: `/.ci/pipeline.yaml` -### Setup XKS +### Setup XKF -In this case we will only setup a single XKS cluster in one environment, in our case dev. It is easy to add more environments when you have created your first one. +In this case we will only setup a single XKF cluster in one environment, in our case dev. It is easy to add more environments when you have created your first one. At Xenit we are using Terraform modules that we share [upstream](https://github.com/XenitAB/terraform-modules) -To setup XKS we will utilize 4 modules: +To setup XKF we will utilize 4 modules: - governance-global - governance-regional @@ -231,7 +231,7 @@ az ad sp create --id ${AZ_APP_OBJECT_ID} Grant the service principal additional permissions in the App Registration. The permissions `Group.ReadWrite.All` and `Application.ReadWrite.All` in Microsoft Graph should be added. After the permissions are added grant admin consent for the Tenant. -Make the service principal `Owner` of all the XKS subscriptions. This is done in the IAM settings of each individual subscription. Additionaly the service principal also needs to be member of the User +Make the service principal `Owner` of all the XKF subscriptions. This is done in the IAM settings of each individual subscription. 
Additionally the service principal also needs to be member of the User administrator role. Create three Azure AD groups. These will be used to assing users a owner, contributor, or reader role for all resources.. @@ -290,7 +290,7 @@ Project settings -> Service connections -> New service connection -> Azure Resou Update the variable `azureSubscriptionTemplate`. You can find the value under Project settings -> Service Connections -Service Connections +Service Connections In my case `sp-sub-project1-xks`: @@ -335,11 +335,11 @@ To make it possible for flux to clone repos from azure devops we need to create User Settings -> Personal access tokens -> New Token -Settings user +Settings user Create a PAT -Create PAT +Create PAT Copy the generated key, we will need it for the next step. @@ -369,15 +369,15 @@ In the Azure portal search for "Key vaults" and pick the core one that matches t Key vaults -> core-1234 -> Secrets -> Generate/Import -Azure Key Vaults +Azure Key Vaults Call the secret `azure-devops-pat` and add the PAT key that you created in the previous step. ## Admin and developer access -Hopefully you should now have one XKS cluster up and running, but currently no developer can actually reach the cluster. +Hopefully you should now have one XKF cluster up and running, but currently no developer can actually reach the cluster. -In XKF we see clusters as cattle and at any time we can decide to recreate an XKS cluster. +In XKF we see clusters as cattle and at any time we can decide to recreate an XKF cluster. To be able to do this without our developers even knowing we use blue green clusters. TODO write a document on how blue green clusters works and link. We use GitOps together with DNS to be able to migrate applications without any impact to end-users assuming that our developers have written 12 step applications. To store state we utilize the cloud services available in the different clouds that XKF supports.
@@ -386,7 +386,7 @@ To make sure that our developers do not notice when we change our the cluster we ### Azure AD Kubernetes Proxy -AZAD as we also call it, is a deployment that runs inside XKS and sits in front of the Kubernetes API. +AZAD as we also call it, is a deployment that runs inside XKF and sits in front of the Kubernetes API. We also supply a krew/kubectl plugin to make it easy for our developers to use AZAD. For instructions on how to setup and configure this [see](https://github.com/XenitAB/azad-kube-proxy). @@ -464,7 +464,7 @@ If you already have a rolebinding where a existing UUID exist you can run the fo ### Authorized IPs -To minimize the exposure of the XKS clusters we define a list of authorized IP:s that is approved to connect the Kubernetes cluster API. +To minimize the exposure of the XKF clusters we define a list of authorized IP:s that is approved to connect the Kubernetes cluster API. We need to approve multiple infrastructure networks and user networks. diff --git a/docs/xks/operator-guide/github.md b/docs/xkf/operator-guide/github.md similarity index 100% rename from docs/xks/operator-guide/github.md rename to docs/xkf/operator-guide/github.md diff --git a/docs/xks/operator-guide/index.md b/docs/xkf/operator-guide/index.md similarity index 86% rename from docs/xks/operator-guide/index.md rename to docs/xkf/operator-guide/index.md index 6b416a3f1f7..0537234234a 100644 --- a/docs/xks/operator-guide/index.md +++ b/docs/xkf/operator-guide/index.md @@ -9,20 +9,20 @@ import useBaseUrl from '@docusaurus/useBaseUrl'; ## Architecture -XKS is set up from a set of Terraform modules that when combined creates the full XKS service. There are multiple individual states that all fulfill their own purpose and build +XKF is set up from a set of Terraform modules that when combined creates the full XKF service. There are multiple individual states that all fulfill their own purpose and build upon each other in a hierarchical manner. 
The first setup requires applying the Terraform in the correct order, but after that ordering should not matter. Separate states are used as it allows for a more flexible architecture that could be changed in parallel. -XKS Overview +XKF Overview ## Network diagram Looking at a cluster, the simple network diagram looks like this: -XKS Overview +XKF Overview ## Terraform modules -The following Terraform modules are used in XKS. +The following Terraform modules are used in XKF. ### Governance @@ -46,4 +46,4 @@ The aks-global module contains the global resources like ACR, DNS and Azure AD c The aks and aks-core modules create a AKS cluster and configures it. This cluster will have a suffix, normally a number to allow for temporarily creating multiple clusters when performing a blue/green deployment of the clusters. Namespaces will be created in the cluster for each of the configured tenants. Each namespaces is linked to a resource group in Azure where namespace resources are expected to be created. -AKS Resource Groups +AKS Resource Groups diff --git a/docs/xks/operator-guide/kubernetes/aks.md b/docs/xkf/operator-guide/kubernetes/aks.md similarity index 99% rename from docs/xks/operator-guide/kubernetes/aks.md rename to docs/xkf/operator-guide/kubernetes/aks.md index 748a2888e71..69cafaf2969 100644 --- a/docs/xks/operator-guide/kubernetes/aks.md +++ b/docs/xkf/operator-guide/kubernetes/aks.md @@ -12,7 +12,7 @@ possible without manual intervention to change the instance type or taints on th work there has to be at least one instance present. This is because critical system pods like Tunnelfront and CoreDNS will by default run on the system node pool. For more information about AKS system node pool refer to the [official documentation](https://docs.microsoft.com/en-us/azure/aks/use-system-pools#system-and-user-node-pools). -XKS follows the Azure recommendation and runs only system critical applications on the system node pool. 
Doing this protects services like CoreDNS from starvation or memory issues caused by user +XKF follows the Azure recommendation and runs only system critical applications on the system node pool. Doing this protects services like CoreDNS from starvation or memory issues caused by user applications running on the same nodes. This is achieved by adding the taint `CriticalAddonsOnly` to all of the system nodes. ### Sizing Nodes diff --git a/docs/xks/operator-guide/kubernetes/eks.md b/docs/xkf/operator-guide/kubernetes/eks.md similarity index 100% rename from docs/xks/operator-guide/kubernetes/eks.md rename to docs/xkf/operator-guide/kubernetes/eks.md diff --git a/docs/xks/operator-guide/networking.md b/docs/xkf/operator-guide/networking.md similarity index 100% rename from docs/xks/operator-guide/networking.md rename to docs/xkf/operator-guide/networking.md diff --git a/sidebars.js b/sidebars.js index dab0a9e5156..beed9c2f65f 100755 --- a/sidebars.js +++ b/sidebars.js @@ -10,57 +10,57 @@ module.exports = { "items": [ { "type": "doc", - "id": "xks/index" + "id": "xkf/index" }, { "type": "doc", - "id": "xks/architecture-and-design", + "id": "xkf/architecture-and-design", }, { "type": "category", "label": "Developer Guide", "items": [ - "xks/developer-guide/introduction", - "xks/developer-guide/best-practices", - "xks/developer-guide/secrets-management", - "xks/developer-guide/cloud-iam", - "xks/developer-guide/container-security", + "xkf/developer-guide/introduction", + "xkf/developer-guide/best-practices", + "xkf/developer-guide/secrets-management", + "xkf/developer-guide/cloud-iam", + "xkf/developer-guide/container-security", { "type": "category", "label": "CI/CD", "items": [ - "xks/developer-guide/ci-cd/ci", - "xks/developer-guide/ci-cd/cd", - "xks/developer-guide/ci-cd/gitops", - "xks/developer-guide/ci-cd/flux", + "xkf/developer-guide/ci-cd/ci", + "xkf/developer-guide/ci-cd/cd", + "xkf/developer-guide/ci-cd/gitops", + "xkf/developer-guide/ci-cd/flux", ] }, - 
"xks/developer-guide/scheduling-scaling", - "xks/developer-guide/observability", - "xks/developer-guide/networking", - "xks/developer-guide/wsl2", - "xks/developer-guide/reports", + "xkf/developer-guide/scheduling-scaling", + "xkf/developer-guide/observability", + "xkf/developer-guide/networking", + "xkf/developer-guide/wsl2", + "xkf/developer-guide/reports", ] }, { "type": "category", "label": "Operator Guide", "items": [ - "xks/operator-guide/index", - "xks/operator-guide/getting-started", - "xks/operator-guide/agents", - "xks/operator-guide/networking", - "xks/operator-guide/blast-radius", - "xks/operator-guide/blue-green", - "xks/operator-guide/aws-azdo", - "xks/operator-guide/github", + "xkf/operator-guide/index", + "xkf/operator-guide/getting-started", + "xkf/operator-guide/agents", + "xkf/operator-guide/networking", + "xkf/operator-guide/blast-radius", + "xkf/operator-guide/blue-green", + "xkf/operator-guide/aws-azdo", + "xkf/operator-guide/github", { "type": "category", "label": "Kubernetes", "items": [ - "xks/operator-guide/kubernetes/aks", - "xks/operator-guide/kubernetes/eks", + "xkf/operator-guide/kubernetes/aks", + "xkf/operator-guide/kubernetes/eks", ] } ]