diff --git a/.yarn/releases/yarn-4.10.3.cjs b/.yarn/releases/yarn-4.10.3.cjs
old mode 100755
new mode 100644
diff --git a/lab/bin/reset-environment b/lab/bin/reset-environment
index 8f5b2d842d..a6211b5f28 100644
--- a/lab/bin/reset-environment
+++ b/lab/bin/reset-environment
@@ -106,24 +106,29 @@ kubectl delete pod load-generator --ignore-not-found
kubectl delete namespace other --ignore-not-found
-kubectl apply -k $base_path --prune --all \
- --prune-allowlist=autoscaling/v1/HorizontalPodAutoscaler \
- --prune-allowlist=core/v1/Service \
- --prune-allowlist=core/v1/ConfigMap \
- --prune-allowlist=apps/v1/Deployment \
- --prune-allowlist=apps/v1/StatefulSet \
- --prune-allowlist=core/v1/ServiceAccount \
- --prune-allowlist=core/v1/Secret \
- --prune-allowlist=core/v1/PersistentVolumeClaim \
- --prune-allowlist=scheduling.k8s.io/v1/PriorityClass \
- --prune-allowlist=networking.k8s.io/v1/Ingress
+if [[ $module != introduction/basics* ]]; then
+ kubectl apply -k $base_path --prune --all \
+ --prune-allowlist=autoscaling/v1/HorizontalPodAutoscaler \
+ --prune-allowlist=core/v1/Service \
+ --prune-allowlist=core/v1/ConfigMap \
+ --prune-allowlist=apps/v1/Deployment \
+ --prune-allowlist=apps/v1/StatefulSet \
+ --prune-allowlist=core/v1/ServiceAccount \
+ --prune-allowlist=core/v1/Secret \
+ --prune-allowlist=core/v1/PersistentVolumeClaim \
+ --prune-allowlist=scheduling.k8s.io/v1/PriorityClass \
+ --prune-allowlist=networking.k8s.io/v1/Ingress
+
+ logmessage "\n⏳ Waiting for application to become ready..."
+
+ sleep 10
+
+ kubectl wait --for=condition=available --timeout=240s deployments -l app.kubernetes.io/created-by=eks-workshop -A
+ kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A
-logmessage "\n⏳ Waiting for application to become ready..."
-
-sleep 10
-
-kubectl wait --for=condition=available --timeout=240s deployments -l app.kubernetes.io/created-by=eks-workshop -A
-kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A
+else
+ kubectl delete -k $base_path --ignore-not-found
+fi
# Addons
rm -rf /eks-workshop/terraform
@@ -238,10 +243,12 @@ if [ $EXIT_CODE -ne 0 ]; then
fi
# Recycle workload pods in case stateful pods got restarted
-kubectl delete pod -l app.kubernetes.io/created-by=eks-workshop -l app.kubernetes.io/component=service -A
+kubectl delete pod -l app.kubernetes.io/created-by=eks-workshop -l app.kubernetes.io/component=service -A --ignore-not-found
# Wait for the workload pods previously recycled
-kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A
+if kubectl get pods -A -l app.kubernetes.io/created-by=eks-workshop 2>/dev/null | grep -q .; then
+ kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A
+fi
# Finished
logmessage "\n✅ Environment is ${GREEN}ready${NC}!"
diff --git a/manifests/modules/introduction/basics/.workshop/cleanup.sh b/manifests/modules/introduction/basics/.workshop/cleanup.sh
new file mode 100755
index 0000000000..b76b38f432
--- /dev/null
+++ b/manifests/modules/introduction/basics/.workshop/cleanup.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+set -e
+
+echo "Cleaning up Kubernetes Basics module resources..."
+
+# Clean up pods
+echo "Cleaning up pods..."
+kubectl delete pod ui-pod -n ui --ignore-not-found=true
+kubectl delete pod test-pod --ignore-not-found=true
+
+# Clean up secrets
+echo "Cleaning up secrets..."
+kubectl delete secret catalog-db -n catalog --ignore-not-found=true
+
+# Clean up daemonsets
+echo "Cleaning up daemonsets..."
+kubectl delete daemonset log-collector -n kube-system --ignore-not-found=true
+
+# Clean up jobs and cronjobs
+echo "Cleaning up jobs and cronjobs..."
+kubectl delete job data-processor -n catalog --ignore-not-found=true
+kubectl delete cronjob catalog-cleanup -n catalog --ignore-not-found=true
+kubectl delete job manual-cleanup -n catalog --ignore-not-found=true
+
+# Delete any jobs that start with catalog-cleanup (created by CronJob)
+kubectl get jobs -n catalog -o name 2>/dev/null | grep "catalog-cleanup" | xargs -r kubectl delete -n catalog --ignore-not-found=true
+
+# Clean up namespaces (do this last as it will clean up any remaining resources)
+echo "Cleaning up namespaces..."
+kubectl delete namespace ui --ignore-not-found=true
+kubectl delete namespace catalog --ignore-not-found=true
+
+echo "Kubernetes Basics module cleanup completed."
\ No newline at end of file
diff --git a/manifests/modules/introduction/basics/configmaps/kustomization.yaml b/manifests/modules/introduction/basics/configmaps/kustomization.yaml
new file mode 100644
index 0000000000..44d8b488f3
--- /dev/null
+++ b/manifests/modules/introduction/basics/configmaps/kustomization.yaml
@@ -0,0 +1,4 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+ - ../../../../base-application/ui
diff --git a/manifests/modules/introduction/basics/configmaps/ui-pod-with-config.yaml b/manifests/modules/introduction/basics/configmaps/ui-pod-with-config.yaml
new file mode 100644
index 0000000000..e57db19140
--- /dev/null
+++ b/manifests/modules/introduction/basics/configmaps/ui-pod-with-config.yaml
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: ui-pod
+ namespace: ui
+ labels:
+ app.kubernetes.io/name: ui
+ app.kubernetes.io/component: service
+ app.kubernetes.io/created-by: eks-workshop
+spec:
+ containers:
+ - name: ui
+      image: public.ecr.aws/aws-containers/retail-store-sample-ui:1.2.1
+ ports:
+ - containerPort: 8080
+ envFrom:
+ - configMapRef:
+ name: ui
+ resources:
+ requests:
+ memory: "128Mi"
+ cpu: "100m"
+ limits:
+ memory: "256Mi"
+ cpu: "200m"
\ No newline at end of file
diff --git a/manifests/modules/introduction/basics/daemonsets/log-collector.yaml b/manifests/modules/introduction/basics/daemonsets/log-collector.yaml
new file mode 100644
index 0000000000..94fb1782bf
--- /dev/null
+++ b/manifests/modules/introduction/basics/daemonsets/log-collector.yaml
@@ -0,0 +1,41 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: log-collector
+ namespace: kube-system
+ labels:
+ app.kubernetes.io/name: log-collector
+ app.kubernetes.io/created-by: eks-workshop
+spec:
+ selector:
+ matchLabels:
+ app: log-collector
+ template:
+ metadata:
+ labels:
+ app: log-collector
+ spec:
+ containers:
+ - name: fluentd
+ image: public.ecr.aws/aws-observability/aws-for-fluent-bit:stable
+ volumeMounts:
+ - name: varlog
+ mountPath: /var/log
+ readOnly: true
+ - name: containers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ resources:
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ limits:
+ cpu: 200m
+ memory: 256Mi
+ volumes:
+ - name: varlog
+ hostPath:
+ path: /var/log
+ - name: containers
+ hostPath:
+ path: /var/lib/docker/containers
\ No newline at end of file
diff --git a/manifests/modules/introduction/helm/.workshop/cleanup.sh b/manifests/modules/introduction/basics/helm/.workshop/cleanup.sh
similarity index 100%
rename from manifests/modules/introduction/helm/.workshop/cleanup.sh
rename to manifests/modules/introduction/basics/helm/.workshop/cleanup.sh
diff --git a/manifests/modules/introduction/helm/.workshop/terraform/main.tf b/manifests/modules/introduction/basics/helm/.workshop/terraform/main.tf
similarity index 100%
rename from manifests/modules/introduction/helm/.workshop/terraform/main.tf
rename to manifests/modules/introduction/basics/helm/.workshop/terraform/main.tf
diff --git a/manifests/modules/introduction/helm/.workshop/terraform/outputs.tf b/manifests/modules/introduction/basics/helm/.workshop/terraform/outputs.tf
similarity index 100%
rename from manifests/modules/introduction/helm/.workshop/terraform/outputs.tf
rename to manifests/modules/introduction/basics/helm/.workshop/terraform/outputs.tf
diff --git a/manifests/modules/introduction/helm/.workshop/terraform/vars.tf b/manifests/modules/introduction/basics/helm/.workshop/terraform/vars.tf
similarity index 100%
rename from manifests/modules/introduction/helm/.workshop/terraform/vars.tf
rename to manifests/modules/introduction/basics/helm/.workshop/terraform/vars.tf
diff --git a/manifests/modules/introduction/helm/values.yaml b/manifests/modules/introduction/basics/helm/values.yaml
similarity index 100%
rename from manifests/modules/introduction/helm/values.yaml
rename to manifests/modules/introduction/basics/helm/values.yaml
diff --git a/manifests/modules/introduction/basics/jobs/catalog-cleanup.yaml b/manifests/modules/introduction/basics/jobs/catalog-cleanup.yaml
new file mode 100644
index 0000000000..c40e1c3701
--- /dev/null
+++ b/manifests/modules/introduction/basics/jobs/catalog-cleanup.yaml
@@ -0,0 +1,52 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: catalog-cleanup
+ namespace: catalog
+ labels:
+ app.kubernetes.io/name: catalog-cleanup
+ app.kubernetes.io/created-by: eks-workshop
+spec:
+ schedule: "*/1 * * * *" # Every 1 minute for demo purposes
+ timeZone: "UTC"
+ successfulJobsHistoryLimit: 3
+ failedJobsHistoryLimit: 1
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ labels:
+ app: catalog-cleanup
+ spec:
+ restartPolicy: OnFailure
+ containers:
+ - name: cleanup
+ image: busybox:1.36
+ command:
+ - /bin/sh
+ - -c
+ - |
+ echo "Starting cleanup job at $(date)"
+ echo "Checking for temporary files..."
+
+ # Simulate finding and cleaning up files
+ echo "Found 3 temporary files to clean up:"
+ echo " - /tmp/cache_file_1.tmp"
+ echo " - /tmp/cache_file_2.tmp"
+ echo " - /tmp/old_log.log"
+
+ # Simulate cleanup process
+ sleep 3
+ echo "Cleaning up temporary files..."
+ sleep 2
+ echo "Temporary files removed successfully"
+
+ echo "Cleanup completed at $(date)"
+ echo "Next cleanup scheduled in 1 minute"
+ resources:
+ requests:
+ cpu: 50m
+ memory: 64Mi
+ limits:
+ cpu: 100m
+ memory: 128Mi
\ No newline at end of file
diff --git a/manifests/modules/introduction/basics/jobs/data-processing-job.yaml b/manifests/modules/introduction/basics/jobs/data-processing-job.yaml
new file mode 100644
index 0000000000..ae68cb26cd
--- /dev/null
+++ b/manifests/modules/introduction/basics/jobs/data-processing-job.yaml
@@ -0,0 +1,55 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: data-processor
+ namespace: catalog
+ labels:
+ app.kubernetes.io/name: data-processor
+ app.kubernetes.io/created-by: eks-workshop
+spec:
+ completions: 1
+ parallelism: 1
+ backoffLimit: 3
+ template:
+ metadata:
+ labels:
+ app: data-processor
+ spec:
+ restartPolicy: Never
+ containers:
+ - name: processor
+ image: busybox:1.36
+ command:
+ - /bin/sh
+ - -c
+ - |
+ echo "Starting data processing job..."
+ echo "Processing catalog data files..."
+
+ # Simulate processing multiple files
+ for i in $(seq 1 5); do
+ echo "Processing file $i/5..."
+ sleep 2
+ echo "File $i processed successfully"
+ done
+
+ echo "Generating summary report..."
+ cat > /tmp/processing-report.txt << EOF
+ Data Processing Report
+ =====================
+ Job: data-processor
+ Date: $(date)
+ Files processed: 5
+ Status: Completed successfully
+ EOF
+
+ echo "Report generated:"
+ cat /tmp/processing-report.txt
+ echo "Data processing job completed successfully!"
+ resources:
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ limits:
+ cpu: 200m
+ memory: 256Mi
\ No newline at end of file
diff --git a/manifests/modules/introduction/kustomize/deployment.yaml b/manifests/modules/introduction/basics/kustomize/deployment.yaml
similarity index 100%
rename from manifests/modules/introduction/kustomize/deployment.yaml
rename to manifests/modules/introduction/basics/kustomize/deployment.yaml
diff --git a/manifests/modules/introduction/basics/kustomize/kustomization.yaml b/manifests/modules/introduction/basics/kustomize/kustomization.yaml
new file mode 100644
index 0000000000..35f499f927
--- /dev/null
+++ b/manifests/modules/introduction/basics/kustomize/kustomization.yaml
@@ -0,0 +1,6 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+ - ../../../../base-application/checkout
+patches:
+ - path: deployment.yaml
diff --git a/manifests/modules/introduction/basics/namespaces/namespace.yaml b/manifests/modules/introduction/basics/namespaces/namespace.yaml
new file mode 100644
index 0000000000..3c16236b55
--- /dev/null
+++ b/manifests/modules/introduction/basics/namespaces/namespace.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: ui
+ labels:
+ app.kubernetes.io/created-by: eks-workshop
diff --git a/manifests/modules/introduction/basics/pods/ui-pod.yaml b/manifests/modules/introduction/basics/pods/ui-pod.yaml
new file mode 100644
index 0000000000..02e4da1605
--- /dev/null
+++ b/manifests/modules/introduction/basics/pods/ui-pod.yaml
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: ui-pod
+ namespace: ui
+ labels:
+ app.kubernetes.io/name: ui
+ app.kubernetes.io/component: service
+spec:
+ containers:
+ - name: ui
+ image: public.ecr.aws/aws-containers/retail-store-sample-ui:1.2.1
+ ports:
+ - name: http
+ containerPort: 8080
+ protocol: TCP
+ env:
+ - name: JAVA_OPTS
+ value: -XX:MaxRAMPercentage=75.0 -Djava.security.egd=file:/dev/urandom
+ resources:
+ requests:
+ cpu: 250m
+ memory: 1.5Gi
+ limits:
+ memory: 1.5Gi
\ No newline at end of file
diff --git a/manifests/modules/introduction/basics/secrets/catalog-pod-with-secret.yaml b/manifests/modules/introduction/basics/secrets/catalog-pod-with-secret.yaml
new file mode 100644
index 0000000000..1848859623
--- /dev/null
+++ b/manifests/modules/introduction/basics/secrets/catalog-pod-with-secret.yaml
@@ -0,0 +1,27 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: catalog-pod
+ namespace: catalog
+ labels:
+ app.kubernetes.io/name: catalog
+ app.kubernetes.io/component: service
+ app.kubernetes.io/created-by: eks-workshop
+spec:
+ containers:
+ - name: catalog
+ image: public.ecr.aws/aws-containers/retail-store-sample-catalog:1.2.1
+ ports:
+ - containerPort: 8080
+ envFrom:
+ - configMapRef:
+ name: catalog
+ - secretRef:
+ name: catalog-db
+ resources:
+ requests:
+ memory: "128Mi"
+ cpu: "100m"
+ limits:
+ memory: "256Mi"
+ cpu: "200m"
\ No newline at end of file
diff --git a/manifests/modules/introduction/basics/secrets/kustomization.yaml b/manifests/modules/introduction/basics/secrets/kustomization.yaml
new file mode 100644
index 0000000000..5ae93b2ed0
--- /dev/null
+++ b/manifests/modules/introduction/basics/secrets/kustomization.yaml
@@ -0,0 +1,4 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+ - ../../../../base-application/catalog
diff --git a/manifests/modules/introduction/basics/services/deployment.yaml b/manifests/modules/introduction/basics/services/deployment.yaml
new file mode 100644
index 0000000000..771184f1a4
--- /dev/null
+++ b/manifests/modules/introduction/basics/services/deployment.yaml
@@ -0,0 +1,26 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: ui
+ labels:
+ app.kubernetes.io/created-by: eks-workshop
+ app.kubernetes.io/type: app
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: ui
+ app.kubernetes.io/instance: ui
+ app.kubernetes.io/component: service
+ template:
+ spec:
+ containers:
+ - name: ui
+ env:
+ - name: MANAGEMENT_INFO_ENV_ENABLED
+ value: "true"
+ - name: INFO_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+
\ No newline at end of file
diff --git a/manifests/modules/introduction/kustomize/kustomization.yaml b/manifests/modules/introduction/basics/services/kustomization.yaml
similarity index 73%
rename from manifests/modules/introduction/kustomize/kustomization.yaml
rename to manifests/modules/introduction/basics/services/kustomization.yaml
index 5dd2dae094..33a9c05913 100644
--- a/manifests/modules/introduction/kustomize/kustomization.yaml
+++ b/manifests/modules/introduction/basics/services/kustomization.yaml
@@ -1,6 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- - ../../../base-application/checkout
+ - ../../../../base-application/ui
patches:
- path: deployment.yaml
diff --git a/website/docs/introduction/getting-started/packaging-application.md b/website/docs/introduction/application-overview/index.md
similarity index 50%
rename from website/docs/introduction/getting-started/packaging-application.md
rename to website/docs/introduction/application-overview/index.md
index dfb8d8adcd..d869053097 100644
--- a/website/docs/introduction/getting-started/packaging-application.md
+++ b/website/docs/introduction/application-overview/index.md
@@ -1,8 +1,34 @@
---
-title: Packaging the components
-sidebar_position: 20
+title: Sample Application Overview
+sidebar_position: 40
---
+# Sample Application Overview
+
+The EKS workshop uses a sample application designed to illustrate various concepts related to containers on AWS. It models a retail store where customers can browse the product catalog, add items to their cart, and complete an order through the checkout process.
+
+You can find the full source code for the sample application on [GitHub](https://github.com/aws-containers/retail-store-sample-app).
+
+## Application Architecture
+
+The application follows a microservices architecture with several independent components:
+
+| Component | Description |
+| --------- | --------------------------------------------------------------------------------------------- |
+| UI | Provides the front end user interface and aggregates API calls to the various other services. |
+| Catalog | API for product listings and details |
+| Cart | API for customer shopping carts |
+| Checkout | API to orchestrate the checkout process |
+| Orders | API to receive and process customer orders |
+
+## Packaging the components
+
Before a workload can be deployed to a Kubernetes distribution like EKS it first must be packaged as a container image and published to a container registry. Basic container topics like this are not covered as part of this workshop, and the sample application has container images already available in Amazon Elastic Container Registry for the labs we'll complete today.
The table below provides links to the ECR Public repository for each component, as well as the `Dockerfile` that was used to build each component.
@@ -14,3 +40,6 @@ The table below provides links to the ECR Public repository for each component,
| Shopping cart | [Repository](https://gallery.ecr.aws/aws-containers/retail-store-sample-cart) | [Dockerfile](https://github.com/aws-containers/retail-store-sample-app/blob/v1.2.1/src/cart/Dockerfile) |
| Checkout | [Repository](https://gallery.ecr.aws/aws-containers/retail-store-sample-checkout) | [Dockerfile](https://github.com/aws-containers/retail-store-sample-app/blob/v1.2.1/src/checkout/Dockerfile) |
| Orders | [Repository](https://gallery.ecr.aws/aws-containers/retail-store-sample-orders) | [Dockerfile](https://github.com/aws-containers/retail-store-sample-app/blob/v1.2.1/src/orders/Dockerfile) |
+
+Initially we'll deploy the application in a manner that is self-contained in the Amazon EKS cluster, without using any AWS services like load balancers or a managed database. Over the course of the labs we'll leverage different features of EKS to take advantage of broader AWS services for our retail store.
+
diff --git a/website/docs/introduction/basics/access/index.md b/website/docs/introduction/basics/access/index.md
new file mode 100644
index 0000000000..86219508dd
--- /dev/null
+++ b/website/docs/introduction/basics/access/index.md
@@ -0,0 +1,12 @@
+---
+title: Cluster Access
+sidebar_position: 20
+description: "Learn how to configure access and interact with Kubernetes clusters using kubeconfig and kubectl."
+---
+
+# Interacting with Kubernetes
+
+Now that you understand what Kubernetes is, let's learn **how to interact with it** using the command-line tool and cluster configuration.
+
+- [kubectl](./kubectl) - The command-line tool for managing Kubernetes resources and applications.
+- [kubeconfig](./kubeconfig) - A `YAML` file that holds the information needed to interact with the Kubernetes API server.
\ No newline at end of file
diff --git a/website/docs/introduction/basics/access/kubeconfig.md b/website/docs/introduction/basics/access/kubeconfig.md
new file mode 100644
index 0000000000..044f84f349
--- /dev/null
+++ b/website/docs/introduction/basics/access/kubeconfig.md
@@ -0,0 +1,203 @@
+---
+title: kubeconfig
+sidebar_position: 20
+description: "Learn how to configure access to Kubernetes clusters using kubeconfig and AWS EKS integration."
+---
+
+# Cluster Access & Configuration
+
+To use kubectl with a Kubernetes cluster, you need to configure access using a **kubeconfig** file.
+
+The kubeconfig file is a YAML configuration file that tells kubectl:
+- **Where** to find your Kubernetes cluster (API server endpoint)
+- **How** to authenticate with it (credentials)
+- **Which** cluster and user to use by default (context)
+
+### kubeconfig Structure
+
+A kubeconfig file contains three main sections:
+
+```yaml
+apiVersion: v1
+kind: Config
+clusters: # Information about Kubernetes clusters
+- name: my-cluster
+ cluster:
+ server: https://kubernetes-api-server:6443
+    certificate-authority-data: <base64-encoded-ca-cert>
+
+users: # Authentication credentials for different users
+- name: my-user
+ user:
+    token: <bearer-token>
+ # OR client-certificate-data and client-key-data
+ # OR exec command for dynamic authentication
+
+contexts: # Combinations of cluster + user + namespace
+- name: my-context
+ context:
+ cluster: my-cluster
+ user: my-user
+ namespace: default
+
+current-context: my-context # Which context to use by default
+```
+
+### Key Components Explained
+
+**Clusters**: Define how to connect to Kubernetes API servers
+- **server**: The API server URL (e.g., `https://my-cluster.example.com:6443`)
+- **certificate-authority**: CA certificate to verify the server's identity
+- **insecure-skip-tls-verify**: Skip TLS verification (not recommended for production)
+
+**Users**: Define authentication methods
+- **token**: Bearer token authentication
+- **client-certificate/client-key**: Mutual TLS authentication
+- **username/password**: Basic authentication (rarely used)
+- **exec**: External command for dynamic authentication (like AWS CLI)
+
+**Contexts**: Combine cluster + user + optional default namespace
+- Allows you to easily switch between different clusters or users
+- Can set a default namespace to avoid specifying `-n` repeatedly
+
+### Managing Multiple Clusters
+
+kubeconfig supports multiple clusters, users, and contexts in a single file:
+
+```bash
+# View your complete kubeconfig
+$ kubectl config view
+
+# List all available contexts
+$ kubectl config get-contexts
+
+# Check current context
+$ kubectl config current-context
+```
+
+Additional commands:
+```bash
+# Switch between contexts
+$ kubectl config use-context <context-name>
+
+# Set default namespace for current context
+$ kubectl config set-context --current --namespace=<namespace>
+```
+
+### kubeconfig File Location
+
+By default, kubectl looks for kubeconfig at:
+- `~/.kube/config` (Linux/macOS)
+- `%USERPROFILE%\.kube\config` (Windows)
+
+You can override this with:
+- `KUBECONFIG` environment variable
+- `--kubeconfig` flag with kubectl commands
+
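+For example, `KUBECONFIG` accepts a colon-separated list of files that kubectl merges into a single logical configuration (the extra file path below is illustrative):
+
+```bash
+# Use an alternate kubeconfig for this shell session
+$ export KUBECONFIG=~/work/staging-kubeconfig
+
+# Merge multiple kubeconfig files; contexts from both become available
+$ export KUBECONFIG=~/.kube/config:~/work/staging-kubeconfig
+$ kubectl config get-contexts
+
+# Override the kubeconfig for a single command
+$ kubectl --kubeconfig ~/work/staging-kubeconfig get nodes
+```
+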
+## EKS-Specific Configuration
+
+Amazon EKS integrates seamlessly with the standard kubeconfig pattern but adds AWS-specific authentication.
+
+### AWS CLI Integration
+
+For EKS clusters, AWS CLI provides a convenient way to configure kubectl:
+
+```bash
+# Configure kubectl for your EKS cluster
+$ aws eks update-kubeconfig --region us-west-2 --name eks-workshop
+
+# Verify the connection
+$ kubectl get nodes
+```
+
+### What AWS CLI Does
+
+When you run `aws eks update-kubeconfig`, it:
+
+1. **Retrieves cluster information** from the EKS API
+2. **Updates your kubeconfig file** (`~/.kube/config`)
+3. **Sets up AWS authentication** using the `aws eks get-token` command
+
+### EKS kubeconfig Structure
+
+Here's what an EKS entry looks like in your kubeconfig:
+
+```yaml
+clusters:
+- cluster:
+    certificate-authority-data: <base64-encoded-ca-data>
+ server: https://ABC123.gr7.us-west-2.eks.amazonaws.com
+ name: arn:aws:eks:us-west-2:123456789012:cluster/eks-workshop
+
+users:
+- name: arn:aws:eks:us-west-2:123456789012:cluster/eks-workshop
+ user:
+ exec:
+ apiVersion: client.authentication.k8s.io/v1beta1
+ command: aws
+ args:
+ - eks
+ - get-token
+ - --cluster-name
+ - eks-workshop
+ - --region
+ - us-west-2
+
+contexts:
+- context:
+ cluster: arn:aws:eks:us-west-2:123456789012:cluster/eks-workshop
+ user: arn:aws:eks:us-west-2:123456789012:cluster/eks-workshop
+ name: arn:aws:eks:us-west-2:123456789012:cluster/eks-workshop
+```
+
+### EKS Authentication Flow
+
+When you run kubectl commands with EKS:
+
+1. **kubectl** reads the kubeconfig file
+2. **Executes** `aws eks get-token` command
+3. **AWS CLI** uses your AWS credentials to get a temporary token
+4. **kubectl** uses this token to authenticate with the EKS API server
+5. **EKS** validates the token and maps it to Kubernetes RBAC permissions
+
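+You can run the same command kubectl executes under the hood to see the credential it produces. A sketch of the output (trimmed, with an illustrative token value):
+
+```bash
+$ aws eks get-token --cluster-name eks-workshop --region us-west-2
+{
+    "kind": "ExecCredential",
+    "apiVersion": "client.authentication.k8s.io/v1beta1",
+    "spec": {},
+    "status": {
+        "expirationTimestamp": "2025-01-01T00:14:00Z",
+        "token": "k8s-aws-v1.aHR0cHM6Ly9zdHMudXMtd2..."
+    }
+}
+```
+
+The token is a short-lived, presigned AWS STS request, which is why no static credentials ever need to be stored in the kubeconfig.
+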
+### AWS Credentials for EKS
+
+EKS authentication relies on your AWS credentials, which can come from:
+- **AWS CLI profiles** (`~/.aws/credentials`)
+- **Environment variables** (`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`)
+- **IAM roles** (EC2 instance profiles, EKS service accounts)
+- **AWS SSO** sessions
+
+### Viewing Your EKS Configuration
+
+```bash
+# See your current kubeconfig (including EKS entries)
+$ kubectl config view
+
+# Check which EKS cluster you're connected to
+$ kubectl config current-context
+
+# Test your connection
+$ kubectl get nodes
+
+# Get cluster information
+$ kubectl cluster-info
+```
+
+## Key Concepts to Remember
+
+### kubeconfig Fundamentals
+- **kubeconfig file** is the standard way Kubernetes stores cluster connection information
+- **Three main components**: clusters (where), users (who), contexts (which combination)
+- **Works the same** across all Kubernetes distributions (EKS, GKE, AKS, self-managed)
+- **File location**: `~/.kube/config` by default, customizable via `KUBECONFIG` environment variable
+
+### EKS Integration
+- **AWS CLI integration** uses standard kubeconfig with AWS-specific authentication via `aws eks get-token`
+- **Dynamic authentication** - tokens are generated on-demand using your AWS credentials
+- **No static credentials** stored in kubeconfig - more secure than traditional approaches
+
+### Context Management
+- **Contexts** combine cluster + user + optional namespace for easy switching
+- **Multiple clusters** can be managed from a single kubeconfig file
+- **Default namespace** can be set per context to avoid repetitive `-n` flags
diff --git a/website/docs/introduction/basics/access/kubectl.md b/website/docs/introduction/basics/access/kubectl.md
new file mode 100644
index 0000000000..d29ba26de1
--- /dev/null
+++ b/website/docs/introduction/basics/access/kubectl.md
@@ -0,0 +1,137 @@
+---
+title: kubectl
+sidebar_position: 10
+description: "Learn essential kubectl commands for managing Kubernetes resources."
+---
+
+[kubectl](https://kubernetes.io/docs/reference/kubectl/) (pronounced "kube-control" or "kube-c-t-l") is the command-line tool that communicates with the Kubernetes API server. It translates your commands into API calls and presents the results in a human-readable format.
+
+All kubectl commands follow this pattern:
+```
+kubectl [command] [type] [name] [flags]
+```
+
+Examples:
+- `kubectl get pods` - List all pods
+- `kubectl describe service ui` - Get detailed info about the ui service
+- `kubectl apply -f deployment.yaml` - Create resources from a file
+
+kubectl has excellent built-in documentation. Let's explore it:
+
+```bash
+$ kubectl
+kubectl controls the Kubernetes cluster manager.
+
+ Find more information at: https://kubernetes.io/docs/reference/kubectl/
+
+Basic Commands (Beginner):
+ create Create a resource from a file or from stdin
+ expose Take a replication controller, service, deployment or pod and expose it as a new
+Kubernetes service
+ run Run a particular image on the cluster
+ set Set specific features on objects
+
+Basic Commands (Intermediate):
+ explain Get documentation for a resource
+ get Display one or many resources
+ edit Edit a resource on the server
+ delete Delete resources by file names, stdin, resources and names, or by resources and
+label selector
+
+Deploy Commands:
+ rollout Manage the rollout of a resource
+ scale Set a new size for a deployment, replica set, or replication controller
+ autoscale Auto-scale a deployment, replica set, stateful set, or replication controller
+
+Cluster Management Commands:
+ certificate Modify certificate resources
+ cluster-info Display cluster information
+ top Display resource (CPU/memory) usage
+ cordon Mark node as unschedulable
+ uncordon Mark node as schedulable
+ drain Drain node in preparation for maintenance
+ taint Update the taints on one or more nodes
+
+Troubleshooting and Debugging Commands:
+ describe Show details of a specific resource or group of resources
+ logs Print the logs for a container in a pod
+ attach Attach to a running container
+ exec Execute a command in a container
+ port-forward Forward one or more local ports to a pod
+ proxy Run a proxy to the Kubernetes API server
+ cp Copy files and directories to and from containers
+ auth Inspect authorization
+ debug Create debugging sessions for troubleshooting workloads and nodes
+ events List events
+
+Advanced Commands:
+ diff Diff the live version against a would-be applied version
+ apply Apply a configuration to a resource by file name or stdin
+ patch Update fields of a resource
+ replace Replace a resource by file name or stdin
+ wait Experimental: Wait for a specific condition on one or many resources
+ kustomize Build a kustomization target from a directory or URL
+
+Settings Commands:
+ label Update the labels on a resource
+ annotate Update the annotations on a resource
+ completion Output shell completion code for the specified shell (bash, zsh, fish, or
+powershell)
+
+Subcommands provided by plugins:
+ connect The command connect is a plugin installed by the user
+
+Other Commands:
+ api-resources Print the supported API resources on the server
+ api-versions Print the supported API versions on the server, in the form of "group/version"
+ config Modify kubeconfig files
+ plugin Provides utilities for interacting with plugins
+ version Print the client and server version information
+
+Usage:
+ kubectl [flags] [options]
+
+Use "kubectl --help" for more information about a given command.
+Use "kubectl options" for a list of global command-line options (applies to all commands).
+```
+
+`kubectl` organizes commands into logical categories. Understanding these categories helps you find the right command for any task:
+
+1. Basic Commands (Beginner & Intermediate)
+2. Deploy Commands
+3. Cluster Management Commands
+4. Troubleshooting and Debugging Commands
+5. Advanced Commands
+6. Settings Commands
+7. Other Commands
+
+### Getting Help
+kubectl has excellent built-in help:
+```bash
+# See all command categories
+$ kubectl --help
+
+# Get help for specific commands
+$ kubectl get --help
+$ kubectl apply --help
+
+# Get resource documentation
+$ kubectl explain pod
+$ kubectl explain deployment.spec.template
+```
+
+## Workshop Patterns
+
+Throughout this workshop, you'll frequently use these kubectl commands:
+
+- `kubectl apply -k` for Kustomize deployments
+- `kubectl get pods -n <namespace>` for checking application status
+- `kubectl describe` and `kubectl logs` for troubleshooting
+- `kubectl port-forward` for accessing applications locally
+
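+For example, a typical troubleshooting sequence might look like this (the `ui` namespace, service, and labels come from the sample application used in this workshop):
+
+```bash
+# Check pod status in the ui namespace
+$ kubectl get pods -n ui
+
+# Inspect events and configuration for the ui pods
+$ kubectl describe pod -n ui -l app.kubernetes.io/name=ui
+
+# View recent log output
+$ kubectl logs -n ui -l app.kubernetes.io/name=ui --tail=20
+
+# Forward local port 8080 to the ui service's port 80
+$ kubectl port-forward -n ui svc/ui 8080:80
+```
+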
+## Key Concepts to Remember
+
+- **Command pattern**: `kubectl [command] [type] [name] [flags]` - all commands follow this structure
+- **Get help**: Use `kubectl --help` or `kubectl <command> --help` to discover options
+- **Declarative approach**: Use `kubectl apply -f` for production deployments
+- **Namespace awareness**: Always specify `-n <namespace>` or use `-A` for all namespaces
+- **Essential commands**: `get`, `describe`, `logs`, `apply`, `port-forward` cover most daily tasks
\ No newline at end of file
diff --git a/website/docs/introduction/basics/architecture.md b/website/docs/introduction/basics/architecture.md
new file mode 100644
index 0000000000..57589520e2
--- /dev/null
+++ b/website/docs/introduction/basics/architecture.md
@@ -0,0 +1,71 @@
+---
+title: Architecture
+sidebar_position: 10
+description: "Understand Kubernetes and Amazon EKS architecture fundamentals."
+---
+
+# Kubernetes Architecture
+
+Kubernetes follows a **control plane–worker node architecture**, where the **control plane** manages the cluster and **worker nodes** run your workloads.
+
+*Figure: Simplified Kubernetes cluster architecture.*
+
+### Control Plane Components
+
+The control plane makes global decisions about the cluster and continuously drives it toward the desired state.
+
+- **API Server** — Acts as the front-end for Kubernetes, exposing the Kubernetes API to users and components.
+- **etcd** — A highly available key-value store that holds all cluster data.
+- **Scheduler** — Assigns Pods to nodes based on resource availability and constraints.
+- **Controller Manager** — Runs background processes (controllers) that maintain cluster health and reconcile actual vs. desired states.
+
+### Worker Node Components
+
+Each node runs the components needed to host and manage Pods.
+
+- **kubelet** — Communicates with the control plane and ensures containers are running as expected.
+- **Container Runtime** — Executes containers (e.g., containerd, CRI-O).
+- **kube-proxy** — Maintains network rules and manages communication between Pods and services.
+
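+You can see some of this information surfaced on a live cluster: `kubectl get nodes -o wide` reports each node's kubelet version and container runtime (output abbreviated; names and versions are illustrative):
+
+```bash
+$ kubectl get nodes -o wide
+NAME                          STATUS   ROLES    AGE   VERSION   ...   CONTAINER-RUNTIME
+ip-10-42-10-14.ec2.internal   Ready    <none>   1h    v1.30.0   ...   containerd://1.7.11
+```
+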
+---
+
+## Amazon EKS Architecture
+
+**Amazon Elastic Kubernetes Service (EKS)** is a managed Kubernetes service that simplifies cluster operations.
+It takes care of control plane management, upgrades, and high availability, so you can focus on your workloads.
+
+With EKS, you can:
+- **Deploy applications faster** with less operational overhead
+- **Scale seamlessly** to handle changing workloads
+- **Enhance security** using AWS IAM and managed updates
+- **Choose your compute model** — traditional EC2 nodes, serverless Fargate, or fully automated compute with EKS Auto Mode
+
+### Shared Responsibility Model
+
+In Amazon EKS:
+- **AWS manages the control plane** — including the API Server, etcd, scheduler, and controllers.
+- **You manage the worker nodes** — EC2, Fargate, or hybrid options where your applications run.
+- **AWS services integrate natively** — including load balancers, IAM roles, VPC networking, and storage.
+
+*Figure: Amazon EKS architecture and integration with AWS services.*
+
+## Key Design Principles
+
+Understanding these principles will help you work more effectively with Kubernetes:
+
+### Control Plane vs. Worker Nodes
+- **Control plane** components (API Server, etcd, Scheduler, Controller Manager) handle cluster-wide decisions and state management
+- **Worker nodes** (kubelet, container runtime, kube-proxy) focus on running and networking your applications
+- This separation allows for scalable, resilient cluster operations
+
+### EKS Advantages
+- **Reduced operational burden** — AWS manages control plane complexity, patching, and high availability
+- **Native AWS integration** — Seamless connectivity with VPC, IAM, Load Balancers, and other AWS services
+- **Flexible compute options** — Choose between EC2, Fargate, or Auto Mode based on your workload needs
+
+### Core Concepts
+- **Declarative configuration** — Define desired state; Kubernetes controllers work to achieve it
+- **API-driven** — All interactions go through the Kubernetes API for consistency and auditability
+- **Extensible** — Custom resources and controllers allow you to extend Kubernetes functionality
\ No newline at end of file
diff --git a/website/docs/introduction/basics/configuration/configmaps/index.md b/website/docs/introduction/basics/configuration/configmaps/index.md
new file mode 100644
index 0000000000..210a5313ce
--- /dev/null
+++ b/website/docs/introduction/basics/configuration/configmaps/index.md
@@ -0,0 +1,119 @@
+---
+title: ConfigMaps
+sidebar_position: 10
+---
+
+# ConfigMaps
+
+**ConfigMaps** allow you to decouple configuration artifacts from image content to keep containerized applications portable. They store non-confidential data in key-value pairs and can be consumed by pods as environment variables, command-line arguments, or configuration files.
+
+ConfigMaps provide:
+- **Configuration Management:** Store application configuration separately from code
+- **Environment Flexibility:** Use different configurations for different environments
+- **Runtime Updates:** Update configuration without rebuilding container images
+- **Portability:** Keep applications portable across different environments
+
+In this lab, you'll learn about ConfigMaps by creating one for our retail store's UI component and seeing how it connects to backend services.
+
+### Creating a ConfigMap
+
+Let's create a ConfigMap for our retail store's UI component. The UI needs to know where to find the backend services:
+
+::yaml{file="manifests/base-application/ui/configMap.yaml" paths="kind,metadata.name,data" title="ui-configmap.yaml"}
+
+1. `kind: ConfigMap`: Tells Kubernetes what type of resource to create
+2. `metadata.name`: Unique identifier for this ConfigMap within the namespace
+3. `data`: Key-value pairs containing the configuration data
+
+Apply the ConfigMap configuration:
+```bash
+$ kubectl apply -k ~/environment/eks-workshop/modules/introduction/basics/configmaps/
+```
+
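+ConfigMaps can also be created imperatively. For example, `kubectl create configmap` with `--from-literal` builds the same kind of object without a manifest file (the name and keys here are only for illustration):
+
+```bash
+$ kubectl create configmap demo-config -n ui \
+  --from-literal=LOG_LEVEL=debug \
+  --from-literal=FEATURE_FLAG=true \
+  --dry-run=client -o yaml
+```
+
+The `--dry-run=client -o yaml` flags print the generated manifest instead of creating the object, which is a convenient way to bootstrap YAML files.
+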
+### Exploring the ConfigMap
+
+Now let's examine the ConfigMap we just created:
+
+```bash
+$ kubectl get configmaps -n ui
+NAME DATA AGE
+kube-root-ca.crt 1 2m51s
+ui 4 2m50s
+```
+
+Get detailed information about the ConfigMap:
+```bash
+$ kubectl describe configmap ui -n ui
+Name: ui
+Namespace: ui
+Labels:       <none>
+Annotations:  <none>
+
+Data
+====
+RETAIL_UI_ENDPOINTS_CARTS:
+----
+http://carts.carts.svc:80
+
+RETAIL_UI_ENDPOINTS_CATALOG:
+----
+http://catalog.catalog.svc:80
+
+RETAIL_UI_ENDPOINTS_CHECKOUT:
+----
+http://checkout.checkout.svc:80
+
+RETAIL_UI_ENDPOINTS_ORDERS:
+----
+http://orders.orders.svc:80
+
+
+BinaryData
+====
+
+Events:
+```
+
+This shows:
+- **Data section** - The key-value pairs stored in the ConfigMap
+- **Labels** - Metadata tags for organization
+- **Annotations** - Additional metadata
+
+### Using ConfigMaps in Pods
+
+Now let's create a pod that uses our ConfigMap. We'll update our UI pod to use the configuration:
+
+::yaml{file="manifests/modules/introduction/basics/configmaps/ui-pod-with-config.yaml" paths="spec.containers.0.envFrom" title="ui-pod-with-config.yaml"}
+
+1. `envFrom.configMapRef`: Loads all key-value pairs from the ConfigMap as environment variables
+
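+`envFrom` imports every key in the ConfigMap. When you only need specific keys, or want the data available as files, two other consumption patterns exist. A minimal sketch using the same `ui` ConfigMap (the pod name and mount path are hypothetical):
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: ui-pod-files # hypothetical pod, not used elsewhere in this lab
+  namespace: ui
+spec:
+  containers:
+    - name: ui
+      image: public.ecr.aws/aws-containers/retail-store-sample-ui:1.2.1
+      env:
+        - name: RETAIL_UI_ENDPOINTS_CATALOG # map a single ConfigMap key
+          valueFrom:
+            configMapKeyRef:
+              name: ui
+              key: RETAIL_UI_ENDPOINTS_CATALOG
+      volumeMounts:
+        - name: config-volume # every key becomes a file under /etc/config
+          mountPath: /etc/config
+          readOnly: true
+  volumes:
+    - name: config-volume
+      configMap:
+        name: ui
+```
+
+For this lab we'll stick with the simpler `envFrom` approach.
+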
+Apply the updated pod configuration:
+```bash hook=ready
+$ kubectl apply -f ~/environment/eks-workshop/modules/introduction/basics/configmaps/ui-pod-with-config.yaml
+```
+
+### Testing the Configuration
+
+Let's verify that our pod can now access the configuration:
+
+```bash
+$ kubectl exec -n ui ui-pod -- env | grep RETAIL_UI_ENDPOINTS_CATALOG
+RETAIL_UI_ENDPOINTS_CATALOG=http://catalog.catalog.svc:80
+```
+
+You can also see all the ConfigMap environment variables:
+```bash
+$ kubectl exec -n ui ui-pod -- env | grep RETAIL_UI
+RETAIL_UI_ENDPOINTS_CATALOG=http://catalog.catalog.svc:80
+RETAIL_UI_ENDPOINTS_CARTS=http://carts.carts.svc:80
+RETAIL_UI_ENDPOINTS_ORDERS=http://orders.orders.svc:80
+RETAIL_UI_ENDPOINTS_CHECKOUT=http://checkout.checkout.svc:80
+```
+
+## Key Points to Remember
+
+* ConfigMaps store non-confidential configuration data
+* They decouple configuration from container images
+* Can be consumed as environment variables or mounted as files
+* Allow the same image to work across different environments
+* Have a 1MB size limit per ConfigMap
diff --git a/website/docs/introduction/basics/configuration/configmaps/tests/hook-ready.sh b/website/docs/introduction/basics/configuration/configmaps/tests/hook-ready.sh
new file mode 100644
index 0000000000..4e28a3b545
--- /dev/null
+++ b/website/docs/introduction/basics/configuration/configmaps/tests/hook-ready.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+set -Eeuo pipefail
+
+before() {
+ echo "noop"
+}
+
+after() {
+ echo "Waiting for UI pod to be ready..."
+ kubectl wait --for=condition=ready pod/ui-pod -n ui --timeout=300s
+
+ echo "Verifying ConfigMap is accessible..."
+
+ # Check that ConfigMap exists
+ kubectl get configmap ui -n ui
+
+ # Verify the pod has the environment variable from ConfigMap
+ env_var=$(kubectl exec -n ui ui-pod -- env | grep RETAIL_UI_ENDPOINTS_CATALOG || echo "")
+ if [ -z "$env_var" ]; then
+ echo "Error: RETAIL_UI_ENDPOINTS_CATALOG environment variable not found"
+ echo "Available environment variables:"
+ kubectl exec -n ui ui-pod -- env | grep RETAIL_UI || echo "No RETAIL_UI variables found"
+ exit 1
+ fi
+
+ echo "Found environment variable: $env_var"
+ echo "ConfigMap test completed successfully"
+}
+
+"$@"
\ No newline at end of file
diff --git a/website/docs/introduction/basics/configuration/index.md b/website/docs/introduction/basics/configuration/index.md
new file mode 100644
index 0000000000..5c310464f8
--- /dev/null
+++ b/website/docs/introduction/basics/configuration/index.md
@@ -0,0 +1,67 @@
+---
+title: Configuration
+sidebar_position: 60
+---
+
+# Configuration
+
+Applications often require configuration data - from environment-specific settings like API endpoints to sensitive credentials like database passwords. Kubernetes provides two core resources to manage configuration data:
+
+- **ConfigMaps** - for non-confidential configuration data
+- **Secrets** - for sensitive information like passwords, tokens, and certificates
+
+Modern applications run across multiple environments and often scale dynamically.
+
+Kubernetes configuration resources make this easy by allowing you to:
+- **Separate configuration from code** — so you can deploy the same container everywhere
+- **Use environment-specific settings** without modifying application images
+- **Update configuration at runtime** without rebuilding container images
+- **Enhance security** by limiting access to sensitive values
+- **Improve portability** across clusters and cloud providers
+
+## ConfigMaps vs Secrets
+
+| Category | ConfigMaps | Secrets |
+| ------------------ | ------------------------------------------ | --------------------------------------------- |
+| **Purpose** | Store non-confidential configuration | Store sensitive data |
+| **Examples** | API endpoints, feature flags, config files | Passwords, tokens, certificates |
+| **Data format** | Plain text | Base64 encoded |
+| **Visibility** | Readable by all with access | Access restricted via RBAC |
+| **Security level** | Low | High |
+
+## When to Use Each
+
+**Use ConfigMaps for:**
+- Application settings and feature flags
+- Service URLs and API endpoints
+- Configuration files (`nginx.conf`, `application.yaml`)
+- Environment-specific parameters
+
+**Use Secrets for:**
+- Database credentials
+- API keys and tokens
+- TLS certificates and private keys
+- Container registry credentials
+
+## Configuration Patterns
+
+Both ConfigMaps and Secrets can be consumed by pods in multiple ways:
+
+- **Environment variables:** Inject configuration as environment variables
+- **Volume mounts:** Mount configuration as files in the container filesystem
+- **Command-line arguments:** Pass configuration as arguments to container commands
+
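+A single container can combine all three patterns. A compact sketch, assuming a ConfigMap named `app-config` with a key `mode` (both hypothetical):
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: config-demo # hypothetical pod for illustration
+spec:
+  restartPolicy: Never
+  containers:
+    - name: app
+      image: public.ecr.aws/docker/library/busybox:1.36
+      # Command-line argument: Kubernetes expands $(APP_MODE) from the env var below
+      command: ["sh", "-c", "echo mode=$(APP_MODE) && cat /etc/app/mode"]
+      env:
+        - name: APP_MODE # environment variable sourced from a ConfigMap key
+          valueFrom:
+            configMapKeyRef:
+              name: app-config
+              key: mode
+      volumeMounts:
+        - name: settings # ConfigMap keys mounted as files
+          mountPath: /etc/app
+  volumes:
+    - name: settings
+      configMap:
+        name: app-config
+```
+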
+## Explore Configuration Management
+
+Learn how to manage both types of configuration data:
+
+- **[ConfigMaps](./configmaps)** - Store and manage non-confidential configuration data
+- **[Secrets](./secrets)** - Securely handle sensitive information like passwords and certificates
+
+## Key Points to Remember
+
+* ConfigMaps handle non-confidential configuration data
+* Secrets securely store sensitive information
+* Both decouple configuration from application code
+* Choose the right resource based on data sensitivity
+* Both support multiple consumption patterns (env vars, files, args)
\ No newline at end of file
diff --git a/website/docs/introduction/basics/configuration/secrets/index.md b/website/docs/introduction/basics/configuration/secrets/index.md
new file mode 100644
index 0000000000..19ba42aec2
--- /dev/null
+++ b/website/docs/introduction/basics/configuration/secrets/index.md
@@ -0,0 +1,152 @@
+---
+title: Secrets
+sidebar_position: 20
+---
+
+# Secrets
+
+**Secrets** are used to store and manage sensitive information such as passwords, OAuth tokens, SSH keys, and TLS certificates. They provide a more secure way to handle confidential data compared to putting it directly in pod specifications or container images.
+
+Secrets provide:
+- **Security:** Store sensitive data separately from application code
+- **Access Control:** Control which pods and users can access sensitive information
+- **Encryption:** Data is base64 encoded and can be encrypted at rest
+- **Flexibility:** Use secrets as environment variables, files, or for image pulls
+
+In this lab, you'll learn about Secrets by creating database credentials for our retail store's catalog service and seeing how pods securely access this sensitive information.
+
+### Creating Your First Secret
+
+Let's create a Secret for our retail store's catalog service. The catalog needs database credentials to connect to its MySQL database:
+
+::yaml{file="manifests/base-application/catalog/secrets.yaml" paths="kind,metadata.name,data" title="catalog-secret.yaml"}
+
+1. `kind: Secret`: Tells Kubernetes what type of resource to create
+2. `metadata.name`: Unique identifier for this Secret within the namespace
+3. `data`: Key-value pairs containing sensitive data (base64 encoded)
+
+Apply the Secret configuration:
+```bash
+$ kubectl apply -k ~/environment/eks-workshop/modules/introduction/basics/secrets
+```
+
+### Exploring Your Secret
+
+Now let's examine the Secret we just created:
+
+```bash
+$ kubectl get secrets -n catalog
+NAME TYPE DATA AGE
+catalog-db Opaque 2 30s
+```
+
+Get detailed information about the Secret:
+```bash
+$ kubectl describe secret -n catalog catalog-db
+Name: catalog-db
+Namespace: catalog
+Labels:       <none>
+Annotations:  <none>
+
+Type: Opaque
+
+Data
+====
+RETAIL_CATALOG_PERSISTENCE_PASSWORD: 16 bytes
+RETAIL_CATALOG_PERSISTENCE_USER: 7 bytes
+```
+
+This shows:
+- **Type** - The kind of secret (Opaque for general use)
+- **Data** - Number of key-value pairs (values are hidden for security)
+- **Labels** - Metadata tags for organization
+
+Notice that the actual values are not displayed for security reasons. To see the base64 encoded data:
+```bash
+$ kubectl get secret catalog-db -n catalog -o yaml
+apiVersion: v1
+data:
+ RETAIL_CATALOG_PERSISTENCE_PASSWORD: ZFltTmZXVjR1RXZUem9GdQ==
+ RETAIL_CATALOG_PERSISTENCE_USER: Y2F0YWxvZw==
+kind: Secret
+metadata:
+ annotations:
+ kubectl.kubernetes.io/last-applied-configuration: |
+ {"apiVersion":"v1","data":{"RETAIL_CATALOG_PERSISTENCE_PASSWORD":"ZFltTmZXVjR1RXZUem9GdQ==","RETAIL_CATALOG_PERSISTENCE_USER":"Y2F0YWxvZw=="},"kind":"Secret","metadata":{"annotations":{},"name":"catalog-db","namespace":"catalog"}}
+ creationTimestamp: "2025-10-05T17:52:34Z"
+ name: catalog-db
+ namespace: catalog
+ resourceVersion: "902820"
+ uid: 726e4fef-f82b-4a7e-a063-f72f18a941cd
+type: Opaque
+```
+
+You'll see the data is base64 encoded. To decode a value:
+```bash
+$ kubectl get secret catalog-db -n catalog -o jsonpath='{.data.RETAIL_CATALOG_PERSISTENCE_USER}' | base64 --decode
+catalog
+```
+
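+The encoding works the same way in reverse. When you create a Secret imperatively, kubectl base64-encodes the values for you (the name and values below are only for illustration):
+
+```bash
+$ kubectl create secret generic demo-db \
+  --from-literal=username=demo \
+  --from-literal=password='s3cr3t!' \
+  --dry-run=client -o yaml
+apiVersion: v1
+data:
+  password: czNjcjN0IQ==
+  username: ZGVtbw==
+kind: Secret
+metadata:
+  creationTimestamp: null
+  name: demo-db
+```
+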
+### Using Secrets in Pods
+
+Now let's create a pod that uses our Secret. We'll update our catalog pod to use the database credentials:
+
+::yaml{file="manifests/modules/introduction/basics/secrets/catalog-pod-with-secret.yaml" paths="kind,metadata.name,spec.containers,spec.containers.0.envFrom" title="catalog-pod-with-secret.yaml"}
+
+The key differences here are:
+- `envFrom.configMapRef`: Loads all key-value pairs from a ConfigMap as environment variables
+- `envFrom.secretRef`: Loads all key-value pairs from a Secret as environment variables
+- This approach automatically makes all Secret data available without mapping individual keys
+
+Apply the updated pod configuration:
+```bash
+$ kubectl apply -f ~/environment/eks-workshop/modules/introduction/basics/secrets/catalog-pod-with-secret.yaml
+```
+
+### Testing the Secret Access
+
+Let's verify that our pod can access the secret values:
+
+```bash hook=ready
+$ kubectl exec -n catalog catalog-pod -- env | grep RETAIL_CATALOG_PERSISTENCE_USER
+RETAIL_CATALOG_PERSISTENCE_USER=catalog
+```
+
+You can also see all catalog-related environment variables:
+```bash
+$ kubectl exec -n catalog catalog-pod -- env | grep RETAIL_CATALOG
+RETAIL_CATALOG_PERSISTENCE_PROVIDER=mysql
+RETAIL_CATALOG_PERSISTENCE_ENDPOINT=catalog-mysql:3306
+RETAIL_CATALOG_PERSISTENCE_DB_NAME=catalog
+RETAIL_CATALOG_PERSISTENCE_USER=catalog
+RETAIL_CATALOG_PERSISTENCE_PASSWORD=dYmNfWV4uEvTzoFu
+```
+
+:::warning
+In production, avoid printing passwords to logs or console output. This is shown here for educational purposes only.
+:::
+
+## Secrets vs ConfigMaps
+
+| Secrets | ConfigMaps |
+|---------|------------|
+| Sensitive data (passwords, tokens) | Non-confidential data |
+| Base64 encoded + additional security | Plain text |
+| Values hidden in kubectl output | Visible in plain text |
+| Credentials, certificates, keys | Configuration files, environment variables |
+
+## Advanced Secrets Management
+
+While Kubernetes Secrets provide basic security for sensitive data, production environments often require more sophisticated secrets management solutions. For enhanced security features like automatic rotation, fine-grained access control, and integration with external secret stores, explore:
+
+**[AWS Secrets Manager Integration](../../../../security/secrets-management/secrets-manager/)** - Learn how to integrate AWS Secrets Manager with your EKS cluster for enterprise-grade secrets management with automatic rotation and centralized control.
+
+## Key Points to Remember
+
+* Secrets store sensitive data separately from application code
+* Values are base64 encoded and can be encrypted at rest
+* Secret values are hidden in kubectl describe output for security
+* Can be consumed as environment variables or mounted as files
+* Use ConfigMaps for non-sensitive configuration data
+* For production workloads, consider advanced solutions like AWS Secrets Manager
+
diff --git a/website/docs/introduction/basics/configuration/secrets/tests/hook-ready.sh b/website/docs/introduction/basics/configuration/secrets/tests/hook-ready.sh
new file mode 100644
index 0000000000..bc4696290d
--- /dev/null
+++ b/website/docs/introduction/basics/configuration/secrets/tests/hook-ready.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+set -Eeuo pipefail
+
+before() {
+ echo "Waiting for catalog pod to be ready..."
+ kubectl wait --for=condition=ready pod/catalog-pod -n catalog --timeout=300s
+}
+
+after() {
+ echo "noop"
+}
+
+"$@"
\ No newline at end of file
diff --git a/website/docs/introduction/basics/index.md b/website/docs/introduction/basics/index.md
new file mode 100644
index 0000000000..a829ce2836
--- /dev/null
+++ b/website/docs/introduction/basics/index.md
@@ -0,0 +1,29 @@
+---
+title: Kubernetes Fundamentals
+sidebar_position: 60
+sidebar_custom_props: { "module": true }
+description: "Learn fundamental Kubernetes concepts, kubectl CLI, and package management tools."
+---
+
+# Kubernetes Fundamentals
+
+Kubernetes is the industry-standard platform for running containerized applications at scale. It automates deployment, scaling, and operations, letting you focus on your applications instead of infrastructure. In this lab, we’ll cover the core concepts of Kubernetes—pods, deployments, services, and more—so you can confidently build and manage cloud-native applications on Amazon EKS.
+
+:::tip Before you start
+Prepare your environment for this section:
+
+```bash timeout=300 wait=10
+$ prepare-environment introduction/basics
+```
+
+:::
+
+You'll be learning the following fundamental concepts in this lab:
+- **[Architecture](./architecture)** - Understand how Kubernetes and Amazon EKS work under the hood
+- **[Cluster Access](./access)** - Configure access and interact with clusters using kubectl and kubeconfig
+- **[Namespaces](./namespaces)** - Organize and isolate resources
+- **[Pods](./pods)** - The smallest deployable units in Kubernetes
+- **[Workload Management](./workload-management)** - Deployments, StatefulSets, DaemonSets, and Jobs
+- **[Services](./services)** - Enable network access and service discovery
+- **[Configuration](./configuration)** - ConfigMaps and Secrets for application settings
+- **[Package Management](./package-management)** - Kustomize and Helm for managing applications
\ No newline at end of file
diff --git a/website/docs/introduction/basics/namespaces/index.md b/website/docs/introduction/basics/namespaces/index.md
new file mode 100644
index 0000000000..425bcf0271
--- /dev/null
+++ b/website/docs/introduction/basics/namespaces/index.md
@@ -0,0 +1,125 @@
+---
+title: Namespaces
+sidebar_position: 20
+---
+
+# Namespaces
+
+**Namespaces** provide a way to organize and isolate resources within a single Kubernetes cluster. Think of them as virtual clusters inside your physical cluster - they help you separate different applications, environments, or teams while sharing the same underlying infrastructure.
+
+You can think of namespaces like folders on your computer — they let you group related files (resources) without mixing them up.
+
+Namespaces provide:
+- **Organization:** Group related resources together (like all components of an application)
+- **Isolation:** Prevent resource conflicts between different applications or teams
+- **Resource Management:** Apply quotas and limits to specific groups of resources (see the sketch below)
+- **Access control:** Use Kubernetes permissions (called RBAC — Role-Based Access Control) to decide who can access or change resources.
+
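+For example, a ResourceQuota caps the total resources that all pods in a namespace may consume. A minimal sketch (the name and limits are arbitrary):
+
+```yaml
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: ui-quota # hypothetical quota for illustration
+  namespace: ui
+spec:
+  hard:
+    requests.cpu: "2" # total CPU requests across all pods
+    requests.memory: 4Gi # total memory requests across all pods
+    pods: "10" # maximum number of pods in the namespace
+```
+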
+In this section, you'll explore how namespaces organize resources by working with the different components of our retail store application.
+
+### Default Namespaces
+Every Kubernetes cluster starts with several built-in namespaces. These are created automatically when a cluster is provisioned:
+
+- **default** - Where resources go if you don't specify a namespace
+- **kube-system** - System components like DNS and networking
+- **kube-public** - Publicly readable resources
+- **kube-node-lease** - Node heartbeat information
+
+```bash
+$ kubectl get namespaces
+NAME STATUS AGE
+default Active 1h
+kube-node-lease Active 1h
+kube-public Active 1h
+kube-system Active 1h
+```
+
+### Creating Your First Namespace
+Let's create a namespace for our retail store's UI component:
+
+::yaml{file="manifests/base-application/ui/namespace.yaml" paths="kind,metadata.name,metadata.labels" title="namespace.yaml"}
+
+1. `kind: Namespace`: Tells Kubernetes what type of resource to create.
+2. `metadata.name`: Unique identifier for this namespace within the cluster.
+3. `metadata.labels`: Key-value pairs that organize and categorize resources.
+
+Apply the configuration file using `kubectl`:
+```bash
+$ kubectl apply -f ~/environment/eks-workshop/base-application/ui/namespace.yaml
+```
+
+You can also create namespaces directly using the `kubectl create` command. Let's create a namespace for our `catalog` service and add labels (labels are optional but helpful for organization):
+
+```bash
+$ kubectl create namespace catalog
+$ kubectl label namespace catalog app.kubernetes.io/created-by=eks-workshop
+```
+
+Let's inspect both namespaces:
+```bash
+$ kubectl get namespaces -l app.kubernetes.io/created-by=eks-workshop
+```
+
+The `-l` flag stands for "label selector" and filters resources based on their labels. In this case, we're only showing namespaces that have the label `app.kubernetes.io/created-by=eks-workshop`. This is useful for finding resources created by this workshop among all the namespaces in your cluster.
+
+Describe the namespace:
+```bash
+$ kubectl describe namespace ui
+Name: ui
+Labels: app.kubernetes.io/created-by=eks-workshop
+ kubernetes.io/metadata.name=ui
+Annotations:  <none>
+Status: Active
+
+No resource quota.
+
+No LimitRange resource.
+```
+
+### Using Namespaces
+When working with resources, you can specify the namespace in two ways:
+
+**Using the `-n` flag:**
+```bash
+$ kubectl get all -n ui
+```
+
+**Using the `--namespace` flag:**
+```bash
+$ kubectl get all --namespace ui
+```
+
+:::tip
+You can also see resources across all namespaces using the `-A` flag:
+
+```bash
+$ kubectl get pods -A
+```
+:::
+
+### Namespaces in this workshop
+In this workshop, namespaces help us separate the different microservices that make up our sample retail store application.
+
+- `ui` - Frontend user interface
+- `catalog` - Product catalog service
+- `carts` - Shopping cart service
+- `checkout` - Order processing service
+- `orders` - Order management service
+
+You'll see commands like this throughout the labs:
+```bash
+$ kubectl get pods -n ui
+$ kubectl get secrets -n catalog
+```
+
+This organization makes it easy to:
+* See which components belong to which service
+* Apply configurations to specific services
+* Troubleshoot issues within a particular service
+
+## Key Points to Remember
+* Namespaces organize and separate resources
+* Names must be unique within a namespace
+* Most resources are namespaced, but some (like Nodes and PersistentVolumes) are cluster-wide; you can list these as shown below
+* The `default` namespace is used when no namespace is specified
+* Namespaces enable resource quotas and access control
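+
+To check which resource types are cluster-wide rather than namespaced, you can query the API server directly:
+
+```bash test=false
+$ kubectl api-resources --namespaced=false
+```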
\ No newline at end of file
diff --git a/website/docs/introduction/helm/index.md b/website/docs/introduction/basics/package-management/helm/index.md
similarity index 65%
rename from website/docs/introduction/helm/index.md
rename to website/docs/introduction/basics/package-management/helm/index.md
index 442af23774..ae19c76431 100644
--- a/website/docs/introduction/helm/index.md
+++ b/website/docs/introduction/basics/package-management/helm/index.md
@@ -1,29 +1,40 @@
---
title: Helm
-sidebar_custom_props: { "module": true }
-sidebar_position: 50
+sidebar_position: 20
+description: "Learn Kubernetes package management and templating with Helm charts."
---
-::required-time
+# Helm
-:::tip Before you start
-Prepare your environment for this section:
+[Helm](https://helm.sh) is a package manager for Kubernetes that helps you define, install, and upgrade Kubernetes applications. It uses a packaging format called charts, which contain all the necessary Kubernetes resource definitions to run an application. Helm simplifies the deployment and management of applications on Kubernetes clusters.
-```bash timeout=600 wait=10
-$ prepare-environment introduction/helm
-```
+While Kustomize excels at declarative configuration management, Helm takes a different approach focused on **templating** and **package management**. Helm is particularly valuable when you need to:
-:::
+- **Share applications** across teams and organizations
+- **Handle complex configurations** with conditional logic
+- **Manage application lifecycles** (install, upgrade, rollback)
+- **Leverage existing ecosystem** of pre-built charts
-Although we will primarily be interacting with Kustomize in this workshop, there will be situations where Helm will be used to install certain packages in the EKS cluster. In this lab we give a brief introduction to Helm, and we'll demonstrate how to use it to install a pre-packaged application.
+## Core Concepts
-:::info
+### Charts
+Helm uses a packaging format called charts. A chart is a collection of files that describe a related set of Kubernetes resources. A single chart might be used to deploy something simple, like a memcached pod, or something complex, like a full web app stack with HTTP servers, databases, caches, and so on.
-This lab does not cover the authoring of Helm charts for your own workloads. For more information on this topic see this [guide](https://helm.sh/docs/chart_template_guide/).
+Concretely, a chart is a package containing:
+- `Chart.yaml` - metadata about the chart
+- `values.yaml` - default configuration values
+- `templates/` - Kubernetes manifest templates
+- Optional dependencies and documentation
-:::
+### Releases
+A **release** is an instance of a chart running in a Kubernetes cluster. You can install the same chart multiple times with different configurations.
-[Helm](https://helm.sh) is a package manager for Kubernetes that helps you define, install, and upgrade Kubernetes applications. It uses a packaging format called charts, which contain all the necessary Kubernetes resource definitions to run an application. Helm simplifies the deployment and management of applications on Kubernetes clusters.
+### Values
+**Values** are the configuration parameters that customize how a chart behaves when installed.
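+
+Before overriding anything, you can inspect the default values a chart ships with using `helm show values` (the chart reference below is a placeholder):
+
+```bash test=false
+$ helm show values my-repo/my-chart
+```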
+
+:::info
+This lab focuses on using Helm charts rather than authoring them. For chart development, see the [official guide](https://helm.sh/docs/chart_template_guide/).
+:::
## Helm CLI
@@ -91,7 +102,7 @@ There are two common ways to provide values to charts during installation:
Let's combine these methods to update our UI release. We'll use this `values.yaml` file:
```file
-manifests/modules/introduction/helm/values.yaml
+manifests/modules/introduction/basics/helm/values.yaml
```
This adds several custom Kubernetes annotations to the Pods, as well as overriding the UI theme.
@@ -110,7 +121,7 @@ $ helm upgrade ui \
--version 1.2.1 \
--create-namespace --namespace ui \
--set replicaCount=3 \
- --values ~/environment/eks-workshop/modules/introduction/helm/values.yaml \
+ --values ~/environment/eks-workshop/modules/introduction/basics/helm/values.yaml \
--wait
```
@@ -161,4 +172,25 @@ $ helm uninstall ui --namespace ui --wait
This will delete all the resources created by the chart for that release from our EKS cluster.
-Now that you understand how Helm works, proceed to the [Fundamentals module](/docs/fundamentals).
+## When to Use Helm
+
+**Helm is ideal when:**
+- You need complex templating and conditional logic
+- You're distributing applications to multiple teams
+- You want sophisticated release management
+- You're leveraging existing charts from the ecosystem
+- You need to support many different configuration scenarios
+
+## Key Takeaways
+
+- **Templating power**: Helm's Go templates enable complex, conditional configurations
+- **Release management**: Built-in support for upgrades, rollbacks, and release history
+- **Package ecosystem**: Large repository of pre-built charts for common applications
+- **Values-driven**: Configuration through structured values files and command-line overrides
+- **Lifecycle management**: Complete application lifecycle from install to uninstall
+
+Helm provides a powerful templating and package management solution for Kubernetes applications. It's particularly valuable when you need to distribute applications widely or handle complex configuration scenarios.
+
+Both Helm and Kustomize have their place in the Kubernetes ecosystem, and many teams use both tools for different use cases. Understanding both approaches will help you choose the right tool for each situation.
+
+Next, you can explore the [Fundamentals module](/docs/fundamentals) to dive deeper into EKS-specific concepts and advanced Kubernetes patterns.
\ No newline at end of file
diff --git a/website/docs/introduction/helm/tests/hook-install.sh b/website/docs/introduction/basics/package-management/helm/tests/hook-install.sh
similarity index 100%
rename from website/docs/introduction/helm/tests/hook-install.sh
rename to website/docs/introduction/basics/package-management/helm/tests/hook-install.sh
diff --git a/website/docs/introduction/helm/tests/hook-replicas.sh b/website/docs/introduction/basics/package-management/helm/tests/hook-replicas.sh
similarity index 100%
rename from website/docs/introduction/helm/tests/hook-replicas.sh
rename to website/docs/introduction/basics/package-management/helm/tests/hook-replicas.sh
diff --git a/website/docs/introduction/helm/tests/hook-suite.sh b/website/docs/introduction/basics/package-management/helm/tests/hook-suite.sh
similarity index 100%
rename from website/docs/introduction/helm/tests/hook-suite.sh
rename to website/docs/introduction/basics/package-management/helm/tests/hook-suite.sh
diff --git a/website/docs/introduction/basics/package-management/index.md b/website/docs/introduction/basics/package-management/index.md
new file mode 100644
index 0000000000..479df50449
--- /dev/null
+++ b/website/docs/introduction/basics/package-management/index.md
@@ -0,0 +1,60 @@
+---
+title: Package Management
+sidebar_position: 70
+description: "Learn about Kubernetes package management and deployment tools - Kustomize and Helm."
+---
+
+# Package Management
+
+As Kubernetes applications grow in complexity, managing multiple YAML files across different environments becomes challenging. **Package management tools** help you organize, customize, and deploy applications more efficiently.
+
+Kubernetes offers two primary approaches to solve these challenges:
+
+## Kustomize - Configuration Management
+**Kustomize** uses a patch-based approach to customize Kubernetes YAML files:
+
+- **Template-free**: Works with standard Kubernetes YAML
+- **Overlay-based**: Apply patches to base configurations
+- **Built into kubectl**: Native integration with `kubectl apply -k`
+- **GitOps friendly**: Excellent for declarative workflows
+
+**Best for**: Teams preferring pure YAML, simple customizations, and GitOps workflows.
+
+## Helm - Package Manager
+**Helm** uses templates to generate Kubernetes manifests:
+
+- **Templating**: Go templates with variables and functions
+- **Packaging**: Bundle applications into reusable charts
+- **Release management**: Install, upgrade, and rollback applications
+- **Large ecosystem**: Thousands of pre-built charts available
+
+**Best for**: Complex applications, sharing across teams, and leveraging existing charts.
+
+## Comparison
+
+| Feature | Kustomize | Helm |
+|---------|-----------|------|
+| **Approach** | Patch-based | Template-based |
+| **Learning Curve** | Gentler (standard YAML) | Steeper (template syntax) |
+| **Release Management** | Basic (via kubectl) | Advanced (install/upgrade/rollback) |
+| **Ecosystem** | Growing adoption | Mature with large chart library |
+| **GitOps** | Excellent | Good (with additional tools) |
+
+## When to Use Which?
+
+**Choose Kustomize when:**
+- You prefer standard Kubernetes YAML
+- Your customization needs are straightforward
+- You want tight kubectl integration
+
+**Choose Helm when:**
+- You need complex templating and conditional logic
+- You're distributing applications across teams
+- You want sophisticated release management
+
+Many teams use both tools together - Helm for complex third-party applications and Kustomize for simple internal services.
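+
+In practice, the day-to-day commands for each tool look like this (paths and names are illustrative):
+
+```bash test=false
+# Kustomize: apply an overlay directly with kubectl
+$ kubectl apply -k overlays/production
+
+# Helm: install a chart as a named release with environment-specific values
+$ helm install my-app ./my-chart --values values-production.yaml
+```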
+
+## Explore Package Management
+
+- **[Kustomize](./kustomize)** - Learn patch-based configuration management
+- **[Helm](./helm)** - Master template-based package management
\ No newline at end of file
diff --git a/website/docs/introduction/kustomize/index.md b/website/docs/introduction/basics/package-management/kustomize/index.md
similarity index 68%
rename from website/docs/introduction/kustomize/index.md
rename to website/docs/introduction/basics/package-management/kustomize/index.md
index a947af906d..c4e47d1191 100644
--- a/website/docs/introduction/kustomize/index.md
+++ b/website/docs/introduction/basics/package-management/kustomize/index.md
@@ -1,29 +1,26 @@
---
title: Kustomize
-sidebar_custom_props: { "module": true }
-sidebar_position: 40
+sidebar_position: 10
+description: "Learn declarative configuration management with Kustomize for Kubernetes applications."
---
-::required-time
-
-:::tip Before you start
-Prepare your environment for this section:
-
-```bash timeout=300 wait=10
-$ prepare-environment
-```
-
-:::
+# Kustomize
[Kustomize](https://kustomize.io/) allows you to manage Kubernetes manifest files using declarative "kustomization" files. It provides the ability to express "base" manifests for your Kubernetes resources and then apply changes using composition, customization and easily making cross-cutting changes across many resources.
+**Key Benefits:**
+- **Template-free**: Works with standard Kubernetes YAML
+- **Declarative**: Define what you want, not how to get there
+- **Composable**: Build complex configurations from simple pieces
+- **Built-in**: Native integration with kubectl (`kubectl apply -k`)
+
For example, take a look at the following manifest file for the `checkout` Deployment:
```file
manifests/base-application/checkout/deployment.yaml
```
-This file has already been applied in the previous [Getting Started](../getting-started) lab, but let's say we wanted to scale this component horizontally by updating the `replicas` field using Kustomize. Rather than manually updating this YAML file, we'll use Kustomize to update the `spec/replicas` field from 1 to 3.
+Let's say we wanted to scale this component horizontally by updating the `replicas` field using Kustomize. Rather than manually updating this YAML file, we'll use Kustomize to update the `spec/replicas` field from 1 to 3.
To do so, we'll apply the following kustomization.
@@ -32,20 +29,20 @@ To do so, we'll apply the following kustomization.
- Finally, the third tab shows just the diff of what has changed
```kustomization
-modules/introduction/kustomize/deployment.yaml
+modules/introduction/basics/kustomize/deployment.yaml
Deployment/checkout
```
You can generate the final Kubernetes YAML that applies this kustomization with the `kubectl kustomize` command, which invokes `kustomize` that is bundled with the `kubectl` CLI:
```bash
-$ kubectl kustomize ~/environment/eks-workshop/modules/introduction/kustomize
+$ kubectl kustomize ~/environment/eks-workshop/modules/introduction/basics/kustomize
```
This will generate a lot of YAML files, which represents the final manifests you can apply directly to Kubernetes. Let's demonstrate this by piping the output from `kustomize` directly to `kubectl apply`:
```bash
-$ kubectl kustomize ~/environment/eks-workshop/modules/introduction/kustomize | kubectl apply -f -
+$ kubectl kustomize ~/environment/eks-workshop/modules/introduction/basics/kustomize | kubectl apply -f -
namespace/checkout unchanged
serviceaccount/checkout unchanged
configmap/checkout unchanged
@@ -58,7 +55,7 @@ deployment.apps/checkout-redis unchanged
You'll notice that a number of different `checkout`-related resources are "unchanged", with the `deployment.apps/checkout` being "configured". This is intentional — we only want to apply changes to the `checkout` deployment. This happens because running the previous command actually applied two files: the Kustomize `deployment.yaml` that we saw above, as well as the following `kustomization.yaml` file which matches all files in the `~/environment/eks-workshop/base-application/checkout` folder. The `patches` field specifies the specific file to be patched:
```file
-manifests/modules/introduction/kustomize/kustomization.yaml
+manifests/modules/introduction/basics/kustomize/kustomization.yaml
```
To check that the number of replicas has been updated, run the following command:
@@ -76,7 +73,7 @@ Instead of using the combination of `kubectl kustomize` and `kubectl apply` we c
Let's try that:
```bash
-$ kubectl apply -k ~/environment/eks-workshop/modules/introduction/kustomize
+$ kubectl apply -k ~/environment/eks-workshop/modules/introduction/basics/kustomize
```
To reset the application manifests back to their initial state, you can simply apply the original set of manifests:
@@ -94,6 +91,23 @@ $ kubectl kustomize ~/environment/eks-workshop/base-application \
This uses `envsubst` to substitute environment variable placeholders in the Kubernetes manifest files with the actual values based on your particular environment. For example in some manifests we need to reference the EKS cluster name with `$EKS_CLUSTER_NAME` or the AWS region with `$AWS_REGION`.
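+
+To see what `envsubst` does in isolation, you can run it on a single line. A quick sketch, assuming `EKS_CLUSTER_NAME` is set in your shell (the output shown is for a cluster named `eks-workshop`):
+
+```bash test=false
+$ echo 'cluster: $EKS_CLUSTER_NAME' | envsubst
+cluster: eks-workshop
+```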
-Now that you understand how Kustomize works, you can proceed to the [Helm module](/docs/introduction/helm) or go directly to the [Fundamentals module](/docs/fundamentals).
+## When to Use Kustomize
+
+**Kustomize is ideal when:**
+- You prefer working with standard Kubernetes YAML
+- You need simple to moderate customization
+- You want GitOps-friendly configurations
+- You're building internal applications with known requirements
+- You want to avoid templating complexity
+
+## Key Takeaways
+
+- **Declarative approach**: Define desired state, let Kustomize handle the details
+- **Composition over inheritance**: Build complex configurations from simple pieces
+- **No templating required**: Work with standard Kubernetes YAML
+- **Built-in kubectl support**: Native integration with `kubectl apply -k`
+- **GitOps friendly**: All configurations are version-controlled YAML files
+
+Kustomize provides a clean, declarative way to manage Kubernetes configurations without the complexity of templating. It's particularly effective for teams that prefer to work with standard Kubernetes YAML and need straightforward customization capabilities.
-To learn more about Kustomize, you can refer to the official Kubernetes [documentation](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization/).
+To learn more about Kustomize, you can refer to the official Kubernetes [documentation](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization/).
\ No newline at end of file
diff --git a/website/docs/introduction/basics/pods/index.md b/website/docs/introduction/basics/pods/index.md
new file mode 100644
index 0000000000..7b23b76934
--- /dev/null
+++ b/website/docs/introduction/basics/pods/index.md
@@ -0,0 +1,229 @@
+---
+title: Pods
+sidebar_position: 30
+---
+
+# Pods
+
+**Pods** are the smallest deployable units in Kubernetes. A Pod represents one or more containers that share storage, network, and configuration settings for how they should run together.
+
+Pods provide:
+- **Container grouping:** Usually, a pod runs a single container, but it can include multiple tightly coupled containers that need to share data or communicate over localhost.
+- **Shared networking:** All containers in a pod share the same IP address
+- **Shared storage:** Containers can share volumes within the pod
+- **Lifecycle management:** Containers in a pod live and die together
+- **Ephemeral nature:** Pods can be created, destroyed, and recreated
+
+In this lab, you'll learn about pods by creating a simple example pod and exploring its properties.
+
+### Creating a Pod
+
+Let's create a simple pod to understand how they work. The manifest defines a simple pod running the retail store UI container.
+
+::yaml{file="manifests/modules/introduction/basics/pods/ui-pod.yaml" paths="kind,metadata.name,metadata.namespace,spec.containers,spec.containers.0.name,spec.containers.0.image,spec.containers.0.ports,spec.containers.0.env,spec.containers.0.resources" title="ui-pod.yaml"}
+
+1. `kind: Pod`: Tells Kubernetes what type of resource to create
+2. `metadata.name`: Unique identifier for this pod within the namespace
+3. `metadata.namespace`: Which namespace the pod belongs to (ui namespace)
+4. `spec.containers`: Array defining what containers run in the pod
+5. `spec.containers.0.name`: Name of the first container (ui)
+6. `spec.containers.0.image`: Container image from ECR Public registry
+7. `spec.containers.0.ports`: Network ports the container exposes
+8. `spec.containers.0.env`: Environment variables for the container
+9. `spec.containers.0.resources`: CPU and memory allocation settings
+
+Apply the pod configuration:
+```bash
+$ kubectl apply -f ~/environment/eks-workshop/modules/introduction/basics/pods/ui-pod.yaml
+```
+
+Kubernetes will create the pod in the `ui` namespace and start pulling the container image.
+
+Wait for the pod to become ready:
+```bash
+$ kubectl wait --for=condition=Ready --timeout=60s -n ui pod/ui-pod
+```
+
+### Exploring the Pod
+
+Now let's examine the pod we just created:
+
+```bash
+$ kubectl get pods -n ui
+NAME READY STATUS RESTARTS AGE
+ui-pod 1/1 Running 0 30s
+```
+
+Get detailed information about the pod:
+```bash
+$ kubectl describe pod -n ui ui-pod
+Name: ui-pod
+Namespace: ui
+Priority: 0
+Service Account: default
+Node: ip-10-42-144-0.us-west-2.compute.internal/10.42.144.0
+Start Time: Sun, 05 Oct 2025 19:28:02 +0000
+Labels: app.kubernetes.io/component=service
+ app.kubernetes.io/name=ui
+Annotations: <none>
+Status: Running
+IP: 10.42.146.177
+IPs:
+ IP: 10.42.146.177
+Containers:
+ ui:
+ Container ID: containerd://01709a8abac99ce46842dda128752a68e828a485ee47f2094549fc00f9d71953
+ Image: public.ecr.aws/aws-containers/retail-store-sample-ui:1.2.1
+ Image ID: public.ecr.aws/aws-containers/retail-store-sample-ui@sha256:63a531dd3716cf9f6a3c7b54d65c39ce4de43cb23a613ac2933f2cb38aff86d7
+ Port: 8080/TCP
+ Host Port: 0/TCP
+ State: Running
+ Started: Sun, 05 Oct 2025 19:28:03 +0000
+ Ready: True
+ Restart Count: 0
+ Limits:
+ memory: 1536Mi
+ Requests:
+ cpu: 250m
+ memory: 1536Mi
+ Environment:
+ JAVA_OPTS: -XX:MaxRAMPercentage=75.0 -Djava.security.egd=file:/dev/urandom
+ Mounts:
+ /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-68xdw (ro)
+Conditions:
+ Type Status
+ PodReadyToStartContainers True
+ Initialized True
+ Ready True
+ ContainersReady True
+ PodScheduled True
+Volumes:
+ kube-api-access-68xdw:
+ Type: Projected (a volume that contains injected data from multiple sources)
+ TokenExpirationSeconds: 3607
+ ConfigMapName: kube-root-ca.crt
+ Optional: false
+ DownwardAPI: true
+QoS Class: Burstable
+Node-Selectors: <none>
+Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
+ node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal Scheduled 10s default-scheduler Successfully assigned ui/ui-pod to ip-10-42-144-0.us-west-2.compute.internal
+ Normal Pulled 10s kubelet Container image "public.ecr.aws/aws-containers/retail-store-sample-ui:1.2.1" already present on machine
+ Normal Created 10s kubelet Created container: ui
+ Normal Started 10s kubelet Started container ui
+```
+
+This shows:
+- **Container specifications** - Image, ports, environment variables
+- **Resource usage** - CPU and memory requests/limits
+- **Events** - What happened during pod creation
+- **Status** - Current state and health
+
+View the pod's logs:
+```bash
+$ kubectl logs -n ui ui-pod
+Picked up JAVA_TOOL_OPTIONS:
+
+ . ____ _ __ _ _
+ /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \
+( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
+ \\/ ___)| |_)| | | | | || (_| | ) ) ) )
+ ' |____| .__|_| |_|_| |_\__, | / / / /
+ =========|_|==============|___/=/_/_/_/
+
+ :: Spring Boot :: (v3.4.4)
+
+2025-10-05T19:28:06.600Z INFO 1 --- [ main] c.a.s.u.UiApplication : Starting UiApplication v0.0.1-SNAPSHOT using Java 21.0.7 with PID 1 (/app/app.jar started by appuser in /app)
+2025-10-05T19:28:06.658Z INFO 1 --- [ main] c.a.s.u.UiApplication : The following 1 profile is active: "prod"
+2025-10-05T19:28:10.268Z INFO 1 --- [ main] i.o.i.s.a.OpenTelemetryAutoConfiguration : OpenTelemetry Spring Boot starter has been disabled
+
+2025-10-05T19:28:11.712Z INFO 1 --- [ main] o.s.b.a.e.w.EndpointLinksResolver : Exposing 4 endpoints beneath base path '/actuator'
+2025-10-05T19:28:14.045Z INFO 1 --- [ main] o.s.b.w.e.n.NettyWebServer : Netty started on port 8080 (http)
+2025-10-05T19:28:14.075Z INFO 1 --- [ main] c.a.s.u.UiApplication : Started UiApplication in 8.505 seconds (process running for 10.444)
+```
+
+> You’ll see the UI container starting up.
+
+Execute a command inside the pod:
+```bash hook=ready
+$ kubectl exec -n ui ui-pod -- curl -s localhost:8080/actuator/health
+{"status":"UP","groups":["liveness","readiness"]}
+```
+This should return the status of the application.
+
+### Accessing the Pod
+
+You can access the pod from your local machine using port forwarding:
+```bash test=false
+$ kubectl port-forward -n ui ui-pod 8080:8080
+```
+
+:::info
+Port forwarding temporarily connects your local port to a port inside the pod, allowing you to access the application directly from your laptop.
+:::
+
+In the Workshop IDE, a popup appears listing the forwarded ports. Click it to open the application URL in the browser.
+
+Alternatively, open another terminal and test:
+```bash test=false
+$ curl localhost:8080
+```
+
+In the browser, you'll see the retail store application landing page.
+
+Press `CTRL+C` to end the `port-forward` session.
+
+### Deleting Pods
+
+When you no longer need a pod, you can delete it using the `kubectl delete` command. There are several ways to delete pods:
+
+**Method 1: Delete by name**
+```bash
+$ kubectl delete pod -n ui ui-pod
+pod "ui-pod" deleted
+```
+
+**Method 2: Delete using the manifest file**
+Let's recreate the `ui-pod` and delete it using the manifest file.
+```bash
+$ kubectl apply -f ~/environment/eks-workshop/modules/introduction/basics/pods/ui-pod.yaml
+$ kubectl delete -f ~/environment/eks-workshop/modules/introduction/basics/pods/ui-pod.yaml
+pod "ui-pod" deleted
+```
+
+After deletion, verify the pod is gone:
+```bash
+$ kubectl get pods -n ui
+No resources found in ui namespace.
+```
+
+:::warning
+When you delete a pod directly, it's gone forever. The data inside the pod (unless stored in persistent volumes) is lost. In production environments, pods are typically managed by controllers like Deployments that automatically recreate them if needed.
+:::
+
+### Pod Lifecycle
+
+Pods have well-defined lifecycle phases that reflect their current state in the cluster.
+- **Pending** - Pod is being scheduled and containers are starting
+- **Running** - At least one container is running
+- **Succeeded** - All containers have completed successfully
+- **Failed** - At least one container has failed
+- **Unknown** - Pod state cannot be determined
+
+Kubernetes controllers continuously monitor pod states and take action (like restarting failed containers or recreating pods) to maintain desired application health.
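+
+You can read a pod's current phase directly from its status field. For example, if you recreate the `ui-pod` from earlier:
+
+```bash test=false
+$ kubectl get pod -n ui ui-pod -o jsonpath='{.status.phase}{"\n"}'
+Running
+```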
+
+## Key Points to Remember
+
+* Pods are the smallest deployable units in Kubernetes
+* Usually contain one container, but can contain multiple
+* Share network and storage within the pod
+* Pods are ephemeral - they come and go
+* Typically managed by higher-level controllers like Deployments
+
+:::info
+In real-world scenarios, you rarely create pods directly — instead, you use higher-level resources like Deployments, ReplicaSets, or Jobs to manage them.
+:::
\ No newline at end of file
diff --git a/website/docs/introduction/basics/pods/tests/hook-ready.sh b/website/docs/introduction/basics/pods/tests/hook-ready.sh
new file mode 100644
index 0000000000..caa53a14b5
--- /dev/null
+++ b/website/docs/introduction/basics/pods/tests/hook-ready.sh
@@ -0,0 +1,25 @@
+set -Eeuo pipefail
+
+before() {
+ echo "Waiting for pod to be ready..."
+ kubectl wait --for=condition=Ready --timeout=60s -n ui pod/ui-pod
+
+ echo "Waiting for application to start listening on port 8080..."
+ for i in {1..30}; do
+ if kubectl exec -n ui ui-pod -- curl -s --connect-timeout 2 localhost:8080/actuator/health >/dev/null 2>&1; then
+ echo "Application is ready and responding on port 8080"
+ return 0
+ fi
+ echo "Attempt $i/30: Application not ready yet, waiting..."
+ sleep 2
+ done
+
+ echo "Application failed to become ready within 60 seconds"
+ exit 1
+}
+
+after() {
+ echo "noop"
+}
+
+"$@"
\ No newline at end of file
diff --git a/website/docs/introduction/basics/services/index.md b/website/docs/introduction/basics/services/index.md
new file mode 100644
index 0000000000..3e41d3993e
--- /dev/null
+++ b/website/docs/introduction/basics/services/index.md
@@ -0,0 +1,237 @@
+---
+title: Services
+sidebar_position: 50
+---
+
+# Services
+
+**Services** provide stable network endpoints for accessing pods. Since pods are ephemeral and can be created/destroyed frequently, services give you consistent DNS names and IP addresses for reliable communication.
+
+### Why Services Are Important
+Pods can come and go, so clients cannot reliably connect to them directly. Services:
+- **Provide stable networking:** The IP address and DNS name remain the same even as pods change
+- **Offer load balancing:** Automatically distribute requests across healthy pods
+- **Enable service discovery:** Other components can reach the service by name
+- **Provide pod abstraction:** Clients don’t need to know individual pod IPs
+- **Handle automatic updates:** Adjust endpoints as pods are created or destroyed
+
+In this lab, you'll create a service for the catalog component of our retail store and explore how services enable communication between pods.
+
+### Service Types
+
+Kubernetes provides different service types for various use cases:
+
+| Type | Purpose | Access |
+|------|---------|--------|
+| **ClusterIP** | Internal cluster communication | Cluster-only |
+| **NodePort** | External access via node ports | External |
+| **LoadBalancer** | External access via cloud load balancer | External |
+| **ExternalName** | Map to external DNS name | External |
+
+:::info
+A dedicated lab on **LoadBalancer services** appears later in this workshop, where you'll learn how to expose services externally using a cloud load balancer.
+:::
+
+### Creating a Service
+
+Let's examine the UI service from our retail store:
+
+::yaml{file="manifests/base-application/ui/service.yaml" paths="kind,metadata.name,spec.type,spec.ports,spec.selector" title="service.yaml"}
+
+1. `kind: Service`: Creates a Service resource
+2. `metadata.name`: Name of the service (ui)
+3. `spec.type`: Service type (ClusterIP for internal access)
+4. `spec.ports`: Port mapping from service to pods
+5. `spec.selector`: Selects which pods receive traffic
+
+Deploy the service:
+```bash hook=ready
+$ kubectl apply -k ~/environment/eks-workshop/modules/introduction/basics/services/
+```
+
+### How Services Connect to Pods
+
+Services don't directly know about specific pods. Instead, they use **label selectors** to dynamically find pods that should receive traffic. This creates a flexible, loosely-coupled relationship.
+
+**Here's how it works:**
+
+1. **Pods have labels** - Key-value pairs that describe the pod
+2. **Services have selectors** - Criteria that match pod labels
+3. **Kubernetes automatically connects them** - Any pod matching the selector becomes an endpoint
+
+Let's see this in action with our UI service:
+
+```bash
+# Check the service selector
+$ kubectl get service -n ui ui -o jsonpath='{.spec.selector}' | jq
+{
+ "app.kubernetes.io/component": "service",
+ "app.kubernetes.io/instance": "ui",
+ "app.kubernetes.io/name": "ui"
+}
+```
+
+Now check which pods have matching labels:
+```bash
+# Look for pods with matching labels
+$ kubectl get pod -n ui -l app.kubernetes.io/component=service -o jsonpath='{.items[0].metadata.labels}{"\n"}' | jq
+{
+ "app.kubernetes.io/component": "service",
+ "app.kubernetes.io/created-by": "eks-workshop",
+ "app.kubernetes.io/instance": "ui",
+ "app.kubernetes.io/name": "ui",
+ "pod-template-hash": "5989474687"
+}
+```
+
+You'll see the UI pods have labels that match the service selector. This is how the service knows which pods to send traffic to.
+
+**The relationship is dynamic:**
+- When new pods start with matching labels, they automatically become service endpoints
+- When pods are deleted, they're automatically removed from the service
+- If you change a pod's labels, it can be added to or removed from services (you can try this below)
+
+This label-based system means:
+- **Services work with any workload controller** (Deployments, StatefulSets, etc.)
+- **Pods can belong to multiple services** if they match different selectors
+- **Services automatically adapt** as pods scale up or down
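+
+You can observe this yourself: removing a matching label from a pod immediately detaches it from the service. A sketch (the pod name is illustrative; pick one from `kubectl get pods -n ui`):
+
+```bash test=false
+# The trailing dash removes the label, so the pod no longer matches the selector
+$ kubectl label pod ui-6db5f6bd84-cx4mg -n ui app.kubernetes.io/name-
+$ kubectl get endpoints -n ui ui
+```
+
+Note that a Deployment will start a replacement pod, since its ReplicaSet no longer counts the relabeled one.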
+
+### Exploring Your Service
+
+Check service status:
+```bash
+$ kubectl get service -n ui
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ui ClusterIP 172.20.83.84 <none> 80/TCP 15m
+```
+
+View service endpoints (the actual pod IPs):
+```bash
+$ kubectl get endpoints -n ui ui
+NAME ENDPOINTS AGE
+ui 10.42.1.15:8080 15m
+```
+> This shows which pods receive traffic
+
+Get detailed service information:
+```bash
+$ kubectl describe service -n ui ui
+Name: ui
+Namespace: ui
+Labels: app.kubernetes.io/component=service
+ app.kubernetes.io/created-by=eks-workshop
+ app.kubernetes.io/instance=ui
+ app.kubernetes.io/name=ui
+Annotations: <none>
+Selector: app.kubernetes.io/component=service,app.kubernetes.io/instance=ui,app.kubernetes.io/name=ui
+Type: ClusterIP
+IP Family Policy: SingleStack
+IP Families: IPv4
+IP: 172.16.88.252
+IPs: 172.16.88.252
+Port: http 80/TCP
+TargetPort: http/TCP
+Endpoints: 10.42.129.33:8080
+Session Affinity: None
+Internal Traffic Policy: Cluster
+Events: <none>
+```
+
+### Service Discovery
+
+Services enable automatic service discovery through DNS names:
+
+**Full DNS name format:**
+```
+<service-name>.<namespace>.svc.cluster.local
+```
+
+**Examples from our retail store:**
+- `ui.ui.svc.cluster.local`
+- `catalog.catalog.svc.cluster.local`
+- `carts.carts.svc.cluster.local`
+
+**Short names within the same namespace:**
+```
+# From a pod in the ui namespace
+curl http://ui:80
+
+# From a different namespace, use the full name
+curl http://ui.ui.svc.cluster.local:80
+```
+
+### Testing Service Communication
+
+Let's test service discovery and communication by creating a test pod:
+
+```bash
+# Create a test pod for network testing
+$ kubectl run test-pod --image=curlimages/curl --restart=Never -- sleep 3600
+$ kubectl wait --for=condition=ready pod/test-pod --timeout=60s
+```
+
+```bash
+# Test DNS resolution from within the cluster
+$ kubectl exec test-pod -- nslookup ui.ui.svc.cluster.local
+Server: 172.16.0.10
+Address: 172.16.0.10:53
+
+
+Name: ui.ui.svc.cluster.local
+Address: 172.16.88.252
+```
+
+```bash
+# Test HTTP communication (shows the web page)
+$ kubectl exec test-pod -- curl -s http://ui.ui.svc.cluster.local/actuator/info | jq
+{
+ "pod": {
+ "name": "ui-6db5f6bd84-cx4mg"
+ }
+}
+```
+
+### Load Balancing
+
+Services automatically distribute traffic across all healthy pods that match their selector:
+
+**Scale the UI deployment to see load balancing:**
+```bash hook=replicas
+$ kubectl scale deployment -n ui ui --replicas=3
+```
+
+**Watch how the service endpoints update:**
+```bash
+$ kubectl get endpoints -n ui ui
+NAME ENDPOINTS AGE
+ui 10.42.117.212:8080,10.42.129.33:8080,10.42.174.4:8080 11m
+```
+
+You'll now see multiple pod IPs listed as endpoints - the service automatically discovered the new pods because they have matching labels.
+
+**Test load balancing:**
+```bash
+# Make multiple requests to see load balancing in action (single line)
+$ for i in $(seq 1 5); do printf "Request %d:" "$i"; kubectl exec test-pod -- curl -s http://ui.ui.svc.cluster.local/actuator/info; echo; sleep 1; done
+Request 1:{"pod":{"name":"ui-6db5f6bd84-xgpf4"}}
+Request 2:{"pod":{"name":"ui-6db5f6bd84-cx4mg"}}
+Request 3:{"pod":{"name":"ui-6db5f6bd84-7bq8w"}}
+Request 4:{"pod":{"name":"ui-6db5f6bd84-7bq8w"}}
+Request 5:{"pod":{"name":"ui-6db5f6bd84-cx4mg"}}
+```
+
+You'll see requests distributed across different pod hostnames, demonstrating how the service load balances across all matching pods.
+
+```bash
+# Clean up the test pod
+$ kubectl delete pod test-pod
+```
+
+## Key Points to Remember
+
+* Services provide stable network endpoints for ephemeral pods
+* ClusterIP services enable internal cluster communication
+* Services use label selectors to find target pods
+* DNS names follow the pattern: service.namespace.svc.cluster.local
+* Services automatically load balance traffic across healthy pods
+* Use port forwarding to test services locally
\ No newline at end of file
diff --git a/website/docs/introduction/basics/services/tests/hook-ready.sh b/website/docs/introduction/basics/services/tests/hook-ready.sh
new file mode 100755
index 0000000000..26558871db
--- /dev/null
+++ b/website/docs/introduction/basics/services/tests/hook-ready.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+set -Eeuo pipefail
+
+before() {
+ echo "noop"
+}
+
+after() {
+ echo "Waiting for UI deployment to be available..."
+
+ kubectl wait --for=condition=available deployment/ui -n ui --timeout=300s
+
+ echo "Waiting for UI service endpoints..."
+ kubectl wait --for=jsonpath='{.subsets[0].addresses[0].ip}' endpoints/ui -n ui --timeout=300s
+}
+
+"$@"
\ No newline at end of file
diff --git a/website/docs/introduction/basics/services/tests/hook-replicas.sh b/website/docs/introduction/basics/services/tests/hook-replicas.sh
new file mode 100644
index 0000000000..164af15443
--- /dev/null
+++ b/website/docs/introduction/basics/services/tests/hook-replicas.sh
@@ -0,0 +1,23 @@
+set -Eeuo pipefail
+
+before() {
+ echo "noop"
+}
+
+after() {
+ kubectl rollout status -n ui deployment/ui --timeout=60s
+
+ # Wait for all 3 pods to be ready
+ kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=ui -n ui --timeout=60s
+
+ POD_COUNT=$(kubectl get pod -n ui -l app.kubernetes.io/name=ui -o json | jq -r ".items | length")
+
+ if [[ $POD_COUNT -eq 3 ]]; then
+ exit 0
+ fi
+
+ >&2 echo "There should be 3 pods running"
+ exit 1
+}
+
+"$@"
\ No newline at end of file
diff --git a/website/docs/introduction/basics/workload-management/daemonsets.md b/website/docs/introduction/basics/workload-management/daemonsets.md
new file mode 100644
index 0000000000..012a7af8a9
--- /dev/null
+++ b/website/docs/introduction/basics/workload-management/daemonsets.md
@@ -0,0 +1,121 @@
+---
+title: DaemonSets
+sidebar_position: 33
+---
+
+# DaemonSets
+
+**DaemonSets** ensure that a copy of a pod runs on **every node** (or a subset of nodes) in your cluster. They are ideal for system-level services that must operate on all nodes, such as logging, monitoring, and network agents.
+
+Key benefits:
+- **Cover all nodes** - One Pod per node
+- **Scale automatically with nodes** - New nodes get pods, removed nodes lose pods
+- **Run system services** - Ideal for logging, monitoring, and networking
+- **Target specific nodes** - Using selectors or affinity
+- **Access host resources** - Like logs, metrics, and system files
+
+## When to Use DaemonSets
+DaemonSets are perfect for services that need to run on every node or a subset of nodes:
+- **Log collectors** - Fluentd, Filebeat, Fluent Bit
+- **Monitoring agents** - Node Exporter, Datadog agent, New Relic
+- **Network plugins** - CNI plugins, load balancer controllers
+- **Security agents** - Antivirus scanners, compliance tools
+- **Storage daemons** - Distributed storage agents
+
+## Deploying a DaemonSet
+
+Let's create a simple log collector DaemonSet that runs on all nodes and collects logs from the host filesystem:
+
+::yaml{file="manifests/modules/introduction/basics/daemonsets/log-collector.yaml" paths="kind,metadata.name,spec.selector,spec.template.spec.containers.0.volumeMounts,spec.template.spec.volumes" title="log-collector.yaml"}
+
+1. `kind: DaemonSet`: Creates a DaemonSet controller
+2. `metadata.name`: Name of the DaemonSet (`log-collector`)
+3. `spec.selector`: How DaemonSet finds its pods (by labels)
+4. `spec.template.spec.containers.0.volumeMounts`: How container accesses node files
+5. `spec.template.spec.volumes`: Host paths for accessing node logs
+
+Key DaemonSet characteristics:
+- No `replicas` field - Kubernetes automatically runs one pod per node
+- Pods automatically scale as nodes are added or removed.
+- `hostPath` volumes allow Pods to access node files, if required.
+- Typically deployed in `kube-system` namespace for system services, but can run in other namespaces.
+
+Deploy the DaemonSet:
+```bash
+$ kubectl apply -f ~/environment/eks-workshop/modules/introduction/basics/daemonsets/log-collector.yaml
+```
+
+## Inspecting Your DaemonSet
+
+Check DaemonSet status:
+```bash
+$ kubectl get daemonset -n kube-system
+NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE AGE
+log-collector 3 3 3 3 3 2m
+```
+> The output shows the desired versus current pod counts across the cluster's nodes.
+
+View the pods across all nodes:
+```bash
+$ kubectl get pods -n kube-system -l app=log-collector -o wide
+NAME READY STATUS NODE AGE
+log-collector-abc12 1/1 Running ip-10-42-1-1 2m
+log-collector-def34 1/1 Running ip-10-42-2-1 2m
+log-collector-ghi56 1/1 Running ip-10-42-3-1 2m
+```
+> Notice one pod per node
+
+## Node Selection
+
+Target specific nodes using nodeSelector:
+
+```yaml
+spec:
+ template:
+ spec:
+ nodeSelector:
+ node-type: worker
+ containers:
+ - name: monitoring-agent
+ image: monitoring:latest
+```
+
+Or use nodeAffinity for more complex rules:
+
+```yaml
+spec:
+ template:
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/arch
+ operator: In
+ values:
+ - amd64
+```
+Use nodeSelector for simple label matches and nodeAffinity for more complex scheduling requirements.
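+
+For the `nodeSelector` example above to match anything, the target nodes must carry that label. You could add it manually (the node name is illustrative):
+
+```bash test=false
+$ kubectl label node ip-10-42-1-1.us-west-2.compute.internal node-type=worker
+```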
+
+## DaemonSets vs Other Controllers
+
+| Controller | Purpose | Replica Count | Node Placement | Use Case |
+|------------|---------|---------------|----------------|----------|
+| DaemonSet | One Pod per node | Automatic | All nodes or subset | System services |
+| Deployment | Multiple interchangeable Pods | Configurable | Any node | Stateless apps |
+| StatefulSet | Pods with stable identity | Configurable | Any node | Stateful apps |
+
+:::info
+DaemonSets are ideal for services that must run on every node or a specific set of nodes.
+:::
+
+## Key Points to Remember
+
+* DaemonSets automatically run one pod per node
+* Perfect for system-level services like logging and monitoring
+* No need to specify replica count - it's automatic
+* Can access node resources through hostPath volumes
+* Use node selectors to target specific nodes
+* Pods are automatically added/removed as nodes join/leave
+* Ideal for consistent system functionality across all nodes
\ No newline at end of file
diff --git a/website/docs/introduction/basics/workload-management/deployments.md b/website/docs/introduction/basics/workload-management/deployments.md
new file mode 100644
index 0000000000..cf371cf325
--- /dev/null
+++ b/website/docs/introduction/basics/workload-management/deployments.md
@@ -0,0 +1,119 @@
+---
+title: Deployments
+sidebar_position: 31
+---
+
+# Deployments
+
+**Deployments** are the most common workload controller for running stateless applications. They make sure your application always runs the desired number of Pods - automatically handling creation, scaling, updates, and recovery.
+
+Instead of managing Pods manually, Deployments let Kubernetes:
+- **Run multiple identical Pods** for reliability and load distribution
+- **Scale automatically** by adjusting replica counts
+- **Recover failed Pods** without manual intervention
+- **Perform rolling updates** without downtime
+- **Rollback easily** to previous versions when needed
+
+### Creating a Deployment
+
+Let's deploy the retail store UI using a deployment:
+
+::yaml{file="manifests/base-application/ui/deployment.yaml" paths="kind,metadata.name,spec.replicas,spec.selector,spec.template" title="deployment.yaml"}
+
+1. `kind: Deployment`: Defines a Deployment controller
+2. `metadata.name`: Name of the Deployment (ui)
+3. `spec.replicas`: Desired number of pods (1 in this example)
+4. `spec.selector`: Labels used to find managed Pods
+5. `spec.template`: Pod template defining what each pod should look like
+
+The deployment ensures that the actual Pods always match this template.
+
+Apply the Deployment:
+```bash
+$ kubectl apply -k ~/environment/eks-workshop/base-application/ui
+```
+
+### Inspecting the Deployment
+
+Check deployment status:
+```bash
+$ kubectl get deployment -n ui
+NAME READY UP-TO-DATE AVAILABLE AGE
+ui 1/1 1 1 30s
+```
+
+List the Pods created by the Deployment:
+```bash
+$ kubectl get pods -n ui
+NAME READY STATUS RESTARTS AGE
+ui-6d5bb7b9c8-xyz12 1/1 Running 0 30s
+```
+
+Get detailed information:
+```bash
+$ kubectl describe deployment -n ui ui
+```
+
+### Scaling the Deployment
+
+Scale up to 5 replicas:
+```bash
+$ kubectl scale deployment -n ui ui --replicas=5
+$ kubectl get pods -n ui
+NAME READY STATUS RESTARTS AGE
+ui-6d5bb7b9c8-abc12 1/1 Running 0 2m
+ui-6d5bb7b9c8-def34 1/1 Running 0 12s
+ui-6d5bb7b9c8-ghi56 1/1 Running 0 12s
+ui-6d5bb7b9c8-arx97 1/1 Running 0 10s
+ui-6d5bb7b9c8-uiv85 1/1 Running 0 10s
+```
+
+:::info
+Kubernetes automatically spreads these Pods across available worker nodes for high availability.
+:::
+
+Scale back down to 3 replicas:
+```bash
+$ kubectl scale deployment -n ui ui --replicas=3
+$ kubectl get pods -n ui
+NAME READY STATUS RESTARTS AGE
+ui-6d5bb7b9c8-abc12 1/1 Running 0 2m
+ui-6d5bb7b9c8-def34 1/1 Running 0 12s
+ui-6d5bb7b9c8-ghi56 1/1 Running 0 12s
+```
+
+### Rolling Updates and Rollbacks
+You can update a Deployment by changing the image version:
+```bash
+$ kubectl set image deployment/ui ui=public.ecr.aws/aws-containers/retail-store-sample-ui:v2 -n ui
+$ kubectl get pods -n ui
+NAME READY STATUS RESTARTS AGE
+ui-5989474687-5gcbt 1/1 Running 0 13m
+ui-5989474687-dhk6q 1/1 Running 0 14s
+ui-5989474687-dw8x8 1/1 Running 0 14s
+ui-7c65b44b7c-znm9c 0/1 ErrImagePull 0 7s
+```
+> You'll see a new pod created but with status `ErrImagePull`.
+
+Now let's roll back the change:
+```bash
+$ kubectl rollout undo deployment/ui -n ui
+$ kubectl get pods -n ui
+NAME READY STATUS RESTARTS AGE
+ui-5989474687-5gcbt 1/1 Running 0 13m
+ui-5989474687-dhk6q 1/1 Running 0 14s
+ui-5989474687-dw8x8 1/1 Running 0 14s
+```
+
+Rolling updates let you update your application gradually without downtime, while Kubernetes ensures new Pods match the desired state.
+If something goes wrong — like an invalid image — you can rollback safely to the previous working version, keeping your application available and stable.
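+
+You can also review a Deployment's revision history and watch a rollout progress with the `kubectl rollout` subcommands:
+
+```bash test=false
+$ kubectl rollout history deployment/ui -n ui
+$ kubectl rollout status deployment/ui -n ui
+```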
+
+This demonstrates how Deployments simplify application updates, maintain availability, and reduce risk in production environments.
+
+### Key Points to Remember
+
+* Deployments manage multiple identical pods automatically
+* Use deployments instead of creating pods directly in production
+* Scaling is as simple as changing the replica count
+* Pod names include the deployment name plus random suffixes
+* Deployments are perfect for stateless applications like web apps and APIs
diff --git a/website/docs/introduction/basics/workload-management/index.md b/website/docs/introduction/basics/workload-management/index.md
new file mode 100644
index 0000000000..da890af70d
--- /dev/null
+++ b/website/docs/introduction/basics/workload-management/index.md
@@ -0,0 +1,117 @@
+---
+title: Workload Management
+sidebar_position: 40
+---
+
+# Workload Management
+While you can create individual pods directly, in production you rarely manage pods manually. Instead, you use **workload controllers** - higher-level Kubernetes resources that create and manage pods according to different application patterns.
+
+Think of workload controllers as smart managers that:
+- **Create pods** based on templates you define
+- **Monitor pod health** and replace failed instances
+- **Handle scaling** up and down based on demand
+- **Manage updates** with strategies like rolling deployments
+- **Provide specialized behavior** for different application types
+
+## Types of Workload Controllers
+Kubernetes provides several workload controllers, each designed for specific use cases:
+
+- **Deployments** manage multiple identical pods for stateless applications. They handle scaling, rolling updates, and automatic replacement of failed pods. Perfect for web applications where any pod can handle any request.
+- **ReplicaSets** ensure a specified number of identical pods are running at any time. While you rarely create ReplicaSets directly, they're the building blocks that Deployments use under the hood to manage pods.
+- **StatefulSets** provide stable identities and persistent storage for stateful applications. Each pod gets a unique name (like `mysql-0`, `mysql-1`) and its own persistent volume. Essential for databases and clustered applications.
+- **DaemonSets** ensure exactly one pod runs on each node (or selected nodes). Great for system-level services like log collectors or monitoring agents that need to run everywhere in your cluster.
+- **Jobs** run pods until they complete successfully, then stop. Unlike other controllers, they don't restart completed pods. Ideal for one-time tasks like data migrations or batch processing.
+- **CronJobs** create Jobs on a schedule using familiar cron syntax. They're perfect for recurring tasks like backups, report generation, or cleanup operations.
+
+## Understanding the Controller Hierarchy
+
+It's helpful to understand how these controllers relate to each other:
+
+**Deployment → ReplicaSet → Pods**
+
+When you create a Deployment, here's what happens:
+1. **Deployment** creates and manages ReplicaSets
+2. **ReplicaSet** creates and manages the actual Pods
+3. **Pods** run your application containers
+
+This layered approach enables powerful features:
+- **Rolling updates**: Deployments create new ReplicaSets while gradually scaling down old ones
+- **Rollbacks**: Deployments can switch back to previous ReplicaSet versions
+- **Scaling**: Changes to replica count flow through ReplicaSets to Pods
+
+You'll often see ReplicaSets when debugging (like `kubectl get rs`), but you typically manage them indirectly through Deployments.
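+
+You can see all three layers at once for the UI application (assuming it is deployed in your cluster):
+
+```bash test=false
+$ kubectl get deployment,replicaset,pod -n ui
+```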
+
+### Why Use Workload Controllers?
+
+**Managing pods directly:**
+- Manual pod replacement when they fail
+- No built-in scaling mechanisms
+- Complex update procedures
+- No rollback capabilities
+- Production management becomes difficult
+
+**Using workload controllers:**
+- Automatic pod replacement and healing
+- Easy scaling with a single command
+- Rolling updates with zero downtime
+- Simple rollback to previous versions
+- Production-ready management
+
+| Controller | Purpose | Best For |
+|------------|---------|----------|
+| **Deployments** | Stateless applications | Web apps, APIs, microservices |
+| **ReplicaSets** | Maintain pod replicas | Usually managed by Deployments |
+| **StatefulSets** | Stateful applications | Databases, message queues |
+| **DaemonSets** | Node-level services | Logging agents, monitoring |
+| **Jobs** | Run-to-completion tasks | Data migration, batch processing |
+| **CronJobs** | Scheduled tasks | Backups, reports, cleanup |
+
+### Choosing the Right Workload Controller
+
+Ask yourself these questions to pick the right controller:
+
+**What type of application am I running?**
+
+- **Web app, API, or microservice?** → Use **Deployment**
+ - Pods are interchangeable and stateless
+ - Can run multiple identical copies
+ - Example: Our retail store UI, catalog service
+
+- **Database or message queue?** → Use **StatefulSet**
+ - Needs persistent storage
+ - Requires stable network identity
+ - Example: MySQL database, Kafka cluster
+
+- **System service on every node?** → Use **DaemonSet**
+ - Monitoring, logging, or networking
+ - One pod per node automatically
+ - Example: Log collector, node monitoring
+
+- **One-time task or batch job?** → Use **Job**
+ - Runs until completion
+ - Database migration, data processing
+ - Example: Import product catalog
+
+- **Recurring scheduled task?** → Use **CronJob**
+ - Runs on a schedule (like cron)
+ - Backups, reports, cleanup
+ - Example: Daily sales report generation
+
+## Key Points to Remember
+
+* Different workload controllers serve different application patterns
+* Deployments are for stateless applications that can have identical replicas
+* StatefulSets are for stateful applications that need persistent identity
+* DaemonSets ensure pods run on every node for system-level services
+* Jobs run tasks to completion, CronJobs run them on schedule
+* Choose the right controller based on your application's requirements
+
+## Explore Each Workload Type
+
+Now that you have an overview of workload controllers, dive deeper into each type:
+
+- **[Deployments](./deployments)** - Learn to deploy and manage stateless applications like our retail store UI
+- **[StatefulSets](./statefulsets)** - Understand how to run stateful applications like databases with persistent storage
+- **[DaemonSets](./daemonsets)** - Explore system-level services that run on every node
+- **[Jobs & CronJobs](./jobs)** - Master batch processing and scheduled tasks
+
diff --git a/website/docs/introduction/basics/workload-management/jobs.md b/website/docs/introduction/basics/workload-management/jobs.md
new file mode 100644
index 0000000000..d6227de864
--- /dev/null
+++ b/website/docs/introduction/basics/workload-management/jobs.md
@@ -0,0 +1,404 @@
+---
+title: Jobs & CronJobs
+sidebar_position: 34
+---
+
+# Jobs & CronJobs
+
+**Jobs** and **CronJobs** are controllers for running **finite or recurring tasks**. Unlike Deployments or StatefulSets that keep pods running continuously, Jobs run tasks to completion, and CronJobs run Jobs on a schedule.
+
+Key benefits:
+- **Run to completion** - Pods finish the task and stop
+- **Retry failed tasks** - Automatically retry based on backoff policy
+- **Parallel execution** - Multiple Pods can run simultaneously
+- **Scheduled tasks** - CronJobs run tasks at specific times
+- **Track history** - Monitor successful and failed completions
+
+## When to Use Jobs & CronJobs
+
+**Use Jobs for:**
+- Database migrations and schema updates
+- Data processing and ETL operations
+- One-time setup tasks and initialization
+- Backup operations and file processing
+
+**Use CronJobs for:**
+- Regular backups (daily, weekly)
+- Cleanup tasks and log rotation
+- Report generation and data synchronization
+- Periodic health checks and monitoring
+
+## Deploying a Job
+
+Let's create a data processing job:
+
+::yaml{file="manifests/modules/introduction/basics/jobs/data-processing-job.yaml" paths="kind,metadata.name,spec.completions,spec.backoffLimit,spec.template.spec.restartPolicy" title="data-processing-job.yaml"}
+
+1. `kind: Job`: Creates a Job controller
+2. `metadata.name`: Name of the job (data-processor)
+3. `spec.completions`: Number of successful completions needed (1)
+4. `spec.backoffLimit`: Maximum retry attempts (3)
+5. `spec.template.spec.restartPolicy`: Pods never restart on failure; the Job controller handles retries
+
+Deploy the job:
+```bash
+$ kubectl apply -f ~/environment/eks-workshop/modules/introduction/basics/jobs/data-processing-job.yaml
+```
+
+## Inspecting the Job
+
+Check job status:
+```bash
+$ kubectl get jobs -n catalog
+NAME COMPLETIONS DURATION AGE
+data-processor 1/1 15s 1m
+```
+
+View the job's pod:
+```bash
+$ kubectl get pods -n catalog -l job-name=data-processor
+NAME READY STATUS RESTARTS AGE
+data-processor-h7mg7 0/1 Completed 0 25s
+```
+
+Wait for the job to complete:
+```bash
+$ kubectl wait --for=condition=complete --timeout=60s job/data-processor -n catalog
+```
+
+Check job logs to see the processing output:
+```bash
+$ kubectl logs -n catalog job/data-processor
+Starting data processing job...
+Processing catalog data files...
+Processing file 1/5...
+File 1 processed successfully
+...
+Data processing job completed successfully!
+```
+
+Get detailed job information:
+```bash
+$ kubectl describe job -n catalog data-processor
+Name: data-processor
+Namespace: catalog
+Selector: batch.kubernetes.io/controller-uid=639c46e3-ee04-4914-8c97-516a14087c1d
+Labels: app.kubernetes.io/created-by=eks-workshop
+ app.kubernetes.io/name=data-processor
+Annotations: <none>
+Parallelism: 1
+Completions: 1
+Completion Mode: NonIndexed
+Suspend: false
+Backoff Limit: 3
+Start Time: Sun, 05 Oct 2025 18:51:01 +0000
+Completed At: Sun, 05 Oct 2025 18:51:14 +0000
+Duration: 13s
+Pods Statuses: 0 Active (0 Ready) / 1 Succeeded / 0 Failed
+Pod Template:
+ Labels: app=data-processor
+ batch.kubernetes.io/controller-uid=639c46e3-ee04-4914-8c97-516a14087c1d
+ batch.kubernetes.io/job-name=data-processor
+ controller-uid=639c46e3-ee04-4914-8c97-516a14087c1d
+ job-name=data-processor
+ Containers:
+ processor:
+ Image: busybox:1.36
+ Port:
+ Host Port:
+ Command:
+ /bin/sh
+ -c
+ echo "Starting data processing job..."
+ echo "Processing catalog data files..."
+
+ # Simulate processing multiple files
+ for i in $(seq 1 5); do
+ echo "Processing file $i/5..."
+ sleep 2
+ echo "File $i processed successfully"
+ done
+
+ echo "Generating summary report..."
+ cat > /tmp/processing-report.txt << EOF
+ Data Processing Report
+ =====================
+ Job: data-processor
+ Date: $(date)
+ Files processed: 5
+ Status: Completed successfully
+ EOF
+
+ echo "Report generated:"
+ cat /tmp/processing-report.txt
+ echo "Data processing job completed successfully!"
+
+ Limits:
+ cpu: 200m
+ memory: 256Mi
+ Requests:
+ cpu: 100m
+ memory: 128Mi
+ Environment:
+ Mounts:
+ Volumes:
+ Node-Selectors:
+ Tolerations:
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal SuccessfulCreate 60s job-controller Created pod: data-processor-h7mg7
+ Normal Completed 47s job-controller Job completed
+```
+
+## Deploying a CronJob
+
+Let's create a cleanup CronJob that runs every minute:
+
+::yaml{file="manifests/modules/introduction/basics/jobs/catalog-cleanup.yaml" paths="kind,metadata.name,spec.schedule,spec.jobTemplate" title="catalog-cleanup.yaml"}
+
+1. `kind: CronJob`: Creates a CronJob controller
+2. `metadata.name`: Name of the CronJob (`catalog-cleanup`)
+3. `spec.schedule`: Cron schedule (`*/1 * * * *` = every minute)
+4. `spec.jobTemplate`: Template for jobs that will be created
+
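+As with the Job above, here is a minimal sketch of the manifest, reconstructed from the callouts and the `kubectl describe` output further down (the repository file is authoritative):
+
+```yaml
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: catalog-cleanup
+  namespace: catalog
+spec:
+  schedule: "*/1 * * * *" # every minute
+  successfulJobsHistoryLimit: 3 # keep the last 3 completed Jobs
+  failedJobsHistoryLimit: 1
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          restartPolicy: OnFailure
+          containers:
+            - name: cleanup
+              image: busybox:1.36
+              # abbreviated; the real script simulates cleaning up temporary files
+              command: ["/bin/sh", "-c", "echo 'Cleaning up temporary files...'"]
+```
+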
+Deploy the CronJob:
+```bash
+$ kubectl apply -f ~/environment/eks-workshop/modules/introduction/basics/jobs/catalog-cleanup.yaml
+```
+
+## Managing CronJobs
+
+View CronJobs:
+```bash
+$ kubectl get cronjobs -n catalog
+NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE
+catalog-cleanup   */1 * * * *   False     0        <none>          30s
+```
+
+Initially, `LAST SCHEDULE` shows `<none>` because the CronJob hasn't run yet. Since our CronJob runs every minute, let's manually trigger it to see it in action immediately:
+
+```bash
+# Manually trigger a CronJob to see it work immediately
+$ kubectl create job --from=cronjob/catalog-cleanup manual-cleanup -n catalog
+```
+
+Now view jobs created by the CronJob:
+```bash
+$ kubectl get jobs -n catalog
+NAME STATUS COMPLETIONS DURATION AGE
+data-processor Complete 1/1 13s 17m
+manual-cleanup Running 0/1 5s 5s
+```
+
+Wait for the job pod to be running before checking logs:
+```bash
+$ kubectl wait --for=jsonpath='{.status.phase}'=Running pod -l job-name=manual-cleanup -n catalog --timeout=60s
+```
+
+Check the logs of the job execution:
+```bash
+$ kubectl logs job/manual-cleanup -n catalog
+Starting cleanup job at Mon Oct 5 17:30:00 UTC 2025
+Checking for temporary files...
+Found 3 temporary files to clean up:
+ - /tmp/cache_file_1.tmp
+ - /tmp/cache_file_2.tmp
+ - /tmp/old_log.log
+Cleaning up temporary files...
+Temporary files removed successfully
+Cleanup completed at Mon Oct 5 17:30:05 UTC 2025
+Next cleanup scheduled in 1 minute
+```
+
+Wait for the CronJob to run automatically (or check back in 1 minute):
+```bash
+# Check if the CronJob has run automatically
+$ kubectl get cronjobs -n catalog
+NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE
+catalog-cleanup */1 * * * * False 0 30s 2m
+```
+
+View all jobs in the namespace (including those created by CronJobs):
+```bash
+$ kubectl get jobs -n catalog
+NAME STATUS COMPLETIONS DURATION AGE
+catalog-cleanup-29328191 Complete 1/1 9s 114s
+catalog-cleanup-29328192 Complete 1/1 9s 54s
+data-processor Complete 1/1 13s 21m
+manual-cleanup Complete 1/1 10s 56s
+```
+
+To see which jobs were created by a specific CronJob, look for jobs with names starting with the CronJob name:
+```bash hook=cronjob-first-run
+$ kubectl get jobs -n catalog | grep catalog-cleanup
+catalog-cleanup-29328192 Complete 1/1 9s 74s
+catalog-cleanup-29328193 Complete 1/1 8s 14s
+```
+
+You can also check a job's owner references to see which CronJob created it. The manually triggered `manual-cleanup` job has none (the CronJob controller didn't create it, which is why the controller reports it as an `UnexpectedJob` in its events), so inspect one of the `catalog-cleanup-*` jobs from the previous output instead:
+```bash
+$ kubectl get job catalog-cleanup-29328192 -n catalog -o yaml | grep -A 5 ownerReferences
+  ownerReferences:
+  - apiVersion: batch/v1
+    controller: true
+    kind: CronJob
+    name: catalog-cleanup
+    uid: 7f2deb86-a5c7-4703-ac5e-c5dd4893ff23
+```
+
+Clean up the manual job:
+```bash
+$ kubectl delete job manual-cleanup -n catalog
+```
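+
+Rather than deleting finished Jobs by hand, you can have Kubernetes garbage-collect them automatically by setting `ttlSecondsAfterFinished` in the Job spec, for example:
+
+```yaml
+spec:
+  ttlSecondsAfterFinished: 300 # delete the Job and its pods 5 minutes after it finishes
+```
+
+CronJobs additionally prune the Jobs they create according to their history limits, which we'll see in the output below.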
+
+### Monitoring CronJob Execution
+
+Check CronJob status and history:
+```bash
+$ kubectl describe cronjob catalog-cleanup -n catalog
+Name: catalog-cleanup
+Namespace: catalog
+Labels: app.kubernetes.io/created-by=eks-workshop
+ app.kubernetes.io/name=catalog-cleanup
+Annotations:                   <none>
+Schedule: */1 * * * *
+Concurrency Policy: Allow
+Suspend: False
+Successful Job History Limit: 3
+Failed Job History Limit: 1
+Starting Deadline Seconds:     <unset>
+Selector:                      <unset>
+Parallelism:                   <unset>
+Completions:                   <unset>
+Pod Template:
+ Labels: app=catalog-cleanup
+ Containers:
+ cleanup:
+ Image: busybox:1.36
+    Port:          <none>
+    Host Port:     <none>
+ Command:
+ /bin/sh
+ -c
+ echo "Starting cleanup job at $(date)"
+ echo "Checking for temporary files..."
+
+ # Simulate finding and cleaning up files
+ echo "Found 3 temporary files to clean up:"
+ echo " - /tmp/cache_file_1.tmp"
+ echo " - /tmp/cache_file_2.tmp"
+ echo " - /tmp/old_log.log"
+
+ # Simulate cleanup process
+ sleep 3
+ echo "Cleaning up temporary files..."
+ sleep 2
+ echo "Temporary files removed successfully"
+
+ echo "Cleanup completed at $(date)"
+ echo "Next cleanup scheduled in 1 minute"
+
+ Limits:
+ cpu: 100m
+ memory: 128Mi
+ Requests:
+ cpu: 50m
+ memory: 64Mi
+    Environment:   <none>
+    Mounts:        <none>
+  Volumes:         <none>
+  Node-Selectors:  <none>
+  Tolerations:     <none>
+Last Schedule Time: Sun, 05 Oct 2025 19:14:00 +0000
+Active Jobs: catalog-cleanup-29328194
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal SuccessfulCreate 19m cronjob-controller Created job catalog-cleanup-29328175
+ Normal SawCompletedJob 18m cronjob-controller Saw completed job: catalog-cleanup-29328175, condition: Complete
+ ...
+```
+
+This shows:
+- **Schedule**: When the job runs
+- **Last Schedule Time**: When it last executed
+- **Active**: Currently running jobs
+- **Events**: Recent CronJob activity
+
+View recent events for troubleshooting:
+```bash
+$ kubectl get events -n catalog --field-selector involvedObject.name=catalog-cleanup
+LAST SEEN TYPE REASON OBJECT MESSAGE
+20m Normal SuccessfulCreate cronjob/catalog-cleanup Created job catalog-cleanup-29328175
+20m Normal SawCompletedJob cronjob/catalog-cleanup Saw completed job: catalog-cleanup-29328175, condition: Complete
+3m28s Warning UnexpectedJob cronjob/catalog-cleanup Saw a job that the controller did not create or forgot: manual-cleanup
+18m Normal SuccessfulCreate cronjob/catalog-cleanup Created job catalog-cleanup-29328176
+18m Normal SuccessfulCreate cronjob/catalog-cleanup Created job catalog-cleanup-29328177
+18m Normal SawCompletedJob cronjob/catalog-cleanup Saw completed job: catalog-cleanup-29328176, condition: Complete
+18m Normal SuccessfulDelete cronjob/catalog-cleanup Deleted job catalog-cleanup-29328175
+18m Normal SawCompletedJob cronjob/catalog-cleanup Saw completed job: catalog-cleanup-29328177, condition: Complete
+17m Normal SuccessfulCreate cronjob/catalog-cleanup Created job catalog-cleanup-29328178
+17m Normal SuccessfulDelete cronjob/catalog-cleanup Deleted job catalog-cleanup-29328176
+17m Normal SawCompletedJob cronjob/catalog-cleanup Saw completed job: catalog-cleanup-29328178, condition: Complete
+```
+
+### Suspending and Resuming CronJobs
+
+Temporarily stop a CronJob:
+```bash
+$ kubectl patch cronjob catalog-cleanup -n catalog -p '{"spec":{"suspend":true}}'
+$ kubectl get cronjobs -n catalog
+NAME SCHEDULE TIMEZONE SUSPEND ACTIVE LAST SCHEDULE AGE
+catalog-cleanup */1 * * * * UTC True 0 42s 24m
+```
+
+Resume a suspended CronJob:
+```bash
+$ kubectl patch cronjob catalog-cleanup -n catalog -p '{"spec":{"suspend":false}}'
+$ kubectl get cronjobs -n catalog
+NAME SCHEDULE TIMEZONE SUSPEND ACTIVE LAST SCHEDULE AGE
+catalog-cleanup */1 * * * * UTC False 1 16s 24m
+```
+
+## Common Cron Schedules
+
+| Schedule | Description |
+|----------|-------------|
+| `0 2 * * *` | Daily at 2 AM |
+| `0 */6 * * *` | Every 6 hours |
+| `0 0 * * 0` | Every Sunday at midnight |
+| `*/15 * * * *` | Every 15 minutes |
+| `0 9 * * 1-5` | Weekdays at 9 AM |
+
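+Schedules can also be pinned to a specific IANA time zone with the `spec.timeZone` field (shown in the TIMEZONE column earlier), for example:
+
+```yaml
+spec:
+  schedule: "0 2 * * *"
+  timeZone: "America/New_York" # run at 2 AM Eastern rather than 2 AM UTC
+```
+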
+## Parallel Jobs
+
+For processing multiple items simultaneously:
+
+```yaml
+spec:
+ completions: 10 # Process 10 items total
+ parallelism: 3 # Run 3 pods at once
+```
+- `completions` = total number of successful Pods
+- `parallelism` = how many Pods run concurrently
+
+This runs up to 3 pods at a time until 10 pods have completed successfully, as shown in the sketch below.
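+
+Here's a sketch of a complete parallel Job (the name and workload are hypothetical). Using `completionMode: Indexed` gives each pod a unique index it can use to pick its slice of the work:
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: parallel-processor # hypothetical name
+spec:
+  completions: 10
+  parallelism: 3
+  completionMode: Indexed # each pod receives JOB_COMPLETION_INDEX (0-9)
+  template:
+    spec:
+      restartPolicy: Never
+      containers:
+        - name: worker
+          image: busybox:1.36
+          command: ["/bin/sh", "-c", "echo Processing item $JOB_COMPLETION_INDEX"]
+```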
+
+## Jobs vs Other Controllers
+| Controller | Purpose | Pods run continuously? | Use Case |
+|------------|---------|---------------|----------------|
+| Job | One-off task | No | Batch processing, migrations |
+| CronJob | Scheduled jobs | No | Backups, periodic reports |
+| Deployment | Long-running stateless app | Yes | Web apps, APIs |
+| StatefulSet | Stateful services | Yes | Databases, queues |
+
+## Key Points to Remember
+
+* Jobs run pods until tasks complete successfully
+* CronJobs create Jobs automatically on schedules
+* Job pods must use `restartPolicy: Never` or `OnFailure`; with `Never` the Job controller replaces failed pods, subject to the backoff limit
+* Set backoff limits to control retry attempts
+* Jobs can run multiple pods in parallel for faster processing
+* Clean up completed Jobs, manually or with `ttlSecondsAfterFinished`, to avoid resource accumulation
+* Jobs and CronJobs are ideal for finite or recurring batch tasks, not long-running services
\ No newline at end of file
diff --git a/website/docs/introduction/basics/workload-management/statefulsets.md b/website/docs/introduction/basics/workload-management/statefulsets.md
new file mode 100644
index 0000000000..86694c73fe
--- /dev/null
+++ b/website/docs/introduction/basics/workload-management/statefulsets.md
@@ -0,0 +1,103 @@
+---
+title: StatefulSets
+sidebar_position: 32
+---
+
+# StatefulSets
+
+**StatefulSets** manage applications that need **stable identities and persistent storage**. Unlike Deployments, where Pods are interchangeable, each Pod in a StatefulSet **keeps a unique, predictable identity** throughout its lifecycle.
+
+They provide several important benefits for stateful applications:
+- **Provide stable identities** - Pods get predictable names (mysql-0, mysql-1, mysql-2)
+- **Enable persistent storage** - Each pod can have its own persistent volume
+- **Ensure ordered operations** - Pods are created and deleted sequentially
+- **Maintain stable networking** - Each pod keeps the same network identity
+- **Support rolling updates in order** - Pods update one at a time
+
+## Deploying a StatefulSet
+
+Let's deploy a MySQL database for our catalog service:
+
+The following YAML creates a StatefulSet running MySQL for the catalog service, with persistent storage and predictable Pod names.
+
+::yaml{file="manifests/base-application/catalog/statefulset-mysql.yaml" paths="kind,metadata.name,spec.serviceName,spec.replicas" title="statefulset.yaml"}
+
+1. `kind: StatefulSet`: Creates a StatefulSet controller
+2. `metadata.name`: Name of the StatefulSet (catalog-mysql)
+3. `spec.serviceName`: Required for stable network identities (creates a headless Service)
+4. `spec.replicas`: Number of pods to run (1 for this example)
+
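+For orientation, a minimal sketch of the fields highlighted above (the image and labels are assumptions for illustration; the repository file is authoritative):
+
+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: catalog-mysql
+  namespace: catalog
+spec:
+  serviceName: catalog-mysql # headless Service that gives each pod a stable DNS name
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: catalog-mysql # assumed label; must match the pod template
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: catalog-mysql
+    spec:
+      containers:
+        - name: mysql
+          image: public.ecr.aws/docker/library/mysql:8.0 # assumed image reference
+          ports:
+            - containerPort: 3306
+```
+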
+Deploy the database:
+```bash
+$ kubectl apply -k ~/environment/eks-workshop/base-application/catalog/
+```
+
+## Inspecting StatefulSet
+
+Check StatefulSet status:
+```bash
+$ kubectl get statefulset -n catalog
+NAME READY AGE
+catalog-mysql 1/1 2m
+```
+
+View the pods created:
+```bash
+$ kubectl get pods -n catalog
+NAME READY STATUS RESTARTS AGE
+catalog-mysql-0 1/1 Running 0 2m
+```
+> Notice the predictable pod name with a number suffix.
+
+Get detailed information about the StatefulSet:
+```bash
+$ kubectl describe statefulset -n catalog catalog-mysql
+```
+
+The suffix (`-0`, `-1`, etc.) allows you to track each Pod individually for storage and network purposes.
+
+## Scaling StatefulSet
+
+Scale up to 3 replicas:
+```bash
+$ kubectl scale statefulset -n catalog catalog-mysql --replicas=3
+$ kubectl get pods -n catalog
+NAME              READY   STATUS    RESTARTS   AGE
+catalog-mysql-0   1/1     Running   0          5m
+catalog-mysql-1   1/1     Running   0          30s
+catalog-mysql-2   0/1     Pending   0          5s
+```
+Pods are created one at a time, in strict order: `catalog-mysql-1` must be ready before `catalog-mysql-2` is created. Re-run `kubectl get pods -n catalog` (or add `--watch`) to see the rollout progress.
+
+Scale back down:
+```bash
+$ kubectl scale statefulset -n catalog catalog-mysql --replicas=1
+```
+
+Pods are deleted in reverse order (2, then 1, keeping 0), ensuring stability.
+
+Kubernetes also ensures that **each Pod keeps its persistent volume**, even when scaled up or down.
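+
+Per-pod storage is typically declared with `volumeClaimTemplates`; here is a sketch of what that looks like (the claim name and size are illustrative):
+
+```yaml
+spec:
+  volumeClaimTemplates:
+    - metadata:
+        name: data # illustrative claim name
+      spec:
+        accessModes: ["ReadWriteOnce"]
+        resources:
+          requests:
+            storage: 10Gi
+```
+
+Each replica gets its own PersistentVolumeClaim (`data-catalog-mysql-0`, `data-catalog-mysql-1`, ...) that is re-attached to the same pod identity across restarts and rescheduling.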
+
+## StatefulSets vs Deployments
+| Feature | StatefulSet | Deployment |
+| ----------------- | ----------------------------- | ----------------- |
+| Pod Names | Stable (`mysql-0`, `mysql-1`) | Random |
+| Storage | Persistent per Pod | Usually ephemeral |
+| Creation/Deletion | Ordered | Any order |
+| Network Identity | Stable | Dynamic |
+| Use Case | Databases, message queues | Stateless apps |
+
+:::info
+StatefulSets are ideal for applications that require persistent identity, stable networking, and ordered operations.
+:::
+
+## Key Points to Remember
+
+* StatefulSets provide stable, unique identities for each pod
+* Perfect for databases, message queues, and clustered applications
+* Each pod can have its own persistent storage that survives restarts
+* Operations happen in order - creation (0→1→2) and deletion (2→1→0)
+* Pod names are predictable and never change
+* Use StatefulSets whenever your application needs identity, stability, and persistence
\ No newline at end of file
diff --git a/website/docs/introduction/basics/workload-management/tests/hook-cronjob-first-run.sh b/website/docs/introduction/basics/workload-management/tests/hook-cronjob-first-run.sh
new file mode 100644
index 0000000000..52cebdb92e
--- /dev/null
+++ b/website/docs/introduction/basics/workload-management/tests/hook-cronjob-first-run.sh
@@ -0,0 +1,41 @@
+set -Eeuo pipefail
+
+before() {
+ echo "Ensuring CronJob 'catalog-cleanup' has created at least one job..."
+
+ # Check if any jobs already exist from the CronJob
+  # grep -c already prints 0 when nothing matches; || true only swallows its non-zero exit status
+  job_count=$(kubectl get jobs -n catalog --no-headers 2>/dev/null | grep -c "catalog-cleanup-" || true)
+
+ if [[ "$job_count" -ge 1 ]]; then
+ echo "CronJob has already created $job_count job(s)."
+ return
+ fi
+
+ # If no jobs exist, wait a bit for the CronJob to run naturally
+ echo "No existing jobs found. Waiting up to 90 seconds for CronJob to run..."
+ for i in {1..9}; do
+    job_count=$(kubectl get jobs -n catalog --no-headers 2>/dev/null | grep -c "catalog-cleanup-" || true)
+ if [[ "$job_count" -ge 1 ]]; then
+ echo "CronJob has created $job_count job(s)."
+ return
+ fi
+ sleep 10
+ done
+
+ echo "CronJob hasn't run yet. This is normal for CronJobs with minute-based schedules."
+ echo "The test will proceed - CronJob jobs may appear in subsequent runs."
+}
+
+after() {
+ echo "Checking for CronJob-created jobs..."
+  job_count=$(kubectl get jobs -n catalog --no-headers 2>/dev/null | grep -c "catalog-cleanup-" || true)
+
+ if [[ "$job_count" -ge 1 ]]; then
+ echo "Found $job_count CronJob-created job(s). Verification successful."
+ else
+ echo "No CronJob-created jobs found yet. This is normal - CronJobs run on schedule."
+ echo "The manual job 'manual-cleanup' demonstrates the same functionality."
+ fi
+}
+
+"$@"
\ No newline at end of file
diff --git a/website/docs/introduction/getting-started/about.md b/website/docs/introduction/getting-started/about.md
deleted file mode 100644
index 4bb4819b4b..0000000000
--- a/website/docs/introduction/getting-started/about.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-title: Sample application
-sidebar_position: 10
----
-
-Most of the labs in this workshop use a common sample application to provide actual container components that we can work on during the exercises. The sample application models a simple web store application, where customers can browse a catalog, add items to their cart and complete an order through the checkout process.
-
-
-
-
-
-The application has several components and dependencies:
-
-
-
-| Component | Description |
-| --------- | --------------------------------------------------------------------------------------------- |
-| UI | Provides the front end user interface and aggregates API calls to the various other services. |
-| Catalog | API for product listings and details |
-| Cart | API for customer shopping carts |
-| Checkout | API to orchestrate the checkout process |
-| Orders | API to receive and process customer orders |
-
-Initially we'll deploy the application in a manner that is self-contained in the Amazon EKS cluster, without using any AWS services like load balancers or a managed database. Over the course of the labs we'll leverage different features of EKS to take advantage of broader AWS services and features for our retail store.
-
-You can find the full source code for the sample application on [GitHub](https://github.com/aws-containers/retail-store-sample-app).
diff --git a/website/docs/introduction/getting-started/assets/catalog-microservice.webp b/website/docs/introduction/getting-started/assets/catalog-microservice.webp
deleted file mode 100644
index 3e213f9481..0000000000
Binary files a/website/docs/introduction/getting-started/assets/catalog-microservice.webp and /dev/null differ
diff --git a/website/docs/introduction/getting-started/assets/ide-base.webp b/website/docs/introduction/getting-started/assets/ide-base.webp
deleted file mode 100644
index 49b50ab379..0000000000
Binary files a/website/docs/introduction/getting-started/assets/ide-base.webp and /dev/null differ
diff --git a/website/docs/introduction/getting-started/assets/ide-initial.webp b/website/docs/introduction/getting-started/assets/ide-initial.webp
deleted file mode 100644
index f1861e6529..0000000000
Binary files a/website/docs/introduction/getting-started/assets/ide-initial.webp and /dev/null differ
diff --git a/website/docs/introduction/getting-started/assets/ide-modules.webp b/website/docs/introduction/getting-started/assets/ide-modules.webp
deleted file mode 100644
index d28b9902c8..0000000000
Binary files a/website/docs/introduction/getting-started/assets/ide-modules.webp and /dev/null differ
diff --git a/website/docs/introduction/getting-started/assets/microservices.webp b/website/docs/introduction/getting-started/assets/microservices.webp
deleted file mode 100644
index e00e007ee1..0000000000
Binary files a/website/docs/introduction/getting-started/assets/microservices.webp and /dev/null differ
diff --git a/website/docs/introduction/getting-started/finish.md b/website/docs/introduction/getting-started/finish.md
deleted file mode 100644
index ece3c1316a..0000000000
--- a/website/docs/introduction/getting-started/finish.md
+++ /dev/null
@@ -1,63 +0,0 @@
----
-title: Other components
-sidebar_position: 50
----
-
-In this lab exercise, we'll deploy the rest of the sample application efficiently using the power of Kustomize. The following kustomization file shows how you can reference other kustomizations and deploy multiple components together:
-
-```file
-manifests/base-application/kustomization.yaml
-```
-
-:::tip
-Notice that the catalog API is in this kustomization, didn't we already deploy it?
-
-Because Kubernetes uses a declarative mechanism we can apply the manifests for the catalog API again and expect that because all of the resources are already created Kubernetes will take no action.
-:::
-
-Apply this kustomization to our cluster to deploy the rest of the components:
-
-```bash wait=10
-$ kubectl apply -k ~/environment/eks-workshop/base-application
-```
-
-After this is complete we can use `kubectl wait` to make sure all the components have started before we proceed:
-
-```bash timeout=200
-$ kubectl wait --for=condition=Ready --timeout=180s pods \
- -l app.kubernetes.io/created-by=eks-workshop -A
-```
-
-We'll now have a Namespace for each of our application components:
-
-```bash
-$ kubectl get namespaces -l app.kubernetes.io/created-by=eks-workshop
-NAME STATUS AGE
-carts Active 62s
-catalog Active 7m17s
-checkout Active 62s
-orders Active 62s
-other Active 62s
-ui Active 62s
-```
-
-We can also see all of the Deployments created for the components:
-
-```bash
-$ kubectl get deployment -l app.kubernetes.io/created-by=eks-workshop -A
-NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE
-carts carts 1/1 1 1 90s
-carts carts-dynamodb 1/1 1 1 90s
-catalog catalog 1/1 1 1 7m46s
-checkout checkout 1/1 1 1 90s
-checkout checkout-redis 1/1 1 1 90s
-orders orders 1/1 1 1 90s
-orders orders-postgresql 1/1 1 1 90s
-ui ui 1/1 1 1 90s
-```
-
-The sample application is now deployed and ready to provide a foundation for us to use in the rest of the labs in this workshop!
-
-:::tip
-If you want to understand more about Kustomize take a look at the [optional module](../kustomize/index.md) provided in this workshop.
-:::
diff --git a/website/docs/introduction/getting-started/first.md b/website/docs/introduction/getting-started/first.md
deleted file mode 100644
index afff87f19b..0000000000
--- a/website/docs/introduction/getting-started/first.md
+++ /dev/null
@@ -1,144 +0,0 @@
----
-title: Deploying our first component
-sidebar_position: 40
----
-
-The sample application is composed of a set of Kubernetes manifests organized in a way that can be easily applied with Kustomize. Kustomize is an open-source tool also provided as a native feature of the `kubectl` CLI. This workshop uses Kustomize to apply changes to Kubernetes manifests, making it easier to understand changes to manifest files without needing to manually edit YAML. As we work through the various modules of this workshop, we'll incrementally apply overlays and patches with Kustomize.
-
-The easiest way to browse the YAML manifests for the sample application and the modules in this workshop is using the file browser in the IDE:
-
-
-
-Expanding the `eks-workshop` and then `base-application` items will allow you to browse the manifests that make up the initial state of the sample application:
-
-
-
-The structure consists of a directory for each application component that was outlined in the **Sample application** section.
-
-The `modules` directory contains sets of manifests that we will apply to the cluster throughout the subsequent lab exercises:
-
-
-
-Before we do anything lets inspect the current Namespaces in our EKS cluster:
-
-```bash
-$ kubectl get namespaces
-NAME STATUS AGE
-default Active 1h
-kube-node-lease Active 1h
-kube-public Active 1h
-kube-system Active 1h
-```
-
-All of the entries listed are Namespaces for system components that were pre-installed for us. We'll ignore these by using [Kubernetes labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) to filter the Namespaces down to only those we've created:
-
-```bash
-$ kubectl get namespaces -l app.kubernetes.io/created-by=eks-workshop
-No resources found
-```
-
-The first thing we'll do is deploy the catalog component by itself. The manifests for this component can be found in `~/environment/eks-workshop/base-application/catalog`.
-
-```bash
-$ ls ~/environment/eks-workshop/base-application/catalog
-configMap.yaml
-deployment.yaml
-kustomization.yaml
-namespace.yaml
-secrets.yaml
-service-mysql.yaml
-service.yaml
-serviceAccount.yaml
-statefulset-mysql.yaml
-```
-
-These manifests include the Deployment for the catalog API which expresses the desired state of the catalog API component:
-
-::yaml{file="manifests/base-application/catalog/deployment.yaml" paths="spec.replicas,spec.template.metadata.labels,spec.template.spec.containers.0.image,spec.template.spec.containers.0.ports,spec.template.spec.containers.0.livenessProbe,spec.template.spec.containers.0.resources"}
-
-1. Run a single replica
-2. Apply labels to the Pods so other resources can refer to them
-3. Use the `public.ecr.aws/aws-containers/retail-store-sample-catalog` container image
-4. Expose the container on port 8080 named `http`
-5. Run [probes/healthchecks](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) against the `/health` path
-6. [Requests](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) a specific amount of CPU and memory so the Kubernetes scheduler can place it on a node with enough available resources
-
-The manifests also include the Service used by other components to access the catalog API:
-
-::yaml{file="manifests/base-application/catalog/service.yaml" paths="spec.ports,spec.selector"}
-
-1. Exposes itself on port 80 and targets the `http` port exposed by the Deployment, which translates to port 8080
-2. Selects catalog Pods using labels that match what we expressed in the Deployment above
-
-Let's create the catalog component:
-
-```bash
-$ kubectl apply -k ~/environment/eks-workshop/base-application/catalog
-namespace/catalog created
-serviceaccount/catalog created
-configmap/catalog created
-secret/catalog-db created
-service/catalog created
-service/catalog-mysql created
-deployment.apps/catalog created
-statefulset.apps/catalog-mysql created
-```
-
-Now we'll see a new Namespace:
-
-```bash
-$ kubectl get namespaces -l app.kubernetes.io/created-by=eks-workshop
-NAME STATUS AGE
-catalog Active 15s
-```
-
-We can take a look at the Pods running in this namespace:
-
-```bash
-$ kubectl get pod -n catalog
-NAME READY STATUS RESTARTS AGE
-catalog-846479dcdd-fznf5 1/1 Running 2 (43s ago) 46s
-catalog-mysql-0 1/1 Running 0 46s
-```
-
-Notice we have a Pod for our catalog API and another for the MySQL database. If the `catalog` Pod is showing a status of `CrashLoopBackOff`, it needs to be able to connect to the `catalog-mysql` Pod before it will start. Kubernetes will keep restarting it until this is the case. In that case, we can use [kubectl wait](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#wait) to monitor specific Pods until they are in a Ready state:
-
-```bash
-$ kubectl wait --for=condition=Ready pods --all -n catalog --timeout=180s
-```
-
-Now that the Pods are running we can [check their logs](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#logs), for example the catalog API:
-
-:::tip
-You can ["follow" the kubectl logs output](https://kubernetes.io/docs/reference/kubectl/cheatsheet/) by using the '-f' option with the command. (Use CTRL-C to stop following the output)
-:::
-
-```bash
-$ kubectl logs -n catalog deployment/catalog
-```
-
-Kubernetes also allows us to easily scale the number of catalog Pods horizontally:
-
-```bash
-$ kubectl scale -n catalog --replicas 3 deployment/catalog
-deployment.apps/catalog scaled
-$ kubectl wait --for=condition=Ready pods --all -n catalog --timeout=180s
-```
-
-The manifests we applied also create a Service for each of our application and MySQL Pods that can be used by other components in the cluster to connect:
-
-```bash
-$ kubectl get svc -n catalog
-NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
-catalog ClusterIP 172.20.83.84 80/TCP 2m48s
-catalog-mysql ClusterIP 172.20.181.252 3306/TCP 2m48s
-```
-
-These Services are internal to the cluster, so we cannot access them from the Internet or even the VPC. However, we can use [exec](https://kubernetes.io/docs/tasks/debug/debug-application/get-shell-running-container/) to access an existing Pod in the EKS cluster to check the catalog API is working:
-
-```bash
-$ kubectl -n catalog exec -i \
- deployment/catalog -- curl catalog.catalog.svc/catalog/products | jq .
-```
-
-You should receive back a JSON payload with product information. Congratulations, you've just deployed your first microservice to Kubernetes with EKS!
diff --git a/website/docs/introduction/getting-started/index.md b/website/docs/introduction/getting-started/index.md
index bd4d533cb9..f5c6790884 100644
--- a/website/docs/introduction/getting-started/index.md
+++ b/website/docs/introduction/getting-started/index.md
@@ -1,25 +1,149 @@
---
-title: Getting started
-sidebar_position: 30
+title: Getting Started
+sidebar_position: 50
sidebar_custom_props: { "module": true }
description: "Learn the basics of running workloads on Amazon Elastic Kubernetes Service."
---
::required-time
-Welcome to the first hands-on lab in the EKS workshop. The goal of this exercise is to familiarize ourselves with the sample application we'll use for many of the coming lab exercises and in doing so touch on some basic concepts related to deploying workloads to EKS. We'll explore the architecture of the application and deploy out the components to our EKS cluster.
-
-Let's deploy your first workload to the EKS cluster in your lab environment and explore!
+Welcome to the first hands-on lab in the EKS workshop. The goal of this exercise is to prepare the IDE environment, get familiar with the structure of your EKS cluster, and deploy the sample application we'll use throughout the workshop.
Before we begin we need to run the following command to prepare our IDE environment and EKS cluster:
+:::tip Prepare your environment for this section:
+
```bash
$ prepare-environment introduction/getting-started
```
+This command will clone the EKS workshop Git repository into the IDE environment.
+:::
+
+
+<details>
+<summary>What does prepare-environment do? (Click to expand)</summary>
+
+The `prepare-environment` command is a crucial tool that sets up your lab environment for each workshop module. Here's what it does behind the scenes:
+
+- **Repository Setup**: Downloads the latest EKS Workshop content from GitHub to `/eks-workshop/repository` and links Kubernetes manifests to `~/environment/eks-workshop`
+- **Cluster Reset & Cleanup**: Resets the sample retail application to its base state. Removes any leftover resources from previous labs and restores EKS managed node groups to initial size (3 nodes).
+- **Lab-Specific Infrastructure**: Ensures the target module is ready to use by creating any extra AWS resources using Terraform, deploying the required Kubernetes manifests, configuring environment variables, and installing necessary add-ons or components.
+
+</details>
+
+## Workshop Structure
+
+After running `prepare-environment`, you'll have access to the workshop materials at `~/environment/eks-workshop/`. The workshop is organized into modular sections that you can complete in any order.
+
+## Exploring Your EKS Cluster
+
+Now that your environment is ready, let's explore the EKS cluster that's been provisioned for you. Run these commands to get familiar with your cluster:
+
+### Cluster Information
+
+First, let's verify your cluster connection and get basic information:
+
+```bash
+$ kubectl cluster-info
+Kubernetes control plane is running at https://XXXXXXXXXXXXXXXXXXXXXXXXXX.gr7.us-west-2.eks.amazonaws.com
+CoreDNS is running at https://XXXXXXXXXXXXXXXXXXXXXXXXXX.gr7.us-west-2.eks.amazonaws.com/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
+
+To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
+```
+
+Check the cluster version:
+```bash
+$ kubectl version
+Client Version: v1.33.5
+Kustomize Version: v5.6.0
+Server Version: v1.33.5-eks-113cf36
+```
+
+Check the worker nodes in the cluster:
+
+```bash
+$ kubectl get nodes -o wide
+NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
+ip-10-42-121-153.us-west-2.compute.internal   Ready    <none>   26h   v1.33.5-eks-113cf36   10.42.121.153   <none>        Amazon Linux 2023.9.20250929   6.12.46-66.121.amzn2023.x86_64   containerd://1.7.27
+ip-10-42-141-241.us-west-2.compute.internal   Ready    <none>   26h   v1.33.5-eks-113cf36   10.42.141.241   <none>        Amazon Linux 2023.9.20250929   6.12.46-66.121.amzn2023.x86_64   containerd://1.7.27
+ip-10-42-183-73.us-west-2.compute.internal    Ready    <none>   26h   v1.33.5-eks-113cf36   10.42.183.73    <none>        Amazon Linux 2023.9.20250929   6.12.46-66.121.amzn2023.x86_64   containerd://1.7.27
+```
+
+This shows your worker nodes, their Kubernetes version, internal/external IPs, and the container runtime being used.
+
+### Explore Cluster Components
+
+Let's look at the system components running in your cluster:
+
+```bash
+$ kubectl get pods -n kube-system
+NAME READY STATUS RESTARTS AGE
+aws-node-8cz4d 2/2 Running 0 26h
+aws-node-jlg4q 2/2 Running 0 26h
+aws-node-vdc56 2/2 Running 0 26h
+coredns-7bf648ff5d-4fqv9 1/1 Running 0 26h
+coredns-7bf648ff5d-bfwwf 1/1 Running 0 26h
+kube-proxy-77ln2 1/1 Running 0 26h
+kube-proxy-7bwbj 1/1 Running 0 26h
+kube-proxy-jnhfx 1/1 Running 0 26h
+metrics-server-7fb96f5556-2k4lh 1/1 Running 0 26h
+metrics-server-7fb96f5556-mpj78 1/1 Running 0 26h
+```
+
+You'll see essential components like:
+- **CoreDNS** - Provides DNS services for the cluster
+- **VPC CNI** (the `aws-node` pods) - Handles pod networking within your VPC
+- **kube-proxy** - Manages network rules on each node
+- **metrics-server** - Collects the resource metrics used by commands like `kubectl top`
+
+## Deploy the Sample Application
+
+Let's deploy the retail store application to see Kubernetes in action. We'll use Kustomize, which is built into kubectl:
+
+```bash wait=10
+$ kubectl apply -k ~/environment/eks-workshop/base-application
+```
+
+After this is complete we can use `kubectl wait` to make sure all the components have started before we proceed:
+
+```bash timeout=200
+$ kubectl wait --for=condition=Ready --timeout=180s pods \
+ -l app.kubernetes.io/created-by=eks-workshop -A
+```
+
+We'll now have a Namespace for each of our application components:
+
+```bash
+$ kubectl get namespaces -l app.kubernetes.io/created-by=eks-workshop
+NAME STATUS AGE
+carts Active 62s
+catalog Active 7m17s
+checkout Active 62s
+orders Active 62s
+other Active 62s
+ui Active 62s
+```
+
+We can also see all of the Deployments created for the components:
+
+```bash
+$ kubectl get deployment -l app.kubernetes.io/created-by=eks-workshop -A
+NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE
+carts carts 1/1 1 1 90s
+carts carts-dynamodb 1/1 1 1 90s
+catalog catalog 1/1 1 1 7m46s
+checkout checkout 1/1 1 1 90s
+checkout checkout-redis 1/1 1 1 90s
+orders orders 1/1 1 1 90s
+orders orders-postgresql 1/1 1 1 90s
+ui ui 1/1 1 1 90s
+```
+
+The sample application is now deployed and ready to provide a foundation for us to use in the rest of the labs in this workshop!
-What is this command doing? For this lab it is cloning the EKS Workshop Git repository in to the IDE environment so the Kubernetes manifest files we need are present on the file system.
+## What's Next?
-You'll notice in subsequent labs we'll also run this command, where it will perform two important additional functions:
+Your EKS cluster is ready and the sample application is deployed! You can now jump into any workshop module based on your learning goals.
-1. Reset the EKS cluster back to its initial state
-2. Install any additional components needed in to the cluster for the upcoming lab exercise
+:::tip
+Each module is self-contained and includes its own `prepare-environment` command to set up the required resources. You can complete them in any order!
+:::
diff --git a/website/docs/introduction/getting-started/microservices.md b/website/docs/introduction/getting-started/microservices.md
deleted file mode 100644
index b806c8d12a..0000000000
--- a/website/docs/introduction/getting-started/microservices.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-title: Microservices on Kubernetes
-sidebar_position: 30
----
-
-Now that we're familiar with the overall architecture of the sample application, how will we initially deploy this in to EKS? Let's explore some of the Kubernetes building blocks by looking at the **catalog** component:
-
-
-
-There are a number of things to consider in this diagram:
-
-- The application that provides the catalog API runs as a [Pod](https://kubernetes.io/docs/concepts/workloads/pods/), which is the smallest deployable unit in Kubernetes. Application Pods will run the container images we outlined in the previous section.
-- The Pods that run for the catalog component are created by a [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) which may manage one or more "replicas" of the catalog Pod, allowing it to scale horizontally.
-- A [Service](https://kubernetes.io/docs/concepts/services-networking/service/) is an abstract way to expose an application running as a set of Pods, and this allows our catalog API to be called by other components inside the Kubernetes cluster. Each Service is given its own DNS entry.
-- We're starting this workshop with a MySQL database that runs inside our Kubernetes cluster as a [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/), which is designed to manage stateful workloads.
-- All of these Kubernetes constructs are grouped in their own dedicated catalog Namespace. Each of the application components has its own Namespace.
-
-Each of the components in the microservices architecture is conceptually similar to the catalog, using Deployments to manage application workload Pods and Services to route traffic to those Pods. If we expand out our view of the architecture we can consider how traffic is routed throughout the broader system:
-
-
-
-The **ui** component receives HTTP requests from, for example, a user's browser. It then makes HTTP requests to other API components in the architecture to fulfill that request and returns a response to the user. Each of the downstream components may have their own data stores or other infrastructure. The Namespaces are a logical grouping of the resources for each microservice and also act as a soft isolation boundary, which can be used to effectively implement controls using Kubernetes RBAC and Network Policies.
diff --git a/website/docs/introduction/index.md b/website/docs/introduction/index.md
index 30ce71c000..d4ff2be426 100644
--- a/website/docs/introduction/index.md
+++ b/website/docs/introduction/index.md
@@ -8,7 +8,7 @@ Welcome to the **AWS Elastic Kubernetes Service (EKS) workshop**!
This workshop guides you through a set of hands-on lab exercises to learn and explore the various features provided by EKS and how it integrates with the broader set of services offered by AWS. The labs are grouped across a number of areas:
-- **Introduction** - Learn the format and structure of this workshop
+- **Introduction** - Get started with the workshop's format and structure, set up your environment, learn Kubernetes basics, and familiarize yourself with the sample application
- **Fundamentals** - Familiarize yourself with basic EKS concepts such as managed node groups, Fargate, exposing your applications and utilizing storage
- **Autoscaling** - Understand how to automatically scale your applications and clusters horizontally and vertically
- **Observability** - Monitoring is a critical factor getting a workload to production
diff --git a/website/docs/introduction/navigating-labs.md b/website/docs/introduction/navigating-labs.md
index 60e4acc083..8c0b774201 100644
--- a/website/docs/introduction/navigating-labs.md
+++ b/website/docs/introduction/navigating-labs.md
@@ -1,12 +1,12 @@
---
-title: Navigating the labs
-sidebar_position: 25
+title: Navigating the Labs
+sidebar_position: 30
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-Let’s review how to navigate this web site and the content provided.
+Let’s review how to navigate this website and the content provided.
## Structure
@@ -15,21 +15,61 @@ The content of this workshop is made up of:
1. Individual lab exercises
2. Supporting content that explains concepts related to the labs
-The lab exercises are designed in a way that you can run any modules as a self-contained exercise. Lab exercises will be displayed in the sidebar to the left and are designated by the icon shown here:
+The lab exercises are designed in a way that you can run any modules as a self-contained exercise. Lab exercises will be displayed in the sidebar to the left and are designated by the `LAB` icon.
-
+## Prepare Environment
+
+The `prepare-environment` tool helps you set up and configure your lab environment for each section. Simply run:
+
+```
+$ prepare-environment $MODULE_NAME
+```
+
+### Basic Usage Patterns
+```
+$ prepare-environment $MODULE_NAME/$LAB
+```
+
+**Examples**
+```
+# For the getting started lab
+$ prepare-environment introduction/getting-started
+
+# For Karpenter autoscaling
+$ prepare-environment autoscaling/compute/karpenter
+
+# For storage with EBS
+$ prepare-environment fundamentals/storage/ebs
+
+# For networking security groups
+$ prepare-environment networking/securitygroups-for-pods
+```
+
+### Reset Entire Environment
+```
+# Resets everything back to base state
+$ prepare-environment
+```
-This module contains a single lab named **Getting started** which will be visible on the left side of your screen.
:::caution
-You should start each lab from the page indicated by this badge. Starting in the middle of a lab will cause unpredictable behavior.
+You should start each lab from the page indicated by the "BEFORE YOU START" badge. Starting in the middle of a lab will cause unpredictable behavior.
:::
+## Resetting Your Cluster
+In the event that you accidentally configure your cluster or a module in a way that is not functioning, you can reset your EKS cluster at any time. Simply run the `prepare-environment` command with no arguments and wait until it completes. This may take several minutes depending on the state of your cluster when it is run.
+
+```bash test=false
+$ prepare-environment
+```
+
+## Tips
+
+### Copy/Paste Permission
Depending on your browser the first time you copy/paste content in to the VSCode terminal you may be presented with a prompt that looks like this:

-
-## Terminal commands
+### Terminal commands
Most of the interaction you will do in this workshop will be done with terminal commands, which you can either manually type or copy/paste to the IDE terminal. You will see this terminal commands displayed like this:
@@ -59,10 +99,6 @@ Fri Aug 30 12:26:58 MDT 2024
In this case you can either copy each command individually or copy all of the commands using the clipboard icon in the top right of the terminal window. Give it a shot!
-## Resetting your EKS cluster
-
-In the event that you accidentally configure your cluster in a way that is not functioning you have been provided with a mechanism to reset your EKS cluster as best we can which can be run at any time. Simply run the command `prepare-environment` and wait until it completes. This may take several minutes depending on the state of your cluster when it is run.
-
## Next Steps
-Now that you're familiar with the format of this workshop, head to the [Getting started](/docs/introduction/getting-started) lab or skip ahead to any module in the workshop with the top navigation bar.
+Now that you're familiar with the format of this workshop, head to the [Application Overview](/docs/introduction/application-overview) to learn about the sample application, then proceed to the [Getting Started](/docs/introduction/getting-started) lab, or skip ahead to any module in the workshop using the top navigation bar.
diff --git a/website/docs/introduction/setup/index.md b/website/docs/introduction/setup/index.md
index 96bc98fa31..ececc47e04 100644
--- a/website/docs/introduction/setup/index.md
+++ b/website/docs/introduction/setup/index.md
@@ -1,5 +1,5 @@
---
-title: Setup
+title: Environment Setup
sidebar_position: 20
---
diff --git a/website/test-durations.json b/website/test-durations.json
index 638ac5e9c3..fa722989a2 100644
--- a/website/test-durations.json
+++ b/website/test-durations.json
@@ -94,9 +94,25 @@
"/introduction/getting-started/finish.md": 17926,
"/introduction/getting-started/first.md": 12371,
"/introduction/getting-started/index.md": 832,
- "/introduction/helm/index.md": 182112,
+ "/introduction/basics/index.md": 99061,
+ "/introduction/basics/access/index.md": 1,
+ "/introduction/basics/access/kubectl.md": 80,
+ "/introduction/basics/access/kubeconfig.md": 1183,
+ "/introduction/basics/namespaces/index.md": 12875,
+ "/introduction/basics/pods/index.md": 24964,
+ "/introduction/basics/workload-management/index.md": 1,
+ "/introduction/basics/workload-management/deployments.md": 14198,
+ "/introduction/basics/workload-management/statefulsets.md": 9980,
+ "/introduction/basics/workload-management/daemonsets.md": 2824,
+ "/introduction/basics/workload-management/jobs.md": 58839,
+ "/introduction/basics/services/index.md": 65391,
+ "/introduction/basics/configuration/index.md": 1,
+ "/introduction/basics/configuration/configmaps/index.md": 11993,
+ "/introduction/basics/configuration/secrets/index.md": 11806,
+ "/introduction/basics/package-management/index.md": 12048,
+ "/introduction/basics/kustomize/index.md": 53626,
+ "/introduction/basics/helm/index.md": 72437,
"/introduction/index.md": 1,
- "/introduction/kustomize/index.md": 155526,
"/networking/index.md": 1,
"/networking/vpc-cni/custom-networking/configure-vpc-cni.md": 153572,
"/networking/vpc-cni/custom-networking/deploy-sample-application.md": 26497,
@@ -210,4 +226,4 @@
"/troubleshooting/dns/index.md": 16049,
"/troubleshooting/pod/index.md": 16049,
"/troubleshooting/workernodes/index.md": 16049
-}
+}
\ No newline at end of file