-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathstart.sh
More file actions
173 lines (148 loc) · 5.43 KB
/
start.sh
File metadata and controls
173 lines (148 loc) · 5.43 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
#!/bin/bash
# start.sh — bring up the lab: ensure a k3s cluster (optional), sync a
# kubeconfig, apply k8s manifests, then build and run the controller stack.
set -euo pipefail
# Resolve project root (directory containing this script).
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
export PROJECT_ROOT
# Shared helpers; presumably defines ensure_requirements and the
# DOCKER_COMPOSE_CMD array used below — TODO confirm in check_requirements.sh.
source "${PROJECT_ROOT}/check_requirements.sh"
# Allow placeholder kubeconfig while the cluster is being bootstrapped.
ALLOW_PLACEHOLDER_KUBECONFIG=1 ensure_requirements
# Cluster backend: "k3s" (manage local k3s) or "skip" (cluster managed externally).
CLUSTER_PROVIDER="${CLUSTER_PROVIDER:-k3s}"
# Optional explicit host IPv4; auto-detected when empty.
HOST_IPV4="${HOST_IPV4:-}"
# Require gVisor + apply the runsc RuntimeClass unless set to 0/false/no.
USE_RUNTIME_CLASS="${LAB_USE_RUNTIME_CLASS:-1}"
# Print the host's primary IPv4 address on stdout (prints empty on failure).
# An explicit HOST_IPV4 override wins; otherwise take the first non-loopback
# IPv4 reported by `hostname -I`, falling back to the local address chosen
# for a route towards 1.1.1.1.
detect_host_ipv4() {
  if [ -n "${HOST_IPV4}" ]; then
    echo "${HOST_IPV4}"
    return
  fi
  local ip
  # BUGFIX: `hostname -I` lists IPv6 addresses too, and filtering only
  # /^127\./ could let an IPv6 token through. Require a dotted-quad so this
  # IPv4-only helper never returns an IPv6 address.
  ip="$(hostname -I 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i ~ /^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$/ && $i !~ /^127\./) {print $i; exit}}')"
  if [ -z "${ip}" ]; then
    ip="$(ip -4 route get 1.1.1.1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i ~ /^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$/) {print $i; exit}}')"
  fi
  echo "${ip}"
}
# Start (or reuse) the local k3s cluster, wait for its API, and write a
# kubeconfig under ${PROJECT_ROOT}/.kube pointing at the host's IPv4.
# Globals read: USE_RUNTIME_CLASS, PROJECT_ROOT, HOST_IPV4 (via helper).
# Globals written/exported: HOST_IPV4, LAB_API_SERVER, KUBECONFIG_FILE,
# KUBECONFIG_DIR, KUBECONFIG.
# Exits non-zero if a required binary is missing or the API never comes up.
ensure_k3s_cluster() {
  if ! command -v k3s >/dev/null 2>&1; then
    echo "k3s binary not found. Run install_requirements.sh first." >&2
    exit 1
  fi
  # gVisor's runsc is mandatory unless USE_RUNTIME_CLASS is 0/false/no.
  if [[ "${USE_RUNTIME_CLASS}" != "0" && "${USE_RUNTIME_CLASS,,}" != "false" && "${USE_RUNTIME_CLASS,,}" != "no" ]]; then
    if ! command -v runsc >/dev/null 2>&1; then
      echo "runsc (gVisor) binary not found. Install gVisor or set LAB_USE_RUNTIME_CLASS=0." >&2
      exit 1
    fi
  fi
  local host_ip
  host_ip="$(detect_host_ipv4)"
  if [ -z "${host_ip}" ]; then
    echo "Failed to determine host IPv4 address for kube-apiserver access." >&2
    exit 1
  fi
  # Publish the resolved address for later steps and sourced helpers.
  HOST_IPV4="${host_ip}"
  export LAB_API_SERVER="https://${host_ip}:6443"
  if ! sudo systemctl is-active --quiet k3s; then
    echo "[+] Starting k3s service"
    sudo systemctl start k3s
  else
    echo "[=] k3s service already running"
  fi
  echo "[+] Waiting for k3s API to become ready"
  # Poll for up to ~120s (60 tries x 2s sleep).
  local ready=0
  for _ in $(seq 1 60); do
    if sudo k3s kubectl get nodes >/dev/null 2>&1; then
      ready=1
      break
    fi
    sleep 2
  done
  if [ "${ready}" -ne 1 ]; then
    echo "k3s API did not become ready in time." >&2
    exit 1
  fi
  echo "[+] Syncing kubeconfig for controller"
  local kube_dir="${PROJECT_ROOT}/.kube"
  mkdir -p "${kube_dir}"
  # Copy the root-owned kubeconfig (redirect runs as the invoking user),
  # then hand ownership to that user.
  sudo cat /etc/rancher/k3s/k3s.yaml > "${kube_dir}/config"
  sudo chown "$(id -u):$(id -g)" "${kube_dir}/config"
  # Rewrite every cluster's server URL to the host IP and disable TLS
  # verification, dropping the CA data — presumably because the k3s cert
  # does not cover the host IP; NOTE(review): confirm this is intended,
  # it weakens transport security for all kubeconfig consumers.
  python3 - "$kube_dir/config" "$host_ip" <<'PY'
import sys
from pathlib import Path
try:
    import yaml
except ImportError as exc:
    raise SystemExit(f"PyYAML is required to adjust kubeconfig: {exc}")
config_path = Path(sys.argv[1])
host_ip = sys.argv[2]
with config_path.open("r", encoding="utf-8") as fh:
    config = yaml.safe_load(fh)
server = f"https://{host_ip}:6443"
for cluster in config.get("clusters", []):
    cluster.setdefault("cluster", {})["server"] = server
    cluster.setdefault("cluster", {})["insecure-skip-tls-verify"] = True
    if "certificate-authority-data" in cluster.get("cluster", {}):
        del cluster["cluster"]["certificate-authority-data"]
with config_path.open("w", encoding="utf-8") as fh:
    yaml.safe_dump(config, fh)
PY
  export KUBECONFIG_FILE="${kube_dir}/config"
  export KUBECONFIG_DIR="${kube_dir}"
  export KUBECONFIG="${KUBECONFIG_FILE}"
}
# Run kubectl with the flags appropriate for the active CLUSTER_PROVIDER.
# Prefers a native kubectl binary; for k3s, falls back to the kubectl
# bundled with k3s (via sudo). Exits non-zero on an unusable configuration.
kubectl_cmd() {
  local have_kubectl=0
  if command -v kubectl >/dev/null 2>&1; then
    have_kubectl=1
  fi
  case "${CLUSTER_PROVIDER}" in
    k3s)
      # Shared flag set for the k3s provider (self-signed cert, no validation).
      local -a k3s_flags=(--kubeconfig "${KUBECONFIG_FILE}" --insecure-skip-tls-verify --validate=false)
      if [ "${have_kubectl}" -eq 1 ]; then
        kubectl "${k3s_flags[@]}" "$@"
      else
        sudo k3s kubectl "${k3s_flags[@]}" "$@"
      fi
      ;;
    skip)
      if [ "${have_kubectl}" -eq 1 ]; then
        kubectl --kubeconfig "${KUBECONFIG_FILE}" --validate=false "$@"
      else
        echo "kubectl not found and CLUSTER_PROVIDER=skip. Install kubectl or use k3s." >&2
        exit 1
      fi
      ;;
    *)
      echo "Unsupported CLUSTER_PROVIDER '${CLUSTER_PROVIDER}'. Supported: k3s, skip." >&2
      exit 1
      ;;
  esac
}
# --- Main flow -------------------------------------------------------------
# Bring up (or skip) the cluster according to CLUSTER_PROVIDER.
case "${CLUSTER_PROVIDER}" in
  k3s)
    ensure_k3s_cluster
    ;;
  skip)
    echo "[=] Cluster management skipped (CLUSTER_PROVIDER=skip)."
    ;;
  *)
    echo "Unsupported CLUSTER_PROVIDER '${CLUSTER_PROVIDER}'. Supported: k3s, skip." >&2
    exit 1
    ;;
esac
# Re-run the requirements check now that kubeconfig is populated.
ensure_requirements
# Session image tag; not referenced in this script — presumably consumed by
# docker-compose / the controller via the environment. TODO confirm.
SESSION_IMAGE="${SESSION_IMAGE:-hackersir/session:latest}"
echo "[+] Applying Kubernetes manifests (namespace/RBAC/runtimeclass)"
kubectl_cmd apply -f "${PROJECT_ROOT}/k8s/namespace.yaml"
kubectl_cmd apply -f "${PROJECT_ROOT}/k8s/rbac-controller.yaml"
# The gVisor RuntimeClass is optional; same 0/false/no switch as above.
if [[ "${USE_RUNTIME_CLASS}" != "0" && "${USE_RUNTIME_CLASS,,}" != "false" && "${USE_RUNTIME_CLASS,,}" != "no" ]]; then
  kubectl_cmd apply -f "${PROJECT_ROOT}/k8s/runtimeclass-runsc.yaml"
fi
echo "[+] Building controller service"
"${DOCKER_COMPOSE_CMD[@]}" -f "${PROJECT_ROOT}/docker-compose.yml" build controller
echo "[+] Starting metrics-proxy (for kubectl diagnostics)"
"${DOCKER_COMPOSE_CMD[@]}" -f "${PROJECT_ROOT}/docker-compose.yml" up -d metrics-proxy
# Smoke-test in-container connectivity. A failure is deliberately non-fatal:
# the controller still starts, but /launch will 503 until it is fixed.
echo "[+] Verifying Kubernetes connectivity from metrics-proxy"
if "${DOCKER_COMPOSE_CMD[@]}" -f "${PROJECT_ROOT}/docker-compose.yml" exec -T metrics-proxy kubectl get ns >/dev/null 2>&1; then
  echo "[+] Kubernetes is reachable from containers."
else
  echo "[!] Warning: Kubernetes is NOT reachable from containers."
  echo "[!] The controller will return 503 on /launch until connectivity is fixed."
fi
echo "[+] Controller accessible at http://localhost:8000"
# BUGFIX: `compose up` without -d attaches in the foreground, but the old
# message claimed the controller was started "in background". Message now
# matches actual behavior (add -d instead if detaching was the intent).
echo "[+] Starting controller in foreground (Ctrl+C to stop)"
"${DOCKER_COMPOSE_CMD[@]}" -f "${PROJECT_ROOT}/docker-compose.yml" up controller