From 66381d21ebaf288c99bf2bc8e4927541decd1ebf Mon Sep 17 00:00:00 2001 From: palakchheda <42711310+palakchheda@users.noreply.github.com> Date: Fri, 30 Jan 2026 10:05:35 -0800 Subject: [PATCH 01/12] ECS and AKS deployment option added. Testing yet pending --- Access Broker/aks-deployment/Dockerfile | 41 ++ Access Broker/aks-deployment/README.md | 262 ++++++++++ Access Broker/aks-deployment/deploy.sh | 293 +++++++++++ Access Broker/aks-deployment/deployment.yaml | 267 ++++++++++ Access Broker/aks-deployment/supervisord.conf | 15 + .../ecs-fargate-deployment/Dockerfile | 56 +++ .../ecs-fargate-deployment/README.md | 385 ++++++++++++++ .../ecs-fargate-deployment/deploy.sh | 474 ++++++++++++++++++ .../ecs-fargate-deployment/start-broker.sh | 48 ++ .../ecs-fargate-deployment/supervisord.conf | 15 + .../task-definition.json | 56 +++ .../ecs-fargate-deployment/token-generator.sh | 4 + 12 files changed, 1916 insertions(+) create mode 100644 Access Broker/aks-deployment/Dockerfile create mode 100644 Access Broker/aks-deployment/README.md create mode 100755 Access Broker/aks-deployment/deploy.sh create mode 100644 Access Broker/aks-deployment/deployment.yaml create mode 100644 Access Broker/aks-deployment/supervisord.conf create mode 100644 Access Broker/ecs-fargate-deployment/Dockerfile create mode 100644 Access Broker/ecs-fargate-deployment/README.md create mode 100755 Access Broker/ecs-fargate-deployment/deploy.sh create mode 100755 Access Broker/ecs-fargate-deployment/start-broker.sh create mode 100644 Access Broker/ecs-fargate-deployment/supervisord.conf create mode 100644 Access Broker/ecs-fargate-deployment/task-definition.json create mode 100755 Access Broker/ecs-fargate-deployment/token-generator.sh diff --git a/Access Broker/aks-deployment/Dockerfile b/Access Broker/aks-deployment/Dockerfile new file mode 100644 index 0000000..76fccbc --- /dev/null +++ b/Access Broker/aks-deployment/Dockerfile @@ -0,0 +1,41 @@ +# Britive Access Broker - Azure AKS Deployment +# This 
Dockerfile creates a container image for running the Britive Access Broker +# on Azure Kubernetes Service (AKS) + +FROM --platform=linux/amd64 ubuntu:24.04 + +# Avoid prompts from apt +ENV DEBIAN_FRONTEND=noninteractive + +# Install required packages +RUN apt-get update && apt-get install -y \ + openjdk-21-jre-headless \ + curl \ + wget \ + unzip \ + ca-certificates \ + supervisor \ + jq \ + && rm -rf /var/lib/apt/lists/* + +# Install kubectl +RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \ + && chmod +x kubectl \ + && mv kubectl /usr/local/bin/ + +# Create directories +RUN mkdir -p /root/broker/config \ + && mkdir -p /root/broker/bootstrap \ + && mkdir -p /root/broker/cache \ + && mkdir -p /root/.kube \ + && mkdir -p /var/log/supervisor \ + && mkdir -p /var/run/sshd + +# Copy Britive broker JAR +COPY britive-broker-1.0.0.jar /root/broker/ + +# Copy supervisor configuration +COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf + +# Default command - start supervisor +CMD ["/usr/bin/supervisord", "-n", "-c", "/etc/supervisor/conf.d/supervisord.conf"] diff --git a/Access Broker/aks-deployment/README.md b/Access Broker/aks-deployment/README.md new file mode 100644 index 0000000..ea1ce15 --- /dev/null +++ b/Access Broker/aks-deployment/README.md @@ -0,0 +1,262 @@ +# Britive Access Broker - Azure AKS Deployment + +This directory contains everything needed to deploy the Britive Access Broker on Azure Kubernetes Service (AKS). + +## Overview + +The Britive Access Broker enables secure, just-in-time access to your Kubernetes clusters through the Britive platform. This deployment uses Azure Container Registry (ACR) to store the container image and AKS for orchestration. + +## Prerequisites + +Before deploying, ensure you have: + +1. 
**Azure CLI** installed and configured + ```bash + # Install Azure CLI + # macOS + brew install azure-cli + + # Linux + curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash + + # Login to Azure + az login + ``` + +2. **Docker** installed and running + ```bash + # Verify Docker is running + docker info + ``` + +3. **kubectl** installed and configured for your AKS cluster + ```bash + # Install kubectl + az aks install-cli + + # Get credentials for your AKS cluster + az aks get-credentials --resource-group --name + + # Verify connection + kubectl cluster-info + ``` + +4. **AKS Cluster** running in Azure + +5. **Britive Broker Pool Token** from the Britive console + - Navigate to: System Administration > Broker Pools + - Create a new pool or select an existing one + - Copy the broker pool token + +6. **britive-broker-1.0.0.jar** file in this directory + +## Quick Start + +### Option 1: Automated Deployment (Recommended) + +1. Copy the broker JAR file to this directory: + ```bash + cp /path/to/britive-broker-1.0.0.jar . + ``` + +2. Edit `deploy.sh` and set your configuration: + ```bash + BRITIVE_TOKEN="your-britive-token-here" + ACR_NAME="britivebroker" # Your ACR name + RESOURCE_GROUP="your-rg-name" # Your Azure resource group + ``` + +3. Run the deployment script: + ```bash + chmod +x deploy.sh + ./deploy.sh + ``` + +The script will: +- Validate all prerequisites +- Create ACR if it doesn't exist +- Attach ACR to your AKS cluster +- Build and push the Docker image +- Deploy the broker to AKS +- Wait for deployment completion + +### Option 2: Manual Deployment + +1. **Create Azure Container Registry** (if needed): + ```bash + az acr create --resource-group --name --sku Basic + ``` + +2. **Attach ACR to AKS**: + ```bash + az aks update --name --resource-group --attach-acr + ``` + +3. **Build and push the Docker image**: + ```bash + # Login to ACR + az acr login --name + + # Build image + docker build --platform linux/amd64 -t britive-broker:latest . 
+ + # Tag and push + docker tag britive-broker:latest .azurecr.io/britive-broker:latest + docker push .azurecr.io/britive-broker:latest + ``` + +4. **Update deployment.yaml**: + - Replace `YOUR_ACR_NAME.azurecr.io` with your actual ACR login server + - Replace `REPLACE_WITH_BASE64_TOKEN` with your base64-encoded token: + ```bash + echo -n "your-token" | base64 + ``` + +5. **Apply the deployment**: + ```bash + kubectl apply -f deployment.yaml + ``` + +## Configuration + +### Environment Variables + +| Variable | Description | Required | +|----------|-------------|----------| +| `BRITIVE_TOKEN` | Broker pool authentication token | Yes | +| `KUBECONFIG` | Path to kubeconfig (auto-configured) | No | + +### Resource Limits + +| Resource | Request | Limit | +|----------|---------|-------| +| Memory | 512Mi | 1Gi | +| CPU | 250m | 500m | + +### Replicas + +The default deployment creates 2 replicas for high availability. Modify `spec.replicas` in `deployment.yaml` to change this. + +## Files + +| File | Description | +|------|-------------| +| `deploy.sh` | Automated deployment script | +| `deployment.yaml` | Kubernetes manifests (ServiceAccount, RBAC, ConfigMap, Secret, Deployment, Service) | +| `Dockerfile` | Container image definition | +| `supervisord.conf` | Process supervisor configuration | +| `README.md` | This documentation | + +## Kubernetes Resources Created + +1. **ServiceAccount** (`britive-broker-sa`) - Identity for the broker pods +2. **Secret** (`britive-broker-sa-token`) - Auto-generated service account token +3. **ClusterRole** (`britive-broker-role`) - RBAC permissions for the broker +4. **ClusterRoleBinding** (`britive-broker-binding`) - Binds role to service account +5. **ConfigMap** (`britive-config`) - Configuration files and scripts +6. **Secret** (`britive-secrets`) - Britive authentication token +7. **Deployment** (`britive-broker`) - The broker pods +8. 
**Service** (`britive-broker-service`) - Internal cluster service + +## RBAC Permissions + +The broker requires the following Kubernetes permissions: + +| API Group | Resources | Verbs | +|-----------|-----------|-------| +| rbac.authorization.k8s.io | roles, rolebindings, clusterroles, clusterrolebindings | get, list, watch, create, update, patch, delete | +| "" (core) | serviceaccounts, namespaces | get, list, watch, create, update, patch, delete | + +These permissions enable the broker to manage access control for just-in-time access. + +## Monitoring & Troubleshooting + +### Check Deployment Status +```bash +kubectl get pods -l app=britive-broker +kubectl get deployment britive-broker +``` + +### View Logs +```bash +# All broker pods +kubectl logs -l app=britive-broker -f + +# Specific pod +kubectl logs -f + +# Previous container (if crashed) +kubectl logs --previous +``` + +### Describe Pod (for troubleshooting) +```bash +kubectl describe pod -l app=britive-broker +``` + +### Check Events +```bash +kubectl get events --sort-by='.lastTimestamp' | grep britive +``` + +### Verify ACR Access +```bash +# Check if AKS can pull from ACR +az aks check-acr --name --resource-group --acr +``` + +### Common Issues + +1. **ImagePullBackOff**: ACR not attached to AKS + ```bash + az aks update --name --resource-group --attach-acr + ``` + +2. **CrashLoopBackOff**: Check logs for Java errors + ```bash + kubectl logs -l app=britive-broker --previous + ``` + +3. 
**Pending Pods**: Check resource quotas and node capacity + ```bash + kubectl describe pod -l app=britive-broker + ``` + +## Cleanup + +To remove the deployment: + +```bash +kubectl delete -f deployment.yaml + +# Or delete individual resources +kubectl delete deployment britive-broker +kubectl delete service britive-broker-service +kubectl delete configmap britive-config +kubectl delete secret britive-secrets britive-broker-sa-token +kubectl delete clusterrolebinding britive-broker-binding +kubectl delete clusterrole britive-broker-role +kubectl delete serviceaccount britive-broker-sa +``` + +To also remove the ACR repository: +```bash +az acr repository delete --name --repository britive-broker --yes +``` + +## Security Considerations + +1. **Token Security**: The Britive token is stored as a Kubernetes Secret. Ensure your cluster has appropriate RBAC policies. + +2. **Network Policies**: Consider implementing network policies to restrict broker communication. + +3. **ACR Security**: Use Azure Private Link for ACR if your AKS cluster uses private networking. + +4. **Pod Security**: The broker runs as root for kubectl access. Consider pod security policies for additional hardening. + +## Support + +For issues with: +- **Britive Platform**: Contact Britive support +- **AKS/Azure**: Check Azure documentation or contact Azure support +- **This deployment**: Check the troubleshooting section above diff --git a/Access Broker/aks-deployment/deploy.sh b/Access Broker/aks-deployment/deploy.sh new file mode 100755 index 0000000..78af9ca --- /dev/null +++ b/Access Broker/aks-deployment/deploy.sh @@ -0,0 +1,293 @@ +#!/bin/bash + +# Britive Access Broker - Azure AKS Deployment Script +# This script automates the deployment of Britive Access Broker to Azure Kubernetes Service +# +# Prerequisites: +# 1. Azure CLI installed and configured (az login) +# 2. Docker installed and running +# 3. kubectl installed and configured for your AKS cluster +# 4. AKS cluster running +# 5. 
britive-broker-1.0.0.jar in current directory +# +# Usage: +# 1. Set BRITIVE_TOKEN below with your broker pool token from Britive console +# 2. Run: ./deploy.sh + +set -e + +#============================================================================== +# CONFIGURATION - MODIFY THESE VALUES +#============================================================================== + +# Your Britive broker pool token (required) +# Get this from: Britive Console > System Administration > Broker Pools > Create/Select Pool > Token +BRITIVE_TOKEN="your-britive-token-here" + +# Azure Container Registry name (will be created if it doesn't exist) +ACR_NAME="britivebroker" + +# Resource group for ACR (should match your AKS resource group or be accessible) +RESOURCE_GROUP="" + +# Image name +IMAGE_NAME="britive-broker" +IMAGE_TAG="latest" + +#============================================================================== +# DO NOT MODIFY BELOW THIS LINE +#============================================================================== + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +log_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +log_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Check if token is configured +if [ "$BRITIVE_TOKEN" == "your-britive-token-here" ]; then + log_error "Please set BRITIVE_TOKEN in this script before running" + log_info "Get your token from: Britive Console > System Administration > Broker Pools" + exit 1 +fi + +# Check for required files +log_info "Checking required files..." +if [ ! -f "britive-broker-1.0.0.jar" ]; then + log_error "britive-broker-1.0.0.jar not found in current directory" + log_info "Please copy the broker JAR file to this directory" + exit 1 +fi + +if [ ! 
-f "supervisord.conf" ]; then + log_error "supervisord.conf not found in current directory" + exit 1 +fi + +log_success "Required files found" + +# Check Azure CLI +log_info "Checking Azure CLI..." +if ! command -v az &> /dev/null; then + log_error "Azure CLI not found. Please install it:" + log_info " macOS: brew install azure-cli" + log_info " Linux: curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash" + log_info " Windows: Download from https://aka.ms/installazurecliwindows" + exit 1 +fi + +# Check if logged in to Azure +if ! az account show &> /dev/null; then + log_error "Not logged in to Azure. Please run: az login" + exit 1 +fi + +SUBSCRIPTION_ID=$(az account show --query id -o tsv) +SUBSCRIPTION_NAME=$(az account show --query name -o tsv) +log_success "Azure CLI configured - Subscription: $SUBSCRIPTION_NAME" + +# Check Docker +log_info "Checking Docker..." +if ! command -v docker &> /dev/null; then + log_error "Docker not found. Please install Docker Desktop" + exit 1 +fi + +if ! docker info &> /dev/null; then + log_error "Docker daemon not running. Please start Docker Desktop" + exit 1 +fi +log_success "Docker is running" + +# Check kubectl +log_info "Checking kubectl..." +if ! command -v kubectl &> /dev/null; then + log_error "kubectl not found. Please install kubectl" + exit 1 +fi + +# Verify kubectl context is AKS +CURRENT_CONTEXT=$(kubectl config current-context 2>/dev/null || echo "none") +log_info "Current kubectl context: $CURRENT_CONTEXT" + +if [[ ! "$CURRENT_CONTEXT" =~ ^[a-zA-Z0-9_-]+$ ]]; then + log_warning "Could not determine kubectl context" +fi + +# Try to get cluster info +if ! kubectl cluster-info &> /dev/null; then + log_error "Cannot connect to Kubernetes cluster. 
Please configure kubectl for your AKS cluster:" + log_info " az aks get-credentials --resource-group --name " + exit 1 +fi +log_success "kubectl connected to cluster" + +# Get or set resource group +if [ -z "$RESOURCE_GROUP" ]; then + log_info "Attempting to detect resource group from AKS cluster..." + # Try to get resource group from current context + if [[ "$CURRENT_CONTEXT" =~ ^([a-zA-Z0-9_-]+)$ ]]; then + # List AKS clusters to find matching one + AKS_INFO=$(az aks list --query "[?name=='$CURRENT_CONTEXT'] | [0]" -o json 2>/dev/null || echo "{}") + if [ "$AKS_INFO" != "{}" ] && [ "$AKS_INFO" != "null" ] && [ -n "$AKS_INFO" ]; then + RESOURCE_GROUP=$(echo "$AKS_INFO" | jq -r '.resourceGroup') + fi + fi + + if [ -z "$RESOURCE_GROUP" ] || [ "$RESOURCE_GROUP" == "null" ]; then + log_warning "Could not auto-detect resource group" + echo "" + echo "Available resource groups:" + az group list --query "[].name" -o tsv + echo "" + read -p "Enter the resource group name for ACR: " RESOURCE_GROUP + fi +fi + +log_info "Using resource group: $RESOURCE_GROUP" + +# Check/Create ACR +log_info "Checking Azure Container Registry..." +ACR_EXISTS=$(az acr show --name "$ACR_NAME" --resource-group "$RESOURCE_GROUP" --query name -o tsv 2>/dev/null || echo "") + +if [ -z "$ACR_EXISTS" ]; then + log_info "Creating Azure Container Registry: $ACR_NAME" + az acr create --resource-group "$RESOURCE_GROUP" --name "$ACR_NAME" --sku Basic + log_success "ACR created: $ACR_NAME" +else + log_success "ACR exists: $ACR_NAME" +fi + +# Get ACR login server +ACR_LOGIN_SERVER=$(az acr show --name "$ACR_NAME" --resource-group "$RESOURCE_GROUP" --query loginServer -o tsv) +log_info "ACR Login Server: $ACR_LOGIN_SERVER" + +# Attach ACR to AKS (if not already attached) +log_info "Ensuring ACR is attached to AKS cluster..." 
+AKS_NAME=$(kubectl config current-context) +az aks update --name "$AKS_NAME" --resource-group "$RESOURCE_GROUP" --attach-acr "$ACR_NAME" 2>/dev/null || \ + log_warning "Could not attach ACR to AKS. You may need to do this manually or ensure proper permissions." + +# Clean up existing resources +log_info "Cleaning up existing Kubernetes resources..." +kubectl delete deployment britive-broker --ignore-not-found=true +kubectl delete service britive-broker-service --ignore-not-found=true +kubectl delete configmap britive-config --ignore-not-found=true +kubectl delete secret britive-secrets --ignore-not-found=true +kubectl delete secret britive-broker-sa-token --ignore-not-found=true +kubectl delete clusterrolebinding britive-broker-binding --ignore-not-found=true +kubectl delete clusterrole britive-broker-role --ignore-not-found=true +kubectl delete serviceaccount britive-broker-sa --ignore-not-found=true +log_success "Cleanup complete" + +# Authenticate Docker with ACR +log_info "Authenticating Docker with ACR..." +az acr login --name "$ACR_NAME" +log_success "Docker authenticated with ACR" + +# Build Docker image +log_info "Building Docker image (AMD64 architecture)..." +docker build --platform linux/amd64 -t "$IMAGE_NAME:$IMAGE_TAG" . + +# Verify architecture +ARCH=$(docker inspect "$IMAGE_NAME:$IMAGE_TAG" --format '{{.Architecture}}') +log_info "Image architecture: $ARCH" + +if [ "$ARCH" != "amd64" ]; then + log_error "Image architecture is not amd64. AKS requires amd64 images." + exit 1 +fi +log_success "Image built successfully" + +# Test image locally +log_info "Testing image locally..." 
+CONTAINER_ID=$(docker run -d --name test-broker -e BRITIVE_TOKEN="test" "$IMAGE_NAME:$IMAGE_TAG") +sleep 5 + +if docker ps | grep -q test-broker; then + log_success "Container started successfully" + docker logs test-broker 2>&1 | head -20 || true +else + log_error "Container failed to start" + docker logs test-broker 2>&1 || true +fi + +docker stop test-broker 2>/dev/null || true +docker rm test-broker 2>/dev/null || true + +# Tag and push image +FULL_IMAGE_NAME="$ACR_LOGIN_SERVER/$IMAGE_NAME:$IMAGE_TAG" +log_info "Tagging image as: $FULL_IMAGE_NAME" +docker tag "$IMAGE_NAME:$IMAGE_TAG" "$FULL_IMAGE_NAME" + +log_info "Pushing image to ACR..." +docker push "$FULL_IMAGE_NAME" +log_success "Image pushed to ACR" + +# Update deployment.yaml with correct values +log_info "Updating deployment.yaml..." +ENCODED_TOKEN=$(echo -n "$BRITIVE_TOKEN" | base64) + +# Create a temporary deployment file +cp deployment.yaml deployment-temp.yaml + +# Update image URL +sed -i.bak "s|YOUR_ACR_NAME.azurecr.io/britive-broker:latest|$FULL_IMAGE_NAME|g" deployment-temp.yaml + +# Update token +sed -i.bak "s|REPLACE_WITH_BASE64_TOKEN|$ENCODED_TOKEN|g" deployment-temp.yaml + +# Clean up backup files +rm -f deployment-temp.yaml.bak + +log_success "Deployment manifest updated" + +# Apply deployment +log_info "Applying Kubernetes deployment..." +kubectl apply -f deployment-temp.yaml + +# Clean up temp file +rm -f deployment-temp.yaml + +# Wait for rollout +log_info "Waiting for deployment rollout..." +kubectl rollout status deployment/britive-broker --timeout=300s + +log_success "Deployment complete!" 
+ +# Show status +echo "" +echo "==============================================" +echo " DEPLOYMENT STATUS" +echo "==============================================" +echo "" +kubectl get pods -l app=britive-broker +echo "" +kubectl get deployment britive-broker +echo "" +kubectl get service britive-broker-service +echo "" +echo "==============================================" +echo "" +log_info "To view logs: kubectl logs -l app=britive-broker -f" +log_info "To check status: kubectl get pods -l app=britive-broker" +log_info "To describe pod: kubectl describe pod -l app=britive-broker" +echo "" +log_success "Britive Access Broker deployed to AKS!" diff --git a/Access Broker/aks-deployment/deployment.yaml b/Access Broker/aks-deployment/deployment.yaml new file mode 100644 index 0000000..f0490d0 --- /dev/null +++ b/Access Broker/aks-deployment/deployment.yaml @@ -0,0 +1,267 @@ +# Britive Access Broker - Azure AKS Deployment +# This manifest deploys the Britive Access Broker on Azure Kubernetes Service +# +# Prerequisites: +# 1. AKS cluster running and kubectl configured +# 2. Azure Container Registry (ACR) with the broker image +# 3. Britive broker pool token from Britive console +# +# Usage: +# 1. Run deploy.sh to build and deploy automatically, OR +# 2. 
Update image URL and token, then: kubectl apply -f deployment.yaml + +--- +# ServiceAccount for the broker +apiVersion: v1 +kind: ServiceAccount +metadata: + name: britive-broker-sa + namespace: default + +--- +# Secret for ServiceAccount token (auto-generated) +apiVersion: v1 +kind: Secret +metadata: + name: britive-broker-sa-token + namespace: default + annotations: + kubernetes.io/service-account.name: britive-broker-sa +type: kubernetes.io/service-account-token + +--- +# ClusterRole with required RBAC permissions +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: britive-broker-role +rules: + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["roles", "rolebindings", "clusterroles", "clusterrolebindings"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["serviceaccounts", "namespaces"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + +--- +# ClusterRoleBinding to grant permissions to ServiceAccount +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: britive-broker-binding +subjects: + - kind: ServiceAccount + name: britive-broker-sa + namespace: default +roleRef: + kind: ClusterRole + name: britive-broker-role + apiGroup: rbac.authorization.k8s.io + +--- +# ConfigMap with broker configuration and scripts +apiVersion: v1 +kind: ConfigMap +metadata: + name: britive-config + namespace: default +data: + broker-config.yml: | + config: + bootstrap: + tenant_subdomain: + authentication_token: "" + + token-generator.sh: | + #!/bin/bash + echo "$BRITIVE_TOKEN" + + start-broker.sh: | + #!/bin/bash + + # Signal handler for graceful shutdown + cleanup() { + echo "Received shutdown signal, cleaning up..." + if [ ! -z "$BROKER_PID" ]; then + kill -TERM "$BROKER_PID" 2>/dev/null + wait "$BROKER_PID" 2>/dev/null + fi + exit 0 + } + + trap cleanup SIGTERM SIGINT + + # Optional startup delay + if [ ! 
-z "$1" ]; then + echo "Waiting $1 seconds before starting..." + sleep $1 + fi + + echo "Starting Britive broker..." + cd /root/broker + java -Djavax.net.debug=all -jar britive-broker-1.0.0.jar >> /var/log/britive-broker.log 2>&1 & + BROKER_PID=$! + + echo "Broker started with PID: $BROKER_PID" + wait $BROKER_PID + + create-kubeconfig.sh: | + #!/bin/bash + + # Create kubeconfig from service account credentials + # This enables kubectl access from within the pod + + SERVICEACCOUNT=/var/run/secrets/kubernetes.io/serviceaccount + NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) + TOKEN=$(cat ${SERVICEACCOUNT}/token) + CACERT=${SERVICEACCOUNT}/ca.crt + + # Get the API server address + APISERVER=https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT} + + # Create kubeconfig + kubectl config set-cluster ${CLUSTER_NAME:-kubernetes} \ + --embed-certs=true \ + --server=${APISERVER} \ + --certificate-authority=${CACERT} \ + --kubeconfig=/root/.kube/config + + kubectl config set-credentials britive-broker-sa \ + --token=${TOKEN} \ + --kubeconfig=/root/.kube/config + + kubectl config set-context default \ + --cluster=${CLUSTER_NAME:-kubernetes} \ + --user=britive-broker-sa \ + --kubeconfig=/root/.kube/config + + kubectl config use-context default --kubeconfig=/root/.kube/config + + echo "Kubeconfig created at /root/.kube/config" + + # Test the configuration + kubectl auth can-i get pods --kubeconfig=/root/.kube/config + +--- +# Secret containing the Britive token +apiVersion: v1 +kind: Secret +metadata: + name: britive-secrets + namespace: default +type: Opaque +data: + # Base64-encoded Britive token (updated by deploy.sh) + britive-token: "REPLACE_WITH_BASE64_TOKEN" + +--- +# Deployment for the Britive broker +apiVersion: apps/v1 +kind: Deployment +metadata: + name: britive-broker + namespace: default + labels: + app: britive-broker +spec: + replicas: 2 + selector: + matchLabels: + app: britive-broker + template: + metadata: + labels: + app: britive-broker + spec: + 
serviceAccountName: britive-broker-sa + + # Init container to set up kubeconfig + initContainers: + - name: setup-kubeconfig + image: YOUR_ACR_NAME.azurecr.io/britive-broker:latest + command: + - /bin/bash + - -c + - | + mkdir -p /root/.kube + cp /config/create-kubeconfig.sh /root/create-kubeconfig.sh + chmod +x /root/create-kubeconfig.sh + /root/create-kubeconfig.sh + cp /root/.kube/config /kube-config/config + env: + - name: CLUSTER_NAME + value: "kubernetes" + volumeMounts: + - name: config + mountPath: /config + - name: kube-config + mountPath: /kube-config + + containers: + - name: britive-broker + image: YOUR_ACR_NAME.azurecr.io/britive-broker:latest + imagePullPolicy: Always + ports: + - containerPort: 22 + name: ssh + env: + - name: BRITIVE_TOKEN + valueFrom: + secretKeyRef: + name: britive-secrets + key: britive-token + - name: KUBECONFIG + value: "/root/.kube/config" + volumeMounts: + - name: config + mountPath: /root/broker/config/broker-config.yml + subPath: broker-config.yml + - name: config + mountPath: /root/broker/bootstrap/token-generator.sh + subPath: token-generator.sh + - name: config + mountPath: /root/start-broker.sh + subPath: start-broker.sh + - name: kube-config + mountPath: /root/.kube + resources: + requests: + memory: "512Mi" + cpu: "250m" + limits: + memory: "1Gi" + cpu: "500m" + livenessProbe: + exec: + command: + - pgrep + - -f + - java + initialDelaySeconds: 90 + periodSeconds: 30 + failureThreshold: 3 + + volumes: + - name: config + configMap: + name: britive-config + defaultMode: 0755 + - name: kube-config + emptyDir: {} + +--- +# Service to expose the broker (optional - enable if direct SSH access needed) +apiVersion: v1 +kind: Service +metadata: + name: britive-broker-service + namespace: default +spec: + selector: + app: britive-broker + ports: + - protocol: TCP + port: 22 + targetPort: 22 + name: ssh + type: ClusterIP diff --git a/Access Broker/aks-deployment/supervisord.conf b/Access Broker/aks-deployment/supervisord.conf new 
file mode 100644 index 0000000..816d0aa --- /dev/null +++ b/Access Broker/aks-deployment/supervisord.conf @@ -0,0 +1,15 @@ +[supervisord] +nodaemon=true +user=root +logfile=/var/log/supervisor/supervisord.log +pidfile=/var/run/supervisord.pid +childlogdir=/var/log/supervisor + +[program:britive-broker] +command=/root/start-broker.sh 5 +stdout_logfile=/var/log/supervisor/broker.log +stderr_logfile=/var/log/supervisor/broker.log +autostart=true +autorestart=true +environment=BRITIVE_TOKEN="%(ENV_BRITIVE_TOKEN)s" +stopasgroup=true diff --git a/Access Broker/ecs-fargate-deployment/Dockerfile b/Access Broker/ecs-fargate-deployment/Dockerfile new file mode 100644 index 0000000..623466e --- /dev/null +++ b/Access Broker/ecs-fargate-deployment/Dockerfile @@ -0,0 +1,56 @@ +# Britive Access Broker - AWS ECS Fargate Deployment +# This Dockerfile creates a container image for running the Britive Access Broker +# on AWS ECS Fargate (serverless container orchestration) + +FROM --platform=linux/amd64 ubuntu:24.04 + +# Avoid prompts from apt +ENV DEBIAN_FRONTEND=noninteractive + +# Install required packages +RUN apt-get update && apt-get install -y \ + openjdk-21-jre-headless \ + curl \ + wget \ + unzip \ + ca-certificates \ + supervisor \ + jq \ + && rm -rf /var/lib/apt/lists/* + +# Install kubectl (for managing external Kubernetes clusters) +RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \ + && chmod +x kubectl \ + && mv kubectl /usr/local/bin/ + +# Install AWS CLI v2 (for EKS cluster access if needed) +RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" \ + && unzip awscliv2.zip \ + && ./aws/install \ + && rm -rf awscliv2.zip aws + +# Create directories +RUN mkdir -p /root/broker/config \ + && mkdir -p /root/broker/bootstrap \ + && mkdir -p /root/broker/cache \ + && mkdir -p /root/.kube \ + && mkdir -p /var/log/supervisor \ + && mkdir -p /var/run/sshd + +# Copy Britive broker 
JAR +COPY britive-broker-1.0.0.jar /root/broker/ + +# Copy supervisor configuration +COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf + +# Copy startup scripts +COPY start-broker.sh /root/start-broker.sh +COPY token-generator.sh /root/broker/bootstrap/token-generator.sh +RUN chmod +x /root/start-broker.sh /root/broker/bootstrap/token-generator.sh + +# Health check - verify Java process is running +HEALTHCHECK --interval=30s --timeout=10s --start-period=90s --retries=3 \ + CMD pgrep -f java || exit 1 + +# Default command - start supervisor +CMD ["/usr/bin/supervisord", "-n", "-c", "/etc/supervisor/conf.d/supervisord.conf"] diff --git a/Access Broker/ecs-fargate-deployment/README.md b/Access Broker/ecs-fargate-deployment/README.md new file mode 100644 index 0000000..dac1389 --- /dev/null +++ b/Access Broker/ecs-fargate-deployment/README.md @@ -0,0 +1,385 @@ +# Britive Access Broker - AWS ECS Fargate Deployment + +This directory contains everything needed to deploy the Britive Access Broker on AWS ECS Fargate (serverless container orchestration). + +## Overview + +The Britive Access Broker enables secure, just-in-time access management through the Britive platform. This deployment uses AWS ECS Fargate for serverless container orchestration, eliminating the need to manage underlying EC2 instances. + +## Prerequisites + +Before deploying, ensure you have: + +1. **AWS CLI** installed and configured + ```bash + # Install AWS CLI + # macOS + brew install awscli + + # Linux + pip install awscli + + # Configure credentials + aws configure + ``` + +2. **Docker** installed and running + ```bash + # Verify Docker is running + docker info + ``` + +3. **Britive Broker Pool Token** from the Britive console + - Navigate to: System Administration > Broker Pools + - Create a new pool or select an existing one + - Copy the broker pool token + +4. **britive-broker-1.0.0.jar** file in this directory + +5. 
**VPC with subnets** (default VPC works, or specify custom VPC) + +## Quick Start + +### Option 1: Automated Deployment (Recommended) + +1. Copy the broker JAR file to this directory: + ```bash + cp /path/to/britive-broker-1.0.0.jar . + ``` + +2. Edit `deploy.sh` and set your configuration: + ```bash + BRITIVE_TOKEN="your-britive-token-here" + AWS_REGION="us-west-2" # Your AWS region + ECS_CLUSTER_NAME="britive-broker-cluster" # Cluster name + DESIRED_COUNT=2 # Number of tasks + ``` + +3. Run the deployment script: + ```bash + chmod +x deploy.sh + ./deploy.sh + ``` + +The script will: +- Validate all prerequisites +- Create ECR repository and push the container image +- Create IAM roles for ECS tasks +- Store the Britive token in AWS Secrets Manager +- Create CloudWatch log group +- Register the ECS task definition +- Create ECS cluster and service +- Deploy the specified number of tasks + +### Option 2: Manual Deployment + +1. **Create ECR Repository**: + ```bash + aws ecr create-repository --repository-name britive-broker + ``` + +2. **Build and push Docker image**: + ```bash + # Login to ECR + aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin .dkr.ecr.us-west-2.amazonaws.com + + # Build image + docker build --platform linux/amd64 -t britive-broker:latest . + + # Tag and push + docker tag britive-broker:latest .dkr.ecr.us-west-2.amazonaws.com/britive-broker:latest + docker push .dkr.ecr.us-west-2.amazonaws.com/britive-broker:latest + ``` + +3. **Create Secrets Manager secret**: + ```bash + aws secretsmanager create-secret \ + --name britive-broker/token \ + --secret-string '{"britive-token":"your-token-here"}' + ``` + +4. **Create IAM roles** (see IAM section below) + +5. **Update task-definition.json** with your values + +6. **Register task definition**: + ```bash + aws ecs register-task-definition --cli-input-json file://task-definition.json + ``` + +7. 
**Create cluster and service**: + ```bash + aws ecs create-cluster --cluster-name britive-broker-cluster + + aws ecs create-service \ + --cluster britive-broker-cluster \ + --service-name britive-broker-service \ + --task-definition britive-broker \ + --desired-count 2 \ + --launch-type FARGATE \ + --network-configuration "awsvpcConfiguration={subnets=[subnet-xxx],securityGroups=[sg-xxx],assignPublicIp=ENABLED}" + ``` + +## Configuration + +### Environment Variables + +| Variable | Description | Required | Source | +|----------|-------------|----------|--------| +| `BRITIVE_TOKEN` | Broker pool authentication token | Yes | Secrets Manager | +| `KUBECONFIG` | Path to kubeconfig | Auto | Container | +| `KUBECONFIG_BASE64` | Base64-encoded kubeconfig (for external clusters) | No | Task definition | +| `EKS_CLUSTER_NAME` | EKS cluster name (auto-configures kubectl) | No | Task definition | +| `AWS_REGION` | AWS region for EKS access | No | Task definition | + +### Resource Configuration + +| Resource | Value | Description | +|----------|-------|-------------| +| CPU | 512 (0.5 vCPU) | Fargate CPU units | +| Memory | 1024 MB | Fargate memory | +| Desired Count | 2 | Number of tasks | + +### Networking Requirements + +- **VPC**: Tasks run in your VPC +- **Subnets**: Specify subnets with internet access (for Britive connectivity) +- **Security Group**: Allow outbound HTTPS (443) for Britive API +- **Public IP**: Enabled by default (or use NAT Gateway for private subnets) + +## Files + +| File | Description | +|------|-------------| +| `deploy.sh` | Automated deployment script | +| `task-definition.json` | ECS task definition template | +| `Dockerfile` | Container image definition | +| `supervisord.conf` | Process supervisor configuration | +| `start-broker.sh` | Broker startup script | +| `token-generator.sh` | Token provider script | +| `README.md` | This documentation | + +## AWS Resources Created + +1. **ECR Repository** (`britive-broker`) - Container image storage +2. 
**ECS Cluster** (`britive-broker-cluster`) - Fargate cluster +3. **ECS Service** (`britive-broker-service`) - Manages broker tasks +4. **ECS Task Definition** (`britive-broker`) - Task configuration +5. **IAM Roles**: + - `ecsTaskExecutionRole` - For pulling images and accessing secrets + - `britive-broker-task-role` - For broker runtime permissions +6. **Secrets Manager Secret** (`britive-broker/token`) - Stores Britive token +7. **CloudWatch Log Group** (`/ecs/britive-broker`) - Container logs +8. **Security Group** (`britive-broker-sg`) - Network security + +## IAM Permissions + +### Task Execution Role (`ecsTaskExecutionRole`) +Required for ECS to pull images and retrieve secrets: +- `AmazonECSTaskExecutionRolePolicy` (AWS managed) +- Secrets Manager access for the Britive token + +### Task Role (`britive-broker-task-role`) +Permissions for the broker at runtime: +```json +{ + "Effect": "Allow", + "Action": [ + "eks:DescribeCluster", + "eks:ListClusters", + "sts:AssumeRole" + ], + "Resource": "*" +} +``` + +**Note**: If the broker needs to manage EKS clusters, ensure the task role can assume roles that have Kubernetes RBAC permissions on those clusters. + +## Managing External Kubernetes Clusters + +Since ECS Fargate isn't a Kubernetes environment, the broker can manage external Kubernetes clusters in two ways: + +### Option 1: EKS Clusters (Recommended for AWS) +Add environment variables to the task definition: +```json +{ + "name": "EKS_CLUSTER_NAME", + "value": "your-eks-cluster" +}, +{ + "name": "AWS_REGION", + "value": "us-west-2" +} +``` + +The startup script will automatically configure kubectl for the specified EKS cluster. 
+ +### Option 2: Any Kubernetes Cluster (via kubeconfig) +Provide a base64-encoded kubeconfig: +```json +{ + "name": "KUBECONFIG_BASE64", + "value": "base64-encoded-kubeconfig-here" +} +``` + +Generate the base64 value: +```bash +cat ~/.kube/config | base64 +``` + +## Monitoring & Troubleshooting + +### View Logs +```bash +# Stream logs +aws logs tail /ecs/britive-broker --follow --region us-west-2 + +# View recent logs +aws logs get-log-events \ + --log-group-name /ecs/britive-broker \ + --log-stream-name ecs/britive-broker/ \ + --region us-west-2 +``` + +### Check Service Status +```bash +# Describe service +aws ecs describe-services \ + --cluster britive-broker-cluster \ + --services britive-broker-service \ + --region us-west-2 + +# List running tasks +aws ecs list-tasks \ + --cluster britive-broker-cluster \ + --service-name britive-broker-service \ + --region us-west-2 + +# Describe specific task +aws ecs describe-tasks \ + --cluster britive-broker-cluster \ + --tasks \ + --region us-west-2 +``` + +### View in AWS Console +``` +https://us-west-2.console.aws.amazon.com/ecs/home?region=us-west-2#/clusters/britive-broker-cluster/services +``` + +### Common Issues + +1. **Task fails to start**: Check CloudWatch logs for Java errors + ```bash + aws logs tail /ecs/britive-broker --since 10m --region us-west-2 + ``` + +2. **Image pull failures**: Verify ECR repository and execution role permissions + ```bash + aws ecr describe-images --repository-name britive-broker --region us-west-2 + ``` + +3. **Secret access denied**: Check execution role has Secrets Manager permissions + +4. **Network timeout**: Ensure security group allows outbound HTTPS (443) + +5. 
**Task stuck in PENDING**: Check subnet has available IP addresses and internet access + +## Scaling + +### Manual Scaling +```bash +aws ecs update-service \ + --cluster britive-broker-cluster \ + --service britive-broker-service \ + --desired-count 4 \ + --region us-west-2 +``` + +### Auto Scaling (Optional) +Configure Application Auto Scaling for the ECS service: +```bash +# Register scalable target +aws application-autoscaling register-scalable-target \ + --service-namespace ecs \ + --resource-id service/britive-broker-cluster/britive-broker-service \ + --scalable-dimension ecs:service:DesiredCount \ + --min-capacity 2 \ + --max-capacity 10 + +# Create scaling policy (CPU-based) +aws application-autoscaling put-scaling-policy \ + --service-namespace ecs \ + --resource-id service/britive-broker-cluster/britive-broker-service \ + --scalable-dimension ecs:service:DesiredCount \ + --policy-name cpu-scaling \ + --policy-type TargetTrackingScaling \ + --target-tracking-scaling-policy-configuration file://scaling-policy.json +``` + +## Cleanup + +To remove the deployment: + +```bash +# Delete service (stops all tasks) +aws ecs update-service \ + --cluster britive-broker-cluster \ + --service britive-broker-service \ + --desired-count 0 \ + --region us-west-2 + +aws ecs delete-service \ + --cluster britive-broker-cluster \ + --service britive-broker-service \ + --region us-west-2 + +# Delete cluster +aws ecs delete-cluster --cluster britive-broker-cluster --region us-west-2 + +# Delete task definition (all revisions) +TASK_DEFS=$(aws ecs list-task-definitions --family-prefix britive-broker --query "taskDefinitionArns" --output text --region us-west-2) +for td in $TASK_DEFS; do + aws ecs deregister-task-definition --task-definition $td --region us-west-2 +done + +# Delete ECR repository +aws ecr delete-repository --repository-name britive-broker --force --region us-west-2 + +# Delete secret +aws secretsmanager delete-secret --secret-id britive-broker/token 
--force-delete-without-recovery --region us-west-2 + +# Delete log group +aws logs delete-log-group --log-group-name /ecs/britive-broker --region us-west-2 + +# Delete security group +aws ec2 delete-security-group --group-name britive-broker-sg --region us-west-2 + +# Delete IAM roles (if created by this deployment) +aws iam delete-role-policy --role-name britive-broker-task-role --policy-name britive-broker-policy +aws iam delete-role --role-name britive-broker-task-role +``` + +## Cost Optimization + +ECS Fargate pricing is based on vCPU and memory per second. To optimize costs: + +1. **Right-size tasks**: Start with 0.5 vCPU / 1GB and adjust based on metrics +2. **Use Fargate Spot**: Add `capacityProviderStrategy` for non-critical workloads +3. **Scale based on demand**: Implement auto-scaling to reduce tasks during low usage + +## Security Considerations + +1. **Secrets**: Britive token stored in Secrets Manager (encrypted at rest) +2. **Network**: Use private subnets with NAT Gateway for production +3. **IAM**: Follow least-privilege principle for task roles +4. **Logging**: CloudWatch logs are encrypted by default +5. **Image scanning**: Enable ECR image scanning for vulnerabilities + +## Support + +For issues with: +- **Britive Platform**: Contact Britive support +- **AWS ECS/Fargate**: Check AWS documentation or contact AWS support +- **This deployment**: Check the troubleshooting section above diff --git a/Access Broker/ecs-fargate-deployment/deploy.sh b/Access Broker/ecs-fargate-deployment/deploy.sh new file mode 100755 index 0000000..40d4c1a --- /dev/null +++ b/Access Broker/ecs-fargate-deployment/deploy.sh @@ -0,0 +1,474 @@ +#!/bin/bash + +# Britive Access Broker - AWS ECS Fargate Deployment Script +# This script automates the deployment of Britive Access Broker to AWS ECS Fargate +# +# Prerequisites: +# 1. AWS CLI installed and configured +# 2. Docker installed and running +# 3. britive-broker-1.0.0.jar in current directory +# +# Usage: +# 1. 
Set configuration variables below +# 2. Run: ./deploy.sh + +set -e + +#============================================================================== +# CONFIGURATION - MODIFY THESE VALUES +#============================================================================== + +# Your Britive broker pool token (required) +# Get this from: Britive Console > System Administration > Broker Pools > Create/Select Pool > Token +BRITIVE_TOKEN="your-britive-token-here" + +# AWS Configuration +AWS_REGION="${AWS_REGION:-us-west-2}" + +# ECS Configuration +ECS_CLUSTER_NAME="britive-broker-cluster" +ECS_SERVICE_NAME="britive-broker-service" +ECR_REPO_NAME="britive-broker" +TASK_FAMILY="britive-broker" + +# Networking - Set these to your VPC configuration +# Leave empty to auto-detect default VPC +VPC_ID="" +SUBNET_IDS="" # Comma-separated subnet IDs (e.g., "subnet-xxx,subnet-yyy") +SECURITY_GROUP_ID="" # Security group for the tasks + +# Number of tasks (replicas) +DESIRED_COUNT=2 + +#============================================================================== +# DO NOT MODIFY BELOW THIS LINE +#============================================================================== + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +log_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +log_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Check if token is configured +if [ "$BRITIVE_TOKEN" == "your-britive-token-here" ]; then + log_error "Please set BRITIVE_TOKEN in this script before running" + log_info "Get your token from: Britive Console > System Administration > Broker Pools" + exit 1 +fi + +# Check for required files +log_info "Checking required files..." 
+REQUIRED_FILES=("britive-broker-1.0.0.jar" "supervisord.conf" "start-broker.sh" "token-generator.sh" "task-definition.json") +for file in "${REQUIRED_FILES[@]}"; do + if [ ! -f "$file" ]; then + log_error "$file not found in current directory" + exit 1 + fi +done +log_success "Required files found" + +# Check AWS CLI +log_info "Checking AWS CLI..." +if ! command -v aws &> /dev/null; then + log_error "AWS CLI not found. Please install it:" + log_info " macOS: brew install awscli" + log_info " Linux: pip install awscli" + exit 1 +fi + +# Check AWS credentials +if ! aws sts get-caller-identity &> /dev/null; then + log_error "AWS credentials not configured. Please run: aws configure" + exit 1 +fi + +AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) +log_success "AWS CLI configured - Account: $AWS_ACCOUNT_ID, Region: $AWS_REGION" + +# Check Docker +log_info "Checking Docker..." +if ! command -v docker &> /dev/null; then + log_error "Docker not found. Please install Docker Desktop" + exit 1 +fi + +if ! docker info &> /dev/null; then + log_error "Docker daemon not running. Please start Docker Desktop" + exit 1 +fi +log_success "Docker is running" + +# Auto-detect VPC if not specified +if [ -z "$VPC_ID" ]; then + log_info "Auto-detecting default VPC..." + VPC_ID=$(aws ec2 describe-vpcs --filters "Name=isDefault,Values=true" --query "Vpcs[0].VpcId" --output text --region "$AWS_REGION" 2>/dev/null || echo "") + + if [ -z "$VPC_ID" ] || [ "$VPC_ID" == "None" ]; then + log_error "No default VPC found. Please set VPC_ID in this script." + exit 1 + fi + log_success "Using default VPC: $VPC_ID" +fi + +# Auto-detect subnets if not specified +if [ -z "$SUBNET_IDS" ]; then + log_info "Auto-detecting subnets..." + SUBNET_IDS=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=$VPC_ID" --query "Subnets[*].SubnetId" --output text --region "$AWS_REGION" | tr '\t' ',') + + if [ -z "$SUBNET_IDS" ]; then + log_error "No subnets found in VPC. 
Please set SUBNET_IDS in this script." + exit 1 + fi + log_success "Using subnets: $SUBNET_IDS" +fi + +# Create security group if not specified +if [ -z "$SECURITY_GROUP_ID" ]; then + log_info "Creating security group for Britive broker..." + + # Check if security group already exists + EXISTING_SG=$(aws ec2 describe-security-groups --filters "Name=group-name,Values=britive-broker-sg" "Name=vpc-id,Values=$VPC_ID" --query "SecurityGroups[0].GroupId" --output text --region "$AWS_REGION" 2>/dev/null || echo "None") + + if [ "$EXISTING_SG" != "None" ] && [ -n "$EXISTING_SG" ]; then + SECURITY_GROUP_ID="$EXISTING_SG" + log_success "Using existing security group: $SECURITY_GROUP_ID" + else + SECURITY_GROUP_ID=$(aws ec2 create-security-group \ + --group-name "britive-broker-sg" \ + --description "Security group for Britive Access Broker" \ + --vpc-id "$VPC_ID" \ + --query "GroupId" \ + --output text \ + --region "$AWS_REGION") + + # Allow outbound traffic (default) + log_info "Security group created: $SECURITY_GROUP_ID" + fi +fi + +# Create ECR repository +log_info "Setting up ECR repository..." +ECR_REPO_URI="${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_REPO_NAME}" + +if ! aws ecr describe-repositories --repository-names "$ECR_REPO_NAME" --region "$AWS_REGION" &> /dev/null; then + log_info "Creating ECR repository: $ECR_REPO_NAME" + aws ecr create-repository --repository-name "$ECR_REPO_NAME" --region "$AWS_REGION" > /dev/null + log_success "ECR repository created" +else + log_success "ECR repository exists" +fi + +# Authenticate Docker with ECR +log_info "Authenticating Docker with ECR..." +aws ecr get-login-password --region "$AWS_REGION" | docker login --username AWS --password-stdin "${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com" +log_success "Docker authenticated with ECR" + +# Build Docker image +log_info "Building Docker image (AMD64 architecture)..." +docker build --platform linux/amd64 -t "$ECR_REPO_NAME:latest" . 
+ +# Verify architecture +ARCH=$(docker inspect "$ECR_REPO_NAME:latest" --format '{{.Architecture}}') +log_info "Image architecture: $ARCH" + +if [ "$ARCH" != "amd64" ]; then + log_error "Image architecture is not amd64. Fargate requires amd64 images." + exit 1 +fi +log_success "Image built successfully" + +# Test image locally +log_info "Testing image locally..." +CONTAINER_ID=$(docker run -d --name test-broker -e BRITIVE_TOKEN="test" "$ECR_REPO_NAME:latest") +sleep 5 + +if docker ps | grep -q test-broker; then + log_success "Container started successfully" + docker logs test-broker 2>&1 | head -20 || true +else + log_error "Container failed to start" + docker logs test-broker 2>&1 || true +fi + +docker stop test-broker 2>/dev/null || true +docker rm test-broker 2>/dev/null || true + +# Tag and push image +log_info "Pushing image to ECR..." +docker tag "$ECR_REPO_NAME:latest" "$ECR_REPO_URI:latest" +docker push "$ECR_REPO_URI:latest" +log_success "Image pushed to ECR" + +# Create CloudWatch log group +log_info "Setting up CloudWatch log group..." +aws logs create-log-group --log-group-name "/ecs/britive-broker" --region "$AWS_REGION" 2>/dev/null || true +log_success "CloudWatch log group ready" + +# Create or get IAM roles +log_info "Setting up IAM roles..." + +# ECS Task Execution Role +EXECUTION_ROLE_NAME="ecsTaskExecutionRole" +EXECUTION_ROLE_ARN="arn:aws:iam::${AWS_ACCOUNT_ID}:role/${EXECUTION_ROLE_NAME}" + +if ! aws iam get-role --role-name "$EXECUTION_ROLE_NAME" &> /dev/null; then + log_info "Creating ECS task execution role..." 
+ + cat > /tmp/trust-policy.json << 'EOF' +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "ecs-tasks.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} +EOF + + aws iam create-role \ + --role-name "$EXECUTION_ROLE_NAME" \ + --assume-role-policy-document file:///tmp/trust-policy.json > /dev/null + + aws iam attach-role-policy \ + --role-name "$EXECUTION_ROLE_NAME" \ + --policy-arn "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy" + + rm /tmp/trust-policy.json + log_success "Execution role created" +else + log_success "Execution role exists" +fi + +# Create Task Role for Britive broker +TASK_ROLE_NAME="britive-broker-task-role" +TASK_ROLE_ARN="arn:aws:iam::${AWS_ACCOUNT_ID}:role/${TASK_ROLE_NAME}" + +if ! aws iam get-role --role-name "$TASK_ROLE_NAME" &> /dev/null; then + log_info "Creating Britive broker task role..." + + cat > /tmp/trust-policy.json << 'EOF' +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "ecs-tasks.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} +EOF + + aws iam create-role \ + --role-name "$TASK_ROLE_NAME" \ + --assume-role-policy-document file:///tmp/trust-policy.json > /dev/null + + # Create policy for EKS access (if broker needs to manage EKS clusters) + cat > /tmp/broker-policy.json << 'EOF' +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "eks:DescribeCluster", + "eks:ListClusters" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "sts:AssumeRole" + ], + "Resource": "*" + } + ] +} +EOF + + aws iam put-role-policy \ + --role-name "$TASK_ROLE_NAME" \ + --policy-name "britive-broker-policy" \ + --policy-document file:///tmp/broker-policy.json + + rm /tmp/trust-policy.json /tmp/broker-policy.json + log_success "Task role created" +else + log_success "Task role exists" +fi + +# Create Secrets Manager secret for Britive token +log_info 
"Setting up Secrets Manager secret..." +SECRET_NAME="britive-broker/token" +SECRET_ARN="" + +if aws secretsmanager describe-secret --secret-id "$SECRET_NAME" --region "$AWS_REGION" &> /dev/null; then + log_info "Updating existing secret..." + aws secretsmanager update-secret \ + --secret-id "$SECRET_NAME" \ + --secret-string "{\"britive-token\":\"$BRITIVE_TOKEN\"}" \ + --region "$AWS_REGION" > /dev/null + SECRET_ARN=$(aws secretsmanager describe-secret --secret-id "$SECRET_NAME" --query "ARN" --output text --region "$AWS_REGION") +else + log_info "Creating new secret..." + SECRET_ARN=$(aws secretsmanager create-secret \ + --name "$SECRET_NAME" \ + --description "Britive Access Broker token" \ + --secret-string "{\"britive-token\":\"$BRITIVE_TOKEN\"}" \ + --query "ARN" \ + --output text \ + --region "$AWS_REGION") +fi +log_success "Secret configured: $SECRET_ARN" + +# Add Secrets Manager permission to execution role +log_info "Adding Secrets Manager permissions to execution role..." +cat > /tmp/secrets-policy.json << EOF +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "secretsmanager:GetSecretValue" + ], + "Resource": "$SECRET_ARN" + } + ] +} +EOF + +aws iam put-role-policy \ + --role-name "$EXECUTION_ROLE_NAME" \ + --policy-name "britive-secrets-access" \ + --policy-document file:///tmp/secrets-policy.json 2>/dev/null || true + +rm /tmp/secrets-policy.json + +# Update task definition +log_info "Preparing task definition..." 
+cp task-definition.json task-definition-deploy.json + +sed -i.bak "s|REPLACE_WITH_ECR_IMAGE|$ECR_REPO_URI:latest|g" task-definition-deploy.json +sed -i.bak "s|REPLACE_WITH_EXECUTION_ROLE_ARN|$EXECUTION_ROLE_ARN|g" task-definition-deploy.json +sed -i.bak "s|REPLACE_WITH_TASK_ROLE_ARN|$TASK_ROLE_ARN|g" task-definition-deploy.json +sed -i.bak "s|REPLACE_WITH_SECRET_ARN|$SECRET_ARN|g" task-definition-deploy.json +sed -i.bak "s|REPLACE_WITH_REGION|$AWS_REGION|g" task-definition-deploy.json + +rm -f task-definition-deploy.json.bak + +log_success "Task definition prepared" + +# Register task definition +log_info "Registering task definition..." +TASK_DEFINITION_ARN=$(aws ecs register-task-definition \ + --cli-input-json file://task-definition-deploy.json \ + --query "taskDefinition.taskDefinitionArn" \ + --output text \ + --region "$AWS_REGION") + +rm -f task-definition-deploy.json +log_success "Task definition registered: $TASK_DEFINITION_ARN" + +# Create ECS cluster if it doesn't exist +log_info "Setting up ECS cluster..." +if ! aws ecs describe-clusters --clusters "$ECS_CLUSTER_NAME" --query "clusters[?status=='ACTIVE']" --output text --region "$AWS_REGION" | grep -q "$ECS_CLUSTER_NAME"; then + log_info "Creating ECS cluster: $ECS_CLUSTER_NAME" + aws ecs create-cluster --cluster-name "$ECS_CLUSTER_NAME" --region "$AWS_REGION" > /dev/null + log_success "ECS cluster created" +else + log_success "ECS cluster exists" +fi + +# Convert subnet IDs to JSON array +SUBNET_ARRAY=$(echo "$SUBNET_IDS" | tr ',' '\n' | head -2 | jq -R . | jq -s .) + +# Create or update ECS service +log_info "Deploying ECS service..." + +SERVICE_EXISTS=$(aws ecs describe-services --cluster "$ECS_CLUSTER_NAME" --services "$ECS_SERVICE_NAME" --query "services[?status=='ACTIVE'].serviceName" --output text --region "$AWS_REGION" 2>/dev/null || echo "") + +if [ -n "$SERVICE_EXISTS" ]; then + log_info "Updating existing service..." 
+ aws ecs update-service \ + --cluster "$ECS_CLUSTER_NAME" \ + --service "$ECS_SERVICE_NAME" \ + --task-definition "$TASK_DEFINITION_ARN" \ + --desired-count "$DESIRED_COUNT" \ + --force-new-deployment \ + --region "$AWS_REGION" > /dev/null +else + log_info "Creating new service..." + aws ecs create-service \ + --cluster "$ECS_CLUSTER_NAME" \ + --service-name "$ECS_SERVICE_NAME" \ + --task-definition "$TASK_DEFINITION_ARN" \ + --desired-count "$DESIRED_COUNT" \ + --launch-type FARGATE \ + --network-configuration "awsvpcConfiguration={subnets=$SUBNET_ARRAY,securityGroups=[\"$SECURITY_GROUP_ID\"],assignPublicIp=ENABLED}" \ + --region "$AWS_REGION" > /dev/null +fi + +log_success "Service deployed" + +# Wait for service to stabilize +log_info "Waiting for service to stabilize..." +aws ecs wait services-stable --cluster "$ECS_CLUSTER_NAME" --services "$ECS_SERVICE_NAME" --region "$AWS_REGION" || \ + log_warning "Service may still be stabilizing. Check AWS console for status." + +log_success "Deployment complete!" 
+ +# Show status +echo "" +echo "==============================================" +echo " DEPLOYMENT STATUS" +echo "==============================================" +echo "" +echo "Cluster: $ECS_CLUSTER_NAME" +echo "Service: $ECS_SERVICE_NAME" +echo "Task Def: $TASK_DEFINITION_ARN" +echo "Region: $AWS_REGION" +echo "" + +# Get running tasks +log_info "Running tasks:" +aws ecs list-tasks --cluster "$ECS_CLUSTER_NAME" --service-name "$ECS_SERVICE_NAME" --query "taskArns" --output table --region "$AWS_REGION" + +echo "" +echo "==============================================" +echo "" +log_info "To view logs:" +log_info " aws logs tail /ecs/britive-broker --follow --region $AWS_REGION" +echo "" +log_info "To check service status:" +log_info " aws ecs describe-services --cluster $ECS_CLUSTER_NAME --services $ECS_SERVICE_NAME --region $AWS_REGION" +echo "" +log_info "To view in AWS Console:" +log_info " https://${AWS_REGION}.console.aws.amazon.com/ecs/home?region=${AWS_REGION}#/clusters/${ECS_CLUSTER_NAME}/services/${ECS_SERVICE_NAME}/details" +echo "" +log_success "Britive Access Broker deployed to ECS Fargate!" diff --git a/Access Broker/ecs-fargate-deployment/start-broker.sh b/Access Broker/ecs-fargate-deployment/start-broker.sh new file mode 100755 index 0000000..e8bdc18 --- /dev/null +++ b/Access Broker/ecs-fargate-deployment/start-broker.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +# Britive Access Broker startup script for ECS Fargate +# Handles graceful shutdown and logging + +# Signal handler for graceful shutdown +cleanup() { + echo "Received shutdown signal, cleaning up..." + if [ ! -z "$BROKER_PID" ]; then + kill -TERM "$BROKER_PID" 2>/dev/null + wait "$BROKER_PID" 2>/dev/null + fi + exit 0 +} + +trap cleanup SIGTERM SIGINT + +# Optional startup delay (default 5 seconds) +DELAY=${1:-5} +if [ "$DELAY" -gt 0 ]; then + echo "Waiting $DELAY seconds before starting..." + sleep $DELAY +fi + +# Setup kubeconfig from environment if provided +if [ ! 
-z "$KUBECONFIG_BASE64" ]; then
+    echo "Setting up kubeconfig from environment..."
+    mkdir -p /root/.kube
+    echo "$KUBECONFIG_BASE64" | base64 -d > /root/.kube/config
+    chmod 600 /root/.kube/config
+    export KUBECONFIG=/root/.kube/config
+    echo "Kubeconfig configured"
+fi
+
+# Setup kubeconfig for EKS cluster if EKS variables provided
+if [ ! -z "$EKS_CLUSTER_NAME" ] && [ ! -z "$AWS_REGION" ]; then
+    echo "Configuring kubectl for EKS cluster: $EKS_CLUSTER_NAME"
+    aws eks update-kubeconfig --name "$EKS_CLUSTER_NAME" --region "$AWS_REGION"
+    echo "EKS kubeconfig configured"
+fi
+
+echo "Starting Britive broker..."
+cd /root/broker
+java -jar britive-broker-1.0.0.jar >> /var/log/britive-broker.log 2>&1 &
+BROKER_PID=$!
+
+echo "Broker started with PID: $BROKER_PID"
+wait "$BROKER_PID"
diff --git a/Access Broker/ecs-fargate-deployment/supervisord.conf b/Access Broker/ecs-fargate-deployment/supervisord.conf
new file mode 100644
index 0000000..816d0aa
--- /dev/null
+++ b/Access Broker/ecs-fargate-deployment/supervisord.conf
@@ -0,0 +1,15 @@
+[supervisord]
+nodaemon=true
+user=root
+logfile=/var/log/supervisor/supervisord.log
+pidfile=/var/run/supervisord.pid
+childlogdir=/var/log/supervisor
+
+[program:britive-broker]
+command=/root/start-broker.sh 5
+stdout_logfile=/var/log/supervisor/broker.log
+stderr_logfile=/var/log/supervisor/broker.log
+autostart=true
+autorestart=true
+environment=BRITIVE_TOKEN="%(ENV_BRITIVE_TOKEN)s"
+stopasgroup=true
diff --git a/Access Broker/ecs-fargate-deployment/task-definition.json b/Access Broker/ecs-fargate-deployment/task-definition.json
new file mode 100644
index 0000000..743c6c1
--- /dev/null
+++ b/Access Broker/ecs-fargate-deployment/task-definition.json
@@ -0,0 +1,56 @@
+{
+  "family": "britive-broker",
+  "networkMode": "awsvpc",
+  "requiresCompatibilities": ["FARGATE"],
+  "cpu": "512",
+  "memory": "1024",
+  "executionRoleArn": "REPLACE_WITH_EXECUTION_ROLE_ARN",
+  "taskRoleArn": "REPLACE_WITH_TASK_ROLE_ARN",
+  
"containerDefinitions": [ + { + "name": "britive-broker", + "image": "REPLACE_WITH_ECR_IMAGE", + "essential": true, + "portMappings": [ + { + "containerPort": 22, + "protocol": "tcp" + } + ], + "environment": [ + { + "name": "KUBECONFIG", + "value": "/root/.kube/config" + } + ], + "secrets": [ + { + "name": "BRITIVE_TOKEN", + "valueFrom": "REPLACE_WITH_SECRET_ARN:britive-token::" + } + ], + "logConfiguration": { + "logDriver": "awslogs", + "options": { + "awslogs-group": "/ecs/britive-broker", + "awslogs-region": "REPLACE_WITH_REGION", + "awslogs-stream-prefix": "ecs" + } + }, + "healthCheck": { + "command": ["CMD-SHELL", "pgrep -f java || exit 1"], + "interval": 30, + "timeout": 10, + "retries": 3, + "startPeriod": 90 + }, + "linuxParameters": { + "initProcessEnabled": true + } + } + ], + "runtimePlatform": { + "cpuArchitecture": "X86_64", + "operatingSystemFamily": "LINUX" + } +} diff --git a/Access Broker/ecs-fargate-deployment/token-generator.sh b/Access Broker/ecs-fargate-deployment/token-generator.sh new file mode 100755 index 0000000..1d5fbce --- /dev/null +++ b/Access Broker/ecs-fargate-deployment/token-generator.sh @@ -0,0 +1,4 @@ +#!/bin/bash +# Token generator script for Britive broker +# Returns the BRITIVE_TOKEN environment variable +echo "$BRITIVE_TOKEN" From d42a501ee91844dec8ecc43b8be42c842bf57cbb Mon Sep 17 00:00:00 2001 From: palakchheda <42711310+palakchheda@users.noreply.github.com> Date: Mon, 2 Feb 2026 11:26:09 -0800 Subject: [PATCH 02/12] ecs SM support added --- Access Broker/aks-deployment/README.md | 30 +- .../ecs-fargate-deployment/Dockerfile | 4 +- .../ecs-fargate-deployment/README.md | 283 +++++++---- .../ecs-fargate-deployment/deploy.sh | 167 +++++-- .../ecs-fargate-deployment/manage-secrets.sh | 439 ++++++++++++++++++ .../ecs-fargate-deployment/secrets.json | 24 + .../ecs-fargate-deployment/start-broker.sh | 30 +- .../task-definition.json | 9 +- 8 files changed, 862 insertions(+), 124 deletions(-) create mode 100644 Access 
Broker/ecs-fargate-deployment/manage-secrets.sh create mode 100644 Access Broker/ecs-fargate-deployment/secrets.json diff --git a/Access Broker/aks-deployment/README.md b/Access Broker/aks-deployment/README.md index ea1ce15..91e2366 100644 --- a/Access Broker/aks-deployment/README.md +++ b/Access Broker/aks-deployment/README.md @@ -11,6 +11,7 @@ The Britive Access Broker enables secure, just-in-time access to your Kubernetes Before deploying, ensure you have: 1. **Azure CLI** installed and configured + ```bash # Install Azure CLI # macOS @@ -24,12 +25,14 @@ Before deploying, ensure you have: ``` 2. **Docker** installed and running + ```bash # Verify Docker is running docker info ``` 3. **kubectl** installed and configured for your AKS cluster + ```bash # Install kubectl az aks install-cli @@ -55,11 +58,13 @@ Before deploying, ensure you have: ### Option 1: Automated Deployment (Recommended) 1. Copy the broker JAR file to this directory: + ```bash cp /path/to/britive-broker-1.0.0.jar . ``` 2. Edit `deploy.sh` and set your configuration: + ```bash BRITIVE_TOKEN="your-britive-token-here" ACR_NAME="britivebroker" # Your ACR name @@ -67,12 +72,14 @@ Before deploying, ensure you have: ``` 3. Run the deployment script: + ```bash chmod +x deploy.sh ./deploy.sh ``` The script will: + - Validate all prerequisites - Create ACR if it doesn't exist - Attach ACR to your AKS cluster @@ -83,16 +90,19 @@ The script will: ### Option 2: Manual Deployment 1. **Create Azure Container Registry** (if needed): + ```bash az acr create --resource-group --name --sku Basic ``` 2. **Attach ACR to AKS**: + ```bash az aks update --name --resource-group --attach-acr ``` 3. **Build and push the Docker image**: + ```bash # Login to ACR az acr login --name @@ -108,11 +118,13 @@ The script will: 4. 
**Update deployment.yaml**: - Replace `YOUR_ACR_NAME.azurecr.io` with your actual ACR login server - Replace `REPLACE_WITH_BASE64_TOKEN` with your base64-encoded token: + ```bash echo -n "your-token" | base64 ``` 5. **Apply the deployment**: + ```bash kubectl apply -f deployment.yaml ``` @@ -140,7 +152,7 @@ The default deployment creates 2 replicas for high availability. Modify `spec.re ## Files | File | Description | -|------|-------------| +| ------ | ------------- | | `deploy.sh` | Automated deployment script | | `deployment.yaml` | Kubernetes manifests (ServiceAccount, RBAC, ConfigMap, Secret, Deployment, Service) | | `Dockerfile` | Container image definition | @@ -172,12 +184,14 @@ These permissions enable the broker to manage access control for just-in-time ac ## Monitoring & Troubleshooting ### Check Deployment Status + ```bash kubectl get pods -l app=britive-broker kubectl get deployment britive-broker ``` ### View Logs + ```bash # All broker pods kubectl logs -l app=britive-broker -f @@ -190,16 +204,19 @@ kubectl logs --previous ``` ### Describe Pod (for troubleshooting) + ```bash kubectl describe pod -l app=britive-broker ``` ### Check Events + ```bash kubectl get events --sort-by='.lastTimestamp' | grep britive ``` ### Verify ACR Access + ```bash # Check if AKS can pull from ACR az aks check-acr --name --resource-group --acr @@ -208,16 +225,19 @@ az aks check-acr --name --resource-group --acr ### Common Issues 1. **ImagePullBackOff**: ACR not attached to AKS + ```bash az aks update --name --resource-group --attach-acr ``` 2. **CrashLoopBackOff**: Check logs for Java errors + ```bash kubectl logs -l app=britive-broker --previous ``` 3. 
**Pending Pods**: Check resource quotas and node capacity + ```bash kubectl describe pod -l app=britive-broker ``` @@ -240,6 +260,7 @@ kubectl delete serviceaccount britive-broker-sa ``` To also remove the ACR repository: + ```bash az acr repository delete --name --repository britive-broker --yes ``` @@ -253,10 +274,3 @@ az acr repository delete --name --repository britive-broker --yes 3. **ACR Security**: Use Azure Private Link for ACR if your AKS cluster uses private networking. 4. **Pod Security**: The broker runs as root for kubectl access. Consider pod security policies for additional hardening. - -## Support - -For issues with: -- **Britive Platform**: Contact Britive support -- **AKS/Azure**: Check Azure documentation or contact Azure support -- **This deployment**: Check the troubleshooting section above diff --git a/Access Broker/ecs-fargate-deployment/Dockerfile b/Access Broker/ecs-fargate-deployment/Dockerfile index 623466e..9606873 100644 --- a/Access Broker/ecs-fargate-deployment/Dockerfile +++ b/Access Broker/ecs-fargate-deployment/Dockerfile @@ -33,9 +33,11 @@ RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2 RUN mkdir -p /root/broker/config \ && mkdir -p /root/broker/bootstrap \ && mkdir -p /root/broker/cache \ + && mkdir -p /root/broker/secrets \ && mkdir -p /root/.kube \ && mkdir -p /var/log/supervisor \ - && mkdir -p /var/run/sshd + && mkdir -p /var/run/sshd \ + && chmod 700 /root/broker/secrets # Copy Britive broker JAR COPY britive-broker-1.0.0.jar /root/broker/ diff --git a/Access Broker/ecs-fargate-deployment/README.md b/Access Broker/ecs-fargate-deployment/README.md index dac1389..55340bc 100644 --- a/Access Broker/ecs-fargate-deployment/README.md +++ b/Access Broker/ecs-fargate-deployment/README.md @@ -6,11 +6,18 @@ This directory contains everything needed to deploy the Britive Access Broker on The Britive Access Broker enables secure, just-in-time access management through the Britive platform. 
This deployment uses AWS ECS Fargate for serverless container orchestration, eliminating the need to manage underlying EC2 instances. +**Key Features:** +- All secrets stored securely in AWS Secrets Manager +- Support for multiple secrets with easy management +- Secrets available as environment variables and files at runtime +- Easy secret rotation without code changes + ## Prerequisites Before deploying, ensure you have: 1. **AWS CLI** installed and configured + ```bash # Install AWS CLI # macOS @@ -24,118 +31,213 @@ Before deploying, ensure you have: ``` 2. **Docker** installed and running + ```bash # Verify Docker is running docker info ``` -3. **Britive Broker Pool Token** from the Britive console +3. **jq** installed (for JSON processing) + + ```bash + # macOS + brew install jq + + # Linux + apt install jq + ``` + +4. **Britive Broker Pool Token** from the Britive console - Navigate to: System Administration > Broker Pools - Create a new pool or select an existing one - Copy the broker pool token -4. **britive-broker-1.0.0.jar** file in this directory +5. **britive-broker-1.0.0.jar** file in this directory -5. **VPC with subnets** (default VPC works, or specify custom VPC) +6. **VPC with subnets** (default VPC works, or specify custom VPC) ## Quick Start -### Option 1: Automated Deployment (Recommended) +### Option 1: Using secrets.json (Recommended) 1. Copy the broker JAR file to this directory: + ```bash cp /path/to/britive-broker-1.0.0.jar . ``` -2. Edit `deploy.sh` and set your configuration: - ```bash - BRITIVE_TOKEN="your-britive-token-here" - AWS_REGION="us-west-2" # Your AWS region - ECS_CLUSTER_NAME="britive-broker-cluster" # Cluster name - DESIRED_COUNT=2 # Number of tasks +2. 
Edit `secrets.json` and configure your secrets: + + ```json + { + "secrets": { + "BRITIVE_TOKEN": { + "description": "Britive broker pool authentication token", + "value": "your-actual-token-here", + "required": true, + "inject_as": "env" + } + }, + "custom_secrets": { + "MY_API_KEY": "optional-api-key-value" + } + } ``` 3. Run the deployment script: + ```bash - chmod +x deploy.sh + chmod +x deploy.sh manage-secrets.sh ./deploy.sh ``` -The script will: -- Validate all prerequisites -- Create ECR repository and push the container image -- Create IAM roles for ECS tasks -- Store the Britive token in AWS Secrets Manager -- Create CloudWatch log group -- Register the ECS task definition -- Create ECS cluster and service -- Deploy the specified number of tasks +### Option 2: Direct Token Configuration -### Option 2: Manual Deployment +1. Copy the broker JAR file: -1. **Create ECR Repository**: ```bash - aws ecr create-repository --repository-name britive-broker + cp /path/to/britive-broker-1.0.0.jar . ``` -2. **Build and push Docker image**: +2. Edit `deploy.sh` and set your token directly: + ```bash - # Login to ECR - aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin .dkr.ecr.us-west-2.amazonaws.com + BRITIVE_TOKEN="your-britive-token-here" + AWS_REGION="us-west-2" + ``` + +3. Run the deployment: + + ```bash + chmod +x deploy.sh + ./deploy.sh + ``` + +## Secrets Management + +All secrets are stored in AWS Secrets Manager under the prefix `britive-broker/secrets/`. 
This provides: + +- **Encryption at rest** using AWS KMS +- **Audit logging** via CloudTrail +- **Fine-grained IAM access control** +- **Easy rotation** without redeployment + +### Managing Secrets with manage-secrets.sh + +The `manage-secrets.sh` script provides a CLI for managing secrets: + +```bash +# Make the script executable +chmod +x manage-secrets.sh + +# List all secrets +./manage-secrets.sh list + +# Add or update a secret +./manage-secrets.sh set MY_SECRET "secret-value" "Description of the secret" + +# Get a secret value +./manage-secrets.sh get MY_SECRET + +# Delete a secret +./manage-secrets.sh delete MY_SECRET + +# Sync secrets from secrets.json to AWS +./manage-secrets.sh sync + +# Restart ECS tasks to pick up new secrets +./manage-secrets.sh restart-tasks + +# Update IAM permissions for all secrets +./manage-secrets.sh update-iam +``` + +### Adding New Secrets - # Build image - docker build --platform linux/amd64 -t britive-broker:latest . +**Method 1: Using the CLI** - # Tag and push - docker tag britive-broker:latest .dkr.ecr.us-west-2.amazonaws.com/britive-broker:latest - docker push .dkr.ecr.us-west-2.amazonaws.com/britive-broker:latest +```bash +# Add a new secret +./manage-secrets.sh set DATABASE_PASSWORD "my-db-password" "Database credentials" + +# Update IAM permissions (if deploy.sh hasn't been run recently) +./manage-secrets.sh update-iam + +# Restart tasks to load new secrets +./manage-secrets.sh restart-tasks +``` + +**Method 2: Using secrets.json** + +1. Edit `secrets.json`: + + ```json + { + "secrets": { + "BRITIVE_TOKEN": { + "description": "Britive broker pool token", + "value": "your-token", + "required": true, + "inject_as": "env" + } + }, + "custom_secrets": { + "DATABASE_PASSWORD": "my-db-password", + "API_KEY": "my-api-key" + } + } ``` -3. **Create Secrets Manager secret**: +2. 
Sync and restart: + ```bash - aws secretsmanager create-secret \ - --name britive-broker/token \ - --secret-string '{"britive-token":"your-token-here"}' + ./manage-secrets.sh sync + ./manage-secrets.sh restart-tasks ``` -4. **Create IAM roles** (see IAM section below) +### Accessing Secrets at Runtime -5. **Update task-definition.json** with your values +Secrets are available to the broker in two ways: + +1. **Environment Variables**: All secrets are injected as environment variables with their key names -6. **Register task definition**: ```bash - aws ecs register-task-definition --cli-input-json file://task-definition.json + # In the container + echo $BRITIVE_TOKEN + echo $DATABASE_PASSWORD ``` -7. **Create cluster and service**: +2. **Files**: Secrets are also written to `/root/broker/secrets/` directory + ```bash - aws ecs create-cluster --cluster-name britive-broker-cluster - - aws ecs create-service \ - --cluster britive-broker-cluster \ - --service-name britive-broker-service \ - --task-definition britive-broker \ - --desired-count 2 \ - --launch-type FARGATE \ - --network-configuration "awsvpcConfiguration={subnets=[subnet-xxx],securityGroups=[sg-xxx],assignPublicIp=ENABLED}" + # In the container + cat /root/broker/secrets/BRITIVE_TOKEN + cat /root/broker/secrets/DATABASE_PASSWORD ``` +### Secret Naming Conventions + +- **Standard secrets**: Use uppercase with underscores (e.g., `BRITIVE_TOKEN`, `API_KEY`) +- **Custom broker secrets**: Prefix with `BROKER_` for automatic file writing (e.g., `BROKER_CUSTOM_CONFIG`) +- **AWS Secrets Manager path**: `britive-broker/secrets/` + ## Configuration ### Environment Variables | Variable | Description | Required | Source | -|----------|-------------|----------|--------| +| -------- | ----------- | -------- | ------ | | `BRITIVE_TOKEN` | Broker pool authentication token | Yes | Secrets Manager | | `KUBECONFIG` | Path to kubeconfig | Auto | Container | -| `KUBECONFIG_BASE64` | Base64-encoded kubeconfig (for external 
clusters) | No | Task definition | +| `KUBECONFIG_BASE64` | Base64-encoded kubeconfig (for external clusters) | No | Secrets Manager | | `EKS_CLUSTER_NAME` | EKS cluster name (auto-configures kubectl) | No | Task definition | | `AWS_REGION` | AWS region for EKS access | No | Task definition | +| `SECRETS_DIR` | Directory for file-based secrets | Auto | `/root/broker/secrets` | ### Resource Configuration | Resource | Value | Description | -|----------|-------|-------------| +| -------- | ----- | ----------- | | CPU | 512 (0.5 vCPU) | Fargate CPU units | | Memory | 1024 MB | Fargate memory | | Desired Count | 2 | Number of tasks | @@ -150,8 +252,10 @@ The script will: ## Files | File | Description | -|------|-------------| +| ---- | ----------- | | `deploy.sh` | Automated deployment script | +| `manage-secrets.sh` | Secrets management CLI | +| `secrets.json` | Secrets configuration file | | `task-definition.json` | ECS task definition template | | `Dockerfile` | Container image definition | | `supervisord.conf` | Process supervisor configuration | @@ -168,19 +272,23 @@ The script will: 5. **IAM Roles**: - `ecsTaskExecutionRole` - For pulling images and accessing secrets - `britive-broker-task-role` - For broker runtime permissions -6. **Secrets Manager Secret** (`britive-broker/token`) - Stores Britive token +6. **Secrets Manager Secrets** (`britive-broker/secrets/*`) - All configured secrets 7. **CloudWatch Log Group** (`/ecs/britive-broker`) - Container logs 8. 
**Security Group** (`britive-broker-sg`) - Network security ## IAM Permissions ### Task Execution Role (`ecsTaskExecutionRole`) + Required for ECS to pull images and retrieve secrets: + - `AmazonECSTaskExecutionRolePolicy` (AWS managed) -- Secrets Manager access for the Britive token +- Secrets Manager access for all `britive-broker/secrets/*` secrets ### Task Role (`britive-broker-task-role`) + Permissions for the broker at runtime: + ```json { "Effect": "Allow", @@ -200,7 +308,9 @@ Permissions for the broker at runtime: Since ECS Fargate isn't a Kubernetes environment, the broker can manage external Kubernetes clusters in two ways: ### Option 1: EKS Clusters (Recommended for AWS) + Add environment variables to the task definition: + ```json { "name": "EKS_CLUSTER_NAME", @@ -215,22 +325,24 @@ Add environment variables to the task definition: The startup script will automatically configure kubectl for the specified EKS cluster. ### Option 2: Any Kubernetes Cluster (via kubeconfig) -Provide a base64-encoded kubeconfig: -```json -{ - "name": "KUBECONFIG_BASE64", - "value": "base64-encoded-kubeconfig-here" -} -``` -Generate the base64 value: +Store the kubeconfig as a secret: + ```bash -cat ~/.kube/config | base64 +# Base64 encode your kubeconfig +KUBECONFIG_B64=$(cat ~/.kube/config | base64) + +# Store in Secrets Manager +./manage-secrets.sh set KUBECONFIG_BASE64 "$KUBECONFIG_B64" "Kubeconfig for external cluster" + +# Restart tasks +./manage-secrets.sh restart-tasks ``` ## Monitoring & Troubleshooting ### View Logs + ```bash # Stream logs aws logs tail /ecs/britive-broker --follow --region us-west-2 @@ -243,6 +355,7 @@ aws logs get-log-events \ ``` ### Check Service Status + ```bash # Describe service aws ecs describe-services \ @@ -264,6 +377,7 @@ aws ecs describe-tasks \ ``` ### View in AWS Console + ``` https://us-west-2.console.aws.amazon.com/ecs/home?region=us-west-2#/clusters/britive-broker-cluster/services ``` @@ -271,24 +385,37 @@ 
https://us-west-2.console.aws.amazon.com/ecs/home?region=us-west-2#/clusters/bri ### Common Issues 1. **Task fails to start**: Check CloudWatch logs for Java errors + ```bash aws logs tail /ecs/britive-broker --since 10m --region us-west-2 ``` 2. **Image pull failures**: Verify ECR repository and execution role permissions + ```bash aws ecr describe-images --repository-name britive-broker --region us-west-2 ``` 3. **Secret access denied**: Check execution role has Secrets Manager permissions + ```bash + ./manage-secrets.sh update-iam + ``` + 4. **Network timeout**: Ensure security group allows outbound HTTPS (443) 5. **Task stuck in PENDING**: Check subnet has available IP addresses and internet access +6. **Secrets not updating**: Force restart tasks after changing secrets + + ```bash + ./manage-secrets.sh restart-tasks + ``` + ## Scaling ### Manual Scaling + ```bash aws ecs update-service \ --cluster britive-broker-cluster \ @@ -298,7 +425,9 @@ aws ecs update-service \ ``` ### Auto Scaling (Optional) + Configure Application Auto Scaling for the ECS service: + ```bash # Register scalable target aws application-autoscaling register-scalable-target \ @@ -347,8 +476,11 @@ done # Delete ECR repository aws ecr delete-repository --repository-name britive-broker --force --region us-west-2 -# Delete secret -aws secretsmanager delete-secret --secret-id britive-broker/token --force-delete-without-recovery --region us-west-2 +# Delete all secrets +SECRETS=$(aws secretsmanager list-secrets --filter Key=name,Values="britive-broker/secrets" --query "SecretList[*].Name" --output text --region us-west-2) +for secret in $SECRETS; do + aws secretsmanager delete-secret --secret-id $secret --force-delete-without-recovery --region us-west-2 +done # Delete log group aws logs delete-log-group --log-group-name /ecs/britive-broker --region us-west-2 @@ -371,15 +503,10 @@ ECS Fargate pricing is based on vCPU and memory per second. To optimize costs: ## Security Considerations -1. 
**Secrets**: Britive token stored in Secrets Manager (encrypted at rest) -2. **Network**: Use private subnets with NAT Gateway for production -3. **IAM**: Follow least-privilege principle for task roles -4. **Logging**: CloudWatch logs are encrypted by default -5. **Image scanning**: Enable ECR image scanning for vulnerabilities - -## Support - -For issues with: -- **Britive Platform**: Contact Britive support -- **AWS ECS/Fargate**: Check AWS documentation or contact AWS support -- **This deployment**: Check the troubleshooting section above +1. **Secrets**: All secrets stored in AWS Secrets Manager (encrypted at rest with KMS) +2. **No local secrets**: Secrets are never stored in code, environment files, or container images +3. **Network**: Use private subnets with NAT Gateway for production +4. **IAM**: Follow least-privilege principle for task roles +5. **Logging**: CloudWatch logs are encrypted by default +6. **Image scanning**: Enable ECR image scanning for vulnerabilities +7. **Secret rotation**: Update secrets via `manage-secrets.sh` and restart tasks diff --git a/Access Broker/ecs-fargate-deployment/deploy.sh b/Access Broker/ecs-fargate-deployment/deploy.sh index 40d4c1a..61af70b 100755 --- a/Access Broker/ecs-fargate-deployment/deploy.sh +++ b/Access Broker/ecs-fargate-deployment/deploy.sh @@ -18,10 +18,16 @@ set -e # CONFIGURATION - MODIFY THESE VALUES #============================================================================== -# Your Britive broker pool token (required) -# Get this from: Britive Console > System Administration > Broker Pools > Create/Select Pool > Token +# Secrets Configuration +# Option 1: Set BRITIVE_TOKEN directly here (for simple deployments) +# Option 2: Use secrets.json file for multiple secrets (recommended) +# +# Get your Britive token from: Britive Console > System Administration > Broker Pools > Create/Select Pool > Token BRITIVE_TOKEN="your-britive-token-here" +# Secrets are stored in AWS Secrets Manager under this prefix 
+SECRETS_PREFIX="britive-broker/secrets" + # AWS Configuration AWS_REGION="${AWS_REGION:-us-west-2}" @@ -85,6 +91,13 @@ for file in "${REQUIRED_FILES[@]}"; do done log_success "Required files found" +# Check for secrets.json (optional but recommended) +USE_SECRETS_FILE=false +if [ -f "secrets.json" ]; then + log_success "Found secrets.json - will use for secrets configuration" + USE_SECRETS_FILE=true +fi + # Check AWS CLI log_info "Checking AWS CLI..." if ! command -v aws &> /dev/null; then @@ -322,31 +335,94 @@ else log_success "Task role exists" fi -# Create Secrets Manager secret for Britive token -log_info "Setting up Secrets Manager secret..." -SECRET_NAME="britive-broker/token" -SECRET_ARN="" +# Create Secrets Manager secrets +log_info "Setting up Secrets Manager secrets..." + +# Array to track all secret ARNs for IAM policy +declare -a SECRET_ARNS +declare -a SECRET_NAMES + +# Function to create or update a secret +create_or_update_secret() { + local secret_key="$1" + local secret_value="$2" + local description="$3" + local full_secret_name="${SECRETS_PREFIX}/${secret_key}" + + if aws secretsmanager describe-secret --secret-id "$full_secret_name" --region "$AWS_REGION" &> /dev/null; then + log_info "Updating secret: $secret_key" + aws secretsmanager update-secret \ + --secret-id "$full_secret_name" \ + --secret-string "$secret_value" \ + --region "$AWS_REGION" > /dev/null + else + log_info "Creating secret: $secret_key" + aws secretsmanager create-secret \ + --name "$full_secret_name" \ + --description "$description" \ + --secret-string "$secret_value" \ + --region "$AWS_REGION" > /dev/null + fi + + # Get the ARN + local arn=$(aws secretsmanager describe-secret --secret-id "$full_secret_name" --query "ARN" --output text --region "$AWS_REGION") + SECRET_ARNS+=("$arn") + SECRET_NAMES+=("$secret_key") + log_success "Secret configured: $secret_key" +} -if aws secretsmanager describe-secret --secret-id "$SECRET_NAME" --region "$AWS_REGION" &> /dev/null; then - 
log_info "Updating existing secret..." - aws secretsmanager update-secret \ - --secret-id "$SECRET_NAME" \ - --secret-string "{\"britive-token\":\"$BRITIVE_TOKEN\"}" \ - --region "$AWS_REGION" > /dev/null - SECRET_ARN=$(aws secretsmanager describe-secret --secret-id "$SECRET_NAME" --query "ARN" --output text --region "$AWS_REGION") +# Process secrets from secrets.json if available +if [ "$USE_SECRETS_FILE" = true ]; then + log_info "Processing secrets from secrets.json..." + + # Process main secrets + SECRETS_JSON=$(cat secrets.json) + + # Extract and create each secret from the secrets object + echo "$SECRETS_JSON" | jq -r '.secrets | to_entries[] | select(.value.value != "" and .value.value != "your-britive-token-here") | @base64' | while read -r entry; do + key=$(echo "$entry" | base64 -d | jq -r '.key') + value=$(echo "$entry" | base64 -d | jq -r '.value.value') + desc=$(echo "$entry" | base64 -d | jq -r '.value.description // "Britive broker secret"') + + if [ -n "$value" ] && [ "$value" != "null" ]; then + create_or_update_secret "$key" "$value" "$desc" + fi + done + + # Process custom secrets + echo "$SECRETS_JSON" | jq -r '.custom_secrets | to_entries[] | select(.value != "" and .value != null) | @base64' | while read -r entry; do + key=$(echo "$entry" | base64 -d | jq -r '.key') + value=$(echo "$entry" | base64 -d | jq -r '.value') + + if [ -n "$value" ] && [ "$value" != "null" ]; then + create_or_update_secret "$key" "$value" "Custom secret for Britive broker" + fi + done else - log_info "Creating new secret..." 
- SECRET_ARN=$(aws secretsmanager create-secret \ - --name "$SECRET_NAME" \ - --description "Britive Access Broker token" \ - --secret-string "{\"britive-token\":\"$BRITIVE_TOKEN\"}" \ - --query "ARN" \ - --output text \ - --region "$AWS_REGION") + # Fallback: Use BRITIVE_TOKEN from script configuration + if [ "$BRITIVE_TOKEN" != "your-britive-token-here" ]; then + create_or_update_secret "BRITIVE_TOKEN" "$BRITIVE_TOKEN" "Britive Access Broker token" + fi fi -log_success "Secret configured: $SECRET_ARN" -# Add Secrets Manager permission to execution role +# Get all secrets for this deployment (in case some were created outside this run) +ALL_SECRET_ARNS=$(aws secretsmanager list-secrets \ + --filter Key=name,Values="${SECRETS_PREFIX}" \ + --query "SecretList[*].ARN" \ + --output text \ + --region "$AWS_REGION" 2>/dev/null | tr '\t' '\n') + +if [ -z "$ALL_SECRET_ARNS" ]; then + log_error "No secrets found. Please configure BRITIVE_TOKEN or secrets.json" + exit 1 +fi + +# Build JSON array of all secret ARNs +ARN_ARRAY=$(echo "$ALL_SECRET_ARNS" | jq -R . | jq -s .) + +log_success "Total secrets configured: $(echo "$ALL_SECRET_ARNS" | wc -l | tr -d ' ')" + +# Add Secrets Manager permission to execution role for all secrets log_info "Adding Secrets Manager permissions to execution role..." cat > /tmp/secrets-policy.json << EOF { @@ -357,7 +433,7 @@ cat > /tmp/secrets-policy.json << EOF "Action": [ "secretsmanager:GetSecretValue" ], - "Resource": "$SECRET_ARN" + "Resource": ${ARN_ARRAY} } ] } @@ -374,15 +450,44 @@ rm /tmp/secrets-policy.json log_info "Preparing task definition..." 
cp task-definition.json task-definition-deploy.json -sed -i.bak "s|REPLACE_WITH_ECR_IMAGE|$ECR_REPO_URI:latest|g" task-definition-deploy.json -sed -i.bak "s|REPLACE_WITH_EXECUTION_ROLE_ARN|$EXECUTION_ROLE_ARN|g" task-definition-deploy.json -sed -i.bak "s|REPLACE_WITH_TASK_ROLE_ARN|$TASK_ROLE_ARN|g" task-definition-deploy.json -sed -i.bak "s|REPLACE_WITH_SECRET_ARN|$SECRET_ARN|g" task-definition-deploy.json -sed -i.bak "s|REPLACE_WITH_REGION|$AWS_REGION|g" task-definition-deploy.json +# Build secrets array for task definition +# Each secret in Secrets Manager becomes an environment variable +SECRETS_JSON_ARRAY="[" +first_secret=true + +for secret_arn in $ALL_SECRET_ARNS; do + # Extract secret name from ARN (last part after the colon) + secret_name=$(echo "$secret_arn" | sed 's/.*:secret://' | sed 's/-.*//' | sed "s|${SECRETS_PREFIX}/||") + # Get the full secret name (without the random suffix) + full_name=$(aws secretsmanager describe-secret --secret-id "$secret_arn" --query "Name" --output text --region "$AWS_REGION" 2>/dev/null) + # Extract just the key name from the path + key_name="${full_name#${SECRETS_PREFIX}/}" + + if [ "$first_secret" = true ]; then + first_secret=false + else + SECRETS_JSON_ARRAY+="," + fi -rm -f task-definition-deploy.json.bak + SECRETS_JSON_ARRAY+="{\"name\":\"${key_name}\",\"valueFrom\":\"${secret_arn}\"}" +done -log_success "Task definition prepared" +SECRETS_JSON_ARRAY+="]" + +# Use jq to properly inject secrets array into task definition +jq --argjson secrets "$SECRETS_JSON_ARRAY" \ + --arg image "$ECR_REPO_URI:latest" \ + --arg exec_role "$EXECUTION_ROLE_ARN" \ + --arg task_role "$TASK_ROLE_ARN" \ + --arg region "$AWS_REGION" \ + '.executionRoleArn = $exec_role | + .taskRoleArn = $task_role | + .containerDefinitions[0].image = $image | + .containerDefinitions[0].secrets = $secrets | + .containerDefinitions[0].logConfiguration.options["awslogs-region"] = $region' \ + task-definition.json > task-definition-deploy.json + +log_success 
"Task definition prepared with $(echo "$ALL_SECRET_ARNS" | wc -l | tr -d ' ') secrets" # Register task definition log_info "Registering task definition..." diff --git a/Access Broker/ecs-fargate-deployment/manage-secrets.sh b/Access Broker/ecs-fargate-deployment/manage-secrets.sh new file mode 100644 index 0000000..9af9f0b --- /dev/null +++ b/Access Broker/ecs-fargate-deployment/manage-secrets.sh @@ -0,0 +1,439 @@ +#!/bin/bash + +# Britive Access Broker - Secrets Management Script +# This script manages secrets in AWS Secrets Manager for the ECS Fargate deployment +# +# Usage: +# ./manage-secrets.sh [options] +# +# Commands: +# list - List all secrets in the britive-broker namespace +# get - Get a secret value +# set - Create or update a secret +# delete - Delete a secret +# sync - Sync secrets from secrets.json to AWS Secrets Manager +# export - Export current AWS secrets to secrets.json format +# restart-tasks - Force restart ECS tasks to pick up new secrets + +set -e + +#============================================================================== +# CONFIGURATION +#============================================================================== + +AWS_REGION="${AWS_REGION:-us-west-2}" +SECRETS_PREFIX="britive-broker/secrets" +ECS_CLUSTER_NAME="${ECS_CLUSTER_NAME:-britive-broker-cluster}" +ECS_SERVICE_NAME="${ECS_SERVICE_NAME:-britive-broker-service}" + +#============================================================================== +# Colors for output +#============================================================================== + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +log_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +log_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +#============================================================================== +# Functions 
+#============================================================================== + +check_prerequisites() { + if ! command -v aws &> /dev/null; then + log_error "AWS CLI not found. Please install it first." + exit 1 + fi + + if ! aws sts get-caller-identity &> /dev/null; then + log_error "AWS credentials not configured. Please run: aws configure" + exit 1 + fi + + if ! command -v jq &> /dev/null; then + log_error "jq not found. Please install it: brew install jq (macOS) or apt install jq (Linux)" + exit 1 + fi +} + +list_secrets() { + log_info "Listing secrets in AWS Secrets Manager (prefix: ${SECRETS_PREFIX})..." + echo "" + + # Get all secrets with the prefix + SECRETS=$(aws secretsmanager list-secrets \ + --filter Key=name,Values="${SECRETS_PREFIX}" \ + --query "SecretList[*].[Name,Description,LastChangedDate]" \ + --output text \ + --region "$AWS_REGION" 2>/dev/null || echo "") + + if [ -z "$SECRETS" ]; then + log_warning "No secrets found with prefix: ${SECRETS_PREFIX}" + echo "" + echo "To create secrets, use:" + echo " ./manage-secrets.sh set " + echo " ./manage-secrets.sh sync (to sync from secrets.json)" + return + fi + + echo "----------------------------------------------------------------------" + printf "%-40s %-25s %s\n" "SECRET NAME" "LAST MODIFIED" "DESCRIPTION" + echo "----------------------------------------------------------------------" + + echo "$SECRETS" | while IFS=$'\t' read -r name desc modified; do + # Extract the short name (remove prefix) + short_name="${name#${SECRETS_PREFIX}/}" + # Format the date if available + if [ "$modified" != "None" ] && [ -n "$modified" ]; then + mod_date=$(echo "$modified" | cut -d'T' -f1) + else + mod_date="N/A" + fi + printf "%-40s %-25s %s\n" "$short_name" "$mod_date" "${desc:-N/A}" + done + echo "----------------------------------------------------------------------" +} + +get_secret() { + local secret_name="$1" + + if [ -z "$secret_name" ]; then + log_error "Usage: ./manage-secrets.sh get " + exit 1 + fi + 
+ local full_secret_name="${SECRETS_PREFIX}/${secret_name}" + + log_info "Retrieving secret: $secret_name" + + SECRET_VALUE=$(aws secretsmanager get-secret-value \ + --secret-id "$full_secret_name" \ + --query "SecretString" \ + --output text \ + --region "$AWS_REGION" 2>/dev/null) + + if [ $? -ne 0 ] || [ -z "$SECRET_VALUE" ]; then + log_error "Secret not found: $secret_name" + exit 1 + fi + + echo "" + echo "Secret: $secret_name" + echo "Value: $SECRET_VALUE" + echo "" +} + +set_secret() { + local secret_name="$1" + local secret_value="$2" + local description="${3:-Britive broker secret}" + + if [ -z "$secret_name" ] || [ -z "$secret_value" ]; then + log_error "Usage: ./manage-secrets.sh set [description]" + exit 1 + fi + + local full_secret_name="${SECRETS_PREFIX}/${secret_name}" + + # Check if secret exists + if aws secretsmanager describe-secret --secret-id "$full_secret_name" --region "$AWS_REGION" &> /dev/null; then + log_info "Updating existing secret: $secret_name" + aws secretsmanager update-secret \ + --secret-id "$full_secret_name" \ + --secret-string "$secret_value" \ + --region "$AWS_REGION" > /dev/null + log_success "Secret updated: $secret_name" + else + log_info "Creating new secret: $secret_name" + aws secretsmanager create-secret \ + --name "$full_secret_name" \ + --description "$description" \ + --secret-string "$secret_value" \ + --region "$AWS_REGION" > /dev/null + log_success "Secret created: $secret_name" + fi + + echo "" + log_warning "Note: ECS tasks need to be restarted to pick up the new secret value." + echo "Run: ./manage-secrets.sh restart-tasks" +} + +delete_secret() { + local secret_name="$1" + + if [ -z "$secret_name" ]; then + log_error "Usage: ./manage-secrets.sh delete " + exit 1 + fi + + local full_secret_name="${SECRETS_PREFIX}/${secret_name}" + + log_warning "This will permanently delete secret: $secret_name" + read -p "Are you sure? 
(y/N): " confirm + + if [ "$confirm" != "y" ] && [ "$confirm" != "Y" ]; then + log_info "Cancelled." + exit 0 + fi + + aws secretsmanager delete-secret \ + --secret-id "$full_secret_name" \ + --force-delete-without-recovery \ + --region "$AWS_REGION" > /dev/null + + log_success "Secret deleted: $secret_name" +} + +sync_secrets() { + local secrets_file="${1:-secrets.json}" + + if [ ! -f "$secrets_file" ]; then + log_error "Secrets file not found: $secrets_file" + log_info "Create a secrets.json file or specify the path: ./manage-secrets.sh sync " + exit 1 + fi + + log_info "Syncing secrets from: $secrets_file" + echo "" + + # Track created/updated secrets for IAM policy + declare -a SECRET_ARNS + + # Process main secrets + SECRETS=$(jq -r '.secrets | to_entries[] | select(.value.value != "" and .value.value != "your-britive-token-here") | @json' "$secrets_file" 2>/dev/null) + + if [ -n "$SECRETS" ]; then + echo "$SECRETS" | while read -r entry; do + key=$(echo "$entry" | jq -r '.key') + value=$(echo "$entry" | jq -r '.value.value') + desc=$(echo "$entry" | jq -r '.value.description // "Britive broker secret"') + + if [ -n "$value" ]; then + log_info "Syncing secret: $key" + set_secret "$key" "$value" "$desc" 2>/dev/null || true + fi + done + fi + + # Process custom secrets + CUSTOM_SECRETS=$(jq -r '.custom_secrets | to_entries[] | select(.value != "") | @json' "$secrets_file" 2>/dev/null) + + if [ -n "$CUSTOM_SECRETS" ]; then + echo "$CUSTOM_SECRETS" | while read -r entry; do + key=$(echo "$entry" | jq -r '.key') + value=$(echo "$entry" | jq -r '.value') + + if [ -n "$value" ]; then + log_info "Syncing custom secret: $key" + set_secret "$key" "$value" "Custom secret for Britive broker" 2>/dev/null || true + fi + done + fi + + echo "" + log_success "Secrets synced to AWS Secrets Manager" + echo "" + log_info "Next steps:" + echo " 1. Run ./deploy.sh to update IAM permissions and task definition" + echo " 2. 
Or run ./manage-secrets.sh restart-tasks to restart with existing config" +} + +export_secrets() { + log_info "Exporting secrets from AWS Secrets Manager..." + + echo "{" + echo ' "secrets": {' + + SECRETS=$(aws secretsmanager list-secrets \ + --filter Key=name,Values="${SECRETS_PREFIX}" \ + --query "SecretList[*].Name" \ + --output text \ + --region "$AWS_REGION" 2>/dev/null || echo "") + + if [ -n "$SECRETS" ]; then + first=true + for secret_name in $SECRETS; do + short_name="${secret_name#${SECRETS_PREFIX}/}" + value=$(aws secretsmanager get-secret-value \ + --secret-id "$secret_name" \ + --query "SecretString" \ + --output text \ + --region "$AWS_REGION" 2>/dev/null || echo "") + + if [ "$first" = true ]; then + first=false + else + echo "," + fi + + echo -n " \"${short_name}\": \"***HIDDEN***\"" + done + fi + + echo "" + echo ' }' + echo "}" + + echo "" + log_warning "Secret values are hidden. Use 'get' command to retrieve individual values." +} + +restart_tasks() { + log_info "Forcing ECS service to restart tasks..." + + # Check if service exists + SERVICE_STATUS=$(aws ecs describe-services \ + --cluster "$ECS_CLUSTER_NAME" \ + --services "$ECS_SERVICE_NAME" \ + --query "services[0].status" \ + --output text \ + --region "$AWS_REGION" 2>/dev/null || echo "") + + if [ "$SERVICE_STATUS" != "ACTIVE" ]; then + log_error "ECS service not found or not active: $ECS_SERVICE_NAME" + log_info "Cluster: $ECS_CLUSTER_NAME" + exit 1 + fi + + aws ecs update-service \ + --cluster "$ECS_CLUSTER_NAME" \ + --service "$ECS_SERVICE_NAME" \ + --force-new-deployment \ + --region "$AWS_REGION" > /dev/null + + log_success "Service update triggered. New tasks will be launched with updated secrets." + echo "" + log_info "Monitor progress:" + echo " aws ecs describe-services --cluster $ECS_CLUSTER_NAME --services $ECS_SERVICE_NAME --region $AWS_REGION" +} + +update_iam_permissions() { + log_info "Updating IAM permissions for secrets access..." 
+
+    EXECUTION_ROLE_NAME="ecsTaskExecutionRole"
+
+    # Get all secret ARNs
+    SECRET_ARNS=$(aws secretsmanager list-secrets \
+        --filter Key=name,Values="${SECRETS_PREFIX}" \
+        --query "SecretList[*].ARN" \
+        --output text \
+        --region "$AWS_REGION" 2>/dev/null)
+
+    if [ -z "$SECRET_ARNS" ]; then
+        log_warning "No secrets found to configure permissions for."
+        return
+    fi
+
+    # Build JSON array of ARNs
+    ARN_ARRAY=$(echo "$SECRET_ARNS" | tr '\t' '\n' | jq -R . | jq -s .)
+
+    # Create policy document
+    cat > /tmp/secrets-policy.json << EOF
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "secretsmanager:GetSecretValue"
+            ],
+            "Resource": ${ARN_ARRAY}
+        }
+    ]
+}
+EOF
+
+    aws iam put-role-policy \
+        --role-name "$EXECUTION_ROLE_NAME" \
+        --policy-name "britive-secrets-access" \
+        --policy-document file:///tmp/secrets-policy.json 2>/dev/null || true
+
+    rm -f /tmp/secrets-policy.json
+
+    log_success "IAM permissions updated for ${#SECRET_ARNS[@]} secrets"
+}
+
+show_help() {
+    echo "Britive Access Broker - Secrets Management"
+    echo ""
+    echo "Usage: ./manage-secrets.sh <command> [options]"
+    echo ""
+    echo "Commands:"
+    echo "  list                       List all secrets in AWS Secrets Manager"
+    echo "  get <name>                 Get a secret value"
+    echo "  set <name> <value> [desc]  Create or update a secret"
+    echo "  delete <name>              Delete a secret"
+    echo "  sync [file]                Sync secrets from secrets.json (or specified file)"
+    echo "  export                     Export secrets list (values hidden)"
+    echo "  restart-tasks              Restart ECS tasks to pick up new secrets"
+    echo "  update-iam                 Update IAM permissions for all secrets"
+    echo ""
+    echo "Environment Variables:"
+    echo "  AWS_REGION           AWS region (default: us-west-2)"
+    echo "  ECS_CLUSTER_NAME     ECS cluster name (default: britive-broker-cluster)"
+    echo "  ECS_SERVICE_NAME     ECS service name (default: britive-broker-service)"
+    echo ""
+    echo "Examples:"
+    echo "  ./manage-secrets.sh list"
+    echo "  ./manage-secrets.sh set MY_API_KEY 'secret-value-here' 'API key for external service'"
+    echo "  
./manage-secrets.sh get MY_API_KEY" + echo " ./manage-secrets.sh sync" + echo " ./manage-secrets.sh restart-tasks" +} + +#============================================================================== +# Main +#============================================================================== + +check_prerequisites + +case "${1:-}" in + list) + list_secrets + ;; + get) + get_secret "$2" + ;; + set) + set_secret "$2" "$3" "$4" + ;; + delete) + delete_secret "$2" + ;; + sync) + sync_secrets "$2" + ;; + export) + export_secrets + ;; + restart-tasks|restart) + restart_tasks + ;; + update-iam) + update_iam_permissions + ;; + help|--help|-h) + show_help + ;; + *) + show_help + exit 1 + ;; +esac diff --git a/Access Broker/ecs-fargate-deployment/secrets.json b/Access Broker/ecs-fargate-deployment/secrets.json new file mode 100644 index 0000000..5b28961 --- /dev/null +++ b/Access Broker/ecs-fargate-deployment/secrets.json @@ -0,0 +1,24 @@ +{ + "_comment": "Britive Access Broker - Secrets Configuration", + "_description": "Define secrets to be stored in AWS Secrets Manager and injected into the ECS task", + "_instructions": "Add your secrets below. Each secret will be stored in AWS Secrets Manager under 'britive-broker/secrets/' and made available as environment variables or files in the container.", + + "secrets": { + "BRITIVE_TOKEN": { + "description": "Britive broker pool authentication token (required)", + "value": "your-britive-token-here", + "required": true, + "inject_as": "env" + }, + "KUBECONFIG_BASE64": { + "description": "Base64-encoded kubeconfig for external Kubernetes clusters (optional)", + "value": "", + "required": false, + "inject_as": "env" + } + }, + + "_custom_secrets_comment": "Add additional secrets below as needed. 
They will be available as environment variables or files.", + "custom_secrets": { + } +} diff --git a/Access Broker/ecs-fargate-deployment/start-broker.sh b/Access Broker/ecs-fargate-deployment/start-broker.sh index e8bdc18..1b32735 100755 --- a/Access Broker/ecs-fargate-deployment/start-broker.sh +++ b/Access Broker/ecs-fargate-deployment/start-broker.sh @@ -1,7 +1,7 @@ #!/bin/bash # Britive Access Broker startup script for ECS Fargate -# Handles graceful shutdown and logging +# Handles graceful shutdown, secrets management, and logging # Signal handler for graceful shutdown cleanup() { @@ -22,6 +22,34 @@ if [ "$DELAY" -gt 0 ]; then sleep $DELAY fi +# Secrets directory for file-based secrets +SECRETS_DIR="${SECRETS_DIR:-/root/broker/secrets}" +mkdir -p "$SECRETS_DIR" + +# Write environment-based secrets to files for applications that need file-based access +# This allows secrets to be accessed either as environment variables or files +echo "Setting up secrets directory: $SECRETS_DIR" + +# Write BRITIVE_TOKEN to file if set +if [ ! -z "$BRITIVE_TOKEN" ]; then + echo "$BRITIVE_TOKEN" > "$SECRETS_DIR/BRITIVE_TOKEN" + chmod 600 "$SECRETS_DIR/BRITIVE_TOKEN" + echo "BRITIVE_TOKEN written to secrets directory" +fi + +# Write any custom secrets that start with BROKER_ prefix to files +env | grep "^BROKER_" | while IFS='=' read -r key value; do + if [ ! -z "$value" ]; then + echo "$value" > "$SECRETS_DIR/$key" + chmod 600 "$SECRETS_DIR/$key" + echo "$key written to secrets directory" + fi +done + +# List secrets directory contents (without showing values) +echo "Secrets directory contents:" +ls -la "$SECRETS_DIR" 2>/dev/null || echo " (empty)" + # Setup kubeconfig from environment if provided if [ ! -z "$KUBECONFIG_BASE64" ]; then echo "Setting up kubeconfig from environment..." 
diff --git a/Access Broker/ecs-fargate-deployment/task-definition.json b/Access Broker/ecs-fargate-deployment/task-definition.json index 743c6c1..37106ef 100644 --- a/Access Broker/ecs-fargate-deployment/task-definition.json +++ b/Access Broker/ecs-fargate-deployment/task-definition.json @@ -21,14 +21,13 @@ { "name": "KUBECONFIG", "value": "/root/.kube/config" - } - ], - "secrets": [ + }, { - "name": "BRITIVE_TOKEN", - "valueFrom": "REPLACE_WITH_SECRET_ARN:britive-token::" + "name": "SECRETS_DIR", + "value": "/root/broker/secrets" } ], + "secrets": [], "logConfiguration": { "logDriver": "awslogs", "options": { From 64b745aa057dda322de25b44c63b41c7ab9a4d15 Mon Sep 17 00:00:00 2001 From: palakchheda <42711310+palakchheda@users.noreply.github.com> Date: Fri, 6 Feb 2026 11:42:56 -0800 Subject: [PATCH 03/12] Beta TAGS --- Access Broker/eks-deployment/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Access Broker/eks-deployment/README.md b/Access Broker/eks-deployment/README.md index 8e827d8..23efad9 100644 --- a/Access Broker/eks-deployment/README.md +++ b/Access Broker/eks-deployment/README.md @@ -1,3 +1,5 @@ +# DRAFT - Beta Deployment strategy + # Britive Broker AWS EKS Deployment Same deployment as GKE version, adapted for AWS EKS with ECR. 
From a056a402ea501eb1a3470a1931f7e7af9ca0214d Mon Sep 17 00:00:00 2001 From: palakchheda <42711310+palakchheda@users.noreply.github.com> Date: Fri, 6 Feb 2026 11:44:00 -0800 Subject: [PATCH 04/12] README fixes --- Access Broker/ecs-fargate-deployment/README.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/Access Broker/ecs-fargate-deployment/README.md b/Access Broker/ecs-fargate-deployment/README.md index 55340bc..afffad6 100644 --- a/Access Broker/ecs-fargate-deployment/README.md +++ b/Access Broker/ecs-fargate-deployment/README.md @@ -1,3 +1,6 @@ +# DRAFT - Beta Deployment strategy + + # Britive Access Broker - AWS ECS Fargate Deployment This directory contains everything needed to deploy the Britive Access Broker on AWS ECS Fargate (serverless container orchestration). @@ -7,6 +10,7 @@ This directory contains everything needed to deploy the Britive Access Broker on The Britive Access Broker enables secure, just-in-time access management through the Britive platform. This deployment uses AWS ECS Fargate for serverless container orchestration, eliminating the need to manage underlying EC2 instances. **Key Features:** + - All secrets stored securely in AWS Secrets Manager - Support for multiple secrets with easy management - Secrets available as environment variables and files at runtime @@ -376,12 +380,6 @@ aws ecs describe-tasks \ --region us-west-2 ``` -### View in AWS Console - -``` -https://us-west-2.console.aws.amazon.com/ecs/home?region=us-west-2#/clusters/britive-broker-cluster/services -``` - ### Common Issues 1. 
**Task fails to start**: Check CloudWatch logs for Java errors From 930f304e771404be1b136599ba217e5ee2226d07 Mon Sep 17 00:00:00 2001 From: palakchheda <42711310+palakchheda@users.noreply.github.com> Date: Mon, 23 Feb 2026 09:28:35 -0800 Subject: [PATCH 05/12] bug fixes and updates --- .../ecs-fargate-deployment/README.md | 10 +++--- .../ecs-fargate-deployment/deploy.sh | 35 +++++++++++++------ .../ecs-fargate-deployment/manage-secrets.sh | 9 ++--- .../ecs-fargate-deployment/start-broker.sh | 4 ++- .../ecs-fargate-deployment/supervisord.conf | 5 ++- .../task-definition.json | 6 ---- 6 files changed, 39 insertions(+), 30 deletions(-) diff --git a/Access Broker/ecs-fargate-deployment/README.md b/Access Broker/ecs-fargate-deployment/README.md index afffad6..05f9c74 100644 --- a/Access Broker/ecs-fargate-deployment/README.md +++ b/Access Broker/ecs-fargate-deployment/README.md @@ -1,6 +1,3 @@ -# DRAFT - Beta Deployment strategy - - # Britive Access Broker - AWS ECS Fargate Deployment This directory contains everything needed to deploy the Britive Access Broker on AWS ECS Fargate (serverless container orchestration). @@ -64,6 +61,9 @@ Before deploying, ensure you have: ### Option 1: Using secrets.json (Recommended) +With this option, `BRITIVE_TOKEN` in `deploy.sh` can be left as the placeholder — the token is read +from `secrets.json` instead. `deploy.sh` detects `secrets.json` before the token check. + 1. Copy the broker JAR file to this directory: ```bash @@ -249,8 +249,8 @@ Secrets are available to the broker in two ways: ### Networking Requirements - **VPC**: Tasks run in your VPC -- **Subnets**: Specify subnets with internet access (for Britive connectivity) -- **Security Group**: Allow outbound HTTPS (443) for Britive API +- **Subnets**: Specify subnets with internet access (for Britive connectivity). By default `deploy.sh` uses the first 2 subnets discovered; edit the `head -2` line in `deploy.sh` to use more AZs. 
+- **Security Group**: Allow outbound HTTPS (443) for Britive API. No inbound rules are required. - **Public IP**: Enabled by default (or use NAT Gateway for private subnets) ## Files diff --git a/Access Broker/ecs-fargate-deployment/deploy.sh b/Access Broker/ecs-fargate-deployment/deploy.sh index 61af70b..5aa13cc 100755 --- a/Access Broker/ecs-fargate-deployment/deploy.sh +++ b/Access Broker/ecs-fargate-deployment/deploy.sh @@ -73,10 +73,19 @@ log_error() { echo -e "${RED}[ERROR]${NC} $1" } -# Check if token is configured -if [ "$BRITIVE_TOKEN" == "your-britive-token-here" ]; then - log_error "Please set BRITIVE_TOKEN in this script before running" +# Check for secrets.json first - it determines whether BRITIVE_TOKEN must be set here +# (Option 1: secrets.json is the recommended path; Option 2: token set directly in this script) +USE_SECRETS_FILE=false +if [ -f "secrets.json" ]; then + log_success "Found secrets.json - will use for secrets configuration" + USE_SECRETS_FILE=true +fi + +# Check if token is configured (only required when NOT using secrets.json) +if [ "$USE_SECRETS_FILE" = false ] && [ "$BRITIVE_TOKEN" == "your-britive-token-here" ]; then + log_error "Please set BRITIVE_TOKEN in this script, or create a secrets.json file (recommended)" log_info "Get your token from: Britive Console > System Administration > Broker Pools" + log_info "See README.md Option 1 for the secrets.json approach" exit 1 fi @@ -91,13 +100,6 @@ for file in "${REQUIRED_FILES[@]}"; do done log_success "Required files found" -# Check for secrets.json (optional but recommended) -USE_SECRETS_FILE=false -if [ -f "secrets.json" ]; then - log_success "Found secrets.json - will use for secrets configuration" - USE_SECRETS_FILE=true -fi - # Check AWS CLI log_info "Checking AWS CLI..." if ! 
command -v aws &> /dev/null; then @@ -116,6 +118,16 @@ fi AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) log_success "AWS CLI configured - Account: $AWS_ACCOUNT_ID, Region: $AWS_REGION" +# Check jq +log_info "Checking jq..." +if ! command -v jq &> /dev/null; then + log_error "jq not found. Please install it:" + log_info " macOS: brew install jq" + log_info " Linux: apt install jq" + exit 1 +fi +log_success "jq is available" + # Check Docker log_info "Checking Docker..." if ! command -v docker &> /dev/null; then @@ -510,7 +522,8 @@ else log_success "ECS cluster exists" fi -# Convert subnet IDs to JSON array +# Convert subnet IDs to JSON array (limit to 2 subnets for the service network config; +# for higher availability across more AZs, increase or remove the head -2 limit) SUBNET_ARRAY=$(echo "$SUBNET_IDS" | tr ',' '\n' | head -2 | jq -R . | jq -s .) # Create or update ECS service diff --git a/Access Broker/ecs-fargate-deployment/manage-secrets.sh b/Access Broker/ecs-fargate-deployment/manage-secrets.sh index 9af9f0b..9d5c929 100644 --- a/Access Broker/ecs-fargate-deployment/manage-secrets.sh +++ b/Access Broker/ecs-fargate-deployment/manage-secrets.sh @@ -271,11 +271,6 @@ export_secrets() { first=true for secret_name in $SECRETS; do short_name="${secret_name#${SECRETS_PREFIX}/}" - value=$(aws secretsmanager get-secret-value \ - --secret-id "$secret_name" \ - --query "SecretString" \ - --output text \ - --region "$AWS_REGION" 2>/dev/null || echo "") if [ "$first" = true ]; then first=false @@ -283,6 +278,7 @@ export_secrets() { echo "," fi + # Values are intentionally not fetched here; use 'get ' to retrieve individual values echo -n " \"${short_name}\": \"***HIDDEN***\"" done fi @@ -367,7 +363,8 @@ EOF rm -f /tmp/secrets-policy.json - log_success "IAM permissions updated for ${#SECRET_ARNS[@]} secrets" + SECRET_COUNT=$(echo "$SECRET_ARNS" | tr '\t' '\n' | grep -c . 
|| echo 0) + log_success "IAM permissions updated for ${SECRET_COUNT} secrets" } show_help() { diff --git a/Access Broker/ecs-fargate-deployment/start-broker.sh b/Access Broker/ecs-fargate-deployment/start-broker.sh index 1b32735..571e6af 100755 --- a/Access Broker/ecs-fargate-deployment/start-broker.sh +++ b/Access Broker/ecs-fargate-deployment/start-broker.sh @@ -69,7 +69,9 @@ fi echo "Starting Britive broker..." cd /root/broker -java -Djavax.net.debug=all -jar britive-broker-1.0.0.jar >> /var/log/britive-broker.log 2>&1 & +# Note: broker stdout/stderr is written to /var/log/britive-broker.log (visible inside container). +# Startup messages from this script are captured by supervisord → CloudWatch via awslogs driver. +java -jar britive-broker-1.0.0.jar >> /var/log/britive-broker.log 2>&1 & BROKER_PID=$! echo "Broker started with PID: $BROKER_PID" diff --git a/Access Broker/ecs-fargate-deployment/supervisord.conf b/Access Broker/ecs-fargate-deployment/supervisord.conf index 816d0aa..665892c 100644 --- a/Access Broker/ecs-fargate-deployment/supervisord.conf +++ b/Access Broker/ecs-fargate-deployment/supervisord.conf @@ -11,5 +11,8 @@ stdout_logfile=/var/log/supervisor/broker.log stderr_logfile=/var/log/supervisor/broker.log autostart=true autorestart=true -environment=BRITIVE_TOKEN="%(ENV_BRITIVE_TOKEN)s" +# Do NOT set environment= here. Omitting it causes supervisord to inherit the full +# parent environment, which passes all ECS-injected secrets and env vars (BRITIVE_TOKEN, +# KUBECONFIG_BASE64, EKS_CLUSTER_NAME, AWS_REGION, BROKER_*, etc.) to the broker process. +# Setting environment= would replace the environment entirely, causing those vars to be lost. 
stopasgroup=true diff --git a/Access Broker/ecs-fargate-deployment/task-definition.json b/Access Broker/ecs-fargate-deployment/task-definition.json index 37106ef..09f3fc5 100644 --- a/Access Broker/ecs-fargate-deployment/task-definition.json +++ b/Access Broker/ecs-fargate-deployment/task-definition.json @@ -11,12 +11,6 @@ "name": "britive-broker", "image": "REPLACE_WITH_ECR_IMAGE", "essential": true, - "portMappings": [ - { - "containerPort": 22, - "protocol": "tcp" - } - ], "environment": [ { "name": "KUBECONFIG", From b49f1f4d05326ab9e1eb78d095166d86019ee9e1 Mon Sep 17 00:00:00 2001 From: palakchheda <42711310+palakchheda@users.noreply.github.com> Date: Mon, 23 Feb 2026 09:37:30 -0800 Subject: [PATCH 06/12] file updates and typo correction --- .gitignore | 1 + Access Broker/ecs-fargate-deployment/Dockerfile | 7 +++++-- Access Broker/ecs-fargate-deployment/README.md | 6 +++--- Access Broker/ecs-fargate-deployment/deploy.sh | 4 ++-- Access Broker/ecs-fargate-deployment/start-broker.sh | 2 +- 5 files changed, 12 insertions(+), 8 deletions(-) diff --git a/.gitignore b/.gitignore index 92129a1..3067974 100644 --- a/.gitignore +++ b/.gitignore @@ -719,3 +719,4 @@ britive-broker-0.1.3.jar britive-broker-1.0.0.jar *.jar docker.zip +secrets.json diff --git a/Access Broker/ecs-fargate-deployment/Dockerfile b/Access Broker/ecs-fargate-deployment/Dockerfile index 9606873..ee4898f 100644 --- a/Access Broker/ecs-fargate-deployment/Dockerfile +++ b/Access Broker/ecs-fargate-deployment/Dockerfile @@ -2,7 +2,10 @@ # This Dockerfile creates a container image for running the Britive Access Broker # on AWS ECS Fargate (serverless container orchestration) -FROM --platform=linux/amd64 ubuntu:24.04 +# TARGETPLATFORM defaults to linux/amd64 to match the X86_64 ECS task definition. 
+# Override at build time if needed: docker build --build-arg TARGETPLATFORM=linux/arm64 +ARG TARGETPLATFORM=linux/amd64 +FROM --platform=$TARGETPLATFORM ubuntu:24.04 # Avoid prompts from apt ENV DEBIAN_FRONTEND=noninteractive @@ -40,7 +43,7 @@ RUN mkdir -p /root/broker/config \ && chmod 700 /root/broker/secrets # Copy Britive broker JAR -COPY britive-broker-1.0.0.jar /root/broker/ +COPY britive-broker-2.0.0.jar /root/broker/ # Copy supervisor configuration COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf diff --git a/Access Broker/ecs-fargate-deployment/README.md b/Access Broker/ecs-fargate-deployment/README.md index 05f9c74..74eeb53 100644 --- a/Access Broker/ecs-fargate-deployment/README.md +++ b/Access Broker/ecs-fargate-deployment/README.md @@ -53,7 +53,7 @@ Before deploying, ensure you have: - Create a new pool or select an existing one - Copy the broker pool token -5. **britive-broker-1.0.0.jar** file in this directory +5. **britive-broker-2.0.0.jar** file or later in this directory 6. **VPC with subnets** (default VPC works, or specify custom VPC) @@ -67,7 +67,7 @@ from `secrets.json` instead. `deploy.sh` detects `secrets.json` before the token 1. Copy the broker JAR file to this directory: ```bash - cp /path/to/britive-broker-1.0.0.jar . + cp /path/to/britive-broker-2.0.0.jar . ``` 2. Edit `secrets.json` and configure your secrets: @@ -100,7 +100,7 @@ from `secrets.json` instead. `deploy.sh` detects `secrets.json` before the token 1. Copy the broker JAR file: ```bash - cp /path/to/britive-broker-1.0.0.jar . + cp /path/to/britive-broker-2.0.0.jar . ``` 2. Edit `deploy.sh` and set your token directly: diff --git a/Access Broker/ecs-fargate-deployment/deploy.sh b/Access Broker/ecs-fargate-deployment/deploy.sh index 5aa13cc..e7916e5 100755 --- a/Access Broker/ecs-fargate-deployment/deploy.sh +++ b/Access Broker/ecs-fargate-deployment/deploy.sh @@ -6,7 +6,7 @@ # Prerequisites: # 1. AWS CLI installed and configured # 2. 
Docker installed and running -# 3. britive-broker-1.0.0.jar in current directory +# 3. britive-broker-2.0.0.jar in current directory # # Usage: # 1. Set configuration variables below @@ -91,7 +91,7 @@ fi # Check for required files log_info "Checking required files..." -REQUIRED_FILES=("britive-broker-1.0.0.jar" "supervisord.conf" "start-broker.sh" "token-generator.sh" "task-definition.json") +REQUIRED_FILES=("britive-broker-2.0.0.jar" "supervisord.conf" "start-broker.sh" "token-generator.sh" "task-definition.json") for file in "${REQUIRED_FILES[@]}"; do if [ ! -f "$file" ]; then log_error "$file not found in current directory" diff --git a/Access Broker/ecs-fargate-deployment/start-broker.sh b/Access Broker/ecs-fargate-deployment/start-broker.sh index 571e6af..3a5f60b 100755 --- a/Access Broker/ecs-fargate-deployment/start-broker.sh +++ b/Access Broker/ecs-fargate-deployment/start-broker.sh @@ -71,7 +71,7 @@ echo "Starting Britive broker..." cd /root/broker # Note: broker stdout/stderr is written to /var/log/britive-broker.log (visible inside container). # Startup messages from this script are captured by supervisord → CloudWatch via awslogs driver. -java -jar britive-broker-1.0.0.jar >> /var/log/britive-broker.log 2>&1 & +java -jar britive-broker-2.0.0.jar >> /var/log/britive-broker.log 2>&1 & BROKER_PID=$! 
echo "Broker started with PID: $BROKER_PID" From 4def53b7567f64b49b8e8fa90038ee221496f0e0 Mon Sep 17 00:00:00 2001 From: palakchheda <42711310+palakchheda@users.noreply.github.com> Date: Mon, 23 Feb 2026 09:42:26 -0800 Subject: [PATCH 07/12] file updates --- .gitignore | 3 ++- .../ecs-fargate-deployment/secrets.json | 24 ------------------- 2 files changed, 2 insertions(+), 25 deletions(-) delete mode 100644 Access Broker/ecs-fargate-deployment/secrets.json diff --git a/.gitignore b/.gitignore index 3067974..98b1cab 100644 --- a/.gitignore +++ b/.gitignore @@ -369,6 +369,7 @@ venv/ ENV/ env.bak/ venv.bak/ +secrets.json # Spyder project settings .spyderproject @@ -719,4 +720,4 @@ britive-broker-0.1.3.jar britive-broker-1.0.0.jar *.jar docker.zip -secrets.json +*secrets.json* diff --git a/Access Broker/ecs-fargate-deployment/secrets.json b/Access Broker/ecs-fargate-deployment/secrets.json deleted file mode 100644 index 5b28961..0000000 --- a/Access Broker/ecs-fargate-deployment/secrets.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "_comment": "Britive Access Broker - Secrets Configuration", - "_description": "Define secrets to be stored in AWS Secrets Manager and injected into the ECS task", - "_instructions": "Add your secrets below. Each secret will be stored in AWS Secrets Manager under 'britive-broker/secrets/' and made available as environment variables or files in the container.", - - "secrets": { - "BRITIVE_TOKEN": { - "description": "Britive broker pool authentication token (required)", - "value": "your-britive-token-here", - "required": true, - "inject_as": "env" - }, - "KUBECONFIG_BASE64": { - "description": "Base64-encoded kubeconfig for external Kubernetes clusters (optional)", - "value": "", - "required": false, - "inject_as": "env" - } - }, - - "_custom_secrets_comment": "Add additional secrets below as needed. 
They will be available as environment variables or files.", - "custom_secrets": { - } -} From b43c0779dd6dc0dbf6f2f822bd9609f07799e8fc Mon Sep 17 00:00:00 2001 From: palakchheda <42711310+palakchheda@users.noreply.github.com> Date: Mon, 23 Feb 2026 10:03:18 -0800 Subject: [PATCH 08/12] improvements --- .../ecs-fargate-deployment/README.md | 35 +++++++++------ .../ecs-fargate-deployment/deploy.sh | 43 +++++++++++++------ .../ecs-fargate-deployment/manage-secrets.sh | 0 .../ecs-fargate-deployment/start-broker.sh | 25 +++++++++++ 4 files changed, 77 insertions(+), 26 deletions(-) mode change 100644 => 100755 Access Broker/ecs-fargate-deployment/manage-secrets.sh diff --git a/Access Broker/ecs-fargate-deployment/README.md b/Access Broker/ecs-fargate-deployment/README.md index 74eeb53..afcda7c 100644 --- a/Access Broker/ecs-fargate-deployment/README.md +++ b/Access Broker/ecs-fargate-deployment/README.md @@ -48,10 +48,10 @@ Before deploying, ensure you have: apt install jq ``` -4. **Britive Broker Pool Token** from the Britive console - - Navigate to: System Administration > Broker Pools - - Create a new pool or select an existing one - - Copy the broker pool token +4. **Britive Tenant Subdomain** and **Broker Pool Token** from the Britive console + - **Tenant subdomain**: the part before `.britive-app.com` in your Britive URL + (e.g., `mycompany` for `mycompany.britive-app.com`) — find it under System Administration > Settings + - **Broker pool token**: navigate to System Administration > Broker Pools, create or select a pool, and copy the token 5. **britive-broker-2.0.0.jar** file or later in this directory @@ -61,8 +61,9 @@ Before deploying, ensure you have: ### Option 1: Using secrets.json (Recommended) -With this option, `BRITIVE_TOKEN` in `deploy.sh` can be left as the placeholder — the token is read -from `secrets.json` instead. `deploy.sh` detects `secrets.json` before the token check. 
+Both `BRITIVE_TENANT` and `BRITIVE_TOKEN` are defined in `secrets.json`, stored in AWS Secrets Manager, +and injected into the running task as environment variables. The values in `deploy.sh` can be left as +placeholders when using this option. 1. Copy the broker JAR file to this directory: @@ -70,11 +71,17 @@ from `secrets.json` instead. `deploy.sh` detects `secrets.json` before the token cp /path/to/britive-broker-2.0.0.jar . ``` -2. Edit `secrets.json` and configure your secrets: +2. Edit `secrets.json` and fill in your tenant subdomain and token: ```json { "secrets": { + "BRITIVE_TENANT": { + "description": "Britive tenant subdomain", + "value": "mycompany", + "required": true, + "inject_as": "env" + }, "BRITIVE_TOKEN": { "description": "Britive broker pool authentication token", "value": "your-actual-token-here", @@ -82,9 +89,7 @@ from `secrets.json` instead. `deploy.sh` detects `secrets.json` before the token "inject_as": "env" } }, - "custom_secrets": { - "MY_API_KEY": "optional-api-key-value" - } + "custom_secrets": {} } ``` @@ -95,7 +100,7 @@ from `secrets.json` instead. `deploy.sh` detects `secrets.json` before the token ./deploy.sh ``` -### Option 2: Direct Token Configuration +### Option 2: Direct Configuration (no secrets.json) 1. Copy the broker JAR file: @@ -103,13 +108,16 @@ from `secrets.json` instead. `deploy.sh` detects `secrets.json` before the token cp /path/to/britive-broker-2.0.0.jar . ``` -2. Edit `deploy.sh` and set your token directly: +2. Edit `deploy.sh` and set both values directly: ```bash - BRITIVE_TOKEN="your-britive-token-here" + BRITIVE_TENANT="mycompany" # subdomain of your Britive URL + BRITIVE_TOKEN="your-britive-token-here" # from Broker Pools console AWS_REGION="us-west-2" ``` + Both values are stored in AWS Secrets Manager and injected into the task at runtime — same mechanism as Option 1. + 3. 
Run the deployment: ```bash @@ -231,6 +239,7 @@ Secrets are available to the broker in two ways: | Variable | Description | Required | Source | | -------- | ----------- | -------- | ------ | +| `BRITIVE_TENANT` | Britive tenant subdomain (e.g. `mycompany` for `mycompany.britive-app.com`) | Yes | Secrets Manager (via `secrets.json` or `deploy.sh`) | | `BRITIVE_TOKEN` | Broker pool authentication token | Yes | Secrets Manager | | `KUBECONFIG` | Path to kubeconfig | Auto | Container | | `KUBECONFIG_BASE64` | Base64-encoded kubeconfig (for external clusters) | No | Secrets Manager | diff --git a/Access Broker/ecs-fargate-deployment/deploy.sh b/Access Broker/ecs-fargate-deployment/deploy.sh index e7916e5..a819621 100755 --- a/Access Broker/ecs-fargate-deployment/deploy.sh +++ b/Access Broker/ecs-fargate-deployment/deploy.sh @@ -19,10 +19,16 @@ set -e #============================================================================== # Secrets Configuration -# Option 1: Set BRITIVE_TOKEN directly here (for simple deployments) -# Option 2: Use secrets.json file for multiple secrets (recommended) +# Option 1 (recommended): Use secrets.json — set BRITIVE_TENANT and BRITIVE_TOKEN there. +# Both are stored in AWS Secrets Manager and auto-injected into the task at runtime. +# Leave the values below as placeholders when using Option 1. # -# Get your Britive token from: Britive Console > System Administration > Broker Pools > Create/Select Pool > Token +# Option 2 (direct): Set values here if not using secrets.json. +# BRITIVE_TENANT: subdomain of your Britive URL (e.g. 
"mycompany" for mycompany.britive-app.com) +# Find it in: Britive Console > System Administration > Settings +# BRITIVE_TOKEN: broker pool token +# Find it in: Britive Console > System Administration > Broker Pools > Create/Select Pool > Token +BRITIVE_TENANT="your-tenant-subdomain-here" BRITIVE_TOKEN="your-britive-token-here" # Secrets are stored in AWS Secrets Manager under this prefix @@ -73,20 +79,26 @@ log_error() { echo -e "${RED}[ERROR]${NC} $1" } -# Check for secrets.json first - it determines whether BRITIVE_TOKEN must be set here -# (Option 1: secrets.json is the recommended path; Option 2: token set directly in this script) +# Check for secrets.json first — it determines whether BRITIVE_TENANT and BRITIVE_TOKEN +# must be set here (Option 2) or are read from the file (Option 1, recommended). USE_SECRETS_FILE=false if [ -f "secrets.json" ]; then log_success "Found secrets.json - will use for secrets configuration" USE_SECRETS_FILE=true fi -# Check if token is configured (only required when NOT using secrets.json) -if [ "$USE_SECRETS_FILE" = false ] && [ "$BRITIVE_TOKEN" == "your-britive-token-here" ]; then - log_error "Please set BRITIVE_TOKEN in this script, or create a secrets.json file (recommended)" - log_info "Get your token from: Britive Console > System Administration > Broker Pools" - log_info "See README.md Option 1 for the secrets.json approach" - exit 1 +# When not using secrets.json, both BRITIVE_TENANT and BRITIVE_TOKEN must be set directly above +if [ "$USE_SECRETS_FILE" = false ]; then + if [ "$BRITIVE_TENANT" == "your-tenant-subdomain-here" ]; then + log_error "Please set BRITIVE_TENANT in this script, or add it to secrets.json (recommended)" + log_info "This is the subdomain of your Britive URL (e.g., 'mycompany' for mycompany.britive-app.com)" + exit 1 + fi + if [ "$BRITIVE_TOKEN" == "your-britive-token-here" ]; then + log_error "Please set BRITIVE_TOKEN in this script, or add it to secrets.json (recommended)" + log_info "Get your token 
from: Britive Console > System Administration > Broker Pools" + exit 1 + fi fi # Check for required files @@ -411,7 +423,10 @@ if [ "$USE_SECRETS_FILE" = true ]; then fi done else - # Fallback: Use BRITIVE_TOKEN from script configuration + # Fallback: use values set directly in this script (Option 2) + if [ "$BRITIVE_TENANT" != "your-tenant-subdomain-here" ]; then + create_or_update_secret "BRITIVE_TENANT" "$BRITIVE_TENANT" "Britive tenant subdomain" + fi if [ "$BRITIVE_TOKEN" != "your-britive-token-here" ]; then create_or_update_secret "BRITIVE_TOKEN" "$BRITIVE_TOKEN" "Britive Access Broker token" fi @@ -486,7 +501,9 @@ done SECRETS_JSON_ARRAY+="]" -# Use jq to properly inject secrets array into task definition +# Use jq to properly inject secrets array and runtime values into the task definition template. +# BRITIVE_TENANT and BRITIVE_TOKEN arrive as environment variables via the secrets array +# (injected from AWS Secrets Manager at task launch time) — no plain env var needed here. jq --argjson secrets "$SECRETS_JSON_ARRAY" \ --arg image "$ECR_REPO_URI:latest" \ --arg exec_role "$EXECUTION_ROLE_ARN" \ diff --git a/Access Broker/ecs-fargate-deployment/manage-secrets.sh b/Access Broker/ecs-fargate-deployment/manage-secrets.sh old mode 100644 new mode 100755 diff --git a/Access Broker/ecs-fargate-deployment/start-broker.sh b/Access Broker/ecs-fargate-deployment/start-broker.sh index 3a5f60b..1966317 100755 --- a/Access Broker/ecs-fargate-deployment/start-broker.sh +++ b/Access Broker/ecs-fargate-deployment/start-broker.sh @@ -67,6 +67,31 @@ if [ ! -z "$EKS_CLUSTER_NAME" ] && [ ! -z "$AWS_REGION" ]; then echo "EKS kubeconfig configured" fi +# Generate broker-config.yml +# The broker requires this file to connect to the Britive platform. +# BRITIVE_TENANT = your tenant subdomain (e.g. 
"mycompany" for mycompany.britive-app.com) +# BRITIVE_TOKEN = broker pool authentication token (injected from Secrets Manager) +# This is the ECS Fargate equivalent of the ConfigMap used in Kubernetes deployments. +if [ -z "$BRITIVE_TENANT" ]; then + echo "ERROR: BRITIVE_TENANT is not set. Set it in deploy.sh before deploying." + exit 1 +fi +if [ -z "$BRITIVE_TOKEN" ]; then + echo "ERROR: BRITIVE_TOKEN is not set. Check Secrets Manager configuration." + exit 1 +fi + +mkdir -p /root/broker/config +cat > /root/broker/config/broker-config.yml << EOF +config: + version: 2 + bootstrap: + tenant_subdomain: ${BRITIVE_TENANT} + authentication_token: "${BRITIVE_TOKEN}" +EOF +chmod 600 /root/broker/config/broker-config.yml +echo "Broker config generated for tenant: $BRITIVE_TENANT" + echo "Starting Britive broker..." cd /root/broker # Note: broker stdout/stderr is written to /var/log/britive-broker.log (visible inside container). From 06c5482631a4c3e09f1f08b3c58d0a42651f6484 Mon Sep 17 00:00:00 2001 From: palakchheda <42711310+palakchheda@users.noreply.github.com> Date: Mon, 23 Feb 2026 10:12:23 -0800 Subject: [PATCH 09/12] minor improvement --- Access Broker/ecs-fargate-deployment/deploy.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Access Broker/ecs-fargate-deployment/deploy.sh b/Access Broker/ecs-fargate-deployment/deploy.sh index a819621..78ae9e4 100755 --- a/Access Broker/ecs-fargate-deployment/deploy.sh +++ b/Access Broker/ecs-fargate-deployment/deploy.sh @@ -259,6 +259,12 @@ log_info "Setting up CloudWatch log group..." aws logs create-log-group --log-group-name "/ecs/britive-broker" --region "$AWS_REGION" 2>/dev/null || true log_success "CloudWatch log group ready" +# Ensure the ECS service-linked role exists (required for CreateService; only needs to be +# created once per AWS account and is otherwise a no-op) +log_info "Ensuring ECS service-linked role exists..." 
+aws iam create-service-linked-role --aws-service-name ecs.amazonaws.com 2>/dev/null || true +log_success "ECS service-linked role ready" + # Create or get IAM roles log_info "Setting up IAM roles..." From 34381dd8f4be16f38cf057600f297b9794cfa5a5 Mon Sep 17 00:00:00 2001 From: palakchheda <42711310+palakchheda@users.noreply.github.com> Date: Mon, 23 Feb 2026 10:37:07 -0800 Subject: [PATCH 10/12] added README --- Access Broker/README.md | 193 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 193 insertions(+) create mode 100644 Access Broker/README.md diff --git a/Access Broker/README.md b/Access Broker/README.md new file mode 100644 index 0000000..cfa770d --- /dev/null +++ b/Access Broker/README.md @@ -0,0 +1,193 @@ +# Britive Access Broker - Deployment Options + +The Britive Access Broker is a lightweight Java service that runs inside your infrastructure and enables +the Britive platform to manage just-in-time access to your Kubernetes clusters and other resources. The +broker establishes an outbound connection to Britive — no inbound firewall rules or public endpoints +are required. 
+ +## How It Works + +``` + Your Infrastructure Britive Platform + ───────────────── ──────────────── + ┌─────────────────────┐ outbound ┌──────────────────┐ + │ Access Broker │ ────HTTPS───▶ │ Britive SaaS │ + │ (this repo) │ │ (your-tenant │ + │ │ │ .britive-app │ + │ Manages: │ │ .com) │ + │ • Kubernetes RBAC │ └──────────────────┘ + │ • Role bindings │ + │ • Service accounts │ + └─────────────────────┘ +``` + +The broker reads its configuration from `broker-config.yml` at startup: + +```yaml +config: + bootstrap: + tenant_subdomain: mycompany # your Britive tenant + authentication_token: "" # broker pool token from Britive console +``` + +--- + +## Prerequisites (All Deployment Options) + +Before deploying, you need two values from the Britive console: + +| What | Where to Find It | +|------|-----------------| +| **Tenant subdomain** | The part before `.britive-app.com` in your Britive URL (e.g. `mycompany`). Find it under System Administration > Settings. | +| **Broker pool token** | System Administration > Broker Pools > Create or select a pool > copy the token. | + +You also need the **`britive-broker-2.0.0.jar`** file placed in the deployment directory before running any deployment script. + +--- + +## Deployment Options + +| Option | Platform | Kubernetes Required | Secret Storage | Best For | +|--------|----------|-------------------|----------------|----------| +| [ECS Fargate](#ecs-fargate-aws-recommended) | AWS | No | AWS Secrets Manager | AWS-native, serverless | +| [EKS](#eks-aws-kubernetes) | AWS | Yes (EKS) | Kubernetes Secrets | Existing EKS clusters | +| [AKS](#aks-azure-kubernetes) | Azure | Yes (AKS) | Kubernetes Secrets | Existing AKS clusters | +| [GKE](#gke-google-kubernetes) | Google Cloud | Yes (GKE) | Kubernetes Secrets | Existing GKE clusters | + +--- + +### ECS Fargate (AWS) — Recommended + +**Directory:** [`ecs-fargate-deployment/`](ecs-fargate-deployment/) + +Runs the broker as a serverless container on AWS ECS Fargate. 
No Kubernetes cluster needed. +Secrets are stored in AWS Secrets Manager and injected into the task at runtime. + +**Additional prerequisites:** AWS CLI, Docker, jq + +**Highlights:** +- Fully automated single-script deployment (`deploy.sh`) +- Secrets managed via `secrets.json` → AWS Secrets Manager +- `manage-secrets.sh` CLI for day-2 secret operations (add, rotate, sync) +- Auto-generates `broker-config.yml` at container startup from secrets +- CloudWatch logging, health checks, and auto-restart included + +**Quick start:** +```bash +cd ecs-fargate-deployment +# 1. Place britive-broker-2.0.0.jar here +# 2. Edit secrets.json — set BRITIVE_TENANT and BRITIVE_TOKEN +chmod +x deploy.sh manage-secrets.sh +./deploy.sh +``` + +See [`ecs-fargate-deployment/README.md`](ecs-fargate-deployment/README.md) for full documentation. + +--- + +### EKS (AWS Kubernetes) + +**Directory:** [`eks-deployment/`](eks-deployment/) + +Deploys the broker as a Kubernetes Deployment on an existing AWS EKS cluster. +Uses ECR for the container image. Configuration is provided via a Kubernetes ConfigMap. + +**Additional prerequisites:** AWS CLI, Docker, kubectl (configured for your EKS cluster) + +**Quick start:** +```bash +cd eks-deployment +# 1. Place britive-broker-2.0.0.jar here +# 2. Edit deploy.sh — set BRITIVE_TOKEN and AWS_REGION +# 3. Edit deployment.yaml — set tenant_subdomain and authentication_token in the ConfigMap +chmod +x deploy.sh +./deploy.sh +``` + +See [`eks-deployment/README.md`](eks-deployment/README.md) for full documentation. + +--- + +### AKS (Azure Kubernetes) + +**Directory:** [`aks-deployment/`](aks-deployment/) + +Deploys the broker as a Kubernetes Deployment on an existing Azure AKS cluster. +Uses Azure Container Registry (ACR) for the container image. + +**Additional prerequisites:** Azure CLI (`az`), Docker, kubectl (configured for your AKS cluster) + +**Quick start:** +```bash +cd aks-deployment +# 1. Place britive-broker-2.0.0.jar here +# 2. 
Edit deploy.sh — set BRITIVE_TOKEN, ACR_NAME, RESOURCE_GROUP +# 3. Edit deployment.yaml — set tenant_subdomain and authentication_token in the ConfigMap +chmod +x deploy.sh +./deploy.sh +``` + +See [`aks-deployment/README.md`](aks-deployment/README.md) for full documentation. + +--- + +### GKE (Google Kubernetes) + +**Directory:** [`gke-deployment/`](gke-deployment/) + +Deploys the broker as a Kubernetes Deployment on an existing Google GKE cluster. +Uses Google Container Registry for the container image. + +**Additional prerequisites:** gcloud CLI, Docker, kubectl (configured for your GKE cluster) + +**Quick start:** +```bash +cd gke-deployment +# 1. Place britive-broker-2.0.0.jar here +# 2. Edit deploy.sh — set BRITIVE_TOKEN +# 3. Edit deployment.yaml — set tenant_subdomain and authentication_token in the ConfigMap +chmod +x deploy.sh +./deploy.sh +``` + +See [`gke-deployment/README.md`](gke-deployment/README.md) for full documentation. + +--- + +## Choosing a Deployment Option + +``` +Do you already have a Kubernetes cluster? +│ +├─ No ──▶ Use ECS Fargate (serverless, no cluster to manage) +│ +└─ Yes + │ + ├─ AWS EKS ──▶ Use EKS deployment + ├─ Azure AKS ──▶ Use AKS deployment + └─ Google GKE ──▶ Use GKE deployment +``` + +Use **ECS Fargate** if: +- You are deploying to AWS and don't want to manage a Kubernetes cluster +- You want secrets managed in AWS Secrets Manager with audit logging +- You want a fully serverless, auto-scaling setup + +Use a **Kubernetes deployment** (EKS / AKS / GKE) if: +- You already have a Kubernetes cluster in that cloud +- You want the broker to run alongside your workloads in the same cluster +- You prefer Kubernetes-native secret and config management + +--- + +## Common Architecture Notes + +All deployment options share the same broker container image and startup sequence: + +1. Container starts under `supervisord` (auto-restarts on crash) +2. 
`start-broker.sh` runs: sets up secrets directory, configures kubectl if needed, generates `broker-config.yml` +3. Java broker starts and connects outbound to `.britive-app.com` over HTTPS (port 443) +4. Broker registers with the Britive platform using the broker pool token +5. Britive can now orchestrate just-in-time access via the broker + +**Network requirement:** Outbound HTTPS (port 443) to `*.britive-app.com`. No inbound rules needed. From 07d523a7e33d91ba52ef7a8d9b8891ffb86702b4 Mon Sep 17 00:00:00 2001 From: palakchheda <42711310+palakchheda@users.noreply.github.com> Date: Mon, 23 Feb 2026 14:39:19 -0800 Subject: [PATCH 11/12] Add configurable broker version to AKS deployment (default 2.0.0) Replace hardcoded 1.0.0 with a BROKER_VERSION variable (default 2.0.0) across deploy.sh, Dockerfile, and README. Add --broker-version CLI flag to deploy.sh for runtime overrides without editing the script. --- Access Broker/aks-deployment/Dockerfile | 5 ++- Access Broker/aks-deployment/README.md | 31 ++++++++------ Access Broker/aks-deployment/deploy.sh | 40 ++++++++++++++++--- .../ecs-fargate-deployment/deploy.sh | 24 ++++++++--- .../task-definition.json | 2 +- 5 files changed, 77 insertions(+), 25 deletions(-) diff --git a/Access Broker/aks-deployment/Dockerfile b/Access Broker/aks-deployment/Dockerfile index 76fccbc..e39f8a1 100644 --- a/Access Broker/aks-deployment/Dockerfile +++ b/Access Broker/aks-deployment/Dockerfile @@ -31,8 +31,11 @@ RUN mkdir -p /root/broker/config \ && mkdir -p /var/log/supervisor \ && mkdir -p /var/run/sshd +# Broker version (passed in via --build-arg BROKER_VERSION) +ARG BROKER_VERSION=2.0.0 + # Copy Britive broker JAR -COPY britive-broker-1.0.0.jar /root/broker/ +COPY britive-broker-${BROKER_VERSION}.jar /root/broker/ # Copy supervisor configuration COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf diff --git a/Access Broker/aks-deployment/README.md b/Access Broker/aks-deployment/README.md index 91e2366..8afb016 100644 --- 
a/Access Broker/aks-deployment/README.md +++ b/Access Broker/aks-deployment/README.md @@ -51,7 +51,7 @@ Before deploying, ensure you have: - Create a new pool or select an existing one - Copy the broker pool token -6. **britive-broker-1.0.0.jar** file in this directory +6. **britive-broker-\.jar** file in this directory (e.g. `britive-broker-2.0.0.jar`) ## Quick Start @@ -60,12 +60,13 @@ Before deploying, ensure you have: 1. Copy the broker JAR file to this directory: ```bash - cp /path/to/britive-broker-1.0.0.jar . + cp /path/to/britive-broker-2.0.0.jar . ``` 2. Edit `deploy.sh` and set your configuration: ```bash + BROKER_VERSION="2.0.0" # Broker JAR version (default: 2.0.0) BRITIVE_TOKEN="your-britive-token-here" ACR_NAME="britivebroker" # Your ACR name RESOURCE_GROUP="your-rg-name" # Your Azure resource group @@ -78,6 +79,12 @@ Before deploying, ensure you have: ./deploy.sh ``` + To deploy a specific broker version without editing the script, use the `--broker-version` flag: + + ```bash + ./deploy.sh --broker-version 1.5.0 + ``` + The script will: - Validate all prerequisites @@ -133,17 +140,17 @@ The script will: ### Environment Variables -| Variable | Description | Required | -|----------|-------------|----------| -| `BRITIVE_TOKEN` | Broker pool authentication token | Yes | -| `KUBECONFIG` | Path to kubeconfig (auto-configured) | No | +| Variable | Description | Required | +|-------------------|--------------------------------------|----------| +| `BRITIVE_TOKEN` | Broker pool authentication token | Yes | +| `KUBECONFIG` | Path to kubeconfig (auto-configured) | No | ### Resource Limits | Resource | Request | Limit | |----------|---------|-------| -| Memory | 512Mi | 1Gi | -| CPU | 250m | 500m | +| Memory | 512Mi | 1Gi | +| CPU | 250m | 500m | ### Replicas @@ -174,10 +181,10 @@ The default deployment creates 2 replicas for high availability. 
Modify `spec.re The broker requires the following Kubernetes permissions: -| API Group | Resources | Verbs | -|-----------|-----------|-------| -| rbac.authorization.k8s.io | roles, rolebindings, clusterroles, clusterrolebindings | get, list, watch, create, update, patch, delete | -| "" (core) | serviceaccounts, namespaces | get, list, watch, create, update, patch, delete | +| API Group | Resources | Verbs | +|----------------------------|--------------------------------------------------------|-------------------------------------------------| +| rbac.authorization.k8s.io | roles, rolebindings, clusterroles, clusterrolebindings | get, list, watch, create, update, patch, delete | +| "" (core) | serviceaccounts, namespaces | get, list, watch, create, update, patch, delete | These permissions enable the broker to manage access control for just-in-time access. diff --git a/Access Broker/aks-deployment/deploy.sh b/Access Broker/aks-deployment/deploy.sh index 78af9ca..107f674 100755 --- a/Access Broker/aks-deployment/deploy.sh +++ b/Access Broker/aks-deployment/deploy.sh @@ -8,11 +8,15 @@ # 2. Docker installed and running # 3. kubectl installed and configured for your AKS cluster # 4. AKS cluster running -# 5. britive-broker-1.0.0.jar in current directory +# 5. britive-broker-.jar in current directory # # Usage: # 1. Set BRITIVE_TOKEN below with your broker pool token from Britive console -# 2. Run: ./deploy.sh +# 2. 
Run: ./deploy.sh [--broker-version <version>] + +# Options: +# --broker-version, -v Broker JAR version to use (default: 2.0.0) +# Example: ./deploy.sh --broker-version 1.5.0 set -e @@ -20,6 +24,9 @@ set -e # CONFIGURATION - MODIFY THESE VALUES #============================================================================== +# Broker version (can also be overridden via --broker-version flag) +BROKER_VERSION="2.0.0" + # Your Britive broker pool token (required) # Get this from: Britive Console > System Administration > Broker Pools > Create/Select Pool > Token BRITIVE_TOKEN="your-britive-token-here" @@ -38,6 +45,26 @@ IMAGE_TAG="latest" # DO NOT MODIFY BELOW THIS LINE #============================================================================== +# Parse command-line arguments (override CONFIGURATION defaults) +while [[ "$#" -gt 0 ]]; do + case $1 in + --broker-version|-v) + if [ -z "$2" ] || [[ "$2" == --* ]]; then + echo "ERROR: --broker-version requires a value (e.g. --broker-version 2.0.0)" + exit 1 + fi + BROKER_VERSION="$2" + shift + ;; + *) + echo "Unknown option: $1" + echo "Usage: ./deploy.sh [--broker-version <version>]" + exit 1 + ;; + esac + shift +done + # Colors for output RED='\033[0;31m' GREEN='\033[0;32m' @@ -70,8 +97,9 @@ fi # Check for required files log_info "Checking required files..." -if [ ! -f "britive-broker-1.0.0.jar" ]; then - log_error "britive-broker-1.0.0.jar not found in current directory" +log_info "Using broker version: $BROKER_VERSION" +if [ ! -f "britive-broker-${BROKER_VERSION}.jar" ]; then + log_error "britive-broker-${BROKER_VERSION}.jar not found in current directory" log_info "Please copy the broker JAR file to this directory" exit 1 fi @@ -203,8 +231,8 @@ az acr login --name "$ACR_NAME" log_success "Docker authenticated with ACR" # Build Docker image -log_info "Building Docker image (AMD64 architecture)..." -docker build --platform linux/amd64 -t "$IMAGE_NAME:$IMAGE_TAG" .
+log_info "Building Docker image (AMD64 architecture, broker version: $BROKER_VERSION)..." +docker build --platform linux/amd64 --build-arg BROKER_VERSION="$BROKER_VERSION" -t "$IMAGE_NAME:$IMAGE_TAG" . # Verify architecture ARCH=$(docker inspect "$IMAGE_NAME:$IMAGE_TAG" --format '{{.Architecture}}') diff --git a/Access Broker/ecs-fargate-deployment/deploy.sh b/Access Broker/ecs-fargate-deployment/deploy.sh index 78ae9e4..2ddb535 100755 --- a/Access Broker/ecs-fargate-deployment/deploy.sh +++ b/Access Broker/ecs-fargate-deployment/deploy.sh @@ -52,6 +52,9 @@ SECURITY_GROUP_ID="" # Security group for the tasks # Number of tasks (replicas) DESIRED_COUNT=2 +# CPU architecture — set to X86_64 or ARM64 +CPU_ARCH="X86_64" + #============================================================================== # DO NOT MODIFY BELOW THIS LINE #============================================================================== @@ -218,16 +221,25 @@ log_info "Authenticating Docker with ECR..." aws ecr get-login-password --region "$AWS_REGION" | docker login --username AWS --password-stdin "${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com" log_success "Docker authenticated with ECR" +# Map ECS CPU_ARCH to Docker platform and expected architecture label +if [ "$CPU_ARCH" = "ARM64" ]; then + DOCKER_PLATFORM="linux/arm64" + EXPECTED_DOCKER_ARCH="arm64" +else + DOCKER_PLATFORM="linux/amd64" + EXPECTED_DOCKER_ARCH="amd64" +fi + # Build Docker image -log_info "Building Docker image (AMD64 architecture)..." -docker build --platform linux/amd64 -t "$ECR_REPO_NAME:latest" . +log_info "Building Docker image ($CPU_ARCH architecture)..." +docker build --platform "$DOCKER_PLATFORM" -t "$ECR_REPO_NAME:latest" . # Verify architecture ARCH=$(docker inspect "$ECR_REPO_NAME:latest" --format '{{.Architecture}}') log_info "Image architecture: $ARCH" -if [ "$ARCH" != "amd64" ]; then - log_error "Image architecture is not amd64. Fargate requires amd64 images." 
+if [ "$ARCH" != "$EXPECTED_DOCKER_ARCH" ]; then + log_error "Image architecture is $ARCH but expected $EXPECTED_DOCKER_ARCH for CPU_ARCH=$CPU_ARCH." exit 1 fi log_success "Image built successfully" @@ -515,11 +527,13 @@ jq --argjson secrets "$SECRETS_JSON_ARRAY" \ --arg exec_role "$EXECUTION_ROLE_ARN" \ --arg task_role "$TASK_ROLE_ARN" \ --arg region "$AWS_REGION" \ + --arg cpu_arch "$CPU_ARCH" \ '.executionRoleArn = $exec_role | .taskRoleArn = $task_role | .containerDefinitions[0].image = $image | .containerDefinitions[0].secrets = $secrets | - .containerDefinitions[0].logConfiguration.options["awslogs-region"] = $region' \ + .containerDefinitions[0].logConfiguration.options["awslogs-region"] = $region | + .runtimePlatform.cpuArchitecture = $cpu_arch' \ task-definition.json > task-definition-deploy.json log_success "Task definition prepared with $(echo "$ALL_SECRET_ARNS" | wc -l | tr -d ' ') secrets" diff --git a/Access Broker/ecs-fargate-deployment/task-definition.json b/Access Broker/ecs-fargate-deployment/task-definition.json index 09f3fc5..17129d2 100644 --- a/Access Broker/ecs-fargate-deployment/task-definition.json +++ b/Access Broker/ecs-fargate-deployment/task-definition.json @@ -43,7 +43,7 @@ } ], "runtimePlatform": { - "cpuArchitecture": "X86_64", + "cpuArchitecture": "REPLACE_WITH_CPU_ARCH", "operatingSystemFamily": "LINUX" } } From 7eadc5c6c720147fa3dea15f4b9be9d095f7317e Mon Sep 17 00:00:00 2001 From: palakchheda <42711310+palakchheda@users.noreply.github.com> Date: Mon, 23 Feb 2026 16:13:44 -0800 Subject: [PATCH 12/12] ECS deployment for Broker + Session recording added --- session-recording/README.md | 278 +++- session-recording/ecs-fargate/README.md | 314 +++++ .../ecs-fargate/broker/Dockerfile | 73 + .../ecs-fargate/broker/start-broker.sh | 120 ++ .../ecs-fargate/broker/supervisord.conf | 37 + .../ecs-fargate/broker/token-generator.sh | 12 + session-recording/ecs-fargate/deploy.sh | 1172 +++++++++++++++++ .../ecs-fargate/guacsync/Dockerfile | 73 
+ .../ecs-fargate/guacsync/guacsync.sh | 48 + .../ecs-fargate/manage-secrets.sh | 440 +++++++ 10 files changed, 2501 insertions(+), 66 deletions(-) create mode 100644 session-recording/ecs-fargate/README.md create mode 100644 session-recording/ecs-fargate/broker/Dockerfile create mode 100644 session-recording/ecs-fargate/broker/start-broker.sh create mode 100644 session-recording/ecs-fargate/broker/supervisord.conf create mode 100644 session-recording/ecs-fargate/broker/token-generator.sh create mode 100644 session-recording/ecs-fargate/deploy.sh create mode 100644 session-recording/ecs-fargate/guacsync/Dockerfile create mode 100644 session-recording/ecs-fargate/guacsync/guacsync.sh create mode 100644 session-recording/ecs-fargate/manage-secrets.sh diff --git a/session-recording/README.md b/session-recording/README.md index 2520fe8..0352c86 100644 --- a/session-recording/README.md +++ b/session-recording/README.md @@ -1,115 +1,261 @@ -# 📽️ Session recording +# Session Recording These examples cover session recording features for SSH and RDP sessions curated by Britive Access Broker. -## 📜 Background +## Background -This example uses Britive Access Broker and Apache Guacamole to achieve proxied user session into servers and allows for video recording of the user session. These sessions are curated by Britive, are short-lived, and do not require end users to install any special tools or to copy credentials -- the credential rotation is handled entirely by Britive Access Broker. +This example uses Britive Access Broker and Apache Guacamole to achieve proxied user sessions into servers and allows for video recording of the user session. These sessions are curated by Britive, are short-lived, and do not require end users to install any special tools or to copy credentials — the credential rotation is handled entirely by Britive Access Broker. 
Traditional remote access tools often run as a local client application, however, the Guacamole client requires nothing more than a modern web browser when accessing one of the served protocols, such as RDP/SSH/VNC. By separating the frontend (web application) from the backend (`guacd`), Guacamole enables secure, clientless remote access through a browser without any additional plugins. -Apache Guacamole initiates RDP (Remote Desktop Protocol) connections **through the `guacd` daemon**, which acts as a proxy between the browser client and the target remote desktop (e.g., Windows machine). Here’s how the flow works and clarifies your confusion: +### How RDP/SSH Connections Work -## 🔧 Key Components +1. **User connects via browser** to the Guacamole web interface. +2. **Guacamole web server** sends connection parameters (hostname, port, credentials) to `guacd`. +3. **`guacd` opens the remote connection** (RDP/SSH) directly to the target machine. +4. **`guacd` encodes the session** into an optimized WebSocket stream for the browser. +5. **Browser renders the session** using JavaScript — no plugins required. -* **Guacamole Client (Web App)**: Runs in the browser. HTML5 and JavaScript frontend. -* **Guacamole Server (`guacd`)**: Native daemon that speaks RDP, VNC, or SSH. -* **Remote Desktop Machine**: Windows host with RDP service running. +The RDP/SSH connection is made by `guacd`, not the browser. `guacd` must have network access to the target host. -## 📡 Flow: How RDP Connection is Established +--- + +## Deployment Options + +Three deployment methods are provided. All share the same Guacamole-based architecture and broker scripts. 
+ +| Method | Directory | Best For | +|-----------------|----------------------------------------------|---------------------------------------------------| +| Docker Compose | [docker/](docker/) | Local development, single-host setups | +| CloudFormation | [cloudformation/](cloudformation/) | AWS production via declarative IaC | +| ECS Fargate | [ecs-fargate/](ecs-fargate/) | AWS production via automated deployment script | -1. **User connects via browser** to the Guacamole web interface (typically over HTTPS). -2. **Guacamole web server (Tomcat)** sends connection parameters (hostname/IP, port, username, password, etc.) to `guacd`. -3. **`guacd` opens an RDP connection** directly to the target machine using the RDP protocol. -4. **`guacd` encodes the RDP session** into an optimized stream using the Guacamole protocol (a stateless, websocket-based protocol). -5. **Web browser receives the stream** and renders the session using JavaScript (no plugin needed). +--- + +## [docker/](docker/) — Docker Compose -## 🔍 Clarification: Where the Connection Happens +Runs the full stack locally using Docker Compose. Suitable for development and single-server deployments. -* **The RDP connection is *not* made by the browser.** - The browser does not speak RDP and never connects to the Windows machine directly. +### Services -* **`guacd` must have network access to the target Windows machine.** - If it cannot reach the RDP port (default 3389) on the machine, the session will fail. +| Service | Image / Source | Purpose | +|-----------|-----------------------|--------------------------------------------| +| broker | Custom (Dockerfile) | Britive Access Broker + SSH server | +| guacd | `guacamole/guacd` | Native protocol daemon (libguac) | +| guacamole | `guacamole/guacamole` | Browser-based web UI | +| guacenc | Custom | Recording conversion (`.guac` → `.m4v`) | -* **The browser only talks to the Guacamole web app**, and that app forwards commands to `guacd`. 
+### Docker Quick Start -## 🔒 Security & Proxy-Like Behavior +1. Generate a JSON secret key: -Guacamole **feels like a proxy** from the user’s perspective because: + ```sh + echo -n "your-passphrase" | md5 # macOS + echo -n "your-passphrase" | md5sum # Linux + ``` -* You only need access to the Guacamole web app (not the RDP target directly). -* The RDP traffic is encapsulated in web protocols (e.g., WebSocket). -* You can enforce strong access controls at the Guacamole layer without exposing remote hosts. +2. Update `docker/docker-compose.yaml` with the generated key: -But **it’s not a traditional reverse proxy** — Guacamole is an active RDP client and protocol translator, not a TCP-level proxy. + ```yaml + guacamole: + environment: + JSON_SECRET_KEY: "" + ``` -## 🧠 Summary +3. Update `docker/broker/broker-config.yml` with your Britive tenant subdomain and broker token. -* **RDP connection is made by `guacd`**, not the browser. -* Browser receives rendered session stream via WebSocket. -* `guacd` **must** have network access to the RDP host. +4. Build and start: -If you're running Guacamole in Docker or in a restricted VPC, make sure the container/network can reach your RDP targets. + ```sh + cd docker/ + mkdir -m a+rw recordings + docker build -t broker-docker . + docker compose up -d + ``` + +See [docker/README.md](docker/README.md) for full setup instructions. --- -## » [example_user.json](user.json) +## [cloudformation/](cloudformation/) — AWS CloudFormation -Contains the JSON object to encode for use with the [Encrypted JSON Auth](https://guacamole.apache.org/doc/gug/json-auth.html) +Deploys the stack to AWS ECS using a CloudFormation template. Requires manual parameter setup but is fully declarative. 
-### Example +### Template: [cloudformation/stackamole.yaml](cloudformation/stackamole.yaml) -```py -{ - "username": "first.last@britive.com", - "expires": "1750000000000", # expiration in epoch time, including milliseconds - "connections": { - "connection-name": { # name to give the connection - "protocol": "ssh", # connection protocol, e.g. ssh, rdp, vnc, etc. - "parameters": { - "hostname": "1.2.3.4", # hostname or IP - "port": "22", # port - "username": "ubuntu", # username - "private-key": "...", # ssh private key, with substituted newlines, e.g. s/\n/\\n/g - "recording-path": "/home/guacd/recordings", # location for recordings - "recording-name": "${GUAC_DATE}-${GUAC_TIME}-${GUAC_USERNAME}-connection-name" # name of the recording - } - } - } -} -``` +Key parameters: + +| Parameter | Description | +|--------------------------|---------------------------------------------------------------------| +| `JsonSecretKey` | 32-character hex string for Guacamole JSON auth | +| `VpcId` | VPC to deploy into | +| `FirstSubnetId` | First subnet ID | +| `SecondSubnetId` | Second subnet ID | +| `LoadBalancerArn` | Existing ALB to attach the Guacamole listener to | +| `CertificateArn` | ACM certificate for the HTTPS listener | +| `ImageLocationGuacd` | ECR or Docker Hub image URI for guacd | +| `ImageLocationGuacamole` | ECR or Docker Hub image URI for guacamole | +| `ImageLocationGuacSync` | *(Optional)* ECR image for recording conversion + S3 sync | +| `S3BucketArnGuacSync` | *(Optional)* S3 bucket ARN where converted recordings are stored | + +See [cloudformation/DEPLOY.md](cloudformation/DEPLOY.md) for the full parameter reference and deployment walkthrough. + +### Optional: GuacSync + +To automatically convert `.guac` session recordings to `.m4v` and sync to S3: + +1. Build the GuacSync image from `cloudformation/guacsync/Dockerfile` +2. Push to ECR +3. 
Set `ImageLocationGuacSync` and `S3BucketArnGuacSync` in the CloudFormation parameters + +--- + +## [ecs-fargate/](ecs-fargate/) — AWS ECS Fargate (Automated) + +Deploys the full stack to AWS ECS Fargate using a single `deploy.sh` script. All AWS infrastructure is created automatically — ECR, EFS, ALB, Secrets Manager, Cloud Map service discovery, IAM roles, and ECS services. + +### Services + +| Service | Image / Source | Purpose | +|-----------|---------------------------------|--------------------------------------------| +| broker | Custom ECR image | Britive Access Broker + SSH server | +| guacd | `guacamole/guacd:1.5.5` | Native protocol daemon | +| guacamole | `guacamole/guacamole:1.5.5` | Browser-based web UI (behind ALB) | +| guacsync | Custom ECR image *(optional)* | Recording conversion + S3 sync | + +Services communicate via AWS Cloud Map private DNS (`guacd.britive.local`, `broker.britive.local`). Session recordings are stored on a shared EFS filesystem mounted at `/recordings`. + +### ECS Fargate Quick Start + +1. Copy and fill in `ecs-fargate/secrets.json`: + + ```sh + cp ecs-fargate/secrets.json.example ecs-fargate/secrets.json + ``` -> Additional connection parameter information: [configuring-connections](https://guacamole.apache.org/doc/gug/configuring-guacamole.html#configuring-connections) + Set `BRITIVE_TENANT` and `BRITIVE_TOKEN`. Leave `JSON_SECRET_KEY` empty to auto-generate. -## 📌 [encrypt-token.sh](encrypt-token.sh) +2. Place the broker JAR in `ecs-fargate/broker/`: -Sign and encrypt the `user.json` file, or any other file, with a JSON secret key. + ```sh + cp /path/to/britive-broker-2.0.0.jar ecs-fargate/broker/ + ``` -### Usage +3. 
Deploy: + + ```sh + cd ecs-fargate/ + chmod +x deploy.sh manage-secrets.sh + ./deploy.sh + ``` + +### Key Options + +| CLI Flag | Description | Default | +|---------------------------|--------------------------------------------------------|-----------------------------| +| `--broker-version <version>` | Broker JAR version to build and deploy | `2.0.0` | +| `--region <region>` | AWS region | `us-east-1` | +| `--cluster-name <name>` | ECS cluster name | `britive-session-recording` | +| `--acm-cert-arn <arn>` | Enable HTTPS on ALB (adds HTTP→HTTPS redirect) | *(HTTP/80 only)* | +| `--enable-guacsync` | Deploy the GuacSync recording conversion service | `false` | +| `--s3-bucket <name>` | S3 bucket for GuacSync output *(required with above)* | | +| `--vpc-id <vpc-id>` | Override auto-detected default VPC | *(auto-detect)* | +| `--subnets <ids>` | Comma-separated subnet IDs | *(auto-detect)* | +| `--use-secrets-json` | Force-load configuration from `secrets.json` | | + +### Secrets Management ```sh ./manage-secrets.sh list # list all secrets ./manage-secrets.sh get BRITIVE_TOKEN # retrieve a value ./manage-secrets.sh set MY_SECRET "value" # add or update a secret ./manage-secrets.sh sync # push secrets.json to Secrets Manager ./manage-secrets.sh restart-tasks # restart ECS tasks to pick up new values ./manage-secrets.sh update-iam # refresh IAM permissions after adding secrets ``` See [ecs-fargate/README.md](ecs-fargate/README.md) for the full documentation. --- ## [broker-scripts/](broker-scripts/) — Checkout Scripts -Sign and encrypt the `user.json` file, or any other file, with a JSON secret key. +Scripts called by the Britive broker during permission checkout and check-in. They generate signed, encrypted Guacamole tokens that authenticate users into sessions.
+| Script | Description | +|---------------------------------|--------------------------------------------------------------------------| +| `checkout-generic.sh` | Generic token generator — accepts a full connection JSON object | +| `rdp/checkout-rdp.sh` | RDP checkout — builds connection from hostname/domain params | +| `rdp/checkout-rdp.ps1` | RDP checkout (PowerShell) — creates a temporary local admin user | +| `rdp/checkin-rdp.ps1` | RDP check-in — removes the temporary user after the session ends | +| `rdp/checkout-ec2-rdp.sh` | RDP checkout for EC2 — retrieves the secret key from Secrets Manager | +| `ssh/checkout-ssh.sh` | SSH checkout — creates SSH user, key pair, and Guacamole token | +| `ssh/remote-checkout-ssh.sh` | SSH checkout — sets up a user on a remote host via SSH | +| `ssh/checkout-ec2-ssh.sh` | SSH checkout for EC2 — retrieves the encryption key from Secrets Manager | -#### Generate a JSON secret key +--- + +## Shared Utilities + +### [encrypt-token.sh](encrypt-token.sh) + +Signs and encrypts a JSON authentication object for Guacamole using HMAC-SHA256 signing and AES-128-CBC encryption. The output is a URL-encoded token passed to Guacamole via `?data=`. 
```sh -$ echo -n "britiveallthethings" | md5 # `md5` on macos, `md5sum` on linux -fb57d11d533339aea1e37c2a5a1cb92c +./encrypt-token.sh ``` -#### Encrypt a token with the generated JSON secret key +**Example:** ```sh -./encrypt-token.sh fb57d11d533339aea1e37c2a5a1cb92c user.json -{"token": "dziPjv9S6NgNgsA7V5TCsdUlxRb8OZO3h3Rbi52cfHS9An6hXgMfvpOMq3RLTBUFqC87j8RkN1jJ1zkyQa%2FgmiO07x2P%2FewLiKG86a60v%2BlUCv%2Blh9wd2ENMLjTnhmLhTWkpNgKHfQHQt%2F34K19------oSwJ%2FPLEiuSMvYO6Z72H5%2----------JiDI%2BZ6ap2ZKyB"} +# Generate a JSON secret key +echo -n "britiveallthethings" | md5 # → fb57d11d533339aea1e37c2a5a1cb92c + +# Encrypt a token +./encrypt-token.sh fb57d11d533339aea1e37c2a5a1cb92c example_user.json + +# Use the token in a URL +# https://guacamole.example.com/guacamole?data= ``` +### [example_user.json](example_user.json) + +Sample JSON object showing the structure expected by the Guacamole JSON auth extension: + +```json +{ + "username": "first.last@britive.com", + "expires": "1750000000000", + "connections": { + "my-ssh-session": { + "protocol": "ssh", + "parameters": { + "hostname": "1.2.3.4", + "port": "22", + "username": "ubuntu", + "private-key": "...", + "recording-path": "/recordings", + "recording-name": "${GUAC_DATE}-${GUAC_TIME}-${GUAC_USERNAME}-my-ssh-session" + } + } + } +} +``` + +> Full connection parameter reference: [configuring-connections](https://guacamole.apache.org/doc/gug/configuring-guacamole.html#configuring-connections) + --- + +## Broker Version + +All deployment methods support a configurable broker version. The default is **2.0.0**. 
+
+| Method          | How to set the version                                                                                                    |
+|-----------------|---------------------------------------------------------------------------------------------------------------------------|
+| Docker Compose  | Replace the JAR filename in `docker/broker/Dockerfile` and place the matching JAR in `docker/broker/`                     |
+| CloudFormation  | Update the ECR image tag in the `ImageLocation*` parameters                                                               |
+| ECS Fargate     | `./deploy.sh --broker-version 2.0.0` or set `BROKER_VERSION` at the top of `deploy.sh`                                    |
+
+Place the matching `britive-broker-<version>.jar` file in the `broker/` directory of your chosen deployment method before building.
diff --git a/session-recording/ecs-fargate/README.md b/session-recording/ecs-fargate/README.md
new file mode 100644
index 0000000..e44a17d
--- /dev/null
+++ b/session-recording/ecs-fargate/README.md
@@ -0,0 +1,314 @@
+# Britive Session Recording - AWS ECS Fargate Deployment
+
+This directory contains everything needed to deploy the Britive Session Recording stack on AWS ECS Fargate. It mirrors the Docker Compose setup under `../docker/` but targets AWS-native infrastructure with EFS for recording storage, Secrets Manager for credentials, and an ALB for Guacamole web access.
+ +## Architecture + +``` +Internet + │ + ▼ +[ALB :80/443] + │ + ▼ +[Guacamole :8080] ──(service discovery)──▶ [GuacD :4822] + │ │ + │ SSH session │ + ▼ │ +[Broker :22] ◀─────────────────────────────┘ + │ + ▼ +[EFS /recordings] ◀─── [GuacSync] (optional — converts .guac → .m4v, syncs to S3) +``` + +| Component | Image | Purpose | +|-------------|--------------------------------|--------------------------------------------| +| Guacamole | `guacamole/guacamole:1.5.5` | Web UI — browser-based SSH/RDP sessions | +| GuacD | `guacamole/guacd:1.5.5` | Native protocol daemon (libguac) | +| Broker | Custom (ECR) | Britive JIT access + SSH server | +| GuacSync | Custom (ECR) — optional | Converts recordings to .m4v, syncs to S3 | + +All components share an **EFS filesystem** mounted at `/recordings` for raw Guacamole recording files. + +## Prerequisites + +1. **AWS CLI** installed and configured + + ```bash + aws configure # or: export AWS_PROFILE=my-profile + aws sts get-caller-identity # verify + ``` + +2. **Docker** installed and running + + ```bash + docker info + ``` + +3. **jq** installed + + ```bash + brew install jq # macOS + apt install jq # Linux + ``` + +4. **britive-broker-\.jar** placed in `broker/` + + ```bash + cp /path/to/britive-broker-2.0.0.jar broker/ + ``` + +5. **Britive Broker Pool Token** from the Britive console: + - Navigate to: System Administration > Broker Pools + - Create or select a pool, then copy the token + +## Quick Start + +### Option 1: Using secrets.json (Recommended) + +1. Copy the example secrets file and fill in your values: + + ```bash + cp secrets.json.example secrets.json + ``` + +2. Edit `secrets.json`: + + ```json + { + "secrets": { + "BRITIVE_TENANT": { "value": "mycompany" }, + "BRITIVE_TOKEN": { "value": "your-token-here" }, + "JSON_SECRET_KEY": { "value": "" } + } + } + ``` + + > Leave `JSON_SECRET_KEY` empty — `deploy.sh` will auto-generate a secure 64-character key. + +3. 
Place the broker JAR in `broker/`: + + ```bash + cp /path/to/britive-broker-2.0.0.jar broker/ + ``` + +4. Run the deployment: + + ```bash + chmod +x deploy.sh manage-secrets.sh + ./deploy.sh + ``` + +### Option 2: Direct configuration + +Edit the CONFIGURATION section at the top of `deploy.sh` and set: + +```bash +BRITIVE_TENANT="mycompany" +BRITIVE_TOKEN="your-token-here" +``` + +Then run `./deploy.sh`. + +### Option 3: CLI flags + +```bash +./deploy.sh \ + --broker-version 2.0.0 \ + --region us-west-2 \ + --cluster-name my-recording-cluster \ + --acm-cert-arn arn:aws:acm:us-west-2:123456789:certificate/abc-123 +``` + +The script will: + +- Validate prerequisites and required files +- Auto-detect the default VPC and subnets (or use the values you provide) +- Create security groups with correct inter-service rules +- Build and push the broker Docker image to ECR +- Create an EFS filesystem with an access point at `/recordings` +- Store secrets in AWS Secrets Manager under `britive/session-recording/` +- Configure Cloud Map service discovery (`guacd.britive.local`, `broker.britive.local`) +- Create an ALB and target group for Guacamole +- Register ECS task definitions for all services +- Create ECS services and wait for them to stabilize +- Print the Guacamole URL when done + +## Configuration Options + +### deploy.sh CONFIGURATION section + +| Variable | Default | Description | +|-------------------|------------------------------|------------------------------------------| +| `BRITIVE_TENANT` | (placeholder) | Britive tenant subdomain | +| `BRITIVE_TOKEN` | (placeholder) | Broker pool token | +| `JSON_SECRET_KEY` | (auto-generated) | Guacamole JSON auth secret key | +| `BROKER_VERSION` | `2.0.0` | Broker JAR version | +| `AWS_REGION` | `us-east-1` | AWS region | +| `CLUSTER_NAME` | `britive-session-recording` | ECS cluster name | +| `ENABLE_GUACSYNC` | `false` | Enable recording conversion + S3 sync | +| `S3_BUCKET` | (empty) | S3 bucket for GuacSync | +| 
`ACM_CERT_ARN` | (empty) | ACM certificate ARN for HTTPS | +| `VPC_ID` | (auto-detect) | VPC to deploy into | +| `SUBNET_IDS` | (auto-detect) | Comma-separated subnet IDs | +| `DESIRED_COUNT` | `1` | Number of broker task replicas | + +### CLI flags + +| Flag | Description | +|--------------------------|--------------------------------------------------| +| `--broker-version ` | Override broker JAR version (default: `2.0.0`) | +| `--region ` | AWS region | +| `--cluster-name ` | ECS cluster name | +| `--enable-guacsync` | Enable GuacSync service | +| `--s3-bucket ` | S3 bucket for GuacSync (required with above) | +| `--acm-cert-arn ` | Enable HTTPS on ALB (creates HTTP→HTTPS redirect)| +| `--vpc-id ` | Override auto-detected VPC | +| `--subnets ` | Override auto-detected subnets | +| `--use-secrets-json` | Force load from secrets.json | + +## Secrets Management + +Secrets are stored in AWS Secrets Manager under `britive/session-recording/` and injected into ECS tasks at launch. + +```bash +# List all secrets +./manage-secrets.sh list + +# Add or update a secret +./manage-secrets.sh set MY_SECRET "value" "Description" + +# Read a secret value +./manage-secrets.sh get BRITIVE_TOKEN + +# Sync all secrets from secrets.json +./manage-secrets.sh sync + +# Restart tasks to pick up new secret values +./manage-secrets.sh restart-tasks + +# Update IAM permissions after adding new secrets +./manage-secrets.sh update-iam +``` + +### Environment variables for manage-secrets.sh + +| Variable | Default | +|------------------------|---------------------------------| +| `AWS_REGION` | `us-east-1` | +| `ECS_CLUSTER_NAME` | `britive-session-recording` | +| `ECS_BROKER_SERVICE` | `britive-session-recording-broker-service` | +| `ECS_GUACD_SERVICE` | `britive-session-recording-guacd-service` | +| `ECS_GUACAMOLE_SERVICE`| `britive-session-recording-guacamole-service` | + +## AWS Resources Created + +| Resource | Name / Identifier | 
+|-------------------------------|-----------------------------------------------------------| +| ECR repository (broker) | `britive-session-recording/broker` | +| ECR repository (guacsync) | `britive-session-recording/guacsync` (if enabled) | +| EFS filesystem | `britive-recordings-efs` | +| ECS cluster | `britive-session-recording` | +| ECS services | `…-broker-service`, `…-guacd-service`, `…-guacamole-service` | +| ALB | `britive-session-recording-alb` | +| Secrets Manager secrets | `britive/session-recording/BRITIVE_TOKEN`, etc. | +| Cloud Map namespace | `britive.local` (private) | +| IAM roles | `britive-sr-execution-role`, `britive-sr-task-role` | +| CloudWatch log groups | `/ecs/britive-session-recording/{broker,guacd,guacamole}` | +| Security groups | `britive-sr-{alb,guacamole,guacd,broker}-sg` | + +## Files + +| File / Directory | Description | +|-------------------------|----------------------------------------------------------| +| `deploy.sh` | Main deployment script | +| `manage-secrets.sh` | Secrets management CLI | +| `secrets.json` | Secrets configuration (fill in and keep private) | +| `secrets.json.example` | Template — copy to `secrets.json` to get started | +| `broker/Dockerfile` | Broker + SSH server container image | +| `broker/start-broker.sh`| Broker startup and config generation script | +| `broker/supervisord.conf`| Process supervisor (sshd + broker) | +| `broker/token-generator.sh` | Token provider called by the broker | +| `guacsync/Dockerfile` | GuacSync image (builds guacenc from source) | +| `guacsync/guacsync.sh` | Recording conversion and S3 sync loop | + +## Monitoring and Troubleshooting + +### View logs + +```bash +# Broker logs +aws logs tail /ecs/britive-session-recording/broker --follow --region us-east-1 + +# Guacamole logs +aws logs tail /ecs/britive-session-recording/guacamole --follow --region us-east-1 + +# GuacD logs +aws logs tail /ecs/britive-session-recording/guacd --follow --region us-east-1 +``` + +### Check 
service status + +```bash +aws ecs describe-services \ + --cluster britive-session-recording \ + --services britive-session-recording-broker-service \ + britive-session-recording-guacd-service \ + britive-session-recording-guacamole-service \ + --region us-east-1 +``` + +### List running tasks + +```bash +aws ecs list-tasks \ + --cluster britive-session-recording \ + --service-name britive-session-recording-broker-service \ + --region us-east-1 +``` + +### Common issues + +1. **Guacamole can't reach GuacD** — verify service discovery is working. The `GUACD_HOSTNAME` env var is set to `guacd.britive.local`. Check the Cloud Map namespace and guacd service registration. + +2. **Broker JAR not found** — ensure `broker/britive-broker-.jar` exists before running `deploy.sh`. + +3. **ECS tasks failing to pull secrets** — run `./manage-secrets.sh update-iam` to refresh IAM permissions, then restart tasks. + +4. **Recordings not appearing** — confirm EFS mount targets exist in the same subnets as your ECS tasks and security groups allow NFS (port 2049). 
+ +## Cleanup + +```bash +# Remove ECS services +aws ecs update-service --cluster britive-session-recording --service britive-session-recording-broker-service --desired-count 0 --region us-east-1 +aws ecs update-service --cluster britive-session-recording --service britive-session-recording-guacd-service --desired-count 0 --region us-east-1 +aws ecs update-service --cluster britive-session-recording --service britive-session-recording-guacamole-service --desired-count 0 --region us-east-1 + +aws ecs delete-service --cluster britive-session-recording --service britive-session-recording-broker-service --region us-east-1 +aws ecs delete-service --cluster britive-session-recording --service britive-session-recording-guacd-service --region us-east-1 +aws ecs delete-service --cluster britive-session-recording --service britive-session-recording-guacamole-service --region us-east-1 + +# Delete ECS cluster +aws ecs delete-cluster --cluster britive-session-recording --region us-east-1 + +# Delete secrets (use with caution) +./manage-secrets.sh delete BRITIVE_TOKEN +./manage-secrets.sh delete BRITIVE_TENANT +./manage-secrets.sh delete JSON_SECRET_KEY + +# Remove ECR images +aws ecr delete-repository --repository-name britive-session-recording/broker --force --region us-east-1 +``` + +> **Note:** EFS filesystem and ALB are not deleted by these commands. Delete them manually via the AWS console or CLI if no longer needed. + +## Security Considerations + +1. **HTTPS**: Always use the `--acm-cert-arn` flag (or set `ACM_CERT_ARN` in `deploy.sh`) for production deployments to encrypt Guacamole web traffic. + +2. **Secrets**: `secrets.json` contains sensitive values — add it to `.gitignore` and never commit it to source control. + +3. **SSH**: The broker container enables root SSH login for Guacamole compatibility. Restrict broker security group ingress to only the Guacamole security group (this is done automatically by `deploy.sh`). + +4. 
**Recordings**: Session recordings on EFS are encrypted at rest (enabled during filesystem creation). Restrict EFS access using security groups and IAM. diff --git a/session-recording/ecs-fargate/broker/Dockerfile b/session-recording/ecs-fargate/broker/Dockerfile new file mode 100644 index 0000000..66c9fe8 --- /dev/null +++ b/session-recording/ecs-fargate/broker/Dockerfile @@ -0,0 +1,73 @@ +# Britive Session Recording Broker - AWS ECS Fargate +# Runs the Britive Access Broker alongside an SSH server so that Guacamole +# can open SSH sessions through it and record them to the shared EFS volume. + +FROM --platform=linux/amd64 ubuntu:24.04 + +USER root:0 + +ENV DEBIAN_FRONTEND=noninteractive + +# Install required packages +RUN apt-get update && apt-get install -y \ + openjdk-21-jre-headless \ + curl wget unzip ca-certificates \ + supervisor jq \ + openssh-server \ + mysql-client \ + python3 python3-pip \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# Install Python packages required by Britive broker scripts +RUN pip3 install --break-system-packages \ + jmespath "britive>=4.1.3" pyjwt cryptography jinja2 pyyaml + +# Install kubectl (for clusters the broker manages) +RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \ + && chmod +x kubectl \ + && mv kubectl /usr/local/bin/ + +# Install AWS CLI v2 +RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o /tmp/awscliv2.zip \ + && unzip /tmp/awscliv2.zip -d /tmp \ + && /tmp/aws/install \ + && rm -rf /tmp/awscliv2.zip /tmp/aws + +# Create required directories +RUN mkdir -p \ + /root/broker/config \ + /root/broker/bootstrap \ + /root/broker/cache \ + /root/broker/secrets \ + /root/.kube \ + /root/.ssh \ + /var/log/supervisor \ + /var/run/sshd + +# Configure SSH — root login is required for Guacamole session recording +RUN echo 'root:root' | chpasswd \ + && echo "PermitRootLogin yes" >> /etc/ssh/sshd_config \ + && echo 
"PasswordAuthentication yes" >> /etc/ssh/sshd_config + +EXPOSE 22 + +# Broker version — override at build time: docker build --build-arg BROKER_VERSION=x.y.z +ARG BROKER_VERSION=2.0.0 + +# Copy Britive broker JAR +COPY britive-broker-${BROKER_VERSION}.jar /root/broker/ + +# Copy supervisor configuration and startup scripts +COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf +COPY start-broker.sh /root/start-broker.sh +COPY token-generator.sh /root/broker/bootstrap/token-generator.sh + +RUN chmod +x /root/start-broker.sh /root/broker/bootstrap/token-generator.sh + +# Health check — verify the broker Java process is running +HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \ + CMD pgrep -f "britive-broker" > /dev/null || exit 1 + +WORKDIR /root/ +CMD ["/usr/bin/supervisord", "-n", "-c", "/etc/supervisor/conf.d/supervisord.conf"] diff --git a/session-recording/ecs-fargate/broker/start-broker.sh b/session-recording/ecs-fargate/broker/start-broker.sh new file mode 100644 index 0000000..95e2d60 --- /dev/null +++ b/session-recording/ecs-fargate/broker/start-broker.sh @@ -0,0 +1,120 @@ +#!/bin/bash + +# Britive Session Recording Broker - ECS Fargate startup script +# Handles graceful shutdown, secrets management, SSH setup, and broker launch. + +# Signal handler for graceful shutdown +cleanup() { + echo "Received shutdown signal, cleaning up..." + if [ -n "$BROKER_PID" ]; then + kill -TERM "$BROKER_PID" 2>/dev/null + wait "$BROKER_PID" 2>/dev/null + fi + exit 0 +} + +trap cleanup SIGTERM SIGINT + +# Optional startup delay (default 5 seconds — gives sshd time to start first) +DELAY=${1:-5} +if [ "$DELAY" -gt 0 ]; then + echo "Waiting $DELAY seconds before starting broker..." 
+ sleep "$DELAY" +fi + +# Secrets directory for file-based secrets +SECRETS_DIR="${SECRETS_DIR:-/root/broker/secrets}" +mkdir -p "$SECRETS_DIR" + +echo "Setting up secrets directory: $SECRETS_DIR" + +# Write BRITIVE_TOKEN to file (ECS injects it from Secrets Manager as an env var) +if [ -n "$BRITIVE_TOKEN" ]; then + echo -n "$BRITIVE_TOKEN" > "$SECRETS_DIR/BRITIVE_TOKEN" + chmod 600 "$SECRETS_DIR/BRITIVE_TOKEN" + echo "BRITIVE_TOKEN written to secrets directory" +fi + +# Write JSON_SECRET_KEY to file so broker scripts can read it +if [ -n "$JSON_SECRET_KEY" ]; then + echo -n "$JSON_SECRET_KEY" > "$SECRETS_DIR/JSON_SECRET_KEY" + chmod 600 "$SECRETS_DIR/JSON_SECRET_KEY" + echo "JSON_SECRET_KEY written to secrets directory" +fi + +# Write any BROKER_* prefixed secrets to individual files +env | grep "^BROKER_" | while IFS='=' read -r key value; do + if [ -n "$value" ]; then + echo -n "$value" > "$SECRETS_DIR/$key" + chmod 600 "$SECRETS_DIR/$key" + echo "$key written to secrets directory" + fi +done + +echo "Secrets directory contents:" +ls -la "$SECRETS_DIR" 2>/dev/null || echo " (empty)" + +# Setup kubeconfig from base64-encoded value if provided +if [ -n "$KUBECONFIG_BASE64" ]; then + echo "Setting up kubeconfig from environment..." + mkdir -p /root/.kube + echo "$KUBECONFIG_BASE64" | base64 -d > /root/.kube/config + chmod 600 /root/.kube/config + export KUBECONFIG=/root/.kube/config + echo "Kubeconfig configured" +fi + +# Setup kubeconfig for EKS cluster if EKS variables provided +if [ -n "$EKS_CLUSTER_NAME" ] && [ -n "$AWS_REGION" ]; then + echo "Configuring kubectl for EKS cluster: $EKS_CLUSTER_NAME" + aws eks update-kubeconfig --name "$EKS_CLUSTER_NAME" --region "$AWS_REGION" + echo "EKS kubeconfig configured" +fi + +# Generate SSH host keys (safe to run multiple times) +ssh-keygen -A 2>/dev/null || true +echo "SSH host keys ready" + +# Generate broker-config.yml +# BRITIVE_TENANT and BRITIVE_TOKEN are injected from Secrets Manager at task launch. 
+if [ -z "$BRITIVE_TENANT" ]; then + echo "ERROR: BRITIVE_TENANT is not set. Configure it in secrets.json or deploy.sh." + exit 1 +fi +if [ -z "$BRITIVE_TOKEN" ]; then + echo "ERROR: BRITIVE_TOKEN is not set. Check Secrets Manager configuration." + exit 1 +fi + +mkdir -p /root/broker/config +cat > /root/broker/config/broker-config.yml << EOF +config: + version: 2 + bootstrap: + tenant_subdomain: ${BRITIVE_TENANT} + authentication_token: "${BRITIVE_TOKEN}" +EOF +chmod 600 /root/broker/config/broker-config.yml +echo "Broker config generated for tenant: $BRITIVE_TENANT" + +# Find the broker JAR — respects BROKER_VERSION if set; otherwise uses any available version +BROKER_VERSION="${BROKER_VERSION:-2.0.0}" +JAR_FILE="/root/broker/britive-broker-${BROKER_VERSION}.jar" + +if [ ! -f "$JAR_FILE" ]; then + echo "WARNING: Expected JAR not found at $JAR_FILE — searching for any broker JAR..." + JAR_FILE=$(find /root/broker -maxdepth 1 -name "britive-broker-*.jar" | head -1) + if [ -z "$JAR_FILE" ]; then + echo "ERROR: No britive-broker-*.jar found in /root/broker/" + exit 1 + fi + echo "Using: $JAR_FILE" +fi + +echo "Starting Britive broker: $JAR_FILE" +cd /root/broker +java -jar "$JAR_FILE" >> /var/log/britive-broker.log 2>&1 & +BROKER_PID=$! 
+
+echo "Broker started with PID: $BROKER_PID"
+wait "$BROKER_PID"
diff --git a/session-recording/ecs-fargate/broker/supervisord.conf b/session-recording/ecs-fargate/broker/supervisord.conf
new file mode 100644
index 0000000..71976f6
--- /dev/null
+++ b/session-recording/ecs-fargate/broker/supervisord.conf
@@ -0,0 +1,37 @@
+[supervisord]
+nodaemon=true
+logfile=/dev/null
+logfile_maxbytes=0
+stopwaitsecs=10
+
+[program:sshd]
+command=/usr/sbin/sshd -D -e
+autostart=true
+autorestart=true
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
+stdout_events_enabled=true
+stderr_events_enabled=true
+stopsignal=TERM
+stopasgroup=true
+killasgroup=true
+priority=10
+
+[program:britive-broker]
+command=/root/start-broker.sh 5
+autostart=true
+autorestart=true
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
+stderr_logfile=/dev/stderr
+stderr_logfile_maxbytes=0
+stdout_events_enabled=true
+stderr_events_enabled=true
+stopsignal=TERM
+stopasgroup=true
+killasgroup=true
+priority=20
+; Pass ECS-injected secrets into the broker process environment (KEY="%(ENV_VAR)s" pairs)
+environment=BRITIVE_TOKEN="%(ENV_BRITIVE_TOKEN)s",BRITIVE_TENANT="%(ENV_BRITIVE_TENANT)s",JSON_SECRET_KEY="%(ENV_JSON_SECRET_KEY)s"
diff --git a/session-recording/ecs-fargate/broker/token-generator.sh b/session-recording/ecs-fargate/broker/token-generator.sh
new file mode 100755
index 0000000..d4318fa
--- /dev/null
+++ b/session-recording/ecs-fargate/broker/token-generator.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+# Token generator for ECS Fargate — reads from secrets file (preferred) or env var fallback.
+# The broker calls this script to obtain the Britive authentication token.
+
+SECRET_FILE="/root/broker/secrets/BRITIVE_TOKEN"
+
+if [ -f "$SECRET_FILE" ]; then
+    cat "$SECRET_FILE"
+else
+    echo "$BRITIVE_TOKEN"
+fi
diff --git a/session-recording/ecs-fargate/deploy.sh b/session-recording/ecs-fargate/deploy.sh
new file mode 100755
index 0000000..7e6b7a4
--- /dev/null
+++ b/session-recording/ecs-fargate/deploy.sh
@@ -0,0 +1,1172 @@
+#!/bin/bash
+
+# Britive Session Recording - AWS ECS Fargate Deployment Script
+# Deploys Guacamole (guacd + web app), Britive broker, and optional GuacSync
+# to AWS ECS Fargate with service discovery, EFS for recordings, and an ALB.
+#
+# Prerequisites:
+#   1. AWS CLI installed and configured (aws configure)
+#   2. Docker installed and running
+#   3. jq installed
+#   4. britive-broker-<version>.jar placed in the broker/ directory
+#
+# Usage:
+#   ./deploy.sh [options]
+#
+# Options:
+#   --broker-version <version>   Broker JAR version to use (default: 2.0.0)
+#   --region <region>            AWS region (default: us-east-1)
+#   --cluster-name <name>        ECS cluster name (default: britive-session-recording)
+#   --enable-guacsync            Enable GuacSync recording conversion and S3 sync
+#   --s3-bucket <bucket>         S3 bucket for GuacSync (required when --enable-guacsync is set)
+#   --acm-cert-arn <arn>         ACM certificate ARN — enables HTTPS/443 on the ALB
+#   --vpc-id <vpc-id>            VPC ID (default: auto-detect default VPC)
+#   --subnets <ids>              Comma-separated subnet IDs (default: auto-detect)
+#   --use-secrets-json           Force-load configuration from secrets.json
+
+set -e
+
+#==============================================================================
+# CONFIGURATION - MODIFY THESE VALUES
+#==============================================================================
+
+# Secrets Configuration
+# Option 1 (recommended): Use secrets.json — set BRITIVE_TENANT, BRITIVE_TOKEN, and
+#   JSON_SECRET_KEY there. All are stored in AWS Secrets Manager and auto-injected
+#   into tasks at runtime. Leave the values below as placeholders when using Option 1.
+#
+# Option 2 (direct): Set values here if not using secrets.json.
+# BRITIVE_TENANT: subdomain of your Britive URL (e.g. "mycompany" for mycompany.britive-app.com) +# BRITIVE_TOKEN: broker pool token from System Administration > Broker Pools +# JSON_SECRET_KEY: secret key for Guacamole JSON auth — auto-generated if left empty +BRITIVE_TENANT="your-tenant-subdomain-here" +BRITIVE_TOKEN="your-britive-token-here" +JSON_SECRET_KEY="" # Leave empty to auto-generate a random 64-character hex key + +# Broker version — must match the JAR file in broker/ (broker/britive-broker-.jar) +BROKER_VERSION="2.0.0" + +# Secrets are stored in AWS Secrets Manager under this prefix +SECRETS_PREFIX="britive/session-recording" + +# AWS Configuration +AWS_REGION="${AWS_REGION:-us-east-1}" + +# ECS Configuration +CLUSTER_NAME="britive-session-recording" +ECR_BROKER_REPO="britive-session-recording/broker" +ECR_GUACSYNC_REPO="britive-session-recording/guacsync" +IMAGE_TAG="latest" + +# EFS — shared volume for session recordings +EFS_NAME="britive-recordings-efs" + +# Service Discovery — private DNS namespace for inter-service communication +NAMESPACE="britive.local" + +# GuacSync — optional recording conversion and S3 sync service +ENABLE_GUACSYNC=false +S3_BUCKET="" + +# ALB — optional ACM certificate ARN enables HTTPS/443; without it HTTP/80 is used +ACM_CERT_ARN="" + +# Networking — leave empty to auto-detect default VPC and subnets +VPC_ID="" +SUBNET_IDS="" # Comma-separated, e.g. 
"subnet-aaa,subnet-bbb" + +# Number of broker replicas +DESIRED_COUNT=1 + +#============================================================================== +# DO NOT MODIFY BELOW THIS LINE +#============================================================================== + +# Parse command-line arguments +while [[ "$#" -gt 0 ]]; do + case $1 in + --broker-version) + if [ -z "$2" ] || [[ "$2" == --* ]]; then + echo "ERROR: --broker-version requires a value" + exit 1 + fi + BROKER_VERSION="$2"; shift ;; + --region) + AWS_REGION="$2"; shift ;; + --cluster-name) + CLUSTER_NAME="$2"; shift ;; + --enable-guacsync) + ENABLE_GUACSYNC=true ;; + --s3-bucket) + S3_BUCKET="$2"; shift ;; + --acm-cert-arn) + ACM_CERT_ARN="$2"; shift ;; + --vpc-id) + VPC_ID="$2"; shift ;; + --subnets) + SUBNET_IDS="$2"; shift ;; + --use-secrets-json) + FORCE_SECRETS_FILE=true ;; + *) + echo "Unknown option: $1" + echo "Usage: ./deploy.sh [--broker-version ] [--region ] [--cluster-name ]" + echo " [--enable-guacsync] [--s3-bucket ] [--acm-cert-arn ]" + echo " [--vpc-id ] [--subnets ] [--use-secrets-json]" + exit 1 ;; + esac + shift +done + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +log_info() { echo -e "${BLUE}[INFO]${NC} $1"; } +log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; } +log_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; } +log_error() { echo -e "${RED}[ERROR]${NC} $1"; } + +# Derived names (built after CLI parsing so --cluster-name takes effect) +ECS_BROKER_SERVICE="${CLUSTER_NAME}-broker-service" +ECS_GUACD_SERVICE="${CLUSTER_NAME}-guacd-service" +ECS_GUACAMOLE_SERVICE="${CLUSTER_NAME}-guacamole-service" +ECS_GUACSYNC_SERVICE="${CLUSTER_NAME}-guacsync-service" +EXECUTION_ROLE_NAME="britive-sr-execution-role" +TASK_ROLE_NAME="britive-sr-task-role" + +# Determine whether to load from secrets.json +USE_SECRETS_FILE=false +if [ "${FORCE_SECRETS_FILE:-false}" = true ] || [ -f "secrets.json" ]; then + log_success 
"Found secrets.json — will use for secrets configuration" + USE_SECRETS_FILE=true +fi + +# When not using secrets.json, validate required values +if [ "$USE_SECRETS_FILE" = false ]; then + if [ "$BRITIVE_TENANT" = "your-tenant-subdomain-here" ]; then + log_error "Please set BRITIVE_TENANT in this script, or add it to secrets.json (recommended)" + exit 1 + fi + if [ "$BRITIVE_TOKEN" = "your-britive-token-here" ]; then + log_error "Please set BRITIVE_TOKEN in this script, or add it to secrets.json (recommended)" + exit 1 + fi +fi + +# Validate GuacSync options +if [ "$ENABLE_GUACSYNC" = true ] && [ -z "$S3_BUCKET" ]; then + log_error "--enable-guacsync requires --s3-bucket " + exit 1 +fi + +# Check for required broker JAR +log_info "Checking required files..." +log_info "Using broker version: $BROKER_VERSION" +if [ ! -f "broker/britive-broker-${BROKER_VERSION}.jar" ]; then + log_error "broker/britive-broker-${BROKER_VERSION}.jar not found" + log_info "Place the broker JAR in the broker/ directory and try again" + exit 1 +fi +log_success "Broker JAR found: broker/britive-broker-${BROKER_VERSION}.jar" + +# Check AWS CLI +log_info "Checking AWS CLI..." +if ! command -v aws &> /dev/null; then + log_error "AWS CLI not found. Install it:" + log_info " macOS: brew install awscli" + log_info " Linux: pip install awscli" + exit 1 +fi +if ! aws sts get-caller-identity &> /dev/null; then + log_error "AWS credentials not configured. Please run: aws configure" + exit 1 +fi +AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) +log_success "AWS CLI configured — Account: $AWS_ACCOUNT_ID, Region: $AWS_REGION" + +# Check jq +if ! command -v jq &> /dev/null; then + log_error "jq not found. Install it: brew install jq (macOS) or apt install jq (Linux)" + exit 1 +fi +log_success "jq is available" + +# Check Docker +log_info "Checking Docker..." +if ! command -v docker &> /dev/null; then + log_error "Docker not found. 
Please install Docker Desktop" + exit 1 +fi +if ! docker info &> /dev/null; then + log_error "Docker daemon not running. Please start Docker Desktop" + exit 1 +fi +log_success "Docker is running" + +#------------------------------------------------------------------------------ +# VPC and subnet detection +#------------------------------------------------------------------------------ + +if [ -z "$VPC_ID" ]; then + log_info "Auto-detecting default VPC..." + VPC_ID=$(aws ec2 describe-vpcs \ + --filters "Name=isDefault,Values=true" \ + --query "Vpcs[0].VpcId" \ + --output text \ + --region "$AWS_REGION" 2>/dev/null || echo "") + if [ -z "$VPC_ID" ] || [ "$VPC_ID" = "None" ]; then + log_error "No default VPC found. Please set VPC_ID in this script or use --vpc-id." + exit 1 + fi + log_success "Using default VPC: $VPC_ID" +fi + +if [ -z "$SUBNET_IDS" ]; then + log_info "Auto-detecting subnets in VPC $VPC_ID..." + SUBNET_IDS=$(aws ec2 describe-subnets \ + --filters "Name=vpc-id,Values=$VPC_ID" \ + --query "Subnets[*].SubnetId" \ + --output text \ + --region "$AWS_REGION" | tr '\t' ',') + if [ -z "$SUBNET_IDS" ]; then + log_error "No subnets found in VPC. Please set SUBNET_IDS or use --subnets." + exit 1 + fi + log_success "Using subnets: $SUBNET_IDS" +fi + +# Build JSON subnet array for ECS network config (use first 2 subnets) +SUBNET_ARRAY=$(echo "$SUBNET_IDS" | tr ',' '\n' | head -2 | jq -R . | jq -s .) + +#------------------------------------------------------------------------------ +# Security groups +#------------------------------------------------------------------------------ + +log_info "Setting up security groups..." 

# Look up a security group by (name, vpc) and create it if absent.
# Echoes the GroupId either way so callers can capture it with $( ).
create_sg_if_missing() {
    local name="$1" desc="$2"
    local existing
    existing=$(aws ec2 describe-security-groups \
        --filters "Name=group-name,Values=${name}" "Name=vpc-id,Values=${VPC_ID}" \
        --query "SecurityGroups[0].GroupId" \
        --output text \
        --region "$AWS_REGION" 2>/dev/null || echo "None")
    if [ "$existing" = "None" ] || [ -z "$existing" ]; then
        aws ec2 create-security-group \
            --group-name "$name" \
            --description "$desc" \
            --vpc-id "$VPC_ID" \
            --query "GroupId" \
            --output text \
            --region "$AWS_REGION"
    else
        echo "$existing"
    fi
}

# One SG per tier: internet-facing ALB, Guacamole web app, guacd proxy, broker.
ALB_SG_ID=$(create_sg_if_missing "britive-sr-alb-sg" "Britive session recording ALB")
GUACAMOLE_SG_ID=$(create_sg_if_missing "britive-sr-guacamole-sg" "Britive session recording Guacamole")
GUACD_SG_ID=$(create_sg_if_missing "britive-sr-guacd-sg" "Britive session recording GuacD")
BROKER_SG_ID=$(create_sg_if_missing "britive-sr-broker-sg" "Britive session recording Broker")

log_success "Security groups ready:"
log_info "  ALB:       $ALB_SG_ID"
log_info "  Guacamole: $GUACAMOLE_SG_ID"
log_info "  GuacD:     $GUACD_SG_ID"
log_info "  Broker:    $BROKER_SG_ID"

# Helper — adds an ingress rule only if it doesn't already exist
# (authorize-security-group-ingress fails on duplicates; the failure is
# swallowed on purpose to keep the script idempotent).
add_ingress_if_missing() {
    local sg_id="$1" proto="$2" port="$3" source="$4"
    aws ec2 authorize-security-group-ingress \
        --group-id "$sg_id" \
        --protocol "$proto" \
        --port "$port" \
        --source-group "$source" \
        --region "$AWS_REGION" 2>/dev/null || true
}

# Same as above but for a CIDR source instead of another security group.
add_ingress_cidr_if_missing() {
    local sg_id="$1" proto="$2" port="$3" cidr="$4"
    aws ec2 authorize-security-group-ingress \
        --group-id "$sg_id" \
        --protocol "$proto" \
        --port "$port" \
        --cidr "$cidr" \
        --region "$AWS_REGION" 2>/dev/null || true
}

# ALB: inbound HTTP and HTTPS from the internet
add_ingress_cidr_if_missing "$ALB_SG_ID" tcp 80 "0.0.0.0/0"
add_ingress_cidr_if_missing "$ALB_SG_ID" tcp 443 "0.0.0.0/0"

# Guacamole: inbound from ALB only
add_ingress_if_missing "$GUACAMOLE_SG_ID" tcp 8080 "$ALB_SG_ID"

# GuacD: inbound from Guacamole only
add_ingress_if_missing "$GUACD_SG_ID" tcp 4822 "$GUACAMOLE_SG_ID"

# Broker: inbound SSH from Guacamole (for proxied SSH sessions)
add_ingress_if_missing "$BROKER_SG_ID" tcp 22 "$GUACAMOLE_SG_ID"

log_success "Security group rules configured"

#------------------------------------------------------------------------------
# ECR — build and push broker image
#------------------------------------------------------------------------------

log_info "Setting up ECR repository for broker..."
ECR_BROKER_URI="${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_BROKER_REPO}"

if ! aws ecr describe-repositories --repository-names "$ECR_BROKER_REPO" --region "$AWS_REGION" &> /dev/null; then
    log_info "Creating ECR repository: $ECR_BROKER_REPO"
    aws ecr create-repository --repository-name "$ECR_BROKER_REPO" --region "$AWS_REGION" > /dev/null
fi
log_success "ECR broker repository ready"

log_info "Authenticating Docker with ECR..."
aws ecr get-login-password --region "$AWS_REGION" | \
    docker login --username AWS --password-stdin "${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com"
log_success "Docker authenticated with ECR"

# Build for amd64 explicitly — a developer on Apple Silicon would otherwise
# produce an arm64 image that the (X86_64) Fargate task definitions reject.
log_info "Building broker Docker image (AMD64, version: $BROKER_VERSION)..."
docker build \
    --platform linux/amd64 \
    --build-arg BROKER_VERSION="$BROKER_VERSION" \
    -t "britive-sr-broker:${IMAGE_TAG}" \
    broker/

# Belt-and-braces: verify the built image's architecture before pushing.
ARCH=$(docker inspect "britive-sr-broker:${IMAGE_TAG}" --format '{{.Architecture}}')
if [ "$ARCH" != "amd64" ]; then
    log_error "Broker image architecture is $ARCH but ECS Fargate requires amd64."
    exit 1
fi
log_success "Broker image built (amd64)"

# Quick local smoke test
log_info "Testing broker image locally..."
# Start the container with dummy credentials just to confirm the entrypoint
# survives startup; a failure here is logged as a warning, not fatal.
docker run -d --name test-sr-broker \
    -e BRITIVE_TOKEN="test" \
    -e BRITIVE_TENANT="test" \
    "britive-sr-broker:${IMAGE_TAG}"
sleep 5
if docker ps | grep -q test-sr-broker; then
    log_success "Broker container started successfully"
    docker logs test-sr-broker 2>&1 | head -10 || true
else
    log_warning "Broker container exited early — check image and start-broker.sh"
    docker logs test-sr-broker 2>&1 || true
fi
docker stop test-sr-broker 2>/dev/null || true
docker rm test-sr-broker 2>/dev/null || true

log_info "Pushing broker image to ECR..."
docker tag "britive-sr-broker:${IMAGE_TAG}" "${ECR_BROKER_URI}:${IMAGE_TAG}"
docker push "${ECR_BROKER_URI}:${IMAGE_TAG}"
log_success "Broker image pushed to ECR"

# GuacSync image (optional) — only built/pushed when --enable-guacsync is set.
ECR_GUACSYNC_URI="${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_REGION}.amazonaws.com/${ECR_GUACSYNC_REPO}"
if [ "$ENABLE_GUACSYNC" = true ]; then
    log_info "Setting up ECR repository for GuacSync..."
    if ! aws ecr describe-repositories --repository-names "$ECR_GUACSYNC_REPO" --region "$AWS_REGION" &> /dev/null; then
        aws ecr create-repository --repository-name "$ECR_GUACSYNC_REPO" --region "$AWS_REGION" > /dev/null
    fi
    log_info "Building GuacSync image..."
    docker build --platform linux/amd64 -t "britive-sr-guacsync:${IMAGE_TAG}" guacsync/
    docker tag "britive-sr-guacsync:${IMAGE_TAG}" "${ECR_GUACSYNC_URI}:${IMAGE_TAG}"
    docker push "${ECR_GUACSYNC_URI}:${IMAGE_TAG}"
    log_success "GuacSync image pushed to ECR"
fi

#------------------------------------------------------------------------------
# EFS — shared recording storage
#------------------------------------------------------------------------------

log_info "Setting up EFS filesystem for recordings..."

# Look up the recordings filesystem by its Name tag; "None" / empty = absent.
EFS_ID=$(aws efs describe-file-systems \
    --query "FileSystems[?Name=='${EFS_NAME}'].FileSystemId | [0]" \
    --output text \
    --region "$AWS_REGION" 2>/dev/null || echo "None")

if [ "$EFS_ID" = "None" ] || [ -z "$EFS_ID" ]; then
    log_info "Creating EFS filesystem: $EFS_NAME"
    EFS_ID=$(aws efs create-file-system \
        --encrypted \
        --tags "Key=Name,Value=${EFS_NAME}" \
        --query "FileSystemId" \
        --output text \
        --region "$AWS_REGION")
    log_success "EFS created: $EFS_ID"

    # Wait for filesystem to become available
    # NOTE(review): the AWS CLI does not appear to ship an `aws efs wait`
    # subcommand, so this branch likely always falls back to the 15s sleep —
    # confirm, or replace with a describe-file-systems polling loop.
    log_info "Waiting for EFS to become available..."
    aws efs wait file-system-available --file-system-id "$EFS_ID" --region "$AWS_REGION" 2>/dev/null || sleep 15
else
    log_success "EFS exists: $EFS_ID"
fi

# Create EFS access point for /recordings — all services mount through this
# access point so they share one directory regardless of container user.
EFS_AP_ID=$(aws efs describe-access-points \
    --file-system-id "$EFS_ID" \
    --query "AccessPoints[?RootDirectory.Path=='/recordings'].AccessPointId | [0]" \
    --output text \
    --region "$AWS_REGION" 2>/dev/null || echo "None")

if [ "$EFS_AP_ID" = "None" ] || [ -z "$EFS_AP_ID" ]; then
    log_info "Creating EFS access point at /recordings..."
    EFS_AP_ID=$(aws efs create-access-point \
        --file-system-id "$EFS_ID" \
        --root-directory "Path=/recordings,CreationInfo={OwnerUid=0,OwnerGid=0,Permissions=755}" \
        --query "AccessPointId" \
        --output text \
        --region "$AWS_REGION")
    log_success "EFS access point created: $EFS_AP_ID"
else
    log_success "EFS access point exists: $EFS_AP_ID"
fi

# Create EFS mount targets in each subnet (idempotent)
# NOTE(review): the mount targets use the guacd SG, but no NFS (tcp/2049)
# ingress rule is added anywhere in this script — tasks running under the
# broker/guacamole SGs may be unable to mount EFS. Verify before shipping.
log_info "Creating EFS mount targets in subnets..."
for subnet in $(echo "$SUBNET_IDS" | tr ',' '\n' | head -2); do
    # create-mount-target errors if one already exists in the AZ — ignored.
    aws efs create-mount-target \
        --file-system-id "$EFS_ID" \
        --subnet-id "$subnet" \
        --security-groups "$GUACD_SG_ID" \
        --region "$AWS_REGION" 2>/dev/null || true
done
log_success "EFS mount targets ready"

#------------------------------------------------------------------------------
# CloudWatch log groups
#------------------------------------------------------------------------------

log_info "Setting up CloudWatch log groups..."
for svc in broker guacd guacamole guacsync; do
    aws logs create-log-group \
        --log-group-name "/ecs/britive-session-recording/${svc}" \
        --region "$AWS_REGION" 2>/dev/null || true
done
log_success "CloudWatch log groups ready"

#------------------------------------------------------------------------------
# IAM roles
#------------------------------------------------------------------------------

log_info "Setting up IAM roles..."

# Trust policy letting ECS tasks assume these roles — shared by both roles.
TRUST_POLICY='{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Principal": { "Service": "ecs-tasks.amazonaws.com" },
    "Action": "sts:AssumeRole"
  }]
}'

# Execution role — used by the ECS agent to pull images / write logs / read
# secrets. Gets the AWS-managed ECSTaskExecutionRolePolicy attached.
if ! aws iam get-role --role-name "$EXECUTION_ROLE_NAME" &> /dev/null; then
    log_info "Creating ECS task execution role: $EXECUTION_ROLE_NAME"
    echo "$TRUST_POLICY" > /tmp/sr-trust-policy.json
    aws iam create-role \
        --role-name "$EXECUTION_ROLE_NAME" \
        --assume-role-policy-document file:///tmp/sr-trust-policy.json > /dev/null
    aws iam attach-role-policy \
        --role-name "$EXECUTION_ROLE_NAME" \
        --policy-arn "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy" 
    rm -f /tmp/sr-trust-policy.json
    log_success "Execution role created"
else
    log_success "Execution role exists"
fi
EXECUTION_ROLE_ARN="arn:aws:iam::${AWS_ACCOUNT_ID}:role/${EXECUTION_ROLE_NAME}"

# Task role — assumed by the application containers themselves.
if ! aws iam get-role --role-name "$TASK_ROLE_NAME" &> /dev/null; then
    log_info "Creating ECS task role: $TASK_ROLE_NAME"
    echo "$TRUST_POLICY" > /tmp/sr-trust-policy.json
    aws iam create-role \
        --role-name "$TASK_ROLE_NAME" \
        --assume-role-policy-document file:///tmp/sr-trust-policy.json > /dev/null
    rm -f /tmp/sr-trust-policy.json
    log_success "Task role created"
else
    log_success "Task role exists"
fi
TASK_ROLE_ARN="arn:aws:iam::${AWS_ACCOUNT_ID}:role/${TASK_ROLE_NAME}"

# Attach S3 policy to task role if GuacSync is enabled (it uploads converted
# recordings to ${S3_BUCKET}).
if [ "$ENABLE_GUACSYNC" = true ]; then
    log_info "Adding S3 permissions to task role for GuacSync..."
    cat > /tmp/sr-s3-policy.json << EOF
{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Action": ["s3:PutObject", "s3:GetObject", "s3:ListBucket"],
    "Resource": [
      "arn:aws:s3:::${S3_BUCKET}",
      "arn:aws:s3:::${S3_BUCKET}/*"
    ]
  }]
}
EOF
    aws iam put-role-policy \
        --role-name "$TASK_ROLE_NAME" \
        --policy-name "britive-sr-s3-access" \
        --policy-document file:///tmp/sr-s3-policy.json
    rm -f /tmp/sr-s3-policy.json
    log_success "S3 permissions attached to task role"
fi

# ECS service-linked role (required once per AWS account)
aws iam create-service-linked-role --aws-service-name ecs.amazonaws.com 2>/dev/null || true

#------------------------------------------------------------------------------
# Secrets Manager
#------------------------------------------------------------------------------

log_info "Setting up Secrets Manager secrets..."

# Create a secret under ${SECRETS_PREFIX}/ or update it if it already exists.
# Args: $1 key (short name), $2 value, $3 description.
create_or_update_secret() {
    local key="$1" value="$2" desc="$3"
    local full_name="${SECRETS_PREFIX}/${key}"
    if aws secretsmanager describe-secret --secret-id "$full_name" --region "$AWS_REGION" &> /dev/null; then
        aws secretsmanager update-secret \
            --secret-id "$full_name" \
            --secret-string "$value" \
            --region "$AWS_REGION" > /dev/null
    else
        aws secretsmanager create-secret \
            --name "$full_name" \
            --description "$desc" \
            --secret-string "$value" \
            --region "$AWS_REGION" > /dev/null
    fi
    log_success "Secret configured: $key"
}

# Auto-generate JSON_SECRET_KEY if not provided: prefer secrets.json, else
# generate a random 32-byte (64 hex chars) key for Guacamole JSON auth.
if [ -z "$JSON_SECRET_KEY" ]; then
    if [ "$USE_SECRETS_FILE" = true ]; then
        JSON_SECRET_KEY=$(jq -r '.secrets.JSON_SECRET_KEY.value // ""' secrets.json 2>/dev/null || echo "")
    fi
    if [ -z "$JSON_SECRET_KEY" ]; then
        log_info "Generating JSON_SECRET_KEY (not found in secrets.json)..."
        JSON_SECRET_KEY=$(openssl rand -hex 32)
        log_success "JSON_SECRET_KEY generated (64-char hex)"
    fi
fi

if [ "$USE_SECRETS_FILE" = true ]; then
    log_info "Processing secrets from secrets.json..."
    SECRETS_JSON=$(cat secrets.json)

    # Upload every populated secret, skipping placeholder values. The `if`
    # guards (instead of `[ ... ] && cmd`) keep an empty value on the last
    # iteration from failing the while loop / pipeline under `set -e`.
    echo "$SECRETS_JSON" | jq -r '.secrets | to_entries[] | select(.value.value != "" and .value.value != "your-britive-token-here" and .value.value != "your-tenant-subdomain-here") | @json' | while read -r entry; do
        key=$(echo "$entry" | jq -r '.key')
        value=$(echo "$entry" | jq -r '.value.value')
        desc=$(echo "$entry" | jq -r '.value.description // "Britive session recording secret"')
        if [ -n "$value" ]; then
            create_or_update_secret "$key" "$value" "$desc"
        fi
    done

    echo "$SECRETS_JSON" | jq -r '.custom_secrets | to_entries[] | select(.value != "" and .value != null) | @json' | while read -r entry; do
        key=$(echo "$entry" | jq -r '.key')
        value=$(echo "$entry" | jq -r '.value')
        if [ -n "$value" ]; then
            create_or_update_secret "$key" "$value" "Custom secret"
        fi
    done
else
    # No secrets.json — take the two core values from variables, skipping the
    # placeholder defaults.
    if [ "$BRITIVE_TENANT" != "your-tenant-subdomain-here" ]; then
        create_or_update_secret "BRITIVE_TENANT" "$BRITIVE_TENANT" "Britive tenant subdomain"
    fi
    if [ "$BRITIVE_TOKEN" != "your-britive-token-here" ]; then
        create_or_update_secret "BRITIVE_TOKEN" "$BRITIVE_TOKEN" "Britive broker pool token"
    fi
fi

# Always ensure JSON_SECRET_KEY is in Secrets Manager
create_or_update_secret "JSON_SECRET_KEY" "$JSON_SECRET_KEY" "Guacamole JSON authentication secret key"

# Collect all secret ARNs for IAM policy and task definitions
ALL_SECRET_ARNS=$(aws secretsmanager list-secrets \
    --filter Key=name,Values="${SECRETS_PREFIX}" \
    --query "SecretList[*].ARN" \
    --output text \
    --region "$AWS_REGION" | tr '\t' '\n')

if [ -z "$ALL_SECRET_ARNS" ]; then
    log_error "No secrets found after setup. Check Secrets Manager and try again."
    exit 1
fi

ARN_ARRAY=$(echo "$ALL_SECRET_ARNS" | jq -R . | jq -s .)
# ALL_SECRET_ARNS is guaranteed non-empty here, so grep -c always succeeds.
# (The previous `|| echo 0` fallback was unreachable, and on an empty list
# would have produced "0\n0" — grep prints its own 0 before exiting non-zero.)
SECRET_COUNT=$(printf '%s\n' "$ALL_SECRET_ARNS" | grep -c .)
log_success "Total secrets configured: $SECRET_COUNT"

# Grant execution role access to all secrets
log_info "Updating IAM execution role with Secrets Manager permissions..."
cat > /tmp/sr-secrets-policy.json << EOF
{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Action": ["secretsmanager:GetSecretValue"],
    "Resource": ${ARN_ARRAY}
  }]
}
EOF
aws iam put-role-policy \
    --role-name "$EXECUTION_ROLE_NAME" \
    --policy-name "britive-sr-secrets-access" \
    --policy-document file:///tmp/sr-secrets-policy.json 2>/dev/null || true
rm -f /tmp/sr-secrets-policy.json
log_success "IAM permissions updated"

# Build secrets array for ECS task definitions
# Each secret in Secrets Manager becomes an env var inside the container
build_secrets_array() {
    local filter_prefix="${1:-}" # optional: only include secrets with this key prefix
    local arr="["
    local first=true
    for arn in $ALL_SECRET_ARNS; do
        full_name=$(aws secretsmanager describe-secret \
            --secret-id "$arn" \
            --query "Name" \
            --output text \
            --region "$AWS_REGION" 2>/dev/null)
        # Env var name = secret name with the prefix path stripped.
        key_name="${full_name#${SECRETS_PREFIX}/}"
        # Skip secrets that don't match the optional prefix filter
        if [ -n "$filter_prefix" ] && [[ "$key_name" != ${filter_prefix}* ]]; then
            continue
        fi
        if [ "$first" = true ]; then
            first=false
        else
            arr+=","
        fi
        arr+="{\"name\":\"${key_name}\",\"valueFrom\":\"${arn}\"}"
    done
    arr+="]"
    echo "$arr"
}

ALL_SECRETS_ARRAY=$(build_secrets_array)
# Guacamole only needs the JSON auth key, not the broker credentials.
JSON_KEY_ARN=$(aws secretsmanager describe-secret \
    --secret-id "${SECRETS_PREFIX}/JSON_SECRET_KEY" \
    --query "ARN" --output text --region "$AWS_REGION" 2>/dev/null || echo "")
GUACAMOLE_SECRETS_ARRAY="[{\"name\":\"JSON_SECRET_KEY\",\"valueFrom\":\"${JSON_KEY_ARN}\"}]"

#------------------------------------------------------------------------------
# Cloud Map — service discovery
#------------------------------------------------------------------------------

log_info "Setting up Cloud Map service discovery (namespace: ${NAMESPACE})..."

# Look up the private DNS namespace by name; "None" / empty = absent.
NAMESPACE_ID=$(aws servicediscovery list-namespaces \
    --query "Namespaces[?Name=='${NAMESPACE}'].Id | [0]" \
    --output text \
    --region "$AWS_REGION" 2>/dev/null || echo "None")

if [ "$NAMESPACE_ID" = "None" ] || [ -z "$NAMESPACE_ID" ]; then
    log_info "Creating private DNS namespace: $NAMESPACE"
    # Namespace creation is asynchronous — capture the operation id and poll.
    NAMESPACE_OP=$(aws servicediscovery create-private-dns-namespace \
        --name "$NAMESPACE" \
        --vpc "$VPC_ID" \
        --region "$AWS_REGION" \
        --output json)
    OP_ID=$(echo "$NAMESPACE_OP" | jq -r '.OperationId')
    # Wait for namespace creation (up to 20 * 5s = 100 seconds).
    for i in {1..20}; do
        STATUS=$(aws servicediscovery get-operation --operation-id "$OP_ID" \
            --query "Operation.Status" --output text --region "$AWS_REGION" 2>/dev/null)
        [ "$STATUS" = "SUCCESS" ] && break
        sleep 5
    done
    NAMESPACE_ID=$(aws servicediscovery list-namespaces \
        --query "Namespaces[?Name=='${NAMESPACE}'].Id | [0]" \
        --output text --region "$AWS_REGION")
    log_success "Namespace created: $NAMESPACE_ID"
else
    log_success "Namespace exists: $NAMESPACE_ID"
fi

# Create (or reuse) a Cloud Map service in the namespace; echoes its Id.
create_sd_service_if_missing() {
    local name="$1"
    local existing
    existing=$(aws servicediscovery list-services \
        --filters "Name=NAMESPACE_ID,Values=${NAMESPACE_ID},Condition=EQ" \
        --query "Services[?Name=='${name}'].Id | [0]" \
        --output text --region "$AWS_REGION" 2>/dev/null || echo "None")
    if [ "$existing" = "None" ] || [ -z "$existing" ]; then
        aws servicediscovery create-service \
            --name "$name" \
            --dns-config "NamespaceId=${NAMESPACE_ID},DnsRecords=[{Type=A,TTL=60}]" \
            --health-check-custom-config "FailureThreshold=1" \
            --query "Service.Id" \
            --output text \
            --region "$AWS_REGION"
    else
        echo "$existing"
    fi
}

GUACD_SD_ID=$(create_sd_service_if_missing "guacd")
BROKER_SD_ID=$(create_sd_service_if_missing "broker")
log_success "Service discovery ready (guacd.${NAMESPACE}, broker.${NAMESPACE})"

#------------------------------------------------------------------------------
# ALB — Application Load Balancer for Guacamole
#------------------------------------------------------------------------------

log_info "Setting up Application Load Balancer..."

ALB_NAME="${CLUSTER_NAME}-alb"
ALB_ARN=$(aws elbv2 describe-load-balancers \
    --names "$ALB_NAME" \
    --query "LoadBalancers[0].LoadBalancerArn" \
    --output text \
    --region "$AWS_REGION" 2>/dev/null || echo "None")

if [ "$ALB_ARN" = "None" ] || [ -z "$ALB_ARN" ]; then
    log_info "Creating ALB: $ALB_NAME"
    # SUBNET_LIST is intentionally unquoted below so each subnet id becomes
    # its own CLI argument.
    SUBNET_LIST=$(echo "$SUBNET_IDS" | tr ',' ' ')
    ALB_ARN=$(aws elbv2 create-load-balancer \
        --name "$ALB_NAME" \
        --subnets $SUBNET_LIST \
        --security-groups "$ALB_SG_ID" \
        --scheme internet-facing \
        --type application \
        --query "LoadBalancers[0].LoadBalancerArn" \
        --output text \
        --region "$AWS_REGION")
    log_success "ALB created: $ALB_ARN"
else
    log_success "ALB exists: $ALB_ARN"
fi

ALB_DNS=$(aws elbv2 describe-load-balancers \
    --load-balancer-arns "$ALB_ARN" \
    --query "LoadBalancers[0].DNSName" \
    --output text \
    --region "$AWS_REGION")

# Target group for Guacamole — ip target type is required for Fargate awsvpc.
TG_NAME="${CLUSTER_NAME}-guacamole-tg"
TG_ARN=$(aws elbv2 describe-target-groups \
    --names "$TG_NAME" \
    --query "TargetGroups[0].TargetGroupArn" \
    --output text \
    --region "$AWS_REGION" 2>/dev/null || echo "None")

if [ "$TG_ARN" = "None" ] || [ -z "$TG_ARN" ]; then
    log_info "Creating target group: $TG_NAME"
    TG_ARN=$(aws elbv2 create-target-group \
        --name "$TG_NAME" \
        --protocol HTTP \
        --port 8080 \
        --vpc-id "$VPC_ID" \
        --target-type ip \
        --health-check-path "/guacamole/" \
        --health-check-interval-seconds 30 \
        --healthy-threshold-count 2 \
        --query "TargetGroups[0].TargetGroupArn" \
        --output text \
        --region "$AWS_REGION")
    log_success "Target group created"
else
    log_success "Target group exists"
fi

# ALB listener (HTTP/80 or HTTPS/443 depending on ACM cert)
LISTENER_ARN=$(aws elbv2 describe-listeners \
    --load-balancer-arn "$ALB_ARN" \
    --query "Listeners[0].ListenerArn" \
    --output text \
    --region "$AWS_REGION" 2>/dev/null || echo "None")

if [ "$LISTENER_ARN" = "None" ] || [ -z "$LISTENER_ARN" ]; then
    if [ -n "$ACM_CERT_ARN" ]; then
        log_info "Creating HTTPS listener on port 443..."
        LISTENER_ARN=$(aws elbv2 create-listener \
            --load-balancer-arn "$ALB_ARN" \
            --protocol HTTPS \
            --port 443 \
            --certificates "CertificateArn=${ACM_CERT_ARN}" \
            --default-actions "Type=forward,TargetGroupArn=${TG_ARN}" \
            --query "Listeners[0].ListenerArn" \
            --output text \
            --region "$AWS_REGION")
        # Redirect HTTP → HTTPS
        aws elbv2 create-listener \
            --load-balancer-arn "$ALB_ARN" \
            --protocol HTTP \
            --port 80 \
            --default-actions "Type=redirect,RedirectConfig={Protocol=HTTPS,Port=443,StatusCode=HTTP_301}" \
            --region "$AWS_REGION" > /dev/null 2>/dev/null || true
        log_success "HTTPS listener created (HTTP→HTTPS redirect enabled)"
    else
        log_info "Creating HTTP listener on port 80..."
        LISTENER_ARN=$(aws elbv2 create-listener \
            --load-balancer-arn "$ALB_ARN" \
            --protocol HTTP \
            --port 80 \
            --default-actions "Type=forward,TargetGroupArn=${TG_ARN}" \
            --query "Listeners[0].ListenerArn" \
            --output text \
            --region "$AWS_REGION")
        log_success "HTTP listener created"
        log_warning "No ACM certificate specified — traffic is unencrypted. Pass --acm-cert-arn to enable HTTPS."
    fi
else
    log_success "ALB listener exists"
fi

#------------------------------------------------------------------------------
# ECS cluster
#------------------------------------------------------------------------------

log_info "Setting up ECS cluster: $CLUSTER_NAME"
if ! aws ecs describe-clusters \
    --clusters "$CLUSTER_NAME" \
    --query "clusters[?status=='ACTIVE'].clusterName" \
    --output text \
    --region "$AWS_REGION" | grep -q "$CLUSTER_NAME"; then
    aws ecs create-cluster --cluster-name "$CLUSTER_NAME" --region "$AWS_REGION" > /dev/null
    log_success "ECS cluster created"
else
    log_success "ECS cluster exists"
fi

#------------------------------------------------------------------------------
# Task definitions
#------------------------------------------------------------------------------

log_info "Registering ECS task definitions..."

# Shared EFS volume definition spliced into every task definition below —
# encrypted in transit and rooted at the /recordings access point.
EFS_VOLUME_CONFIG=$(cat << EOF
{
  "name": "recordings",
  "efsVolumeConfiguration": {
    "fileSystemId": "${EFS_ID}",
    "transitEncryption": "ENABLED",
    "authorizationConfig": {
      "accessPointId": "${EFS_AP_ID}",
      "iam": "DISABLED"
    }
  }
}
EOF
)

# guacd task definition
GUACD_TASK_DEF=$(cat << EOF
{
  "family": "britive-sr-guacd",
  "executionRoleArn": "${EXECUTION_ROLE_ARN}",
  "taskRoleArn": "${TASK_ROLE_ARN}",
  "networkMode": "awsvpc",
  "requiresCompatibilities": ["FARGATE"],
  "cpu": "512",
  "memory": "1024",
  "volumes": [${EFS_VOLUME_CONFIG}],
  "containerDefinitions": [{
    "name": "guacd",
    "image": "guacamole/guacd:1.5.5",
    "essential": true,
    "portMappings": [{"containerPort": 4822, "protocol": "tcp"}],
    "mountPoints": [{"sourceVolume": "recordings", "containerPath": "/recordings"}],
    "logConfiguration": {
      "logDriver": "awslogs",
      "options": {
        "awslogs-group": "/ecs/britive-session-recording/guacd",
        "awslogs-region": "${AWS_REGION}",
        "awslogs-stream-prefix": "guacd"
      }
    }
  }],
  "runtimePlatform": {
    "cpuArchitecture": "X86_64",
    "operatingSystemFamily": "LINUX"
  }
}
EOF
)

GUACD_TASK_ARN=$(aws ecs register-task-definition \
    --cli-input-json "$GUACD_TASK_DEF" \
    --query "taskDefinition.taskDefinitionArn" \
    --output text \
    --region "$AWS_REGION")
log_success "guacd task definition registered"

# guacamole task definition
# Guacamole web app — reaches guacd via the Cloud Map DNS name and receives
# only the JSON auth key from Secrets Manager.
GUACAMOLE_TASK_DEF=$(cat << EOF
{
  "family": "britive-sr-guacamole",
  "executionRoleArn": "${EXECUTION_ROLE_ARN}",
  "taskRoleArn": "${TASK_ROLE_ARN}",
  "networkMode": "awsvpc",
  "requiresCompatibilities": ["FARGATE"],
  "cpu": "512",
  "memory": "1024",
  "volumes": [${EFS_VOLUME_CONFIG}],
  "containerDefinitions": [{
    "name": "guacamole",
    "image": "guacamole/guacamole:1.5.5",
    "essential": true,
    "portMappings": [{"containerPort": 8080, "protocol": "tcp"}],
    "mountPoints": [{"sourceVolume": "recordings", "containerPath": "/recordings"}],
    "environment": [
      {"name": "GUACD_HOSTNAME", "value": "guacd.${NAMESPACE}"},
      {"name": "GUACD_PORT", "value": "4822"},
      {"name": "GUACAMOLE_HOME", "value": "/etc/guacamole"}
    ],
    "secrets": ${GUACAMOLE_SECRETS_ARRAY},
    "logConfiguration": {
      "logDriver": "awslogs",
      "options": {
        "awslogs-group": "/ecs/britive-session-recording/guacamole",
        "awslogs-region": "${AWS_REGION}",
        "awslogs-stream-prefix": "guacamole"
      }
    }
  }],
  "runtimePlatform": {
    "cpuArchitecture": "X86_64",
    "operatingSystemFamily": "LINUX"
  }
}
EOF
)

GUACAMOLE_TASK_ARN=$(aws ecs register-task-definition \
    --cli-input-json "$GUACAMOLE_TASK_DEF" \
    --query "taskDefinition.taskDefinitionArn" \
    --output text \
    --region "$AWS_REGION")
log_success "guacamole task definition registered"

# broker task definition — receives every secret under the prefix as env vars.
BROKER_TASK_DEF=$(cat << EOF
{
  "family": "britive-sr-broker",
  "executionRoleArn": "${EXECUTION_ROLE_ARN}",
  "taskRoleArn": "${TASK_ROLE_ARN}",
  "networkMode": "awsvpc",
  "requiresCompatibilities": ["FARGATE"],
  "cpu": "512",
  "memory": "1024",
  "volumes": [${EFS_VOLUME_CONFIG}],
  "containerDefinitions": [{
    "name": "britive-broker",
    "image": "${ECR_BROKER_URI}:${IMAGE_TAG}",
    "essential": true,
    "portMappings": [{"containerPort": 22, "protocol": "tcp"}],
    "mountPoints": [{"sourceVolume": "recordings", "containerPath": "/recordings"}],
    "secrets": ${ALL_SECRETS_ARRAY},
    "healthCheck": {
      "command": ["CMD-SHELL", "pgrep -f britive-broker > /dev/null || exit 1"],
      "interval": 30,
      "timeout": 10,
      "retries": 3,
      "startPeriod": 60
    },
    "logConfiguration": {
      "logDriver": "awslogs",
      "options": {
        "awslogs-group": "/ecs/britive-session-recording/broker",
        "awslogs-region": "${AWS_REGION}",
        "awslogs-stream-prefix": "broker"
      }
    }
  }],
  "runtimePlatform": {
    "cpuArchitecture": "X86_64",
    "operatingSystemFamily": "LINUX"
  }
}
EOF
)

BROKER_TASK_ARN=$(aws ecs register-task-definition \
    --cli-input-json "$BROKER_TASK_DEF" \
    --query "taskDefinition.taskDefinitionArn" \
    --output text \
    --region "$AWS_REGION")
log_success "broker task definition registered"

# GuacSync task definition (optional) — converts .guac recordings to m4v and
# syncs them to S3; gets 2x the cpu/memory of the other tasks for ffmpeg work.
if [ "$ENABLE_GUACSYNC" = true ]; then
    GUACSYNC_TASK_DEF=$(cat << EOF
{
  "family": "britive-sr-guacsync",
  "executionRoleArn": "${EXECUTION_ROLE_ARN}",
  "taskRoleArn": "${TASK_ROLE_ARN}",
  "networkMode": "awsvpc",
  "requiresCompatibilities": ["FARGATE"],
  "cpu": "1024",
  "memory": "2048",
  "volumes": [${EFS_VOLUME_CONFIG}],
  "containerDefinitions": [{
    "name": "guacsync",
    "image": "${ECR_GUACSYNC_URI}:${IMAGE_TAG}",
    "essential": true,
    "mountPoints": [{"sourceVolume": "recordings", "containerPath": "/recordings"}],
    "environment": [
      {"name": "REC_DIR", "value": "/recordings"},
      {"name": "BUCKET", "value": "${S3_BUCKET}"},
      {"name": "AUTOCONVERT", "value": "true"},
      {"name": "AUTOCONVERT_WAIT", "value": "30"},
      {"name": "PARALLEL", "value": "true"},
      {"name": "CONCURRENT_LIMIT", "value": "4"}
    ],
    "logConfiguration": {
      "logDriver": "awslogs",
      "options": {
        "awslogs-group": "/ecs/britive-session-recording/guacsync",
        "awslogs-region": "${AWS_REGION}",
        "awslogs-stream-prefix": "guacsync"
      }
    }
  }],
  "runtimePlatform": {
    "cpuArchitecture": "X86_64",
    "operatingSystemFamily": "LINUX"
  }
}
EOF
)
    GUACSYNC_TASK_ARN=$(aws ecs register-task-definition \
        --cli-input-json "$GUACSYNC_TASK_DEF" \
        --query "taskDefinition.taskDefinitionArn" \
        --output text \
        --region "$AWS_REGION")
    log_success "guacsync task definition registered"
fi

#------------------------------------------------------------------------------
# ECS services
#------------------------------------------------------------------------------

log_info "Deploying ECS services..."

# Create an ECS service, or update it in place if it already exists.
# Args: $1 service name, $2 task def ARN, $3 security group id,
#       $4 desired count (default 1), $5 extra create-service args (optional).
deploy_service() {
    local service_name="$1"
    local task_arn="$2"
    local sg_id="$3"
    local desired="${4:-1}"
    local extra_args="${5:-}"

    local existing
    existing=$(aws ecs describe-services \
        --cluster "$CLUSTER_NAME" \
        --services "$service_name" \
        --query "services[?status=='ACTIVE'].serviceName" \
        --output text \
        --region "$AWS_REGION" 2>/dev/null || echo "")

    if [ -n "$existing" ]; then
        log_info "Updating service: $service_name"
        aws ecs update-service \
            --cluster "$CLUSTER_NAME" \
            --service "$service_name" \
            --task-definition "$task_arn" \
            --desired-count "$desired" \
            --force-new-deployment \
            --region "$AWS_REGION" > /dev/null
    else
        log_info "Creating service: $service_name"
        # extra_args is deliberately unquoted so it splits into CLI flags.
        # shellcheck disable=SC2086
        aws ecs create-service \
            --cluster "$CLUSTER_NAME" \
            --service-name "$service_name" \
            --task-definition "$task_arn" \
            --desired-count "$desired" \
            --launch-type FARGATE \
            --network-configuration "awsvpcConfiguration={subnets=${SUBNET_ARRAY},securityGroups=[\"${sg_id}\"],assignPublicIp=ENABLED}" \
            --region "$AWS_REGION" \
            $extra_args > /dev/null
    fi
    log_success "Service deployed: $service_name"
}

# guacd — service discovery registration
deploy_service "$ECS_GUACD_SERVICE" "$GUACD_TASK_ARN" "$GUACD_SG_ID" 1 \
    "--service-registries registryArn=${GUACD_SD_ID}"

# broker — service discovery registration
deploy_service "$ECS_BROKER_SERVICE" "$BROKER_TASK_ARN" "$BROKER_SG_ID" "$DESIRED_COUNT" \
    "--service-registries registryArn=${BROKER_SD_ID}"

# guacamole — attached to ALB target group
# Guacamole is reached through the ALB rather than service discovery, so it
# registers with the target group created earlier.
GUACAMOLE_LB_CONFIG="loadBalancers=[{targetGroupArn=${TG_ARN},containerName=guacamole,containerPort=8080}]"
deploy_service "$ECS_GUACAMOLE_SERVICE" "$GUACAMOLE_TASK_ARN" "$GUACAMOLE_SG_ID" 1 \
    "--load-balancers ${GUACAMOLE_LB_CONFIG}"

# guacsync (optional) — shares the guacd SG so it can reach the EFS mounts.
if [ "$ENABLE_GUACSYNC" = true ]; then
    deploy_service "$ECS_GUACSYNC_SERVICE" "$GUACSYNC_TASK_ARN" "$GUACD_SG_ID" 1
fi

#------------------------------------------------------------------------------
# Wait for stabilization
#------------------------------------------------------------------------------

log_info "Waiting for services to stabilize (this may take a few minutes)..."

SERVICES_TO_WATCH=("$ECS_GUACD_SERVICE" "$ECS_BROKER_SERVICE" "$ECS_GUACAMOLE_SERVICE")
[ "$ENABLE_GUACSYNC" = true ] && SERVICES_TO_WATCH+=("$ECS_GUACSYNC_SERVICE")

# The waiter can time out on slow image pulls; treat that as a warning, not a
# deployment failure.
aws ecs wait services-stable \
    --cluster "$CLUSTER_NAME" \
    --services "${SERVICES_TO_WATCH[@]}" \
    --region "$AWS_REGION" || \
    log_warning "Some services may still be stabilizing. Check the AWS console."

log_success "Deployment complete!"

#------------------------------------------------------------------------------
# Status
#------------------------------------------------------------------------------

# The printed URL scheme mirrors the listener created earlier: HTTPS only when
# an ACM certificate was supplied.
GUACAMOLE_PROTO="http"
GUACAMOLE_PORT_LABEL="80"
if [ -n "$ACM_CERT_ARN" ]; then
    GUACAMOLE_PROTO="https"
    GUACAMOLE_PORT_LABEL="443"
fi

echo ""
echo "============================================================"
echo "  DEPLOYMENT STATUS"
echo "============================================================"
echo ""
echo "Cluster:        $CLUSTER_NAME"
echo "Region:         $AWS_REGION"
echo "Broker version: $BROKER_VERSION"
echo ""
echo "Services:"
for svc in "${SERVICES_TO_WATCH[@]}"; do
    echo "  - $svc"
done
echo ""
echo "Guacamole URL:  ${GUACAMOLE_PROTO}://${ALB_DNS}/guacamole/"
echo ""
echo "EFS Filesystem: $EFS_ID"
echo "Secrets prefix: $SECRETS_PREFIX"
echo ""
echo "============================================================"
echo ""
log_info "View logs:"
log_info "  aws logs tail /ecs/britive-session-recording/broker --follow --region $AWS_REGION"
log_info "  aws logs tail /ecs/britive-session-recording/guacd --follow --region $AWS_REGION"
log_info "  aws logs tail /ecs/britive-session-recording/guacamole --follow --region $AWS_REGION"
echo ""
log_info "Check service status:"
log_info "  aws ecs describe-services --cluster $CLUSTER_NAME --services ${ECS_BROKER_SERVICE} --region $AWS_REGION"
echo ""
log_info "Manage secrets:"
log_info "  ./manage-secrets.sh list"
log_info "  ./manage-secrets.sh restart-tasks"
echo ""
log_info "AWS Console:"
log_info "  https://${AWS_REGION}.console.aws.amazon.com/ecs/home?region=${AWS_REGION}#/clusters/${CLUSTER_NAME}"
echo ""
log_success "Britive Session Recording deployed to ECS Fargate!"
diff --git a/session-recording/ecs-fargate/guacsync/Dockerfile b/session-recording/ecs-fargate/guacsync/Dockerfile
new file mode 100644
index 0000000..199c7ce
--- /dev/null
+++ b/session-recording/ecs-fargate/guacsync/Dockerfile
@@ -0,0 +1,73 @@
# GuacSync - Guacamole recording conversion and S3 sync service
# Stage 1: Build guacenc from guacamole-server source
# NOTE(review): ubuntu:latest is unpinned in both stages — consider pinning a
# release (e.g. ubuntu:24.04) for reproducible builds.
FROM ubuntu:latest AS guacserver

ENV LD_LIBRARY_PATH=/usr/local/lib

RUN apt-get update && apt-get install -y \
    make \
    libcairo2-dev \
    libjpeg-turbo8-dev \
    libpng-dev \
    libtool-bin \
    libossp-uuid-dev \
    libavcodec-dev \
    libavformat-dev \
    libavutil-dev \
    libswscale-dev \
    wget \
    ffmpeg \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /opt

RUN wget -O guacamole-server-1.5.5.tar.gz \
    "https://apache.org/dyn/closer.lua/guacamole/1.5.5/source/guacamole-server-1.5.5.tar.gz?action=download"
RUN tar -xzf guacamole-server-1.5.5.tar.gz

WORKDIR /opt/guacamole-server-1.5.5

RUN ./configure
RUN make
RUN make install

# Stage 2: Runtime image with guacenc binary and AWS CLI
FROM ubuntu:latest

ARG USERNAME=guacamole
ARG USER_UID=1001
ARG USER_GID=$USER_UID

RUN apt-get update && \
    apt-get install -y wget unzip libcairo2-dev libossp-uuid-dev libwebp-dev ffmpeg curl \
    && rm -rf /var/lib/apt/lists/*

# Carry only the guacenc binary and its shared libraries from the build stage.
COPY --from=guacserver /usr/local/bin/guacenc /usr/local/bin/guacenc
COPY --from=guacserver /usr/local/lib/lib* /usr/local/lib/

ENV LD_LIBRARY_PATH=/usr/local/lib

WORKDIR /opt/

# Install AWS CLI v2 (platform-aware)
RUN ARCH=$(dpkg --print-architecture) && \
    if [ "$ARCH" = "arm64" ]; then \
        AWS_CLI_URL="https://awscli.amazonaws.com/awscli-exe-linux-aarch64.zip"; \
    else \
        AWS_CLI_URL="https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip"; \
    fi && \
    wget -O awscliv2.zip "$AWS_CLI_URL" && \
    unzip awscliv2.zip && \
    ./aws/install && \
    rm -rf awscliv2.zip aws

COPY ./guacsync.sh /usr/bin/guacsync
RUN chmod +x /usr/bin/guacsync

# Run as a non-root user.
RUN useradd -m $USERNAME

WORKDIR /home/$USERNAME

USER $USERNAME

CMD ["/usr/bin/guacsync"]
diff --git a/session-recording/ecs-fargate/guacsync/guacsync.sh b/session-recording/ecs-fargate/guacsync/guacsync.sh
new file mode 100644
index 0000000..4177a7d
--- /dev/null
+++ b/session-recording/ecs-fargate/guacsync/guacsync.sh
@@ -0,0 +1,48 @@
#!/bin/bash
# Periodically converts Guacamole session recordings to .m4v with guacenc and
# syncs the converted files to S3.
#
# Environment:
#   REC_DIR           recordings directory (default: $HOME/recordings)
#   BUCKET            destination S3 bucket
#   AUTOCONVERT       "true" to enable the convert+sync cycle
#   AUTOCONVERT_WAIT  seconds between cycles (default: 15)
#   PARALLEL          "true" to run guacenc jobs concurrently
#   CONCURRENT_LIMIT  max concurrent guacenc jobs (default: 4)

# Collect recordings that do not yet have a matching .m4v into UNCONVERTED.
setup() {
    DIR=${REC_DIR:-$HOME/recordings}
    UNCONVERTED=()
    while IFS= read -r -d '' file; do
        if [ ! -f "${file}.m4v" ]; then
            UNCONVERTED+=("$file")
        fi
    done < <(find "$DIR" -type f ! -name "*.m4v" -print0)
}

# Run guacenc over each pending recording, optionally in parallel batches of
# CONCURRENT_LIMIT jobs.
convert() {
    count=0
    for FILE in "${UNCONVERTED[@]}"; do
        # Extract the "WxH" size from the recording's size instruction;
        # fall back to 720p when the recording reports 0x0.
        SIZE=$(awk -F';' '/size/ {gsub("[[:digit:]].size,[[:digit:]].[[:digit:]],?[[:digit:]].","",$1); gsub(",[[:digit:]].","x",$1); print $1}' "${FILE}")
        if [[ "${SIZE}" == "0x0" ]]; then SIZE="1280x720"; fi

        if [[ "${PARALLEL}" == "true" ]]; then
            /usr/local/bin/guacenc -s "${SIZE}" "${FILE}" &

            count=$((count + 1))
            if (( count % CONCURRENT_LIMIT == 0 )); then
                wait
            fi
        else
            /usr/local/bin/guacenc -s "${SIZE}" "${FILE}"
        fi
    done
    # Barrier for any still-running background jobs.
    wait
}

# Upload only the converted .m4v files to the bucket.
s3sync() {
    aws s3 sync "${DIR}/" "s3://${BUCKET}/" --exclude "*" --include "*.m4v"
}

# Main poll loop. This was previously implemented as unbounded tail recursion
# (main calling itself), which bash does not optimize — function nesting grew
# without limit until the process hit FUNCNEST/memory limits. A plain loop
# has identical behavior and runs forever safely.
main() {
    CONCURRENT_LIMIT=${CONCURRENT_LIMIT:-4}
    while true; do
        if [[ "${AUTOCONVERT}" == "true" ]]; then
            setup "$@"
            convert
            s3sync
        fi
        sleep "${AUTOCONVERT_WAIT:-15}"
    done
}

main "$@"
diff --git a/session-recording/ecs-fargate/manage-secrets.sh b/session-recording/ecs-fargate/manage-secrets.sh
new file mode 100644
index 0000000..82b05b3
--- /dev/null
+++ b/session-recording/ecs-fargate/manage-secrets.sh
@@ -0,0 +1,440 @@
#!/bin/bash

# Britive Session Recording - Secrets Management Script
# Manages secrets in AWS Secrets Manager for the ECS Fargate deployment
#
# Usage:
#   ./manage-secrets.sh [options]
#
# Commands:
#   list            List all secrets in the britive/session-recording namespace
#   get             Get a secret value
#   set             Create or update a secret
#   delete          Delete a secret
#   sync [file]     Sync
secrets from secrets.json to AWS Secrets Manager
+#   export               Export current secrets list (values hidden)
+#   restart-tasks        Force restart ECS tasks to pick up new secrets
+#   update-iam           Update IAM permissions for all secrets
+
+set -e  # abort the script on any unhandled command failure
+
+#==============================================================================
+# CONFIGURATION
+#==============================================================================
+
+AWS_REGION="${AWS_REGION:-us-east-1}"
+SECRETS_PREFIX="britive/session-recording"  # namespace prefix for every managed secret
+ECS_CLUSTER_NAME="${ECS_CLUSTER_NAME:-britive-session-recording}"
+
+# Service names — restart-tasks acts on all of them
+ECS_SERVICES=(
+    "${ECS_BROKER_SERVICE:-britive-sr-broker-service}"
+    "${ECS_GUACD_SERVICE:-britive-sr-guacd-service}"
+    "${ECS_GUACAMOLE_SERVICE:-britive-sr-guacamole-service}"
+)
+
+#==============================================================================
+# Colors for output
+#==============================================================================
+
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m'  # reset attribute; terminates each colored tag
+
+log_info() {  # blue informational message
+    echo -e "${BLUE}[INFO]${NC} $1"
+}
+
+log_success() {  # green success message
+    echo -e "${GREEN}[SUCCESS]${NC} $1"
+}
+
+log_warning() {  # yellow warning message
+    echo -e "${YELLOW}[WARNING]${NC} $1"
+}
+
+log_error() {  # red error message (NOTE(review): goes to stdout, not stderr — confirm intended)
+    echo -e "${RED}[ERROR]${NC} $1"
+}
+
+#==============================================================================
+# Functions
+#==============================================================================
+
+check_prerequisites() {  # verify aws CLI, working credentials, and jq before any command runs
+    if ! command -v aws &> /dev/null; then
+        log_error "AWS CLI not found. Please install it first."
+        exit 1
+    fi
+
+    if ! aws sts get-caller-identity &> /dev/null; then
+        log_error "AWS credentials not configured. Please run: aws configure"
+        exit 1
+    fi
+
+    if ! command -v jq &> /dev/null; then
+        log_error "jq not found. 
Please install it: brew install jq (macOS) or apt install jq (Linux)"
+        exit 1
+    fi
+}
+
+# Print a table of all secrets under SECRETS_PREFIX (name, last-modified date, description).
+list_secrets() {
+    log_info "Listing secrets in AWS Secrets Manager (prefix: ${SECRETS_PREFIX})..."
+    echo ""
+
+    SECRETS=$(aws secretsmanager list-secrets \
+        --filter Key=name,Values="${SECRETS_PREFIX}" \
+        --query "SecretList[*].[Name,Description,LastChangedDate]" \
+        --output text \
+        --region "$AWS_REGION" 2>/dev/null || echo "")
+
+    if [ -z "$SECRETS" ]; then
+        log_warning "No secrets found with prefix: ${SECRETS_PREFIX}"
+        echo ""
+        echo "To create secrets, use:"
+        echo "  ./manage-secrets.sh set <name> <value>"
+        echo "  ./manage-secrets.sh sync  (to sync from secrets.json)"
+        return
+    fi
+
+    echo "----------------------------------------------------------------------"
+    printf "%-40s %-25s %s\n" "SECRET NAME" "LAST MODIFIED" "DESCRIPTION"
+    echo "----------------------------------------------------------------------"
+
+    echo "$SECRETS" | while IFS=$'\t' read -r name desc modified; do
+        short_name="${name#${SECRETS_PREFIX}/}"  # strip the prefix for display
+        if [ "$modified" != "None" ] && [ -n "$modified" ]; then
+            mod_date=$(echo "$modified" | cut -d'T' -f1)
+        else
+            mod_date="N/A"
+        fi
+        printf "%-40s %-25s %s\n" "$short_name" "$mod_date" "${desc:-N/A}"
+    done
+    echo "----------------------------------------------------------------------"
+}
+
+# Fetch and print one secret's value. Argument: short secret name (without prefix).
+get_secret() {
+    local secret_name="$1"
+
+    if [ -z "$secret_name" ]; then
+        log_error "Usage: ./manage-secrets.sh get <secret-name>"
+        exit 1
+    fi
+
+    local full_secret_name="${SECRETS_PREFIX}/${secret_name}"
+
+    log_info "Retrieving secret: $secret_name"
+
+    SECRET_VALUE=$(aws secretsmanager get-secret-value \
+        --secret-id "$full_secret_name" \
+        --query "SecretString" \
+        --output text \
+        --region "$AWS_REGION" 2>/dev/null || true)  # || true: without it, set -e aborts before the not-found message below can run
+
+    if [ $? 
-ne 0 ] || [ -z "$SECRET_VALUE" ]; then + log_error "Secret not found: $secret_name" + exit 1 + fi + + echo "" + echo "Secret: $secret_name" + echo "Value: $SECRET_VALUE" + echo "" +} + +set_secret() { + local secret_name="$1" + local secret_value="$2" + local description="${3:-Britive session recording secret}" + + if [ -z "$secret_name" ] || [ -z "$secret_value" ]; then + log_error "Usage: ./manage-secrets.sh set [description]" + exit 1 + fi + + local full_secret_name="${SECRETS_PREFIX}/${secret_name}" + + if aws secretsmanager describe-secret --secret-id "$full_secret_name" --region "$AWS_REGION" &> /dev/null; then + log_info "Updating existing secret: $secret_name" + aws secretsmanager update-secret \ + --secret-id "$full_secret_name" \ + --secret-string "$secret_value" \ + --region "$AWS_REGION" > /dev/null + log_success "Secret updated: $secret_name" + else + log_info "Creating new secret: $secret_name" + aws secretsmanager create-secret \ + --name "$full_secret_name" \ + --description "$description" \ + --secret-string "$secret_value" \ + --region "$AWS_REGION" > /dev/null + log_success "Secret created: $secret_name" + fi + + echo "" + log_warning "ECS tasks need to be restarted to pick up the new secret value." + echo "Run: ./manage-secrets.sh restart-tasks" +} + +delete_secret() { + local secret_name="$1" + + if [ -z "$secret_name" ]; then + log_error "Usage: ./manage-secrets.sh delete " + exit 1 + fi + + local full_secret_name="${SECRETS_PREFIX}/${secret_name}" + + log_warning "This will permanently delete secret: $secret_name" + read -p "Are you sure? (y/N): " confirm + + if [ "$confirm" != "y" ] && [ "$confirm" != "Y" ]; then + log_info "Cancelled." + exit 0 + fi + + aws secretsmanager delete-secret \ + --secret-id "$full_secret_name" \ + --force-delete-without-recovery \ + --region "$AWS_REGION" > /dev/null + + log_success "Secret deleted: $secret_name" +} + +sync_secrets() { + local secrets_file="${1:-secrets.json}" + + if [ ! 
-f "$secrets_file" ]; then + log_error "Secrets file not found: $secrets_file" + log_info "Create a secrets.json file or copy secrets.json.example as a starting point." + exit 1 + fi + + log_info "Syncing secrets from: $secrets_file" + echo "" + + # Process main secrets + SECRETS=$(jq -r '.secrets | to_entries[] | select(.value.value != "" and .value.value != "your-britive-token-here" and .value.value != "your-tenant-subdomain-here") | @json' "$secrets_file" 2>/dev/null) + + if [ -n "$SECRETS" ]; then + echo "$SECRETS" | while read -r entry; do + key=$(echo "$entry" | jq -r '.key') + value=$(echo "$entry" | jq -r '.value.value') + desc=$(echo "$entry" | jq -r '.value.description // "Britive session recording secret"') + + if [ -n "$value" ]; then + log_info "Syncing secret: $key" + set_secret "$key" "$value" "$desc" 2>/dev/null || true + fi + done + fi + + # Process custom secrets + CUSTOM_SECRETS=$(jq -r '.custom_secrets | to_entries[] | select(.value != "" and .value != null) | @json' "$secrets_file" 2>/dev/null) + + if [ -n "$CUSTOM_SECRETS" ]; then + echo "$CUSTOM_SECRETS" | while read -r entry; do + key=$(echo "$entry" | jq -r '.key') + value=$(echo "$entry" | jq -r '.value') + + if [ -n "$value" ]; then + log_info "Syncing custom secret: $key" + set_secret "$key" "$value" "Custom secret for Britive session recording" 2>/dev/null || true + fi + done + fi + + echo "" + log_success "Secrets synced to AWS Secrets Manager" + echo "" + log_info "Next steps:" + echo " 1. Run ./deploy.sh to update IAM permissions and task definitions" + echo " 2. Or run ./manage-secrets.sh restart-tasks to restart with existing config" +} + +export_secrets() { + log_info "Exporting secrets from AWS Secrets Manager (values hidden)..." 
+ echo "" + + SECRETS=$(aws secretsmanager list-secrets \ + --filter Key=name,Values="${SECRETS_PREFIX}" \ + --query "SecretList[*].Name" \ + --output text \ + --region "$AWS_REGION" 2>/dev/null || echo "") + + echo "{" + echo ' "secrets": {' + + if [ -n "$SECRETS" ]; then + first=true + for secret_name in $SECRETS; do + short_name="${secret_name#${SECRETS_PREFIX}/}" + + if [ "$first" = true ]; then + first=false + else + echo "," + fi + + echo -n " \"${short_name}\": \"***HIDDEN***\"" + done + fi + + echo "" + echo ' }' + echo "}" + + echo "" + log_warning "Secret values are hidden. Use 'get ' to retrieve individual values." +} + +restart_tasks() { + log_info "Forcing ECS services to restart tasks (picks up new secret values)..." + + local any_restarted=false + + for service in "${ECS_SERVICES[@]}"; do + SERVICE_STATUS=$(aws ecs describe-services \ + --cluster "$ECS_CLUSTER_NAME" \ + --services "$service" \ + --query "services[0].status" \ + --output text \ + --region "$AWS_REGION" 2>/dev/null || echo "") + + if [ "$SERVICE_STATUS" = "ACTIVE" ]; then + aws ecs update-service \ + --cluster "$ECS_CLUSTER_NAME" \ + --service "$service" \ + --force-new-deployment \ + --region "$AWS_REGION" > /dev/null + log_success "Restart triggered for: $service" + any_restarted=true + else + log_warning "Service not found or not active — skipping: $service" + fi + done + + if [ "$any_restarted" = true ]; then + echo "" + log_info "New tasks will be launched with the latest secret values." + log_info "Monitor progress:" + echo " aws ecs describe-services --cluster $ECS_CLUSTER_NAME --services britive-sr-broker-service --region $AWS_REGION" + fi +} + +update_iam_permissions() { + log_info "Updating IAM execution role with permissions for all current secrets..." 
+
+    EXECUTION_ROLE_NAME="britive-sr-execution-role"
+
+    SECRET_ARNS=$(aws secretsmanager list-secrets \
+        --filter Key=name,Values="${SECRETS_PREFIX}" \
+        --query "SecretList[*].ARN" \
+        --output text \
+        --region "$AWS_REGION" 2>/dev/null || true)  # || true: set -e would otherwise kill the script silently on an API error
+
+    if [ -z "$SECRET_ARNS" ]; then
+        log_warning "No secrets found to configure permissions for."
+        return
+    fi
+
+    # Tab-separated ARNs -> JSON array for the policy's Resource field
+    ARN_ARRAY=$(echo "$SECRET_ARNS" | tr '\t' '\n' | jq -R . | jq -s .)
+
+    POLICY_FILE="$(mktemp)" && cat > "$POLICY_FILE" << EOF
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "secretsmanager:GetSecretValue"
+            ],
+            "Resource": ${ARN_ARRAY}
+        }
+    ]
+}
+EOF
+
+    aws iam put-role-policy \
+        --role-name "$EXECUTION_ROLE_NAME" \
+        --policy-name "britive-sr-secrets-access" \
+        --policy-document "file://${POLICY_FILE}" 2>/dev/null || log_warning "put-role-policy failed for role: ${EXECUTION_ROLE_NAME}"
+
+    rm -f "$POLICY_FILE"
+
+    SECRET_COUNT=$(echo "$SECRET_ARNS" | tr '\t' '\n' | grep -c . || true)  # grep -c already prints 0 on no match; '|| echo 0' would emit "0" twice
+    log_success "IAM permissions updated for ${SECRET_COUNT} secrets"
+}
+
+# Print command, environment-variable, and example usage help.
+show_help() {
+    echo "Britive Session Recording - Secrets Management"
+    echo ""
+    echo "Usage: ./manage-secrets.sh <command> [options]"
+    echo ""
+    echo "Commands:"
+    echo "  list                   List all secrets in AWS Secrets Manager"
+    echo "  get <name>             Get a secret value"
+    echo "  set <name> <value> [desc]  Create or update a secret"
+    echo "  delete <name>          Delete a secret"
+    echo "  sync [file]            Sync secrets from secrets.json (or specified file)"
+    echo "  export                 Export secrets list (values hidden)"
+    echo "  restart-tasks          Restart all ECS tasks to pick up new secrets"
+    echo "  update-iam             Update IAM permissions for all secrets"
+    echo ""
+    echo "Environment Variables:"
+    echo "  AWS_REGION             AWS region (default: us-east-1)"
+    echo "  ECS_CLUSTER_NAME       ECS cluster name (default: britive-session-recording)"
+    echo "  ECS_BROKER_SERVICE     Broker service name (default: britive-sr-broker-service)"
+    echo "  ECS_GUACD_SERVICE      GuacD service name (default: britive-sr-guacd-service)"
+    echo "  ECS_GUACAMOLE_SERVICE  Guacamole service name 
(default: britive-sr-guacamole-service)"
+    echo ""
+    echo "Examples:"
+    echo "  ./manage-secrets.sh list"
+    echo "  ./manage-secrets.sh set MY_API_KEY 'secret-value' 'Description'"
+    echo "  ./manage-secrets.sh get BRITIVE_TOKEN"
+    echo "  ./manage-secrets.sh sync"
+    echo "  ./manage-secrets.sh restart-tasks"
+}
+
+#==============================================================================
+# Main
+#==============================================================================
+
+check_prerequisites  # fail fast (aws CLI, credentials, jq) before dispatching
+
+case "${1:-}" in  # ${1:-} defaults to empty when no command is given
+    list)
+        list_secrets
+        ;;
+    get)
+        get_secret "$2"
+        ;;
+    set)
+        set_secret "$2" "$3" "$4"
+        ;;
+    delete)
+        delete_secret "$2"
+        ;;
+    sync)
+        sync_secrets "$2"
+        ;;
+    export)
+        export_secrets
+        ;;
+    restart-tasks|restart)  # 'restart' accepted as a shorthand alias
+        restart_tasks
+        ;;
+    update-iam)
+        update_iam_permissions
+        ;;
+    help|--help|-h)
+        show_help
+        ;;
+    *)  # unknown or missing command: print usage and exit non-zero
+        show_help
+        exit 1
+        ;;
+esac