diff --git a/.github/instructions/ARCHITECTURE.instructions.md b/.github/instructions/ARCHITECTURE.instructions.md index b8ddf926c..82c2a95c6 100644 --- a/.github/instructions/ARCHITECTURE.instructions.md +++ b/.github/instructions/ARCHITECTURE.instructions.md @@ -126,7 +126,7 @@ graph TB | **HTTP Framework** | Gin | Latest | Routing, middleware, HTTP handling | | **Database** | SQLite | 3.x | Embedded database | | **ORM** | GORM | Latest | Database abstraction layer | -| **Reverse Proxy** | Caddy Server | 2.11.0-beta.2 | Embedded HTTP/HTTPS proxy | +| **Reverse Proxy** | Caddy Server | 2.11.1 | Embedded HTTP/HTTPS proxy | | **WebSocket** | gorilla/websocket | Latest | Real-time log streaming | | **Crypto** | golang.org/x/crypto | Latest | Password hashing, encryption | | **Metrics** | Prometheus Client | Latest | Application metrics | diff --git a/.github/renovate.json b/.github/renovate.json index ff5961f83..2ad2fa197 100644 --- a/.github/renovate.json +++ b/.github/renovate.json @@ -36,6 +36,19 @@ "platformAutomerge": true, "customManagers": [ + { + "customType": "regex", + "description": "Track caddy-security plugin version in Dockerfile", + "managerFilePatterns": [ + "/^Dockerfile$/" + ], + "matchStrings": [ + "ARG CADDY_SECURITY_VERSION=(?<currentValue>[^\\s]+)" + ], + "depNameTemplate": "github.com/greenpau/caddy-security", + "datasourceTemplate": "go", + "versioningTemplate": "semver" + }, { "customType": "regex", "description": "Track Go dependencies patched in Dockerfile for Caddy CVE fixes", diff --git a/.github/workflows/codecov-upload.yml b/.github/workflows/codecov-upload.yml index 7eb29ca9f..309e88cd4 100644 --- a/.github/workflows/codecov-upload.yml +++ b/.github/workflows/codecov-upload.yml @@ -154,7 +154,7 @@ jobs: ref: ${{ github.sha }} - name: Set up Node.js - uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6 + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 with: node-version: ${{ env.NODE_VERSION }} cache: 'npm' 
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index edd872cc4..1b0418030 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -39,7 +39,12 @@ jobs: - name: Checkout repository uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: - ref: ${{ github.sha }} + # Use github.ref (full ref path) instead of github.ref_name: + # - push/schedule: resolves to refs/heads/<branch>, checking out latest HEAD + # - pull_request: resolves to refs/pull/<pr_number>/merge, the correct PR merge ref + # github.ref_name fails for PRs because it yields "<pr_number>/merge" which checkout + # interprets as a branch name (refs/heads/<pr_number>/merge) that does not exist. + ref: ${{ github.ref }} - name: Verify CodeQL parity guard if: matrix.language == 'go' diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml index f29c126ec..d51177550 100644 --- a/.github/workflows/docker-build.yml +++ b/.github/workflows/docker-build.yml @@ -115,7 +115,7 @@ jobs: - name: Set up QEMU if: steps.skip.outputs.skip_build != 'true' - uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 + uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4.0.0 - name: Set up Docker Buildx if: steps.skip.outputs.skip_build != 'true' uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 @@ -129,7 +129,7 @@ jobs: - name: Log in to GitHub Container Registry if: steps.skip.outputs.skip_build != 'true' - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: ${{ env.GHCR_REGISTRY }} username: ${{ github.actor }} @@ -137,7 +137,7 @@ jobs: - name: Log in to Docker Hub if: steps.skip.outputs.skip_build != 'true' && env.HAS_DOCKERHUB_TOKEN == 'true' - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: 
docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: docker.io username: ${{ secrets.DOCKERHUB_USERNAME }} @@ -657,7 +657,7 @@ jobs: echo "image_ref=${{ env.GHCR_REGISTRY }}/${{ env.IMAGE_NAME }}:${PR_TAG}" >> "$GITHUB_OUTPUT" - name: Log in to GitHub Container Registry - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: ${{ env.GHCR_REGISTRY }} username: ${{ github.actor }} diff --git a/.github/workflows/docs-to-issues.yml b/.github/workflows/docs-to-issues.yml index 5d7e1fb7a..1f45dd183 100644 --- a/.github/workflows/docs-to-issues.yml +++ b/.github/workflows/docs-to-issues.yml @@ -44,7 +44,7 @@ jobs: ref: ${{ github.event.workflow_run.head_sha || github.sha }} - name: Set up Node.js - uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6 + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 with: node-version: ${{ env.NODE_VERSION }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 738c4a0ba..eda97942b 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -38,7 +38,7 @@ jobs: # Step 2: Set up Node.js (for building any JS-based doc tools) - name: 🔧 Set up Node.js - uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6 + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 with: node-version: ${{ env.NODE_VERSION }} diff --git a/.github/workflows/e2e-tests-split.yml b/.github/workflows/e2e-tests-split.yml index 73eee00bf..827a3fc96 100644 --- a/.github/workflows/e2e-tests-split.yml +++ b/.github/workflows/e2e-tests-split.yml @@ -150,7 +150,7 @@ jobs: - name: Set up Node.js if: steps.resolve-image.outputs.image_source == 'build' - uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6 + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 with: node-version: ${{ 
env.NODE_VERSION }} cache: 'npm' @@ -224,7 +224,7 @@ jobs: ref: ${{ github.sha }} - name: Set up Node.js - uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6 + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 with: node-version: ${{ env.NODE_VERSION }} cache: 'npm' @@ -232,7 +232,7 @@ jobs: - name: Log in to Docker Hub if: needs.build.outputs.image_source == 'registry' - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: ${{ env.DOCKERHUB_REGISTRY }} username: ${{ secrets.DOCKERHUB_USERNAME }} @@ -426,7 +426,7 @@ jobs: ref: ${{ github.sha }} - name: Set up Node.js - uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6 + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 with: node-version: ${{ env.NODE_VERSION }} cache: 'npm' @@ -434,7 +434,7 @@ jobs: - name: Log in to Docker Hub if: needs.build.outputs.image_source == 'registry' - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: ${{ env.DOCKERHUB_REGISTRY }} username: ${{ secrets.DOCKERHUB_USERNAME }} @@ -636,7 +636,7 @@ jobs: ref: ${{ github.sha }} - name: Set up Node.js - uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6 + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 with: node-version: ${{ env.NODE_VERSION }} cache: 'npm' @@ -644,7 +644,7 @@ jobs: - name: Log in to Docker Hub if: needs.build.outputs.image_source == 'registry' - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: ${{ env.DOCKERHUB_REGISTRY }} username: ${{ secrets.DOCKERHUB_USERNAME }} @@ -858,7 +858,7 @@ jobs: ref: ${{ github.sha }} - name: Set up Node.js - uses: 
actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6 + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 with: node-version: ${{ env.NODE_VERSION }} cache: 'npm' @@ -898,7 +898,7 @@ jobs: - name: Log in to Docker Hub if: needs.build.outputs.image_source == 'registry' - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: ${{ env.DOCKERHUB_REGISTRY }} username: ${{ secrets.DOCKERHUB_USERNAME }} @@ -1095,7 +1095,7 @@ jobs: ref: ${{ github.sha }} - name: Set up Node.js - uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6 + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 with: node-version: ${{ env.NODE_VERSION }} cache: 'npm' @@ -1135,7 +1135,7 @@ jobs: - name: Log in to Docker Hub if: needs.build.outputs.image_source == 'registry' - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: ${{ env.DOCKERHUB_REGISTRY }} username: ${{ secrets.DOCKERHUB_USERNAME }} @@ -1340,7 +1340,7 @@ jobs: ref: ${{ github.sha }} - name: Set up Node.js - uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6 + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 with: node-version: ${{ env.NODE_VERSION }} cache: 'npm' @@ -1380,7 +1380,7 @@ jobs: - name: Log in to Docker Hub if: needs.build.outputs.image_source == 'registry' - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: ${{ env.DOCKERHUB_REGISTRY }} username: ${{ secrets.DOCKERHUB_USERNAME }} diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml index e5c48e773..8930d3812 100644 --- a/.github/workflows/nightly-build.yml +++ 
b/.github/workflows/nightly-build.yml @@ -162,13 +162,13 @@ jobs: run: echo "IMAGE_NAME_LC=${IMAGE_NAME,,}" >> "$GITHUB_ENV" - name: Set up QEMU - uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 + uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4.0.0 - name: Set up Docker Buildx uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 - name: Log in to GitHub Container Registry - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: ${{ env.GHCR_REGISTRY }} username: ${{ github.actor }} @@ -176,7 +176,7 @@ jobs: - name: Log in to Docker Hub if: env.HAS_DOCKERHUB_TOKEN == 'true' - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: docker.io username: ${{ secrets.DOCKERHUB_USERNAME }} @@ -330,7 +330,7 @@ jobs: run: echo "IMAGE_NAME_LC=${IMAGE_NAME,,}" >> "$GITHUB_ENV" - name: Log in to GitHub Container Registry - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: ${{ env.GHCR_REGISTRY }} username: ${{ github.actor }} diff --git a/.github/workflows/propagate-changes.yml b/.github/workflows/propagate-changes.yml index 97c832d0f..1182429e5 100644 --- a/.github/workflows/propagate-changes.yml +++ b/.github/workflows/propagate-changes.yml @@ -28,7 +28,7 @@ jobs: (github.event.workflow_run.head_branch == 'main' || github.event.workflow_run.head_branch == 'development') steps: - name: Set up Node (for github-script) - uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6 + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 with: node-version: ${{ env.NODE_VERSION }} diff --git 
a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 93da4681a..8dd5f1a9a 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -249,7 +249,7 @@ jobs: bash "scripts/repo_health_check.sh" - name: Set up Node.js - uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0 + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0 with: node-version: ${{ env.NODE_VERSION }} cache: 'npm' diff --git a/.github/workflows/release-goreleaser.yml b/.github/workflows/release-goreleaser.yml index c79a0eb1b..2c8994d3f 100644 --- a/.github/workflows/release-goreleaser.yml +++ b/.github/workflows/release-goreleaser.yml @@ -51,7 +51,7 @@ jobs: cache-dependency-path: backend/go.sum - name: Set up Node.js - uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6 + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 with: node-version: ${{ env.NODE_VERSION }} diff --git a/.github/workflows/security-pr.yml b/.github/workflows/security-pr.yml index aae49d3a4..d9edb5c0f 100644 --- a/.github/workflows/security-pr.yml +++ b/.github/workflows/security-pr.yml @@ -362,7 +362,7 @@ jobs: - name: Run Trivy filesystem scan (SARIF output) if: steps.check-artifact.outputs.artifact_exists == 'true' || github.event_name == 'push' || github.event_name == 'pull_request' # aquasecurity/trivy-action v0.33.1 - uses: aquasecurity/trivy-action@97e0b3872f55f89b95b2f65b3dbab56962816478 + uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 with: scan-type: 'fs' scan-ref: ${{ steps.extract.outputs.binary_path }} @@ -385,7 +385,7 @@ jobs: - name: Upload Trivy SARIF to GitHub Security if: always() && steps.trivy-sarif-check.outputs.exists == 'true' # github/codeql-action v4 - uses: github/codeql-action/upload-sarif@b895512248b1b5b0089ac3c33ecf123c2cd6f373 + uses: github/codeql-action/upload-sarif@a5b959e10d29aec4f277040b4d27d0f6bea2322a with: 
sarif_file: 'trivy-binary-results.sarif' category: ${{ steps.pr-info.outputs.is_push == 'true' && format('security-scan-{0}', github.event_name == 'workflow_run' && github.event.workflow_run.head_branch || github.ref_name) || format('security-scan-pr-{0}', steps.pr-info.outputs.pr_number) }} @@ -394,7 +394,7 @@ jobs: - name: Run Trivy filesystem scan (fail on CRITICAL/HIGH) if: steps.check-artifact.outputs.artifact_exists == 'true' || github.event_name == 'push' || github.event_name == 'pull_request' # aquasecurity/trivy-action v0.33.1 - uses: aquasecurity/trivy-action@97e0b3872f55f89b95b2f65b3dbab56962816478 + uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 with: scan-type: 'fs' scan-ref: ${{ steps.extract.outputs.binary_path }} diff --git a/.github/workflows/security-weekly-rebuild.yml b/.github/workflows/security-weekly-rebuild.yml index 1039e6500..e2d1c9c94 100644 --- a/.github/workflows/security-weekly-rebuild.yml +++ b/.github/workflows/security-weekly-rebuild.yml @@ -36,13 +36,18 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + # Explicitly fetch the current HEAD of the ref at run time, not the + # SHA that was frozen when this scheduled job was queued. Without this, + # a queued job can run days later with stale code. 
+ ref: ${{ github.ref_name }} - name: Normalize image name run: | echo "IMAGE_NAME=$(echo "${{ env.IMAGE_NAME }}" | tr '[:upper:]' '[:lower:]')" >> "$GITHUB_ENV" - name: Set up QEMU - uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 + uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4.0.0 - name: Set up Docker Buildx uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0 @@ -56,7 +61,7 @@ jobs: echo "Base image digest: $DIGEST" - name: Log in to Container Registry - uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0 + uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} diff --git a/Dockerfile b/Dockerfile index f26ed1e9b..9fa6ea56a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -19,6 +19,8 @@ ARG CADDY_VERSION=2.11.1 ARG CADDY_CANDIDATE_VERSION=2.11.1 ARG CADDY_USE_CANDIDATE=0 ARG CADDY_PATCH_SCENARIO=B +# renovate: datasource=go depName=github.com/greenpau/caddy-security +ARG CADDY_SECURITY_VERSION=1.1.36 ## When an official caddy image tag isn't available on the host, use a ## plain Alpine base image and overwrite its caddy binary with our ## xcaddy-built binary in the later COPY step. This avoids relying on @@ -134,7 +136,7 @@ RUN set -eux; \ # Note: xx-go install puts binaries in /go/bin/TARGETOS_TARGETARCH/dlv if cross-compiling. # We find it and move it to /go/bin/dlv so it's in a consistent location for the next stage. 
# renovate: datasource=go depName=github.com/go-delve/delve -ARG DLV_VERSION=1.26.0 +ARG DLV_VERSION=1.26.1 # hadolint ignore=DL3059,DL4006 RUN CGO_ENABLED=0 xx-go install github.com/go-delve/delve/cmd/dlv@v${DLV_VERSION} && \ DLV_PATH=$(find /go/bin -name dlv -type f | head -n 1) && \ @@ -202,6 +204,7 @@ ARG CADDY_VERSION ARG CADDY_CANDIDATE_VERSION ARG CADDY_USE_CANDIDATE ARG CADDY_PATCH_SCENARIO +ARG CADDY_SECURITY_VERSION # renovate: datasource=go depName=github.com/caddyserver/xcaddy ARG XCADDY_VERSION=0.4.5 @@ -229,7 +232,8 @@ RUN --mount=type=cache,target=/root/.cache/go-build \ echo "Stage 1: Generate go.mod with xcaddy..."; \ # Run xcaddy to generate the build directory and go.mod GOOS=$TARGETOS GOARCH=$TARGETARCH xcaddy build v${CADDY_TARGET_VERSION} \ - --with github.com/greenpau/caddy-security \ + --with github.com/caddyserver/caddy/v2@v${CADDY_TARGET_VERSION} \ + --with github.com/greenpau/caddy-security@v${CADDY_SECURITY_VERSION} \ --with github.com/corazawaf/coraza-caddy/v2 \ --with github.com/hslatman/caddy-crowdsec-bouncer@v0.10.0 \ --with github.com/zhangjiayin/caddy-geoip2 \ diff --git a/backend/cmd/api/main_test.go b/backend/cmd/api/main_test.go index d260b552b..78d3978c3 100644 --- a/backend/cmd/api/main_test.go +++ b/backend/cmd/api/main_test.go @@ -40,7 +40,7 @@ func TestResetPasswordCommand_Succeeds(t *testing.T) { } email := "user@example.com" - user := models.User{UUID: "u-1", Email: email, Name: "User", Role: "admin", Enabled: true} + user := models.User{UUID: "u-1", Email: email, Name: "User", Role: models.RoleAdmin, Enabled: true} user.PasswordHash = "$2a$10$example_hashed_password" if err = db.Create(&user).Error; err != nil { t.Fatalf("seed user: %v", err) @@ -257,7 +257,7 @@ func TestMain_ResetPasswordCommand_InProcess(t *testing.T) { } email := "user@example.com" - user := models.User{UUID: "u-1", Email: email, Name: "User", Role: "admin", Enabled: true} + user := models.User{UUID: "u-1", Email: email, Name: "User", Role: 
models.RoleAdmin, Enabled: true} user.PasswordHash = "$2a$10$example_hashed_password" user.FailedLoginAttempts = 3 if err = db.Create(&user).Error; err != nil { diff --git a/backend/cmd/seed/seed_smoke_test.go b/backend/cmd/seed/seed_smoke_test.go index c47f5a9af..54e12953e 100644 --- a/backend/cmd/seed/seed_smoke_test.go +++ b/backend/cmd/seed/seed_smoke_test.go @@ -72,7 +72,7 @@ func TestSeedMain_ForceAdminUpdatesExistingUserPassword(t *testing.T) { UUID: "existing-user", Email: "admin@localhost", Name: "Old Name", - Role: "viewer", + Role: models.RolePassthrough, Enabled: false, PasswordHash: "$2a$10$example_hashed_password", } @@ -134,7 +134,7 @@ func TestSeedMain_ForceAdminWithoutPasswordUpdatesMetadata(t *testing.T) { UUID: "existing-user-no-pass", Email: "admin@localhost", Name: "Old Name", - Role: "viewer", + Role: models.RolePassthrough, Enabled: false, PasswordHash: "$2a$10$example_hashed_password", } diff --git a/backend/go.mod b/backend/go.mod index 4b15720ab..eac8277a8 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -84,7 +84,7 @@ require ( github.com/ugorji/go/codec v1.3.1 // indirect go.mongodb.org/mongo-driver/v2 v2.5.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.66.0 // indirect go.opentelemetry.io/otel v1.41.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect go.opentelemetry.io/otel/metric v1.41.0 // indirect diff --git a/backend/go.sum b/backend/go.sum index 049feed90..77ccb1ef1 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -176,8 +176,8 @@ go.mongodb.org/mongo-driver/v2 v2.5.0 h1:yXUhImUjjAInNcpTcAlPHiT7bIXhshCTL3jVBkF go.mongodb.org/mongo-driver/v2 v2.5.0/go.mod h1:yOI9kBsufol30iFsl1slpdq1I0eHPzybRWdyYUs8K/0= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk 
v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.66.0 h1:PnV4kVnw0zOmwwFkAzCN5O07fw1YOIQor120zrh0AVo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.66.0/go.mod h1:ofAwF4uinaf8SXdVzzbL4OsxJ3VfeEg3f/F6CeF49/Y= go.opentelemetry.io/otel v1.41.0 h1:YlEwVsGAlCvczDILpUXpIpPSL/VPugt7zHThEMLce1c= go.opentelemetry.io/otel v1.41.0/go.mod h1:Yt4UwgEKeT05QbLwbyHXEwhnjxNO6D8L5PQP51/46dE= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= @@ -186,10 +186,10 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= go.opentelemetry.io/otel/metric v1.41.0 h1:rFnDcs4gRzBcsO9tS8LCpgR0dxg4aaxWlJxCno7JlTQ= go.opentelemetry.io/otel/metric v1.41.0/go.mod h1:xPvCwd9pU0VN8tPZYzDZV/BMj9CM9vs00GuBjeKhJps= -go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= -go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= -go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= -go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/sdk v1.41.0 h1:YPIEXKmiAwkGl3Gu1huk1aYWwtpRLeskpV+wPisxBp8= +go.opentelemetry.io/otel/sdk v1.41.0/go.mod h1:ahFdU0G5y8IxglBf0QBJXgSe7agzjE4GiTJ6HT9ud90= +go.opentelemetry.io/otel/sdk/metric v1.41.0 h1:siZQIYBAUd1rlIWQT2uCxWJxcCO7q3TriaMlf08rXw8= +go.opentelemetry.io/otel/sdk/metric v1.41.0/go.mod h1:HNBuSvT7ROaGtGI50ArdRLUnvRTRGniSUZbxiWxSO8Y= 
go.opentelemetry.io/otel/trace v1.41.0 h1:Vbk2co6bhj8L59ZJ6/xFTskY+tGAbOnCtQGVVa9TIN0= go.opentelemetry.io/otel/trace v1.41.0/go.mod h1:U1NU4ULCoxeDKc09yCWdWe+3QoyweJcISEVa1RBzOis= go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= diff --git a/backend/internal/api/handlers/auth_handler.go b/backend/internal/api/handlers/auth_handler.go index 8d6c86e05..72decb75e 100644 --- a/backend/internal/api/handlers/auth_handler.go +++ b/backend/internal/api/handlers/auth_handler.go @@ -381,7 +381,7 @@ func (h *AuthHandler) Verify(c *gin.Context) { // Set headers for downstream services c.Header("X-Forwarded-User", user.Email) - c.Header("X-Forwarded-Groups", user.Role) + c.Header("X-Forwarded-Groups", string(user.Role)) c.Header("X-Forwarded-Name", user.Name) // Return 200 OK - access granted diff --git a/backend/internal/api/handlers/auth_handler_test.go b/backend/internal/api/handlers/auth_handler_test.go index 72f73c886..101df3214 100644 --- a/backend/internal/api/handlers/auth_handler_test.go +++ b/backend/internal/api/handlers/auth_handler_test.go @@ -430,7 +430,7 @@ func TestAuthHandler_Me(t *testing.T) { UUID: uuid.NewString(), Email: "me@example.com", Name: "Me User", - Role: "admin", + Role: models.RoleAdmin, } db.Create(user) @@ -630,7 +630,7 @@ func TestAuthHandler_Verify_ValidToken(t *testing.T) { UUID: uuid.NewString(), Email: "test@example.com", Name: "Test User", - Role: "user", + Role: models.RoleUser, Enabled: true, } _ = user.SetPassword("password123") @@ -661,7 +661,7 @@ func TestAuthHandler_Verify_BearerToken(t *testing.T) { UUID: uuid.NewString(), Email: "bearer@example.com", Name: "Bearer User", - Role: "admin", + Role: models.RoleAdmin, Enabled: true, } _ = user.SetPassword("password123") @@ -690,7 +690,7 @@ func TestAuthHandler_Verify_DisabledUser(t *testing.T) { UUID: uuid.NewString(), Email: "disabled@example.com", Name: "Disabled User", - Role: "user", + Role: models.RoleUser, } _ = user.SetPassword("password123") 
db.Create(user) @@ -730,7 +730,7 @@ func TestAuthHandler_Verify_ForwardAuthDenied(t *testing.T) { UUID: uuid.NewString(), Email: "denied@example.com", Name: "Denied User", - Role: "user", + Role: models.RoleUser, Enabled: true, PermissionMode: models.PermissionModeDenyAll, } @@ -795,7 +795,7 @@ func TestAuthHandler_VerifyStatus_Authenticated(t *testing.T) { UUID: uuid.NewString(), Email: "status@example.com", Name: "Status User", - Role: "user", + Role: models.RoleUser, Enabled: true, } _ = user.SetPassword("password123") @@ -828,7 +828,7 @@ func TestAuthHandler_VerifyStatus_DisabledUser(t *testing.T) { UUID: uuid.NewString(), Email: "disabled2@example.com", Name: "Disabled User 2", - Role: "user", + Role: models.RoleUser, } _ = user.SetPassword("password123") db.Create(user) @@ -880,7 +880,7 @@ func TestAuthHandler_GetAccessibleHosts_AllowAll(t *testing.T) { UUID: uuid.NewString(), Email: "allowall@example.com", Name: "Allow All User", - Role: "user", + Role: models.RoleUser, Enabled: true, PermissionMode: models.PermissionModeAllowAll, } @@ -917,7 +917,7 @@ func TestAuthHandler_GetAccessibleHosts_DenyAll(t *testing.T) { UUID: uuid.NewString(), Email: "denyall@example.com", Name: "Deny All User", - Role: "user", + Role: models.RoleUser, Enabled: true, PermissionMode: models.PermissionModeDenyAll, } @@ -956,7 +956,7 @@ func TestAuthHandler_GetAccessibleHosts_PermittedHosts(t *testing.T) { UUID: uuid.NewString(), Email: "permitted@example.com", Name: "Permitted User", - Role: "user", + Role: models.RoleUser, Enabled: true, PermissionMode: models.PermissionModeDenyAll, PermittedHosts: []models.ProxyHost{*host1}, // Only host1 @@ -1111,7 +1111,7 @@ func TestAuthHandler_Logout_InvalidatesBearerSession(t *testing.T) { UUID: uuid.NewString(), Email: "logout-session@example.com", Name: "Logout Session", - Role: "admin", + Role: models.RoleAdmin, Enabled: true, } _ = user.SetPassword("password123") @@ -1242,7 +1242,7 @@ func TestAuthHandler_Refresh(t *testing.T) { handler, 
db := setupAuthHandler(t) - user := &models.User{UUID: uuid.NewString(), Email: "refresh@example.com", Name: "Refresh User", Role: "user", Enabled: true} + user := &models.User{UUID: uuid.NewString(), Email: "refresh@example.com", Name: "Refresh User", Role: models.RoleUser, Enabled: true} require.NoError(t, user.SetPassword("password123")) require.NoError(t, db.Create(user).Error) @@ -1332,7 +1332,7 @@ func TestAuthHandler_Verify_UsesOriginalHostFallback(t *testing.T) { UUID: uuid.NewString(), Email: "originalhost@example.com", Name: "Original Host User", - Role: "user", + Role: models.RoleUser, Enabled: true, PermissionMode: models.PermissionModeAllowAll, } diff --git a/backend/internal/api/handlers/emergency_handler.go b/backend/internal/api/handlers/emergency_handler.go index 55ea772ef..e2110bcd8 100644 --- a/backend/internal/api/handlers/emergency_handler.go +++ b/backend/internal/api/handlers/emergency_handler.go @@ -384,10 +384,7 @@ func (h *EmergencyHandler) syncSecurityState(ctx context.Context) { // POST /api/v1/emergency/token/generate // Requires admin authentication func (h *EmergencyHandler) GenerateToken(c *gin.Context) { - // Check admin role - role, exists := c.Get("role") - if !exists || role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } @@ -437,10 +434,7 @@ func (h *EmergencyHandler) GenerateToken(c *gin.Context) { // GET /api/v1/emergency/token/status // Requires admin authentication func (h *EmergencyHandler) GetTokenStatus(c *gin.Context) { - // Check admin role - role, exists := c.Get("role") - if !exists || role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } @@ -458,10 +452,7 @@ func (h *EmergencyHandler) GetTokenStatus(c *gin.Context) { // DELETE /api/v1/emergency/token // Requires admin authentication func (h *EmergencyHandler) RevokeToken(c *gin.Context) { - // Check admin role - role, exists := 
c.Get("role") - if !exists || role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } @@ -485,10 +476,7 @@ func (h *EmergencyHandler) RevokeToken(c *gin.Context) { // PATCH /api/v1/emergency/token/expiration // Requires admin authentication func (h *EmergencyHandler) UpdateTokenExpiration(c *gin.Context) { - // Check admin role - role, exists := c.Get("role") - if !exists || role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } diff --git a/backend/internal/api/handlers/permission_helpers.go b/backend/internal/api/handlers/permission_helpers.go index e2a06716f..2d20f3706 100644 --- a/backend/internal/api/handlers/permission_helpers.go +++ b/backend/internal/api/handlers/permission_helpers.go @@ -36,9 +36,7 @@ func requireAuthenticatedAdmin(c *gin.Context) bool { } func isAdmin(c *gin.Context) bool { - role, _ := c.Get("role") - roleStr, _ := role.(string) - return roleStr == "admin" + return c.GetString("role") == string(models.RoleAdmin) } func respondPermissionError(c *gin.Context, securityService *services.SecurityService, action string, err error, path string) bool { diff --git a/backend/internal/api/handlers/security_event_intake_test.go b/backend/internal/api/handlers/security_event_intake_test.go index 37afce161..febf286c7 100644 --- a/backend/internal/api/handlers/security_event_intake_test.go +++ b/backend/internal/api/handlers/security_event_intake_test.go @@ -307,7 +307,7 @@ func TestSecurityEventIntakeR6Intact(t *testing.T) { Email: "admin@example.com", Name: "Admin User", PasswordHash: "$2a$10$abcdefghijklmnopqrstuvwxyz", // Dummy bcrypt hash - Role: "admin", + Role: models.RoleAdmin, Enabled: true, } require.NoError(t, db.Create(adminUser).Error) diff --git a/backend/internal/api/handlers/security_handler.go b/backend/internal/api/handlers/security_handler.go index 4468d4b2f..20dafef9e 100644 --- 
a/backend/internal/api/handlers/security_handler.go +++ b/backend/internal/api/handlers/security_handler.go @@ -1075,10 +1075,7 @@ func (h *SecurityHandler) PatchRateLimit(c *gin.Context) { // toggleSecurityModule is a helper function that handles enabling/disabling security modules // It updates the setting, invalidates cache, and triggers Caddy config reload func (h *SecurityHandler) toggleSecurityModule(c *gin.Context, settingKey string, enabled bool) { - // Check admin role - role, exists := c.Get("role") - if !exists || role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } diff --git a/backend/internal/api/handlers/user_handler.go b/backend/internal/api/handlers/user_handler.go index 6b1d884af..9b4809d0e 100644 --- a/backend/internal/api/handlers/user_handler.go +++ b/backend/internal/api/handlers/user_handler.go @@ -22,13 +22,15 @@ import ( type UserHandler struct { DB *gorm.DB + AuthService *services.AuthService MailService *services.MailService securitySvc *services.SecurityService } -func NewUserHandler(db *gorm.DB) *UserHandler { +func NewUserHandler(db *gorm.DB, authService *services.AuthService) *UserHandler { return &UserHandler{ DB: db, + AuthService: authService, MailService: services.NewMailService(db), securitySvc: services.NewSecurityService(db), } @@ -141,7 +143,7 @@ func (h *UserHandler) Setup(c *gin.Context) { UUID: uuid.New().String(), Name: req.Name, Email: strings.ToLower(req.Email), - Role: "admin", + Role: models.RoleAdmin, Enabled: true, APIKey: uuid.New().String(), } @@ -197,8 +199,21 @@ func (h *UserHandler) Setup(c *gin.Context) { }) } +// rejectPassthrough aborts with 403 if the caller is a passthrough user. +// Returns true if the request was rejected (caller should return). 
+func rejectPassthrough(c *gin.Context, action string) bool { + if c.GetString("role") == string(models.RolePassthrough) { + c.JSON(http.StatusForbidden, gin.H{"error": "Passthrough users cannot " + action}) + return true + } + return false +} + // RegenerateAPIKey generates a new API key for the authenticated user. func (h *UserHandler) RegenerateAPIKey(c *gin.Context) { + if rejectPassthrough(c, "manage API keys") { + return + } userID, exists := c.Get("userID") if !exists { c.JSON(http.StatusUnauthorized, gin.H{"error": "Unauthorized"}) @@ -222,6 +237,9 @@ func (h *UserHandler) RegenerateAPIKey(c *gin.Context) { // GetProfile returns the current user's profile including API key. func (h *UserHandler) GetProfile(c *gin.Context) { + if rejectPassthrough(c, "access profile") { + return + } userID, exists := c.Get("userID") if !exists { c.JSON(http.StatusUnauthorized, gin.H{"error": "Unauthorized"}) @@ -252,6 +270,9 @@ type UpdateProfileRequest struct { // UpdateProfile updates the authenticated user's profile. func (h *UserHandler) UpdateProfile(c *gin.Context) { + if rejectPassthrough(c, "update profile") { + return + } userID, exists := c.Get("userID") if !exists { c.JSON(http.StatusUnauthorized, gin.H{"error": "Unauthorized"}) @@ -309,9 +330,7 @@ func (h *UserHandler) UpdateProfile(c *gin.Context) { // ListUsers returns all users (admin only). func (h *UserHandler) ListUsers(c *gin.Context) { - role, _ := c.Get("role") - if role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } @@ -355,9 +374,7 @@ type CreateUserRequest struct { // CreateUser creates a new user with a password (admin only). 
func (h *UserHandler) CreateUser(c *gin.Context) { - role, _ := c.Get("role") - if role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } @@ -369,7 +386,12 @@ func (h *UserHandler) CreateUser(c *gin.Context) { // Default role to "user" if req.Role == "" { - req.Role = "user" + req.Role = string(models.RoleUser) + } + + if !models.UserRole(req.Role).IsValid() { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid role"}) + return } // Default permission mode to "allow_all" @@ -392,7 +414,7 @@ func (h *UserHandler) CreateUser(c *gin.Context) { UUID: uuid.New().String(), Email: strings.ToLower(req.Email), Name: req.Name, - Role: req.Role, + Role: models.UserRole(req.Role), Enabled: true, APIKey: uuid.New().String(), PermissionMode: models.PermissionMode(req.PermissionMode), @@ -460,9 +482,7 @@ func generateSecureToken(length int) (string, error) { // InviteUser creates a new user with an invite token and sends an email (admin only). 
func (h *UserHandler) InviteUser(c *gin.Context) { - role, _ := c.Get("role") - if role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } @@ -476,7 +496,12 @@ func (h *UserHandler) InviteUser(c *gin.Context) { // Default role to "user" if req.Role == "" { - req.Role = "user" + req.Role = string(models.RoleUser) + } + + if !models.UserRole(req.Role).IsValid() { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid role"}) + return } // Default permission mode to "allow_all" @@ -506,7 +531,7 @@ func (h *UserHandler) InviteUser(c *gin.Context) { user := models.User{ UUID: uuid.New().String(), Email: strings.ToLower(req.Email), - Role: req.Role, + Role: models.UserRole(req.Role), Enabled: false, // Disabled until invite is accepted APIKey: uuid.New().String(), PermissionMode: models.PermissionMode(req.PermissionMode), @@ -595,9 +620,7 @@ type PreviewInviteURLRequest struct { // PreviewInviteURL returns what the invite URL would look like with current settings. func (h *UserHandler) PreviewInviteURL(c *gin.Context) { - role, _ := c.Get("role") - if role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } @@ -641,9 +664,7 @@ func getAppName(db *gorm.DB) string { // GetUser returns a single user by ID (admin only). func (h *UserHandler) GetUser(c *gin.Context) { - role, _ := c.Get("role") - if role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } @@ -692,11 +713,17 @@ type UpdateUserRequest struct { Enabled *bool `json:"enabled"` } -// UpdateUser updates an existing user (admin only). +// UpdateUser updates an existing user (admin only for management fields, self-service for name/password). 
func (h *UserHandler) UpdateUser(c *gin.Context) { - role, _ := c.Get("role") - if role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + currentRole := c.GetString("role") + currentUserIDRaw, exists := c.Get("userID") + if !exists { + c.JSON(http.StatusUnauthorized, gin.H{"error": "Authentication required"}) + return + } + currentUserID, ok := currentUserIDRaw.(uint) + if !ok { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Invalid session"}) return } @@ -714,11 +741,31 @@ func (h *UserHandler) UpdateUser(c *gin.Context) { } var req UpdateUserRequest - if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + if bindErr := c.ShouldBindJSON(&req); bindErr != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": bindErr.Error()}) return } + isSelf := uint(id) == currentUserID + isCallerAdmin := currentRole == string(models.RoleAdmin) + + // Non-admin users can only update their own name and password via this endpoint. + // Email changes require password verification and must go through PUT /user/profile. + if !isCallerAdmin { + if !isSelf { + c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + return + } + if req.Email != "" { + c.JSON(http.StatusForbidden, gin.H{"error": "Email changes must be made via your profile settings"}) + return + } + if req.Role != "" || req.Enabled != nil { + c.JSON(http.StatusForbidden, gin.H{"error": "Cannot modify role or enabled status"}) + return + } + } + updates := make(map[string]any) if req.Name != "" { @@ -727,21 +774,37 @@ func (h *UserHandler) UpdateUser(c *gin.Context) { if req.Email != "" { email := strings.ToLower(req.Email) - // Check if email is taken by another user var count int64 - if err := h.DB.Model(&models.User{}).Where("email = ? AND id != ?", email, id).Count(&count).Error; err == nil && count > 0 { + if dbErr := h.DB.Model(&models.User{}).Where("email = ? 
AND id != ?", email, id).Count(&count).Error; dbErr == nil && count > 0 { c.JSON(http.StatusConflict, gin.H{"error": "Email already in use"}) return } updates["email"] = email } + needsSessionInvalidation := false + if req.Role != "" { - updates["role"] = req.Role + newRole := models.UserRole(req.Role) + if !newRole.IsValid() { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid role"}) + return + } + + if newRole != user.Role { + // Self-demotion prevention + if isSelf { + c.JSON(http.StatusForbidden, gin.H{"error": "Cannot change your own role"}) + return + } + + updates["role"] = string(newRole) + needsSessionInvalidation = true + } } if req.Password != nil { - if err := user.SetPassword(*req.Password); err != nil { + if hashErr := user.SetPassword(*req.Password); hashErr != nil { c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to hash password"}) return } @@ -750,15 +813,83 @@ func (h *UserHandler) UpdateUser(c *gin.Context) { updates["locked_until"] = nil } - if req.Enabled != nil { + if req.Enabled != nil && *req.Enabled != user.Enabled { + // Prevent self-disable + if isSelf && !*req.Enabled { + c.JSON(http.StatusForbidden, gin.H{"error": "Cannot disable your own account"}) + return + } + updates["enabled"] = *req.Enabled + if !*req.Enabled { + needsSessionInvalidation = true + } } - if len(updates) > 0 { - if err := h.DB.Model(&user).Updates(updates).Error; err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to update user"}) + // Wrap the last-admin checks and the actual update in a transaction to prevent + // race conditions: two concurrent requests could both read adminCount==2 + // and both proceed, leaving zero admins. 
+ err = h.DB.Transaction(func(tx *gorm.DB) error { + // Re-fetch user inside transaction for consistent state + if txErr := tx.First(&user, id).Error; txErr != nil { + return txErr + } + + // Last-admin protection for role demotion + if newRoleStr, ok := updates["role"]; ok { + newRole := models.UserRole(newRoleStr.(string)) + if user.Role == models.RoleAdmin && newRole != models.RoleAdmin { + var adminCount int64 + // Policy: count only enabled admins. This is stricter than "WHERE role = ?" + // because a disabled admin cannot act; treating them as non-existent + // prevents leaving the system with only disabled admins. + tx.Model(&models.User{}).Where("role = ? AND enabled = ?", models.RoleAdmin, true).Count(&adminCount) + if adminCount <= 1 { + return fmt.Errorf("cannot demote the last admin") + } + } + } + + // Last-admin protection for disabling + if enabledVal, ok := updates["enabled"]; ok { + if enabled, isBool := enabledVal.(bool); isBool && !enabled { + if user.Role == models.RoleAdmin { + var adminCount int64 + // Policy: count only enabled admins (same rationale as above). + tx.Model(&models.User{}).Where("role = ? 
AND enabled = ?", models.RoleAdmin, true).Count(&adminCount) + if adminCount <= 1 { + return fmt.Errorf("cannot disable the last admin") + } + } + } + } + + if len(updates) > 0 { + if txErr := tx.Model(&user).Updates(updates).Error; txErr != nil { + return txErr + } + } + + return nil + }) + + if err != nil { + errMsg := err.Error() + if errMsg == "cannot demote the last admin" || errMsg == "cannot disable the last admin" { + c.JSON(http.StatusForbidden, gin.H{"error": "Cannot" + errMsg[len("cannot"):]}) return } + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to update user"}) + return + } + + if len(updates) > 0 { + if needsSessionInvalidation && h.AuthService != nil { + if invErr := h.AuthService.InvalidateSessions(user.ID); invErr != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to invalidate sessions"}) + return + } + } h.logUserAudit(c, "user_update", &user, map[string]any{ "target_email": user.Email, @@ -780,13 +911,12 @@ func mapsKeys(values map[string]any) []string { // DeleteUser deletes a user (admin only). func (h *UserHandler) DeleteUser(c *gin.Context) { - role, _ := c.Get("role") - if role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } - currentUserID, _ := c.Get("userID") + currentUserIDRaw, _ := c.Get("userID") + currentUserID, _ := currentUserIDRaw.(uint) idParam := c.Param("id") id, err := strconv.ParseUint(idParam, 10, 32) @@ -796,7 +926,7 @@ func (h *UserHandler) DeleteUser(c *gin.Context) { } // Prevent self-deletion - if uint(id) == currentUserID.(uint) { + if uint(id) == currentUserID { c.JSON(http.StatusForbidden, gin.H{"error": "Cannot delete your own account"}) return } @@ -834,9 +964,7 @@ type UpdateUserPermissionsRequest struct { // ResendInvite regenerates and resends an invitation to a pending user (admin only). 
func (h *UserHandler) ResendInvite(c *gin.Context) { - role, _ := c.Get("role") - if role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } @@ -919,9 +1047,7 @@ func redactInviteURL(inviteURL string) string { // UpdateUserPermissions updates a user's permission mode and host exceptions (admin only). func (h *UserHandler) UpdateUserPermissions(c *gin.Context) { - role, _ := c.Get("role") - if role != "admin" { - c.JSON(http.StatusForbidden, gin.H{"error": "Admin access required"}) + if !requireAdmin(c) { return } diff --git a/backend/internal/api/handlers/user_handler_coverage_test.go b/backend/internal/api/handlers/user_handler_coverage_test.go index d1fc16af2..db0133a80 100644 --- a/backend/internal/api/handlers/user_handler_coverage_test.go +++ b/backend/internal/api/handlers/user_handler_coverage_test.go @@ -23,7 +23,7 @@ func setupUserCoverageDB(t *testing.T) *gorm.DB { func TestUserHandler_GetSetupStatus_Error(t *testing.T) { gin.SetMode(gin.TestMode) db := setupUserCoverageDB(t) - h := NewUserHandler(db) + h := NewUserHandler(db, nil) // Drop table to cause error _ = db.Migrator().DropTable(&models.User{}) @@ -40,7 +40,7 @@ func TestUserHandler_GetSetupStatus_Error(t *testing.T) { func TestUserHandler_Setup_CheckStatusError(t *testing.T) { gin.SetMode(gin.TestMode) db := setupUserCoverageDB(t) - h := NewUserHandler(db) + h := NewUserHandler(db, nil) // Drop table to cause error _ = db.Migrator().DropTable(&models.User{}) @@ -57,10 +57,10 @@ func TestUserHandler_Setup_CheckStatusError(t *testing.T) { func TestUserHandler_Setup_AlreadyCompleted(t *testing.T) { gin.SetMode(gin.TestMode) db := setupUserCoverageDB(t) - h := NewUserHandler(db) + h := NewUserHandler(db, nil) // Create a user to mark setup as complete - user := &models.User{UUID: "uuid-a", Name: "Admin", Email: "admin@test.com", Role: "admin"} + user := &models.User{UUID: "uuid-a", Name: "Admin", Email: "admin@test.com", Role: 
models.RoleAdmin} _ = user.SetPassword("password123") db.Create(user) @@ -76,7 +76,7 @@ func TestUserHandler_Setup_AlreadyCompleted(t *testing.T) { func TestUserHandler_Setup_InvalidJSON(t *testing.T) { gin.SetMode(gin.TestMode) db := setupUserCoverageDB(t) - h := NewUserHandler(db) + h := NewUserHandler(db, nil) w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) @@ -91,7 +91,7 @@ func TestUserHandler_Setup_InvalidJSON(t *testing.T) { func TestUserHandler_RegenerateAPIKey_Unauthorized(t *testing.T) { gin.SetMode(gin.TestMode) db := setupUserCoverageDB(t) - h := NewUserHandler(db) + h := NewUserHandler(db, nil) w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) @@ -105,7 +105,7 @@ func TestUserHandler_RegenerateAPIKey_Unauthorized(t *testing.T) { func TestUserHandler_RegenerateAPIKey_DBError(t *testing.T) { gin.SetMode(gin.TestMode) db := setupUserCoverageDB(t) - h := NewUserHandler(db) + h := NewUserHandler(db, nil) // Drop table to cause error _ = db.Migrator().DropTable(&models.User{}) @@ -123,7 +123,7 @@ func TestUserHandler_RegenerateAPIKey_DBError(t *testing.T) { func TestUserHandler_GetProfile_Unauthorized(t *testing.T) { gin.SetMode(gin.TestMode) db := setupUserCoverageDB(t) - h := NewUserHandler(db) + h := NewUserHandler(db, nil) w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) @@ -137,7 +137,7 @@ func TestUserHandler_GetProfile_Unauthorized(t *testing.T) { func TestUserHandler_GetProfile_NotFound(t *testing.T) { gin.SetMode(gin.TestMode) db := setupUserCoverageDB(t) - h := NewUserHandler(db) + h := NewUserHandler(db, nil) w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) @@ -152,7 +152,7 @@ func TestUserHandler_GetProfile_NotFound(t *testing.T) { func TestUserHandler_UpdateProfile_Unauthorized(t *testing.T) { gin.SetMode(gin.TestMode) db := setupUserCoverageDB(t) - h := NewUserHandler(db) + h := NewUserHandler(db, nil) w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) @@ -166,7 +166,7 @@ func 
TestUserHandler_UpdateProfile_Unauthorized(t *testing.T) { func TestUserHandler_UpdateProfile_InvalidJSON(t *testing.T) { gin.SetMode(gin.TestMode) db := setupUserCoverageDB(t) - h := NewUserHandler(db) + h := NewUserHandler(db, nil) w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) @@ -182,7 +182,7 @@ func TestUserHandler_UpdateProfile_InvalidJSON(t *testing.T) { func TestUserHandler_UpdateProfile_UserNotFound(t *testing.T) { gin.SetMode(gin.TestMode) db := setupUserCoverageDB(t) - h := NewUserHandler(db) + h := NewUserHandler(db, nil) body, _ := json.Marshal(map[string]string{ "name": "Updated", @@ -203,14 +203,14 @@ func TestUserHandler_UpdateProfile_UserNotFound(t *testing.T) { func TestUserHandler_UpdateProfile_EmailConflict(t *testing.T) { gin.SetMode(gin.TestMode) db := setupUserCoverageDB(t) - h := NewUserHandler(db) + h := NewUserHandler(db, nil) // Create two users - user1 := &models.User{UUID: "uuid-1", Name: "User1", Email: "user1@test.com", Role: "admin", APIKey: "key1"} + user1 := &models.User{UUID: "uuid-1", Name: "User1", Email: "user1@test.com", Role: models.RoleAdmin, APIKey: "key1"} _ = user1.SetPassword("password123") db.Create(user1) - user2 := &models.User{UUID: "uuid-2", Name: "User2", Email: "user2@test.com", Role: "admin", APIKey: "key2"} + user2 := &models.User{UUID: "uuid-2", Name: "User2", Email: "user2@test.com", Role: models.RoleAdmin, APIKey: "key2"} _ = user2.SetPassword("password123") db.Create(user2) @@ -236,9 +236,9 @@ func TestUserHandler_UpdateProfile_EmailConflict(t *testing.T) { func TestUserHandler_UpdateProfile_EmailChangeNoPassword(t *testing.T) { gin.SetMode(gin.TestMode) db := setupUserCoverageDB(t) - h := NewUserHandler(db) + h := NewUserHandler(db, nil) - user := &models.User{UUID: "uuid-u", Name: "User", Email: "user@test.com", Role: "admin"} + user := &models.User{UUID: "uuid-u", Name: "User", Email: "user@test.com", Role: models.RoleAdmin} _ = user.SetPassword("password123") db.Create(user) @@ -263,9 
+263,9 @@ func TestUserHandler_UpdateProfile_EmailChangeNoPassword(t *testing.T) { func TestUserHandler_UpdateProfile_WrongPassword(t *testing.T) { gin.SetMode(gin.TestMode) db := setupUserCoverageDB(t) - h := NewUserHandler(db) + h := NewUserHandler(db, nil) - user := &models.User{UUID: "uuid-u", Name: "User", Email: "user@test.com", Role: "admin"} + user := &models.User{UUID: "uuid-u", Name: "User", Email: "user@test.com", Role: models.RoleAdmin} _ = user.SetPassword("password123") db.Create(user) diff --git a/backend/internal/api/handlers/user_handler_test.go b/backend/internal/api/handlers/user_handler_test.go index bdcb24b7e..ab2dee9f0 100644 --- a/backend/internal/api/handlers/user_handler_test.go +++ b/backend/internal/api/handlers/user_handler_test.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "errors" + "fmt" "net/http" "net/http/httptest" "strconv" @@ -11,6 +12,7 @@ import ( "testing" "time" + "github.com/Wikid82/charon/backend/internal/config" "github.com/Wikid82/charon/backend/internal/models" "github.com/Wikid82/charon/backend/internal/services" "github.com/gin-gonic/gin" @@ -23,7 +25,7 @@ import ( func setupUserHandler(t *testing.T) (*UserHandler, *gorm.DB) { db := OpenTestDB(t) _ = db.AutoMigrate(&models.User{}, &models.Setting{}, &models.SecurityAudit{}) - return NewUserHandler(db), db + return NewUserHandler(db, nil), db } func TestMapsKeys(t *testing.T) { @@ -312,7 +314,7 @@ func TestUserHandler_ListUsers_SecretEchoContract(t *testing.T) { UUID: uuid.NewString(), Email: "user@example.com", Name: "User", - Role: "user", + Role: models.RoleUser, APIKey: "raw-api-key", InviteToken: "raw-invite-token", PasswordHash: "raw-password-hash", @@ -661,7 +663,7 @@ func TestUserHandler_UpdateProfile_Errors(t *testing.T) { func setupUserHandlerWithProxyHosts(t *testing.T) (*UserHandler, *gorm.DB) { db := OpenTestDB(t) _ = db.AutoMigrate(&models.User{}, &models.Setting{}, &models.ProxyHost{}, &models.SecurityAudit{}) - return NewUserHandler(db), db + 
return NewUserHandler(db, nil), db } func TestUserHandler_ListUsers_NonAdmin(t *testing.T) { @@ -912,18 +914,24 @@ func TestUserHandler_GetUser_Success(t *testing.T) { } func TestUserHandler_UpdateUser_NonAdmin(t *testing.T) { - handler, _ := setupUserHandlerWithProxyHosts(t) + handler, db := setupUserHandlerWithProxyHosts(t) + + // Create a target user so it exists in the DB + target := &models.User{UUID: uuid.NewString(), Email: "target@example.com", Name: "Target", APIKey: uuid.NewString(), Role: models.RoleUser} + db.Create(target) + gin.SetMode(gin.TestMode) r := gin.New() r.Use(func(c *gin.Context) { c.Set("role", "user") + c.Set("userID", uint(999)) c.Next() }) r.PUT("/users/:id", handler.UpdateUser) body := map[string]any{"name": "Updated"} jsonBody, _ := json.Marshal(body) - req := httptest.NewRequest("PUT", "/users/1", bytes.NewBuffer(jsonBody)) + req := httptest.NewRequest("PUT", fmt.Sprintf("/users/%d", target.ID), bytes.NewBuffer(jsonBody)) req.Header.Set("Content-Type", "application/json") w := httptest.NewRecorder() r.ServeHTTP(w, req) @@ -937,6 +945,7 @@ func TestUserHandler_UpdateUser_InvalidID(t *testing.T) { r := gin.New() r.Use(func(c *gin.Context) { c.Set("role", "admin") + c.Set("userID", uint(11)) c.Next() }) r.PUT("/users/:id", handler.UpdateUser) @@ -962,6 +971,7 @@ func TestUserHandler_UpdateUser_InvalidJSON(t *testing.T) { r := gin.New() r.Use(func(c *gin.Context) { c.Set("role", "admin") + c.Set("userID", uint(11)) c.Next() }) r.PUT("/users/:id", handler.UpdateUser) @@ -980,6 +990,7 @@ func TestUserHandler_UpdateUser_NotFound(t *testing.T) { r := gin.New() r.Use(func(c *gin.Context) { c.Set("role", "admin") + c.Set("userID", uint(11)) c.Next() }) r.PUT("/users/:id", handler.UpdateUser) @@ -997,7 +1008,7 @@ func TestUserHandler_UpdateUser_NotFound(t *testing.T) { func TestUserHandler_UpdateUser_Success(t *testing.T) { handler, db := setupUserHandlerWithProxyHosts(t) - user := &models.User{UUID: uuid.NewString(), Email: 
"update@example.com", Name: "Original", Role: "user"} + user := &models.User{UUID: uuid.NewString(), Email: "update@example.com", Name: "Original", Role: models.RoleUser} db.Create(user) gin.SetMode(gin.TestMode) @@ -1030,7 +1041,7 @@ func TestUserHandler_UpdateUser_Success(t *testing.T) { func TestUserHandler_UpdateUser_PasswordReset(t *testing.T) { handler, db := setupUserHandlerWithProxyHosts(t) - user := &models.User{UUID: uuid.NewString(), Email: "reset@example.com", Name: "Reset User", Role: "user"} + user := &models.User{UUID: uuid.NewString(), Email: "reset@example.com", Name: "Reset User", Role: models.RoleUser} require.NoError(t, user.SetPassword("oldpassword123")) lockUntil := time.Now().Add(10 * time.Minute) user.FailedLoginAttempts = 4 @@ -1041,6 +1052,7 @@ func TestUserHandler_UpdateUser_PasswordReset(t *testing.T) { r := gin.New() r.Use(func(c *gin.Context) { c.Set("role", "admin") + c.Set("userID", uint(11)) c.Next() }) r.PUT("/users/:id", handler.UpdateUser) @@ -1214,7 +1226,7 @@ func TestUserHandler_UpdateUserPermissions_InvalidJSON(t *testing.T) { APIKey: uuid.NewString(), Email: "perms-invalid@example.com", Name: "Perms Invalid Test", - Role: "user", + Role: models.RoleUser, Enabled: true, } db.Create(user) @@ -1562,7 +1574,7 @@ func TestUserHandler_InviteUser_Success(t *testing.T) { UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "admin@example.com", - Role: "admin", + Role: models.RoleAdmin, } db.Create(admin) @@ -1615,7 +1627,7 @@ func TestUserHandler_InviteUser_WithPermittedHosts(t *testing.T) { UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "admin-perm@example.com", - Role: "admin", + Role: models.RoleAdmin, } db.Create(admin) @@ -1664,7 +1676,7 @@ func TestUserHandler_InviteUser_WithSMTPConfigured(t *testing.T) { UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "admin-smtp@example.com", - Role: "admin", + Role: models.RoleAdmin, } db.Create(admin) @@ -1727,7 +1739,7 @@ func 
TestUserHandler_InviteUser_WithSMTPAndConfiguredPublicURL_IncludesInviteURL UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "admin-publicurl@example.com", - Role: "admin", + Role: models.RoleAdmin, } db.Create(admin) @@ -1780,7 +1792,7 @@ func TestUserHandler_InviteUser_WithSMTPAndMalformedPublicURL_DoesNotExposeInvit UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "admin-malformed-publicurl@example.com", - Role: "admin", + Role: models.RoleAdmin, } db.Create(admin) @@ -1834,7 +1846,7 @@ func TestUserHandler_InviteUser_WithSMTPConfigured_DefaultAppName(t *testing.T) UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "admin-smtp-default@example.com", - Role: "admin", + Role: models.RoleAdmin, } db.Create(admin) @@ -1976,7 +1988,7 @@ func TestUserHandler_PreviewInviteURL_NonAdmin(t *testing.T) { r.ServeHTTP(w, req) assert.Equal(t, http.StatusForbidden, w.Code) - assert.Contains(t, w.Body.String(), "Admin access required") + assert.Contains(t, w.Body.String(), "admin privileges required") } func TestUserHandler_PreviewInviteURL_InvalidJSON(t *testing.T) { @@ -2137,6 +2149,7 @@ func TestUserHandler_UpdateUser_EmailConflict(t *testing.T) { r := gin.New() r.Use(func(c *gin.Context) { c.Set("role", "admin") + c.Set("userID", uint(11)) c.Next() }) r.PUT("/users/:id", handler.UpdateUser) @@ -2195,7 +2208,7 @@ func TestUserHandler_InviteUser_EmailNormalization(t *testing.T) { UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "admin@example.com", - Role: "admin", + Role: models.RoleAdmin, } db.Create(admin) @@ -2264,7 +2277,7 @@ func TestUserHandler_InviteUser_DefaultPermissionMode(t *testing.T) { UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "admin@example.com", - Role: "admin", + Role: models.RoleAdmin, } db.Create(admin) @@ -2322,7 +2335,7 @@ func TestUserHandler_CreateUser_DefaultRole(t *testing.T) { // Verify role defaults to "user" var user models.User db.Where("email = ?", "defaultrole@example.com").First(&user) - 
assert.Equal(t, "user", user.Role) + assert.Equal(t, models.RoleUser, user.Role) } func TestUserHandler_InviteUser_DefaultRole(t *testing.T) { @@ -2333,7 +2346,7 @@ func TestUserHandler_InviteUser_DefaultRole(t *testing.T) { UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "admin@example.com", - Role: "admin", + Role: models.RoleAdmin, } db.Create(admin) @@ -2361,7 +2374,7 @@ func TestUserHandler_InviteUser_DefaultRole(t *testing.T) { // Verify role defaults to "user" var user models.User db.Where("email = ?", "defaultroleinvite@example.com").First(&user) - assert.Equal(t, "user", user.Role) + assert.Equal(t, models.RoleUser, user.Role) } // TestUserHandler_PreviewInviteURL_Unconfigured_DoesNotUseRequestHost verifies that @@ -2484,7 +2497,7 @@ func TestResendInvite_NonAdmin(t *testing.T) { r.ServeHTTP(w, req) assert.Equal(t, http.StatusForbidden, w.Code) - assert.Contains(t, w.Body.String(), "Admin access required") + assert.Contains(t, w.Body.String(), "admin privileges required") } func TestResendInvite_InvalidID(t *testing.T) { @@ -2705,3 +2718,394 @@ func TestRedactInviteURL(t *testing.T) { }) } } + +// --- Passthrough rejection tests --- + +func setupPassthroughRouter(handler *UserHandler) *gin.Engine { + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", string(models.RolePassthrough)) + c.Next() + }) + r.POST("/api-key", handler.RegenerateAPIKey) + r.GET("/profile", handler.GetProfile) + r.PUT("/profile", handler.UpdateProfile) + return r +} + +func TestUserHandler_RegenerateAPIKey_PassthroughRejected(t *testing.T) { + handler, _ := setupUserHandler(t) + r := setupPassthroughRouter(handler) + + req := httptest.NewRequest(http.MethodPost, "/api-key", http.NoBody) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusForbidden, w.Code) + assert.Contains(t, w.Body.String(), "Passthrough users cannot manage API keys") +} + +func TestUserHandler_GetProfile_PassthroughRejected(t 
*testing.T) { + handler, _ := setupUserHandler(t) + r := setupPassthroughRouter(handler) + + req := httptest.NewRequest(http.MethodGet, "/profile", http.NoBody) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusForbidden, w.Code) + assert.Contains(t, w.Body.String(), "Passthrough users cannot access profile") +} + +func TestUserHandler_UpdateProfile_PassthroughRejected(t *testing.T) { + handler, _ := setupUserHandler(t) + r := setupPassthroughRouter(handler) + + body, _ := json.Marshal(map[string]string{"name": "Test"}) + req := httptest.NewRequest(http.MethodPut, "/profile", bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusForbidden, w.Code) + assert.Contains(t, w.Body.String(), "Passthrough users cannot update profile") +} + +// --- CreateUser / InviteUser invalid role --- + +func TestUserHandler_CreateUser_InvalidRole(t *testing.T) { + handler, _ := setupUserHandler(t) + + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Next() + }) + r.POST("/users", handler.CreateUser) + + body, _ := json.Marshal(map[string]string{ + "name": "Test User", + "email": "new@example.com", + "role": "superadmin", + "password": "password123", + }) + req := httptest.NewRequest(http.MethodPost, "/users", bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.Contains(t, w.Body.String(), "Invalid role") +} + +func TestUserHandler_InviteUser_InvalidRole(t *testing.T) { + handler, _ := setupUserHandler(t) + + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", uint(1)) + c.Next() + }) + r.POST("/invite", handler.InviteUser) + + body, _ := json.Marshal(map[string]string{ + "email": 
"invite@example.com", + "role": "superadmin", + }) + req := httptest.NewRequest(http.MethodPost, "/invite", bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.Contains(t, w.Body.String(), "Invalid role") +} + +// --- UpdateUser authentication/session edge cases --- + +func TestUserHandler_UpdateUser_MissingUserID(t *testing.T) { + handler, db := setupUserHandler(t) + + user := models.User{UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "target@example.com", Role: models.RoleUser, Enabled: true} + require.NoError(t, db.Create(&user).Error) + + gin.SetMode(gin.TestMode) + r := gin.New() + // No userID set in context + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Next() + }) + r.PUT("/users/:id", handler.UpdateUser) + + body, _ := json.Marshal(map[string]string{"name": "New Name"}) + req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("/users/%d", user.ID), bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusUnauthorized, w.Code) + assert.Contains(t, w.Body.String(), "Authentication required") +} + +func TestUserHandler_UpdateUser_InvalidSessionType(t *testing.T) { + handler, db := setupUserHandler(t) + + user := models.User{UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "target2@example.com", Role: models.RoleUser, Enabled: true} + require.NoError(t, db.Create(&user).Error) + + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", "not-a-uint") // wrong type + c.Next() + }) + r.PUT("/users/:id", handler.UpdateUser) + + body, _ := json.Marshal(map[string]string{"name": "New Name"}) + req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("/users/%d", user.ID), bytes.NewBuffer(body)) + req.Header.Set("Content-Type", 
"application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.Contains(t, w.Body.String(), "Invalid session") +} + +// --- UpdateUser role/enabled restriction for non-admin self --- + +func TestUserHandler_UpdateUser_NonAdminSelfRoleChange(t *testing.T) { + handler, db := setupUserHandler(t) + + user := models.User{UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "self@example.com", Role: models.RoleUser, Enabled: true} + require.NoError(t, db.Create(&user).Error) + + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", "user") // non-admin + c.Set("userID", user.ID) // isSelf = true + c.Next() + }) + r.PUT("/users/:id", handler.UpdateUser) + + body, _ := json.Marshal(map[string]string{"role": "admin"}) + req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("/users/%d", user.ID), bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusForbidden, w.Code) + assert.Contains(t, w.Body.String(), "Cannot modify role or enabled status") +} + +// --- UpdateUser invalid role string --- + +func TestUserHandler_UpdateUser_InvalidRole(t *testing.T) { + handler, db := setupUserHandler(t) + + target := models.User{UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "target3@example.com", Role: models.RoleUser, Enabled: true} + require.NoError(t, db.Create(&target).Error) + + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", uint(9999)) // not the target + c.Next() + }) + r.PUT("/users/:id", handler.UpdateUser) + + body, _ := json.Marshal(map[string]string{"role": "superadmin"}) + req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("/users/%d", target.ID), bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) 
+ + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.Contains(t, w.Body.String(), "Invalid role") +} + +// --- UpdateUser self-demotion and self-disable --- + +func TestUserHandler_UpdateUser_SelfDemotion(t *testing.T) { + handler, db := setupUserHandler(t) + + admin := models.User{UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "admin@self.example.com", Role: models.RoleAdmin, Enabled: true} + require.NoError(t, db.Create(&admin).Error) + + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", admin.ID) // isSelf = true + c.Next() + }) + r.PUT("/users/:id", handler.UpdateUser) + + body, _ := json.Marshal(map[string]string{"role": "user"}) + req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("/users/%d", admin.ID), bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusForbidden, w.Code) + assert.Contains(t, w.Body.String(), "Cannot change your own role") +} + +func TestUserHandler_UpdateUser_SelfDisable(t *testing.T) { + handler, db := setupUserHandler(t) + + admin := models.User{UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "admin@disable.example.com", Role: models.RoleAdmin, Enabled: true} + require.NoError(t, db.Create(&admin).Error) + + disabled := false + + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", admin.ID) // isSelf = true + c.Next() + }) + r.PUT("/users/:id", handler.UpdateUser) + + body, _ := json.Marshal(map[string]interface{}{"enabled": disabled}) + req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("/users/%d", admin.ID), bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusForbidden, w.Code) + assert.Contains(t, w.Body.String(), "Cannot disable your own account") +} + +// --- 
UpdateUser last-admin protection --- + +func TestUserHandler_UpdateUser_LastAdminDemotion(t *testing.T) { + handler, db := setupUserHandler(t) + + // Only one admin in the DB (the target) + target := models.User{UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "last-admin@example.com", Role: models.RoleAdmin, Enabled: true} + require.NoError(t, db.Create(&target).Error) + + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", uint(9999)) // different from target; not in DB but role injected via context + c.Next() + }) + r.PUT("/users/:id", handler.UpdateUser) + + body, _ := json.Marshal(map[string]string{"role": "user"}) + req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("/users/%d", target.ID), bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusForbidden, w.Code) + assert.Contains(t, w.Body.String(), "Cannot demote the last admin") +} + +func TestUserHandler_UpdateUser_LastAdminDisable(t *testing.T) { + handler, db := setupUserHandler(t) + + target := models.User{UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "last-admin-disable@example.com", Role: models.RoleAdmin, Enabled: true} + require.NoError(t, db.Create(&target).Error) + + disabled := false + + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", uint(9999)) + c.Next() + }) + r.PUT("/users/:id", handler.UpdateUser) + + body, _ := json.Marshal(map[string]interface{}{"enabled": disabled}) + req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("/users/%d", target.ID), bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusForbidden, w.Code) + assert.Contains(t, w.Body.String(), "Cannot disable the last admin") +} + +// --- UpdateUser session 
invalidation --- + +func TestUserHandler_UpdateUser_WithSessionInvalidation(t *testing.T) { + db := OpenTestDB(t) + _ = db.AutoMigrate(&models.User{}, &models.Setting{}, &models.SecurityAudit{}) + + authSvc := services.NewAuthService(db, config.Config{JWTSecret: "test-secret"}) + + caller := models.User{UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "caller-si@example.com", Role: models.RoleAdmin, Enabled: true} + target := models.User{UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "target-si@example.com", Role: models.RoleUser, Enabled: true} + require.NoError(t, db.Create(&caller).Error) + require.NoError(t, db.Create(&target).Error) + + handler := NewUserHandler(db, authSvc) + + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", caller.ID) + c.Next() + }) + r.PUT("/users/:id", handler.UpdateUser) + + body, _ := json.Marshal(map[string]string{"role": "passthrough"}) + req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("/users/%d", target.ID), bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + assert.Contains(t, w.Body.String(), "User updated successfully") + + var updated models.User + require.NoError(t, db.First(&updated, target.ID).Error) + assert.Greater(t, updated.SessionVersion, uint(0)) +} + +func TestUserHandler_UpdateUser_SessionInvalidationError(t *testing.T) { + mainDB := OpenTestDB(t) + _ = mainDB.AutoMigrate(&models.User{}, &models.Setting{}, &models.SecurityAudit{}) + + // Use a separate empty DB so InvalidateSessions cannot find the user + authDB := OpenTestDB(t) + authSvc := services.NewAuthService(authDB, config.Config{JWTSecret: "test-secret"}) + + target := models.User{UUID: uuid.NewString(), APIKey: uuid.NewString(), Email: "target-sie@example.com", Role: models.RoleUser, Enabled: true} + require.NoError(t, mainDB.Create(&target).Error) 
+ + handler := NewUserHandler(mainDB, authSvc) + + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", "admin") + c.Set("userID", uint(9999)) + c.Next() + }) + r.PUT("/users/:id", handler.UpdateUser) + + body, _ := json.Marshal(map[string]string{"role": "passthrough"}) + req := httptest.NewRequest(http.MethodPut, fmt.Sprintf("/users/%d", target.ID), bytes.NewBuffer(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.Contains(t, w.Body.String(), "Failed to invalidate sessions") +} diff --git a/backend/internal/api/handlers/user_integration_test.go b/backend/internal/api/handlers/user_integration_test.go index 7b5e6eae7..7eed110f1 100644 --- a/backend/internal/api/handlers/user_integration_test.go +++ b/backend/internal/api/handlers/user_integration_test.go @@ -28,7 +28,7 @@ func TestUserLoginAfterEmailChange(t *testing.T) { cfg := config.Config{} authService := services.NewAuthService(db, cfg) authHandler := NewAuthHandler(authService) - userHandler := NewUserHandler(db) + userHandler := NewUserHandler(db, nil) // Setup Router gin.SetMode(gin.TestMode) diff --git a/backend/internal/api/middleware/auth.go b/backend/internal/api/middleware/auth.go index 6164e25e2..9eea57fb3 100644 --- a/backend/internal/api/middleware/auth.go +++ b/backend/internal/api/middleware/auth.go @@ -4,6 +4,7 @@ import ( "net/http" "strings" + "github.com/Wikid82/charon/backend/internal/models" "github.com/Wikid82/charon/backend/internal/services" "github.com/gin-gonic/gin" ) @@ -37,7 +38,7 @@ func AuthMiddleware(authService *services.AuthService) gin.HandlerFunc { } c.Set("userID", user.ID) - c.Set("role", user.Role) + c.Set("role", string(user.Role)) c.Next() } } @@ -95,15 +96,15 @@ func extractAuthCookieToken(c *gin.Context) string { return token } -func RequireRole(role string) gin.HandlerFunc { +func RequireRole(role 
models.UserRole) gin.HandlerFunc { return func(c *gin.Context) { - userRole, exists := c.Get("role") - if !exists { + userRole := c.GetString("role") + if userRole == "" { c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "Unauthorized"}) return } - if userRole.(string) != role && userRole.(string) != "admin" { + if userRole != string(role) && userRole != string(models.RoleAdmin) { c.AbortWithStatusJSON(http.StatusForbidden, gin.H{"error": "Forbidden"}) return } @@ -111,3 +112,14 @@ func RequireRole(role string) gin.HandlerFunc { c.Next() } } + +func RequireManagementAccess() gin.HandlerFunc { + return func(c *gin.Context) { + role := c.GetString("role") + if role == string(models.RolePassthrough) { + c.AbortWithStatusJSON(http.StatusForbidden, gin.H{"error": "Pass-through users cannot access management features"}) + return + } + c.Next() + } +} diff --git a/backend/internal/api/middleware/auth_test.go b/backend/internal/api/middleware/auth_test.go index 119862a2d..245d8538b 100644 --- a/backend/internal/api/middleware/auth_test.go +++ b/backend/internal/api/middleware/auth_test.go @@ -427,3 +427,61 @@ func TestExtractAuthCookieToken_IgnoresNonAuthCookies(t *testing.T) { token := extractAuthCookieToken(ctx) assert.Equal(t, "", token) } + +func TestRequireManagementAccess_PassthroughBlocked(t *testing.T) { + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", string(models.RolePassthrough)) + c.Next() + }) + r.Use(RequireManagementAccess()) + r.GET("/test", func(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/test", http.NoBody) + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusForbidden, w.Code) + assert.Contains(t, w.Body.String(), "Pass-through users cannot access management features") +} + +func TestRequireManagementAccess_UserAllowed(t *testing.T) { + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(func(c 
*gin.Context) { + c.Set("role", string(models.RoleUser)) + c.Next() + }) + r.Use(RequireManagementAccess()) + r.GET("/test", func(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/test", http.NoBody) + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) +} + +func TestRequireManagementAccess_AdminAllowed(t *testing.T) { + gin.SetMode(gin.TestMode) + r := gin.New() + r.Use(func(c *gin.Context) { + c.Set("role", string(models.RoleAdmin)) + c.Next() + }) + r.Use(RequireManagementAccess()) + r.GET("/test", func(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/test", http.NoBody) + r.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) +} diff --git a/backend/internal/api/middleware/optional_auth.go b/backend/internal/api/middleware/optional_auth.go index 95123ae69..9edbe86ff 100644 --- a/backend/internal/api/middleware/optional_auth.go +++ b/backend/internal/api/middleware/optional_auth.go @@ -38,7 +38,7 @@ func OptionalAuth(authService *services.AuthService) gin.HandlerFunc { } c.Set("userID", user.ID) - c.Set("role", user.Role) + c.Set("role", string(user.Role)) c.Next() } } diff --git a/backend/internal/api/middleware/optional_auth_test.go b/backend/internal/api/middleware/optional_auth_test.go index e8e5f9447..f45522d4d 100644 --- a/backend/internal/api/middleware/optional_auth_test.go +++ b/backend/internal/api/middleware/optional_auth_test.go @@ -138,7 +138,7 @@ func TestOptionalAuth_ValidTokenSetsContext(t *testing.T) { t.Parallel() authService, db := setupAuthServiceWithDB(t) - user := &models.User{Email: "optional-auth@example.com", Name: "Optional Auth", Role: "admin", Enabled: true} + user := &models.User{Email: "optional-auth@example.com", Name: "Optional Auth", Role: models.RoleAdmin, Enabled: true} require.NoError(t, user.SetPassword("password123")) 
require.NoError(t, db.Create(user).Error) diff --git a/backend/internal/api/routes/routes.go b/backend/internal/api/routes/routes.go index 2533036d4..3ef436ca9 100644 --- a/backend/internal/api/routes/routes.go +++ b/backend/internal/api/routes/routes.go @@ -52,6 +52,14 @@ func runInitialUptimeBootstrap(enabled bool, uptimeService uptimeBootstrapServic uptimeService.CheckAll() } +// migrateViewerToPassthrough renames any legacy "viewer" roles to "passthrough". +func migrateViewerToPassthrough(db *gorm.DB) { + result := db.Model(&models.User{}).Where("role = ?", "viewer").Update("role", string(models.RolePassthrough)) + if result.RowsAffected > 0 { + logger.Log().WithField("count", result.RowsAffected).Info("Migrated viewer roles to passthrough") + } +} + // Register wires up API routes and performs automatic migrations. func Register(router *gin.Engine, db *gorm.DB, cfg config.Config) error { // Caddy Manager - created early so it can be used by settings handlers for config reload @@ -118,7 +126,7 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM return fmt.Errorf("auto migrate: %w", err) } - // Clean up invalid Let's Encrypt certificate associations + migrateViewerToPassthrough(db) // Let's Encrypt certs are auto-managed by Caddy and should not be assigned via certificate_id logger.Log().Info("Cleaning up invalid Let's Encrypt certificate associations...") var hostsWithInvalidCerts []models.ProxyHost @@ -239,7 +247,7 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM api.POST("/security/events", securityNotificationHandler.HandleSecurityEvent) // User handler (public endpoints) - userHandler := handlers.NewUserHandler(db) + userHandler := handlers.NewUserHandler(db, authService) api.GET("/setup", userHandler.GetSetupStatus) api.POST("/setup", userHandler.Setup) api.GET("/invite/validate", userHandler.ValidateInvite) @@ -251,109 +259,110 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg 
config.Config, caddyM protected := api.Group("/") protected.Use(authMiddleware) { + // Self-service routes — accessible to all authenticated users including passthrough protected.POST("/auth/logout", authHandler.Logout) protected.POST("/auth/refresh", authHandler.Refresh) protected.GET("/auth/me", authHandler.Me) protected.POST("/auth/change-password", authHandler.ChangePassword) + protected.GET("/auth/accessible-hosts", authHandler.GetAccessibleHosts) + protected.GET("/auth/check-host/:hostId", authHandler.CheckHostAccess) + protected.GET("/user/profile", userHandler.GetProfile) + protected.POST("/user/profile", userHandler.UpdateProfile) + protected.POST("/user/api-key", userHandler.RegenerateAPIKey) + + // Management routes — blocked for passthrough users + management := protected.Group("/") + management.Use(middleware.RequireManagementAccess()) // Backups - protected.GET("/backups", backupHandler.List) - protected.POST("/backups", backupHandler.Create) - protected.DELETE("/backups/:filename", backupHandler.Delete) - protected.GET("/backups/:filename/download", backupHandler.Download) - protected.POST("/backups/:filename/restore", backupHandler.Restore) + management.GET("/backups", backupHandler.List) + management.POST("/backups", backupHandler.Create) + management.DELETE("/backups/:filename", backupHandler.Delete) + management.GET("/backups/:filename/download", backupHandler.Download) + management.POST("/backups/:filename/restore", backupHandler.Restore) // Logs // WebSocket endpoints logsWSHandler := handlers.NewLogsWSHandler(wsTracker) - protected.GET("/logs/live", logsWSHandler.HandleWebSocket) - protected.GET("/logs", logsHandler.List) - protected.GET("/logs/:filename", logsHandler.Read) - protected.GET("/logs/:filename/download", logsHandler.Download) + management.GET("/logs/live", logsWSHandler.HandleWebSocket) + management.GET("/logs", logsHandler.List) + management.GET("/logs/:filename", logsHandler.Read) + management.GET("/logs/:filename/download", 
logsHandler.Download) // WebSocket status monitoring - protected.GET("/websocket/connections", wsStatusHandler.GetConnections) - protected.GET("/websocket/stats", wsStatusHandler.GetStats) + management.GET("/websocket/connections", wsStatusHandler.GetConnections) + management.GET("/websocket/stats", wsStatusHandler.GetStats) // Security Notification Settings - Use handler created earlier for event intake - protected.GET("/security/notifications/settings", securityNotificationHandler.DeprecatedGetSettings) - protected.PUT("/security/notifications/settings", securityNotificationHandler.DeprecatedUpdateSettings) - protected.GET("/notifications/settings/security", securityNotificationHandler.GetSettings) - protected.PUT("/notifications/settings/security", securityNotificationHandler.UpdateSettings) + management.GET("/security/notifications/settings", securityNotificationHandler.DeprecatedGetSettings) + management.PUT("/security/notifications/settings", securityNotificationHandler.DeprecatedUpdateSettings) + management.GET("/notifications/settings/security", securityNotificationHandler.GetSettings) + management.PUT("/notifications/settings/security", securityNotificationHandler.UpdateSettings) // System permissions diagnostics and repair systemPermissionsHandler := handlers.NewSystemPermissionsHandler(cfg, securityService, nil) - protected.GET("/system/permissions", systemPermissionsHandler.GetPermissions) - protected.POST("/system/permissions/repair", systemPermissionsHandler.RepairPermissions) + management.GET("/system/permissions", systemPermissionsHandler.GetPermissions) + management.POST("/system/permissions/repair", systemPermissionsHandler.RepairPermissions) // Audit Logs auditLogHandler := handlers.NewAuditLogHandler(securityService) - protected.GET("/audit-logs", auditLogHandler.List) - protected.GET("/audit-logs/:uuid", auditLogHandler.Get) + management.GET("/audit-logs", auditLogHandler.List) + management.GET("/audit-logs/:uuid", auditLogHandler.Get) // 
Settings - with CaddyManager and Cerberus for security settings reload settingsHandler := handlers.NewSettingsHandlerWithDeps(db, caddyManager, cerb, securityService, dataRoot) - protected.GET("/settings", settingsHandler.GetSettings) - protected.POST("/settings", settingsHandler.UpdateSetting) - protected.PATCH("/settings", settingsHandler.UpdateSetting) // E2E tests use PATCH - protected.PATCH("/config", settingsHandler.PatchConfig) // Bulk configuration update + management.GET("/settings", settingsHandler.GetSettings) + management.POST("/settings", settingsHandler.UpdateSetting) + management.PATCH("/settings", settingsHandler.UpdateSetting) // E2E tests use PATCH + management.PATCH("/config", settingsHandler.PatchConfig) // Bulk configuration update // SMTP Configuration - protected.GET("/settings/smtp", middleware.RequireRole("admin"), settingsHandler.GetSMTPConfig) - protected.POST("/settings/smtp", settingsHandler.UpdateSMTPConfig) - protected.POST("/settings/smtp/test", settingsHandler.TestSMTPConfig) - protected.POST("/settings/smtp/test-email", settingsHandler.SendTestEmail) + management.GET("/settings/smtp", middleware.RequireRole(models.RoleAdmin), settingsHandler.GetSMTPConfig) + management.POST("/settings/smtp", settingsHandler.UpdateSMTPConfig) + management.POST("/settings/smtp/test", settingsHandler.TestSMTPConfig) + management.POST("/settings/smtp/test-email", settingsHandler.SendTestEmail) // URL Validation - protected.POST("/settings/validate-url", settingsHandler.ValidatePublicURL) - protected.POST("/settings/test-url", settingsHandler.TestPublicURL) - - // Auth related protected routes - protected.GET("/auth/accessible-hosts", authHandler.GetAccessibleHosts) - protected.GET("/auth/check-host/:hostId", authHandler.CheckHostAccess) + management.POST("/settings/validate-url", settingsHandler.ValidatePublicURL) + management.POST("/settings/test-url", settingsHandler.TestPublicURL) // Feature flags (DB-backed with env fallback) featureFlagsHandler := 
handlers.NewFeatureFlagsHandler(db) - protected.GET("/feature-flags", featureFlagsHandler.GetFlags) - protected.PUT("/feature-flags", featureFlagsHandler.UpdateFlags) - - // User Profile & API Key - protected.GET("/user/profile", userHandler.GetProfile) - protected.POST("/user/profile", userHandler.UpdateProfile) - protected.POST("/user/api-key", userHandler.RegenerateAPIKey) + management.GET("/feature-flags", featureFlagsHandler.GetFlags) + management.PUT("/feature-flags", featureFlagsHandler.UpdateFlags) // User Management (admin only routes are in RegisterRoutes) - protected.GET("/users", userHandler.ListUsers) - protected.POST("/users", userHandler.CreateUser) - protected.POST("/users/invite", userHandler.InviteUser) - protected.POST("/users/preview-invite-url", userHandler.PreviewInviteURL) - protected.GET("/users/:id", userHandler.GetUser) - protected.PUT("/users/:id", userHandler.UpdateUser) - protected.DELETE("/users/:id", userHandler.DeleteUser) - protected.PUT("/users/:id/permissions", userHandler.UpdateUserPermissions) - protected.POST("/users/:id/resend-invite", userHandler.ResendInvite) + management.GET("/users", userHandler.ListUsers) + management.POST("/users", userHandler.CreateUser) + management.POST("/users/invite", userHandler.InviteUser) + management.POST("/users/preview-invite-url", userHandler.PreviewInviteURL) + management.GET("/users/:id", userHandler.GetUser) + management.PUT("/users/:id", userHandler.UpdateUser) + management.DELETE("/users/:id", userHandler.DeleteUser) + management.PUT("/users/:id/permissions", userHandler.UpdateUserPermissions) + management.POST("/users/:id/resend-invite", userHandler.ResendInvite) // Updates updateService := services.NewUpdateService() updateHandler := handlers.NewUpdateHandler(updateService) - protected.GET("/system/updates", updateHandler.Check) + management.GET("/system/updates", updateHandler.Check) // System info systemHandler := handlers.NewSystemHandler() - protected.GET("/system/my-ip", 
systemHandler.GetMyIP) + management.GET("/system/my-ip", systemHandler.GetMyIP) // Notifications notificationHandler := handlers.NewNotificationHandler(notificationService) - protected.GET("/notifications", notificationHandler.List) - protected.POST("/notifications/:id/read", notificationHandler.MarkAsRead) - protected.POST("/notifications/read-all", notificationHandler.MarkAllAsRead) + management.GET("/notifications", notificationHandler.List) + management.POST("/notifications/:id/read", notificationHandler.MarkAsRead) + management.POST("/notifications/read-all", notificationHandler.MarkAllAsRead) // Domains domainHandler := handlers.NewDomainHandler(db, notificationService) - protected.GET("/domains", domainHandler.List) - protected.POST("/domains", domainHandler.Create) - protected.DELETE("/domains/:id", domainHandler.Delete) + management.GET("/domains", domainHandler.List) + management.POST("/domains", domainHandler.Create) + management.DELETE("/domains/:id", domainHandler.Delete) // DNS Providers - only available if encryption key is configured if cfg.EncryptionKey != "" { @@ -363,33 +372,33 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM } else { dnsProviderService := services.NewDNSProviderService(db, encryptionService) dnsProviderHandler := handlers.NewDNSProviderHandler(dnsProviderService) - protected.GET("/dns-providers", dnsProviderHandler.List) - protected.POST("/dns-providers", dnsProviderHandler.Create) - protected.GET("/dns-providers/types", dnsProviderHandler.GetTypes) - protected.GET("/dns-providers/:id", dnsProviderHandler.Get) - protected.PUT("/dns-providers/:id", dnsProviderHandler.Update) - protected.DELETE("/dns-providers/:id", dnsProviderHandler.Delete) - protected.POST("/dns-providers/:id/test", dnsProviderHandler.Test) - protected.POST("/dns-providers/test", dnsProviderHandler.TestCredentials) + management.GET("/dns-providers", dnsProviderHandler.List) + management.POST("/dns-providers", 
dnsProviderHandler.Create) + management.GET("/dns-providers/types", dnsProviderHandler.GetTypes) + management.GET("/dns-providers/:id", dnsProviderHandler.Get) + management.PUT("/dns-providers/:id", dnsProviderHandler.Update) + management.DELETE("/dns-providers/:id", dnsProviderHandler.Delete) + management.POST("/dns-providers/:id/test", dnsProviderHandler.Test) + management.POST("/dns-providers/test", dnsProviderHandler.TestCredentials) // Audit logs for DNS providers - protected.GET("/dns-providers/:id/audit-logs", auditLogHandler.ListByProvider) + management.GET("/dns-providers/:id/audit-logs", auditLogHandler.ListByProvider) // DNS Provider Auto-Detection (Phase 4) dnsDetectionService := services.NewDNSDetectionService(db) dnsDetectionHandler := handlers.NewDNSDetectionHandler(dnsDetectionService) - protected.POST("/dns-providers/detect", dnsDetectionHandler.Detect) - protected.GET("/dns-providers/detection-patterns", dnsDetectionHandler.GetPatterns) + management.POST("/dns-providers/detect", dnsDetectionHandler.Detect) + management.GET("/dns-providers/detection-patterns", dnsDetectionHandler.GetPatterns) // Multi-Credential Management (Phase 3) credentialService := services.NewCredentialService(db, encryptionService) credentialHandler := handlers.NewCredentialHandler(credentialService) - protected.GET("/dns-providers/:id/credentials", credentialHandler.List) - protected.POST("/dns-providers/:id/credentials", credentialHandler.Create) - protected.GET("/dns-providers/:id/credentials/:cred_id", credentialHandler.Get) - protected.PUT("/dns-providers/:id/credentials/:cred_id", credentialHandler.Update) - protected.DELETE("/dns-providers/:id/credentials/:cred_id", credentialHandler.Delete) - protected.POST("/dns-providers/:id/credentials/:cred_id/test", credentialHandler.Test) - protected.POST("/dns-providers/:id/enable-multi-credentials", credentialHandler.EnableMultiCredentials) + management.GET("/dns-providers/:id/credentials", credentialHandler.List) + 
management.POST("/dns-providers/:id/credentials", credentialHandler.Create) + management.GET("/dns-providers/:id/credentials/:cred_id", credentialHandler.Get) + management.PUT("/dns-providers/:id/credentials/:cred_id", credentialHandler.Update) + management.DELETE("/dns-providers/:id/credentials/:cred_id", credentialHandler.Delete) + management.POST("/dns-providers/:id/credentials/:cred_id/test", credentialHandler.Test) + management.POST("/dns-providers/:id/enable-multi-credentials", credentialHandler.EnableMultiCredentials) // Encryption Management - Admin only endpoints rotationService, rotErr := crypto.NewRotationService(db) @@ -397,7 +406,7 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM logger.Log().WithError(rotErr).Warn("Failed to initialize rotation service - key rotation features will be unavailable") } else { encryptionHandler := handlers.NewEncryptionHandler(rotationService, securityService) - adminEncryption := protected.Group("/admin/encryption") + adminEncryption := management.Group("/admin/encryption") adminEncryption.GET("/status", encryptionHandler.GetStatus) adminEncryption.POST("/rotate", encryptionHandler.Rotate) adminEncryption.GET("/history", encryptionHandler.GetHistory) @@ -411,7 +420,7 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM } pluginLoader := services.NewPluginLoaderService(db, pluginDir, nil) pluginHandler := handlers.NewPluginHandler(db, pluginLoader) - adminPlugins := protected.Group("/admin/plugins") + adminPlugins := management.Group("/admin/plugins") adminPlugins.GET("", pluginHandler.ListPlugins) adminPlugins.GET("/:id", pluginHandler.GetPlugin) adminPlugins.POST("/:id/enable", pluginHandler.EnablePlugin) @@ -421,7 +430,7 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM // Manual DNS Challenges (Phase 1) - For users without automated DNS API access manualChallengeService := services.NewManualChallengeService(db) 
manualChallengeHandler := handlers.NewManualChallengeHandler(manualChallengeService, dnsProviderService) - manualChallengeHandler.RegisterRoutes(protected) + manualChallengeHandler.RegisterRoutes(management) } } else { logger.Log().Warn("CHARON_ENCRYPTION_KEY not set - DNS provider and plugin features will be unavailable") @@ -431,37 +440,37 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM // The service will return proper error messages when Docker is not accessible dockerService := services.NewDockerService() dockerHandler := handlers.NewDockerHandler(dockerService, remoteServerService) - dockerHandler.RegisterRoutes(protected) + dockerHandler.RegisterRoutes(management) // Uptime Service — reuse the single uptimeService instance (defined above) // to share in-memory state (mutexes, notification batching) between // background checker, ProxyHostHandler, and API handlers. uptimeHandler := handlers.NewUptimeHandler(uptimeService) - protected.GET("/uptime/monitors", uptimeHandler.List) - protected.POST("/uptime/monitors", uptimeHandler.Create) - protected.GET("/uptime/monitors/:id/history", uptimeHandler.GetHistory) - protected.PUT("/uptime/monitors/:id", uptimeHandler.Update) - protected.DELETE("/uptime/monitors/:id", uptimeHandler.Delete) - protected.POST("/uptime/monitors/:id/check", uptimeHandler.CheckMonitor) - protected.POST("/uptime/sync", uptimeHandler.Sync) + management.GET("/uptime/monitors", uptimeHandler.List) + management.POST("/uptime/monitors", uptimeHandler.Create) + management.GET("/uptime/monitors/:id/history", uptimeHandler.GetHistory) + management.PUT("/uptime/monitors/:id", uptimeHandler.Update) + management.DELETE("/uptime/monitors/:id", uptimeHandler.Delete) + management.POST("/uptime/monitors/:id/check", uptimeHandler.CheckMonitor) + management.POST("/uptime/sync", uptimeHandler.Sync) // Notification Providers notificationProviderHandler := handlers.NewNotificationProviderHandlerWithDeps(notificationService, 
securityService, dataRoot) - protected.GET("/notifications/providers", notificationProviderHandler.List) - protected.POST("/notifications/providers", notificationProviderHandler.Create) - protected.PUT("/notifications/providers/:id", notificationProviderHandler.Update) - protected.DELETE("/notifications/providers/:id", notificationProviderHandler.Delete) - protected.POST("/notifications/providers/test", notificationProviderHandler.Test) - protected.POST("/notifications/providers/preview", notificationProviderHandler.Preview) - protected.GET("/notifications/templates", notificationProviderHandler.Templates) + management.GET("/notifications/providers", notificationProviderHandler.List) + management.POST("/notifications/providers", notificationProviderHandler.Create) + management.PUT("/notifications/providers/:id", notificationProviderHandler.Update) + management.DELETE("/notifications/providers/:id", notificationProviderHandler.Delete) + management.POST("/notifications/providers/test", notificationProviderHandler.Test) + management.POST("/notifications/providers/preview", notificationProviderHandler.Preview) + management.GET("/notifications/templates", notificationProviderHandler.Templates) // External notification templates (saved templates for providers) notificationTemplateHandler := handlers.NewNotificationTemplateHandlerWithDeps(notificationService, securityService, dataRoot) - protected.GET("/notifications/external-templates", notificationTemplateHandler.List) - protected.POST("/notifications/external-templates", notificationTemplateHandler.Create) - protected.PUT("/notifications/external-templates/:id", notificationTemplateHandler.Update) - protected.DELETE("/notifications/external-templates/:id", notificationTemplateHandler.Delete) - protected.POST("/notifications/external-templates/preview", notificationTemplateHandler.Preview) + management.GET("/notifications/external-templates", notificationTemplateHandler.List) + 
management.POST("/notifications/external-templates", notificationTemplateHandler.Create) + management.PUT("/notifications/external-templates/:id", notificationTemplateHandler.Update) + management.DELETE("/notifications/external-templates/:id", notificationTemplateHandler.Delete) + management.POST("/notifications/external-templates/preview", notificationTemplateHandler.Preview) // Ensure uptime feature flag exists to avoid record-not-found logs defaultUptime := models.Setting{Key: "feature.uptime.enabled", Value: "true", Type: "bool", Category: "feature"} @@ -510,7 +519,7 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM } }() - protected.POST("/system/uptime/check", func(c *gin.Context) { + management.POST("/system/uptime/check", func(c *gin.Context) { go uptimeService.CheckAll() c.JSON(200, gin.H{"message": "Uptime check started"}) }) @@ -542,19 +551,19 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM securityHandler.SetGeoIPService(geoipSvc) } - protected.GET("/security/status", securityHandler.GetStatus) + management.GET("/security/status", securityHandler.GetStatus) // Security Config management - protected.GET("/security/config", securityHandler.GetConfig) - protected.GET("/security/decisions", securityHandler.ListDecisions) - protected.GET("/security/rulesets", securityHandler.ListRuleSets) - protected.GET("/security/rate-limit/presets", securityHandler.GetRateLimitPresets) + management.GET("/security/config", securityHandler.GetConfig) + management.GET("/security/decisions", securityHandler.ListDecisions) + management.GET("/security/rulesets", securityHandler.ListRuleSets) + management.GET("/security/rate-limit/presets", securityHandler.GetRateLimitPresets) // GeoIP endpoints - protected.GET("/security/geoip/status", securityHandler.GetGeoIPStatus) + management.GET("/security/geoip/status", securityHandler.GetGeoIPStatus) // WAF exclusion endpoints - 
protected.GET("/security/waf/exclusions", securityHandler.GetWAFExclusions) + management.GET("/security/waf/exclusions", securityHandler.GetWAFExclusions) - securityAdmin := protected.Group("/security") - securityAdmin.Use(middleware.RequireRole("admin")) + securityAdmin := management.Group("/security") + securityAdmin.Use(middleware.RequireRole(models.RoleAdmin)) securityAdmin.POST("/config", securityHandler.UpdateConfig) securityAdmin.POST("/enable", securityHandler.Enable) securityAdmin.POST("/disable", securityHandler.Disable) @@ -595,7 +604,7 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM crowdsecExec := handlers.NewDefaultCrowdsecExecutor() crowdsecHandler := handlers.NewCrowdsecHandler(db, crowdsecExec, crowdsecBinPath, crowdsecDataDir) - crowdsecHandler.RegisterRoutes(protected) + crowdsecHandler.RegisterRoutes(management) // NOTE: CrowdSec reconciliation now happens in main.go BEFORE HTTP server starts // This ensures proper initialization order and prevents race conditions @@ -626,24 +635,24 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM logger.Log().WithError(err).Error("Failed to start security log watcher") } cerberusLogsHandler := handlers.NewCerberusLogsHandler(logWatcher, wsTracker) - protected.GET("/cerberus/logs/ws", cerberusLogsHandler.LiveLogs) + management.GET("/cerberus/logs/ws", cerberusLogsHandler.LiveLogs) // Access Lists accessListHandler := handlers.NewAccessListHandler(db) if geoipSvc != nil { accessListHandler.SetGeoIPService(geoipSvc) } - protected.GET("/access-lists/templates", accessListHandler.GetTemplates) - protected.GET("/access-lists", accessListHandler.List) - protected.POST("/access-lists", accessListHandler.Create) - protected.GET("/access-lists/:id", accessListHandler.Get) - protected.PUT("/access-lists/:id", accessListHandler.Update) - protected.DELETE("/access-lists/:id", accessListHandler.Delete) - protected.POST("/access-lists/:id/test", 
accessListHandler.TestIP) + management.GET("/access-lists/templates", accessListHandler.GetTemplates) + management.GET("/access-lists", accessListHandler.List) + management.POST("/access-lists", accessListHandler.Create) + management.GET("/access-lists/:id", accessListHandler.Get) + management.PUT("/access-lists/:id", accessListHandler.Update) + management.DELETE("/access-lists/:id", accessListHandler.Delete) + management.POST("/access-lists/:id/test", accessListHandler.TestIP) // Security Headers securityHeadersHandler := handlers.NewSecurityHeadersHandler(db, caddyManager) - securityHeadersHandler.RegisterRoutes(protected) + securityHeadersHandler.RegisterRoutes(management) // Certificate routes // Use cfg.CaddyConfigDir + "/data" for cert service so we scan the actual Caddy storage @@ -652,18 +661,19 @@ func RegisterWithDeps(router *gin.Engine, db *gorm.DB, cfg config.Config, caddyM logger.Log().WithField("caddy_data_dir", caddyDataDir).Info("Using Caddy data directory for certificates scan") certService := services.NewCertificateService(caddyDataDir, db) certHandler := handlers.NewCertificateHandler(certService, backupService, notificationService) - protected.GET("/certificates", certHandler.List) - protected.POST("/certificates", certHandler.Upload) - protected.DELETE("/certificates/:id", certHandler.Delete) - } + management.GET("/certificates", certHandler.List) + management.POST("/certificates", certHandler.Upload) + management.DELETE("/certificates/:id", certHandler.Delete) - // Caddy Manager already created above + // Proxy Hosts & Remote Servers + proxyHostHandler := handlers.NewProxyHostHandler(db, caddyManager, notificationService, uptimeService) + proxyHostHandler.RegisterRoutes(management) - proxyHostHandler := handlers.NewProxyHostHandler(db, caddyManager, notificationService, uptimeService) - proxyHostHandler.RegisterRoutes(protected) + remoteServerHandler := handlers.NewRemoteServerHandler(remoteServerService, notificationService) + 
remoteServerHandler.RegisterRoutes(management) + } - remoteServerHandler := handlers.NewRemoteServerHandler(remoteServerService, notificationService) - remoteServerHandler.RegisterRoutes(protected) + // Caddy Manager already created above // Initial Caddy Config Sync go func() { @@ -708,7 +718,7 @@ func RegisterImportHandler(router *gin.Engine, db *gorm.DB, cfg config.Config, c api := router.Group("/api/v1") authService := services.NewAuthService(db, cfg) authenticatedAdmin := api.Group("/") - authenticatedAdmin.Use(middleware.AuthMiddleware(authService), middleware.RequireRole("admin")) + authenticatedAdmin.Use(middleware.AuthMiddleware(authService), middleware.RequireRole(models.RoleAdmin)) importHandler.RegisterRoutes(authenticatedAdmin) // NPM Import Handler - supports Nginx Proxy Manager export format diff --git a/backend/internal/api/routes/routes_import_test.go b/backend/internal/api/routes/routes_import_test.go index 84a0010f0..ff07f54a2 100644 --- a/backend/internal/api/routes/routes_import_test.go +++ b/backend/internal/api/routes/routes_import_test.go @@ -73,7 +73,7 @@ func TestRegisterImportHandler_AuthzGuards(t *testing.T) { router.ServeHTTP(unauthW, unauthReq) assert.Equal(t, http.StatusUnauthorized, unauthW.Code) - nonAdmin := &models.User{Email: "user@example.com", Role: "user", Enabled: true} + nonAdmin := &models.User{Email: "user@example.com", Role: models.RoleUser, Enabled: true} require.NoError(t, db.Create(nonAdmin).Error) authSvc := services.NewAuthService(db, cfg) token, err := authSvc.GenerateToken(nonAdmin) diff --git a/backend/internal/api/routes/routes_test.go b/backend/internal/api/routes/routes_test.go index d5fcf600f..fb32b7c69 100644 --- a/backend/internal/api/routes/routes_test.go +++ b/backend/internal/api/routes/routes_test.go @@ -10,7 +10,9 @@ import ( "testing" "github.com/Wikid82/charon/backend/internal/config" + "github.com/Wikid82/charon/backend/internal/models" "github.com/gin-gonic/gin" + "github.com/google/uuid" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gorm.io/driver/sqlite" @@ -1298,3 +1300,25 @@ func TestRegister_CreatesAccessLogFileForLogWatcher(t *testing.T) { _, statErr := os.Stat(logFilePath) assert.NoError(t, statErr) } + +func TestMigrateViewerToPassthrough(t *testing.T) { + db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{}) + require.NoError(t, err) + require.NoError(t, db.AutoMigrate(&models.User{})) + + // Seed a user with the legacy "viewer" role + viewer := models.User{ + UUID: uuid.NewString(), + APIKey: uuid.NewString(), + Email: "viewer@example.com", + Role: models.UserRole("viewer"), + Enabled: true, + } + require.NoError(t, db.Create(&viewer).Error) + + migrateViewerToPassthrough(db) + + var updated models.User + require.NoError(t, db.First(&updated, viewer.ID).Error) + assert.Equal(t, models.RolePassthrough, updated.Role) +} diff --git a/backend/internal/api/tests/user_smtp_audit_test.go b/backend/internal/api/tests/user_smtp_audit_test.go index f27b74a91..571bac093 100644 --- a/backend/internal/api/tests/user_smtp_audit_test.go +++ b/backend/internal/api/tests/user_smtp_audit_test.go @@ -3,6 +3,7 @@ package tests import ( "bytes" "encoding/json" + "fmt" "net/http" "net/http/httptest" "strings" @@ -45,7 +46,7 @@ func createTestAdminUser(t *testing.T, db *gorm.DB) uint { UUID: "admin-uuid-1234", Email: "admin@test.com", Name: "Test Admin", - Role: "admin", + Role: models.RoleAdmin, Enabled: true, APIKey: "test-api-key", } @@ -66,7 +67,7 @@ func setupRouterWithAuth(db *gorm.DB, userID uint, role string) *gin.Engine { c.Next() }) - userHandler := handlers.NewUserHandler(db) + userHandler := handlers.NewUserHandler(db, nil) settingsHandler := handlers.NewSettingsHandler(db) api := r.Group("/api") @@ -124,7 +125,7 @@ func TestInviteToken_ExpiredCannotBeUsed(t *testing.T) { user := models.User{ UUID: "invite-uuid-1234", Email: "expired@test.com", - Role: "user", + Role: models.RoleUser, Enabled: false, InviteToken: 
"expired-token-12345678901234567890123456789012", InviteExpires: &expiredTime, @@ -153,7 +154,7 @@ func TestInviteToken_CannotBeReused(t *testing.T) { UUID: "accepted-uuid-1234", Email: "accepted@test.com", Name: "Accepted User", - Role: "user", + Role: models.RoleUser, Enabled: true, InviteToken: "accepted-token-1234567890123456789012345678901", InvitedAt: &invitedAt, @@ -217,7 +218,7 @@ func TestAcceptInvite_PasswordValidation(t *testing.T) { user := models.User{ UUID: "pending-uuid-1234", Email: "pending@test.com", - Role: "user", + Role: models.RoleUser, Enabled: false, InviteToken: "valid-token-12345678901234567890123456789012345", InviteExpires: &expires, @@ -269,15 +270,29 @@ func TestUserEndpoints_RequireAdmin(t *testing.T) { UUID: "user-uuid-1234", Email: "user@test.com", Name: "Regular User", - Role: "user", + Role: models.RoleUser, Enabled: true, + APIKey: "user-api-key-unique", } require.NoError(t, user.SetPassword("userpassword123")) require.NoError(t, db.Create(&user).Error) + // Create a second user to test admin-only operations against a non-self target + otherUser := models.User{ + UUID: "other-uuid-5678", + Email: "other@test.com", + Name: "Other User", + Role: models.RoleUser, + Enabled: true, + APIKey: "other-api-key-unique", + } + require.NoError(t, otherUser.SetPassword("otherpassword123")) + require.NoError(t, db.Create(&otherUser).Error) + // Router with regular user role r := setupRouterWithAuth(db, user.ID, "user") + otherID := fmt.Sprintf("%d", otherUser.ID) endpoints := []struct { method string path string @@ -286,10 +301,10 @@ func TestUserEndpoints_RequireAdmin(t *testing.T) { {"GET", "/api/users", ""}, {"POST", "/api/users", `{"email":"new@test.com","name":"New","password":"password123"}`}, {"POST", "/api/users/invite", `{"email":"invite@test.com"}`}, - {"GET", "/api/users/1", ""}, - {"PUT", "/api/users/1", `{"name":"Updated"}`}, - {"DELETE", "/api/users/1", ""}, - {"PUT", "/api/users/1/permissions", `{"permission_mode":"deny_all"}`}, 
+ {"GET", "/api/users/" + otherID, ""}, + {"PUT", "/api/users/" + otherID, `{"name":"Updated"}`}, + {"DELETE", "/api/users/" + otherID, ""}, + {"PUT", "/api/users/" + otherID + "/permissions", `{"permission_mode":"deny_all"}`}, } for _, ep := range endpoints { @@ -316,7 +331,7 @@ func TestSMTPEndpoints_RequireAdmin(t *testing.T) { UUID: "user-uuid-5678", Email: "user2@test.com", Name: "Regular User 2", - Role: "user", + Role: models.RoleUser, Enabled: true, } require.NoError(t, user.SetPassword("userpassword123")) @@ -462,7 +477,7 @@ func TestInviteUser_DuplicateEmailBlocked(t *testing.T) { UUID: "existing-uuid-1234", Email: "existing@test.com", Name: "Existing User", - Role: "user", + Role: models.RoleUser, Enabled: true, } require.NoError(t, db.Create(&existing).Error) @@ -488,7 +503,7 @@ func TestInviteUser_EmailCaseInsensitive(t *testing.T) { UUID: "existing-uuid-5678", Email: "test@example.com", Name: "Existing User", - Role: "user", + Role: models.RoleUser, Enabled: true, } require.NoError(t, db.Create(&existing).Error) @@ -532,7 +547,7 @@ func TestUpdatePermissions_ValidModes(t *testing.T) { UUID: "perms-user-1234", Email: "permsuser@test.com", Name: "Perms User", - Role: "user", + Role: models.RoleUser, Enabled: true, } require.NoError(t, db.Create(&user).Error) @@ -574,7 +589,7 @@ func TestPublicEndpoints_NoAuthRequired(t *testing.T) { // Router WITHOUT auth middleware gin.SetMode(gin.TestMode) r := gin.New() - userHandler := handlers.NewUserHandler(db) + userHandler := handlers.NewUserHandler(db, nil) api := r.Group("/api") userHandler.RegisterRoutes(api) @@ -584,7 +599,7 @@ func TestPublicEndpoints_NoAuthRequired(t *testing.T) { user := models.User{ UUID: "public-test-uuid", Email: "public@test.com", - Role: "user", + Role: models.RoleUser, Enabled: false, InviteToken: "public-test-token-123456789012345678901234567", InviteExpires: &expires, diff --git a/backend/internal/cerberus/cerberus.go b/backend/internal/cerberus/cerberus.go index 
e28d37b9f..66bae81c4 100644 --- a/backend/internal/cerberus/cerberus.go +++ b/backend/internal/cerberus/cerberus.go @@ -272,7 +272,7 @@ func (c *Cerberus) isAuthenticatedAdmin(ctx *gin.Context) bool { return false } roleStr, ok := role.(string) - if !ok || roleStr != "admin" { + if !ok || roleStr != string(models.RoleAdmin) { return false } userID, exists := ctx.Get("userID") diff --git a/backend/internal/models/user.go b/backend/internal/models/user.go index 4cb9b3c62..f0455aa69 100644 --- a/backend/internal/models/user.go +++ b/backend/internal/models/user.go @@ -7,6 +7,27 @@ import ( "golang.org/x/crypto/bcrypt" ) +// UserRole represents an authenticated user's privilege tier. +type UserRole string + +const ( + // RoleAdmin has full access to all Charon features and management. + RoleAdmin UserRole = "admin" + // RoleUser can access the Charon management UI with restricted permissions. + RoleUser UserRole = "user" + // RolePassthrough can only authenticate for forward-auth proxy access. + RolePassthrough UserRole = "passthrough" +) + +// IsValid returns true when the role is one of the recognised privilege tiers. +func (r UserRole) IsValid() bool { + switch r { + case RoleAdmin, RoleUser, RolePassthrough: + return true + } + return false +} + // PermissionMode determines how user access to proxy hosts is evaluated. 
type PermissionMode string @@ -26,7 +47,7 @@ type User struct { APIKey string `json:"-" gorm:"uniqueIndex"` // For external API access, never exposed in JSON PasswordHash string `json:"-"` // Never serialize password hash Name string `json:"name"` - Role string `json:"role" gorm:"default:'user'"` // "admin", "user", "viewer" + Role UserRole `json:"role" gorm:"default:'user'"` Enabled bool `json:"enabled" gorm:"default:true"` FailedLoginAttempts int `json:"-" gorm:"default:0"` LockedUntil *time.Time `json:"-"` @@ -77,7 +98,7 @@ func (u *User) HasPendingInvite() bool { // - deny_all mode: User can ONLY access hosts in PermittedHosts (whitelist) func (u *User) CanAccessHost(hostID uint) bool { // Admins always have access - if u.Role == "admin" { + if u.Role == RoleAdmin { return true } diff --git a/backend/internal/models/user_test.go b/backend/internal/models/user_test.go index 86b40d861..1eeebf66b 100644 --- a/backend/internal/models/user_test.go +++ b/backend/internal/models/user_test.go @@ -87,7 +87,7 @@ func TestUser_HasPendingInvite(t *testing.T) { func TestUser_CanAccessHost_AllowAll(t *testing.T) { // User with allow_all mode (blacklist) - can access everything except listed hosts user := User{ - Role: "user", + Role: RoleUser, PermissionMode: PermissionModeAllowAll, PermittedHosts: []ProxyHost{ {ID: 1}, // Blocked host @@ -107,7 +107,7 @@ func TestUser_CanAccessHost_AllowAll(t *testing.T) { func TestUser_CanAccessHost_DenyAll(t *testing.T) { // User with deny_all mode (whitelist) - can only access listed hosts user := User{ - Role: "user", + Role: RoleUser, PermissionMode: PermissionModeDenyAll, PermittedHosts: []ProxyHost{ {ID: 5}, // Allowed host @@ -127,7 +127,7 @@ func TestUser_CanAccessHost_DenyAll(t *testing.T) { func TestUser_CanAccessHost_AdminBypass(t *testing.T) { // Admin users should always have access regardless of permission mode adminUser := User{ - Role: "admin", + Role: RoleAdmin, PermissionMode: PermissionModeDenyAll, PermittedHosts: 
[]ProxyHost{}, // No hosts in whitelist } @@ -140,7 +140,7 @@ func TestUser_CanAccessHost_AdminBypass(t *testing.T) { func TestUser_CanAccessHost_DefaultBehavior(t *testing.T) { // User with empty/default permission mode should behave like allow_all user := User{ - Role: "user", + Role: RoleUser, PermissionMode: "", // Empty = default PermittedHosts: []ProxyHost{ {ID: 1}, // Should be blocked @@ -175,7 +175,7 @@ func TestUser_CanAccessHost_EmptyPermittedHosts(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { user := User{ - Role: "user", + Role: RoleUser, PermissionMode: tt.permissionMode, PermittedHosts: []ProxyHost{}, } @@ -190,6 +190,31 @@ func TestPermissionMode_Constants(t *testing.T) { assert.Equal(t, PermissionMode("deny_all"), PermissionModeDenyAll) } +func TestUserRole_Constants(t *testing.T) { + assert.Equal(t, UserRole("admin"), RoleAdmin) + assert.Equal(t, UserRole("user"), RoleUser) + assert.Equal(t, UserRole("passthrough"), RolePassthrough) +} + +func TestUserRole_IsValid(t *testing.T) { + tests := []struct { + role UserRole + expected bool + }{ + {RoleAdmin, true}, + {RoleUser, true}, + {RolePassthrough, true}, + {UserRole("viewer"), false}, + {UserRole("superadmin"), false}, + {UserRole(""), false}, + } + for _, tt := range tests { + t.Run(string(tt.role), func(t *testing.T) { + assert.Equal(t, tt.expected, tt.role.IsValid()) + }) + } +} + // Helper function to create time pointers func timePtr(t time.Time) *time.Time { return &t diff --git a/backend/internal/services/auth_service.go b/backend/internal/services/auth_service.go index d5202e389..997cff4e4 100644 --- a/backend/internal/services/auth_service.go +++ b/backend/internal/services/auth_service.go @@ -33,9 +33,9 @@ func (s *AuthService) Register(email, password, name string) (*models.User, erro var count int64 s.db.Model(&models.User{}).Count(&count) - role := "user" + role := models.RoleUser if count == 0 { - role = "admin" // First user is admin + role = 
models.RoleAdmin } user := &models.User{ @@ -98,7 +98,7 @@ func (s *AuthService) GenerateToken(user *models.User) (string, error) { expirationTime := time.Now().Add(24 * time.Hour) claims := &Claims{ UserID: user.ID, - Role: user.Role, + Role: string(user.Role), SessionVersion: user.SessionVersion, RegisteredClaims: jwt.RegisteredClaims{ ExpiresAt: jwt.NewNumericDate(expirationTime), diff --git a/backend/internal/services/auth_service_test.go b/backend/internal/services/auth_service_test.go index fedc40016..2cca4daff 100644 --- a/backend/internal/services/auth_service_test.go +++ b/backend/internal/services/auth_service_test.go @@ -30,14 +30,14 @@ func TestAuthService_Register(t *testing.T) { // Test 1: First user should be admin admin, err := service.Register("admin@example.com", "password123", "Admin User") require.NoError(t, err) - assert.Equal(t, "admin", admin.Role) + assert.Equal(t, models.RoleAdmin, admin.Role) assert.NotEmpty(t, admin.PasswordHash) assert.NotEqual(t, "password123", admin.PasswordHash) // Test 2: Second user should be regular user user, err := service.Register("user@example.com", "password123", "Regular User") require.NoError(t, err) - assert.Equal(t, "user", user.Role) + assert.Equal(t, models.RoleUser, user.Role) } func TestAuthService_Login(t *testing.T) { @@ -300,7 +300,7 @@ func TestAuthService_AuthenticateToken_InvalidUserIDInClaims(t *testing.T) { claims := Claims{ UserID: user.ID + 9999, - Role: "user", + Role: string(models.RoleUser), SessionVersion: user.SessionVersion, RegisteredClaims: jwt.RegisteredClaims{ ExpiresAt: jwt.NewNumericDate(time.Now().Add(24 * time.Hour)), diff --git a/backend/internal/services/backup_service_rehydrate_test.go b/backend/internal/services/backup_service_rehydrate_test.go index 0034d940e..1308e1299 100644 --- a/backend/internal/services/backup_service_rehydrate_test.go +++ b/backend/internal/services/backup_service_rehydrate_test.go @@ -45,7 +45,7 @@ func TestBackupService_RehydrateLiveDatabase(t 
*testing.T) { UUID: uuid.NewString(), Email: "restore-user@example.com", Name: "Restore User", - Role: "user", + Role: models.RoleUser, Enabled: true, APIKey: uuid.NewString(), } @@ -87,7 +87,7 @@ func TestBackupService_RehydrateLiveDatabase_FromBackupWithWAL(t *testing.T) { UUID: uuid.NewString(), Email: "restore-from-wal@example.com", Name: "Restore From WAL", - Role: "user", + Role: models.RoleUser, Enabled: true, APIKey: uuid.NewString(), } diff --git a/docs/plans/archived_docker-socket-group-spec.md b/docs/plans/archive/archived_docker-socket-group-spec.md similarity index 100% rename from docs/plans/archived_docker-socket-group-spec.md rename to docs/plans/archive/archived_docker-socket-group-spec.md diff --git a/docs/plans/archive/uptime_regression_spec.md b/docs/plans/archive/uptime_regression_spec.md new file mode 100644 index 000000000..a69a91c16 --- /dev/null +++ b/docs/plans/archive/uptime_regression_spec.md @@ -0,0 +1,362 @@ +# Uptime Monitoring Regression Investigation (Scheduled vs Manual) + +Date: 2026-03-01 +Owner: Planning Agent +Status: Investigation Complete, Fix Plan Proposed +Severity: High (false DOWN states on automated monitoring) + +## 1. Executive Summary + +Two services (Wizarr and Charon) can flip to `DOWN` during scheduled cycles while manual checks immediately return `UP` because scheduled checks use a host-level TCP gate that can short-circuit monitor-level HTTP checks. + +The scheduled path is: +- `ticker -> CheckAll -> checkAllHosts -> (host status down) -> markHostMonitorsDown` + +The manual path is: +- `POST /api/v1/uptime/monitors/:id/check -> CheckMonitor -> checkMonitor` + +Only the scheduled path runs host precheck gating. If host precheck fails (TCP to upstream host/port), `CheckAll` skips HTTP checks and forcibly writes monitor status to `down` with heartbeat message `Host unreachable`. + +This is a backend state mutation problem (not only UI rendering). 
+ +## 1.1 Monitoring Policy (Authoritative Behavior) + +Charon uptime monitoring SHALL follow URL-truth semantics for HTTP/HTTPS monitors, +matching third-party external monitor behavior (Uptime Kuma style) without requiring +any additional service. + +Policy: +- HTTP/HTTPS monitors are URL-truth based. The monitor result is authoritative based + on the configured URL check outcome (status code/timeout/TLS/connectivity from URL + perspective). +- Internal TCP reachability precheck (`ForwardHost:ForwardPort`) is + non-authoritative for HTTP/HTTPS monitor status. +- TCP monitors remain endpoint-socket checks and may rely on direct socket + reachability semantics. +- Host precheck may still be used for optimization, grouping telemetry, and operator + diagnostics, but SHALL NOT force HTTP/HTTPS monitors to DOWN. + +## 2. Research Findings + +### 2.1 Execution Path Comparison (Required) + +### Scheduled path behavior +- Entry: `backend/internal/api/routes/routes.go` (background ticker, calls `uptimeService.CheckAll()`) +- `CheckAll()` calls `checkAllHosts()` first. + - File: `backend/internal/services/uptime_service.go:354` +- `checkAllHosts()` updates each `UptimeHost.Status` via TCP checks in `checkHost()`. + - File: `backend/internal/services/uptime_service.go:395` +- `checkHost()` dials `UptimeHost.Host` + monitor port (prefer `ProxyHost.ForwardPort`, fallback to URL port). + - File: `backend/internal/services/uptime_service.go:437` +- Back in `CheckAll()`, monitors are grouped by `UptimeHostID`. + - File: `backend/internal/services/uptime_service.go:367` +- If `UptimeHost.Status == "down"`, `markHostMonitorsDown()` is called and individual monitor checks are skipped. + - File: `backend/internal/services/uptime_service.go:381` + - File: `backend/internal/services/uptime_service.go:593` + +### Manual path behavior +- Entry: `POST /api/v1/uptime/monitors/:id/check`. 
+ - Handler: `backend/internal/api/handlers/uptime_handler.go:107` +- Calls `service.CheckMonitor(*monitor)` asynchronously. + - File: `backend/internal/services/uptime_service.go:707` +- `checkMonitor()` performs direct HTTP/TCP monitor check and updates monitor + heartbeat. + - File: `backend/internal/services/uptime_service.go:711` + +### Key divergence +- Scheduled: host-gated (precheck can override monitor) +- Manual: direct monitor check (no host gate) + +## 3. Root Cause With Evidence + +## 3.1 Primary Root Cause: Host Precheck Overrides HTTP Success in Scheduled Cycles + +When `UptimeHost` is marked `down`, scheduled checks do not run `checkMonitor()` for that host's monitors. Instead they call `markHostMonitorsDown()` which: +- sets each monitor `Status = "down"` +- writes `UptimeHeartbeat{Status: "down", Message: "Host unreachable"}` +- maxes failure count (`FailureCount = MaxRetries`) + +Evidence: +- Short-circuit: `backend/internal/services/uptime_service.go:381` +- Forced down write: `backend/internal/services/uptime_service.go:610` +- Forced heartbeat message: `backend/internal/services/uptime_service.go:624` + +This exactly matches symptom pattern: +1. Manual refresh sets monitor `UP` via direct HTTP check. +2. Next scheduler cycle can force it back to `DOWN` from host precheck path. + +## 3.2 Hypothesis Check: TCP precheck can fail while public URL HTTP check succeeds + +Confirmed as plausible by design: +- `checkHost()` tests upstream reachability (`ForwardHost:ForwardPort`) from Charon runtime. +- `checkMonitor()` tests monitor URL (public domain URL, often via Caddy/public routing). + +A service can be publicly reachable by monitor URL while upstream TCP precheck fails due to network namespace/routing/DNS/hairpin differences. 
+ +This is especially likely for: +- self-referential routes (Charon monitoring Charon via public hostname) +- host/container networking asymmetry +- services reachable through proxy path but not directly on upstream socket from current runtime context + +## 3.3 Recent Change Correlation (Required) + +### `SyncAndCheckForHost` (regression amplifier) +- Introduced in commit `2cd19d89` and called from proxy host create path. +- Files: + - `backend/internal/services/uptime_service.go:1195` + - `backend/internal/api/handlers/proxy_host_handler.go:418` +- Behavior: creates/syncs monitor and immediately runs `checkMonitor()`. + +Impact: makes monitors quickly show `UP` after create/manual, then scheduler can flip to `DOWN` if host precheck fails. This increased visibility of scheduled/manual inconsistency. + +### `CleanupStaleFailureCounts` +- Introduced in `2cd19d89`, refined in `7a12ab79`. +- File: `backend/internal/services/uptime_service.go:1277` +- It runs at startup and resets stale monitor states only; not per-cycle override logic. +- Not root cause of recurring per-cycle flip. + +### Frontend effective status changes +- Latest commit `0241de69` refactors `effectiveStatus` handling. +- File: `frontend/src/pages/Uptime.tsx`. +- Backend evidence proves this is not visual-only: scheduler writes `down` heartbeats/messages directly in DB. + +## 3.4 Grouping Logic Analysis (`UptimeHost`/`UpstreamHost`) + +Monitors are grouped by `UptimeHostID` in `CheckAll()`. `UptimeHost` is derived from `ProxyHost.ForwardHost` in sync flows. + +Relevant code: +- group map by `UptimeHostID`: `backend/internal/services/uptime_service.go:367` +- host linkage in sync: `backend/internal/services/uptime_service.go:189`, `backend/internal/services/uptime_service.go:226` +- sync single-host update path: `backend/internal/services/uptime_service.go:1023` + +Risk: one host precheck failure can mark all grouped monitors down without URL-level validation. + +## 4. 
Technical Specification (Fix Plan) + +## 4.1 Minimal Proper Fix (First) + +Goal: eliminate false DOWN while preserving existing behavior as much as possible. + +Change `CheckAll()` host-down branch to avoid hard override for HTTP/HTTPS monitors. + +Mandatory hotfix rule: +- WHEN a host precheck is `down`, THE SYSTEM SHALL partition host monitors by type inside `CheckAll()`. +- `markHostMonitorsDown` MUST be invoked only for `tcp` monitors. +- `http`/`https` monitors MUST still run through `checkMonitor()` and MUST NOT be force-written `down` by the host precheck path. +- Host precheck outcomes MAY be recorded for optimization/telemetry/grouping, but MUST NOT be treated as final status for `http`/`https` monitors. + +Proposed rule: +1. If host is down: + - For `http`/`https` monitors: still run `checkMonitor()` (do not force down). + - For `tcp` monitors: keep current host-down fast-path (`markHostMonitorsDown`) or direct tcp check. +2. If host is not down: + - Keep existing behavior (run `checkMonitor()` for all monitors). + +Rationale: +- Aligns scheduled behavior with manual for URL-based monitors. +- Preserves reverse proxy product semantics where public URL availability is the source of truth. +- Minimal code delta in `CheckAll()` decision branch. +- Preserves optimization for true TCP-only monitors. + +### Exact file/function targets +- `backend/internal/services/uptime_service.go` + - `CheckAll()` + - add small helper (optional): `partitionMonitorsByType(...)` + +## 4.2 Long-Term Robust Fix (Deferred) + +Introduce host precheck as advisory signal, not authoritative override. + +Design: +1. Add `HostReachability` result to run context (not persisted as forced monitor status). +2. Always execute per-monitor checks, but use host precheck to: + - tune retries/backoff + - annotate failure reason + - optimize notification batching +3. 
Optionally add feature flag: + - `feature.uptime.strict_host_precheck` (default `false`) + - allows legacy strict gating in environments that want it. + +Benefits: +- Removes false DOWN caused by precheck mismatch. +- Keeps performance and batching controls. +- More explicit semantics for operators. + +## 5. API/Schema Impact + +No API contract change required for minimal fix. +No database migration required for minimal fix. + +Long-term fix may add one feature flag setting only. + +## 6. EARS Requirements + +### Ubiquitous +- THE SYSTEM SHALL evaluate HTTP/HTTPS monitor availability using URL-level checks as the authoritative signal. + +### Event-driven +- WHEN the scheduled uptime cycle runs, THE SYSTEM SHALL execute HTTP/HTTPS monitor checks regardless of internal host precheck state. +- WHEN the scheduled uptime cycle runs and host precheck is down, THE SYSTEM SHALL apply host-level forced-down logic only to TCP monitors. + +### State-driven +- WHILE a monitor type is `http` or `https`, THE SYSTEM SHALL NOT force monitor status to `down` solely from internal host precheck failure. +- WHILE a monitor type is `tcp`, THE SYSTEM SHALL evaluate status using endpoint socket reachability semantics. + +### Unwanted behavior +- IF internal host precheck is unreachable AND URL-level HTTP/HTTPS check returns success, THEN THE SYSTEM SHALL set monitor status to `up`. +- IF internal host precheck is reachable AND URL-level HTTP/HTTPS check fails, THEN THE SYSTEM SHALL set monitor status to `down`. + +### Optional +- WHERE host precheck telemetry is enabled, THE SYSTEM SHALL record host-level reachability for diagnostics and grouping without overriding HTTP/HTTPS monitor final state. + +## 7. 
Implementation Plan + +### Phase 1: Reproduction Lock-In (Tests First) +- Add backend service test proving current regression: + - host precheck fails + - monitor URL check would succeed + - scheduled `CheckAll()` currently writes down (existing behavior) +- File: `backend/internal/services/uptime_service_test.go` (new test block) + +### Phase 2: Minimal Backend Fix +- Update `CheckAll()` branch logic to run HTTP/HTTPS monitors even when host is down. +- Make monitor partitioning explicit and mandatory in `CheckAll()` host-down branch. +- Add an implementation guard before partitioning: normalize monitor type using + `strings.TrimSpace` + `strings.ToLower` to prevent `HTTP`/`HTTPS` case + regressions and whitespace-related misclassification. +- Ensure `markHostMonitorsDown` is called only for TCP monitor partitions. +- File: `backend/internal/services/uptime_service.go` + +### Phase 3: Backend Validation +- Add/adjust tests: + - scheduled path no longer forces down when HTTP succeeds + - manual and scheduled reach same final state for HTTP monitors + - internal host unreachable + public URL HTTP 200 => monitor is `UP` + - internal host reachable + public URL failure => monitor is `DOWN` + - TCP monitor behavior unchanged under host-down conditions +- Files: + - `backend/internal/services/uptime_service_test.go` + - `backend/internal/services/uptime_service_race_test.go` (if needed for concurrency side-effects) + +### Phase 4: Integration/E2E Coverage +- Add targeted API-level integration test for scheduler vs manual parity. 
+- Add Playwright scenario for: + - monitor set UP by manual check + - remains UP after scheduled cycle when URL is reachable +- Add parity scenario for: + - internal TCP precheck unreachable + URL returns 200 => `UP` + - internal TCP precheck reachable + URL failure => `DOWN` +- Files: + - `backend/internal/api/routes/routes_test.go` (or uptime handler integration suite) + - `tests/monitoring/uptime-monitoring.spec.ts` (or equivalent uptime spec file) + +Scope note: +- This hotfix plan is intentionally limited to backend behavior correction and + regression tests (unit/integration/E2E). +- Dedicated documentation-phase work is deferred and out of scope for this + hotfix PR. + +## 8. Test Plan (Unit / Integration / E2E) + +Duplicate notification definition (hotfix acceptance/testing): +- A duplicate notification means the same `(monitor_id, status, + scheduler_tick_id)` is emitted more than once within a single scheduler run. + +## Unit Tests +1. `CheckAll_HostDown_DoesNotForceDown_HTTPMonitor_WhenHTTPCheckSucceeds` +2. `CheckAll_HostDown_StillHandles_TCPMonitor_Conservatively` +3. `CheckAll_ManualAndScheduledParity_HTTPMonitor` +4. `CheckAll_InternalHostUnreachable_PublicURL200_HTTPMonitorEndsUp` (blocking) +5. `CheckAll_InternalHostReachable_PublicURLFail_HTTPMonitorEndsDown` (blocking) + +## Integration Tests +1. Scheduler endpoint (`/api/v1/system/uptime/check`) parity with monitor check endpoint. +2. Verify DB heartbeat message is real HTTP result (not `Host unreachable`) for HTTP monitors where URL is reachable. +3. Verify when host precheck is down, HTTP monitor heartbeat/notification output is derived from `checkMonitor()` (not synthetic host-path `Host unreachable`). +4. Verify no duplicate notifications are emitted from host+monitor paths for the same scheduler run, where duplicate is defined as repeated `(monitor_id, status, scheduler_tick_id)`. +5. Verify internal host precheck unreachable + public URL 200 still resolves monitor `UP`. +6. 
Verify internal host precheck reachable + public URL failure resolves monitor `DOWN`. + +## E2E Tests +1. Create/sync monitor scenario where manual refresh returns `UP`. +2. Wait one scheduler interval. +3. Assert monitor remains `UP` and latest heartbeat is not forced `Host unreachable` for reachable URL. +4. Assert scenario: internal host precheck unreachable + public URL 200 => monitor remains `UP`. +5. Assert scenario: internal host precheck reachable + public URL failure => monitor is `DOWN`. + +## Regression Guardrails +- Add a test explicitly asserting that host precheck must not unconditionally override HTTP monitor checks. +- Add explicit assertions that HTTP monitors under host-down precheck emit + check-derived heartbeat messages and do not produce duplicate notifications + under the `(monitor_id, status, scheduler_tick_id)` rule within a single + scheduler run. + +## 9. Risks and Rollback + +## Risks +1. More HTTP checks under true host outage may increase check volume. +2. Notification patterns may shift from single host-level event to monitor-level batched events. +3. Edge cases for mixed-type monitor groups (HTTP + TCP) need deterministic behavior. + +## Mitigations +1. Preserve batching (`queueDownNotification`) and existing retry thresholds. +2. Keep TCP strict path unchanged in minimal fix. +3. Add explicit log fields and targeted tests for mixed groups. + +## Rollback Plan +1. Revert the `CheckAll()` branch change only (single-file rollback). +2. Keep added tests; mark expected behavior as legacy if temporary rollback needed. +3. If necessary, introduce temporary feature toggle to switch between strict and tolerant host gating. + +## 10. 
PR Slicing Strategy + +Decision: Single focused PR (hotfix + tests) + +Trigger reasons: +- High-severity runtime behavior fix requiring minimal blast radius +- Fast review/rollback with behavior-only delta plus regression coverage +- Avoid scope creep into optional hardening/feature-flag work + +### PR-1 (Hotfix + Tests) +Scope: +- `CheckAll()` host-down branch adjustment for HTTP/HTTPS +- Unit/integration/E2E regression tests for URL-truth semantics + +Files: +- `backend/internal/services/uptime_service.go` +- `backend/internal/services/uptime_service_test.go` +- `backend/internal/api/routes/routes_test.go` (or equivalent) +- `tests/monitoring/uptime-monitoring.spec.ts` (or equivalent) + +Validation gates: +- backend unit tests pass +- targeted uptime integration tests pass +- targeted uptime E2E tests pass +- no behavior regression in existing `CheckAll` tests + +Rollback: +- single revert of PR-1 commit + +## 11. Acceptance Criteria (DoD) + +1. Scheduled and manual checks produce consistent status for HTTP/HTTPS monitors. +2. A reachable monitor URL is not forced to `DOWN` solely by host precheck failure. +3. New regression tests fail before fix and pass after fix. +4. No break in TCP monitor behavior expectations. +5. No new critical/high security findings in touched paths. +6. Blocking parity case passes: internal host precheck unreachable + public URL 200 => scheduled result is `UP`. +7. Blocking parity case passes: internal host precheck reachable + public URL failure => scheduled result is `DOWN`. +8. Under host-down precheck, HTTP monitors produce check-derived heartbeat messages (not synthetic `Host unreachable` from host path). +9. No duplicate notifications are produced by host+monitor paths within a + single scheduler run, where duplicate is defined as repeated + `(monitor_id, status, scheduler_tick_id)`. + +## 12. Implementation Risks + +1. Increased scheduler workload during host-precheck failures because HTTP/HTTPS checks continue to run. +2. 
Notification cadence may change due to check-derived monitor outcomes replacing host-forced synthetic downs. +3. Mixed monitor groups (TCP + HTTP/HTTPS) require strict ordering/partitioning to avoid regression. + +Mitigations: +- Keep change localized to `CheckAll()` host-down branch decisioning. +- Add explicit regression tests for both parity directions and mixed monitor types. +- Keep rollback path as single-commit revert. diff --git a/docs/plans/current_spec.md b/docs/plans/current_spec.md index a69a91c16..490a4c5f5 100644 --- a/docs/plans/current_spec.md +++ b/docs/plans/current_spec.md @@ -1,362 +1,981 @@ -# Uptime Monitoring Regression Investigation (Scheduled vs Manual) +# User Management Consolidation & Privilege Tier System -Date: 2026-03-01 +Date: 2026-06-25 Owner: Planning Agent -Status: Investigation Complete, Fix Plan Proposed -Severity: High (false DOWN states on automated monitoring) +Status: Proposed +Severity: Medium (UX consolidation + feature enhancement) -## 1. Executive Summary +--- -Two services (Wizarr and Charon) can flip to `DOWN` during scheduled cycles while manual checks immediately return `UP` because scheduled checks use a host-level TCP gate that can short-circuit monitor-level HTTP checks. +## 1. Introduction -The scheduled path is: -- `ticker -> CheckAll -> checkAllHosts -> (host status down) -> markHostMonitorsDown` +### 1.1 Overview -The manual path is: -- `POST /api/v1/uptime/monitors/:id/check -> CheckMonitor -> checkMonitor` +Charon currently manages users through **two separate interfaces**: -Only the scheduled path runs host precheck gating. If host precheck fails (TCP to upstream host/port), `CheckAll` skips HTTP checks and forcibly writes monitor status to `down` with heartbeat message `Host unreachable`. +1. **Admin Account** page at `/settings/account` — self-service profile management (name, email, password, API key, certificate email) +2. 
**Account Management** page at `/settings/account-management` — admin-only user CRUD (list, invite, delete, update roles/permissions) -This is a backend state mutation problem (not only UI rendering). +This split is confusing and redundant. A logged-in admin must navigate to two different places to manage their own account versus managing other users. The system also lacks a formal privilege tier model — the `Role` field on the `User` model is a raw string defaulting to `"user"`, with only `"admin"` and `"user"` exposed in the frontend selector (the model comment mentions `"viewer"` but it is entirely unused). -## 1.1 Monitoring Policy (Authoritative Behavior) +### 1.2 Objectives -Charon uptime monitoring SHALL follow URL-truth semantics for HTTP/HTTPS monitors, -matching third-party external monitor behavior (Uptime Kuma style) without requiring -any additional service. +1. **Consolidate** user management into a single **"Users"** page that lists ALL users, including the admin. +2. **Introduce a formal privilege tier system** with three tiers: **Admin**, **User**, and **Pass-through**. +3. **Self-service profile editing** occurs inline or via a detail drawer/modal on the consolidated page — no separate "Admin Account" page. +4. **Remove** the `/settings/account` route and its sidebar/tab navigation entry entirely. +5. **Update navigation** to a single "Users" entry under Settings (or as a top-level item). +6. **Write E2E Playwright tests** for the consolidated page before any implementation changes. -Policy: -- HTTP/HTTPS monitors are URL-truth based. The monitor result is authoritative based - on the configured URL check outcome (status code/timeout/TLS/connectivity from URL - perspective). -- Internal TCP reachability precheck (`ForwardHost:ForwardPort`) is - non-authoritative for HTTP/HTTPS monitor status. -- TCP monitors remain endpoint-socket checks and may rely on direct socket - reachability semantics. 
-- Host precheck may still be used for optimization, grouping telemetry, and operator - diagnostics, but SHALL NOT force HTTP/HTTPS monitors to DOWN. +--- ## 2. Research Findings -### 2.1 Execution Path Comparison (Required) +### 2.1 Current Architecture: Backend + +#### 2.1.1 User Model — `backend/internal/models/user.go` + +```go +type User struct { + ID uint `json:"-" gorm:"primaryKey"` + UUID string `json:"uuid" gorm:"uniqueIndex"` + Email string `json:"email" gorm:"uniqueIndex"` + APIKey string `json:"-" gorm:"uniqueIndex"` + PasswordHash string `json:"-"` + Name string `json:"name"` + Role string `json:"role" gorm:"default:'user'"` // "admin", "user", "viewer" + Enabled bool `json:"enabled" gorm:"default:true"` + FailedLoginAttempts int `json:"-" gorm:"default:0"` + LockedUntil *time.Time `json:"-"` + LastLogin *time.Time `json:"last_login,omitempty"` + SessionVersion uint `json:"-" gorm:"default:0"` + // Invite system fields ... + PermissionMode PermissionMode `json:"permission_mode" gorm:"default:'allow_all'"` + PermittedHosts []ProxyHost `json:"permitted_hosts,omitempty" gorm:"many2many:user_permitted_hosts;"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} +``` + +**Key observations:** + +| Aspect | Current State | Issue | +|--------|--------------|-------| +| `Role` field | Raw string, default `"user"` | No Go-level enum/const; `"viewer"` mentioned in comment but unused anywhere | +| Permission logic | `CanAccessHost()` — admins bypass all checks | Sound; needs extension for pass-through tier | +| JSON serialization | `ID` is `json:"-"`, `APIKey` is `json:"-"` | Correctly hidden from API responses | +| Password hashing | bcrypt via `SetPassword()` / `CheckPassword()` | Solid | + +#### 2.1.2 Auth Service — `backend/internal/services/auth_service.go` + +- `Register()`: First user becomes admin (`role = "admin"`) +- `Login()`: Account lockout after 5 failed attempts; sets `LastLogin` +- `GenerateToken()`: JWT with 
claims `{UserID, Role, SessionVersion}`, 24h expiry +- `AuthenticateToken()`: Validates JWT + checks `Enabled` + matches `SessionVersion` +- `InvalidateSessions()`: Increments `SessionVersion` to revoke all existing tokens + +**Impact of role changes:** The `Role` string is embedded in the JWT. When a user's role is changed, their existing JWT still carries the old role until it expires or `SessionVersion` is bumped. The `InvalidateSessions()` method exists for this purpose, **but it is NOT currently called in `UpdateUser()`**. This is a security gap — a user demoted from admin to passthrough retains their admin JWT for up to 24 hours. Task 2.6 addresses this. + +#### 2.1.3 Auth Middleware — `backend/internal/api/middleware/auth.go` + +```go +func AuthMiddleware(authService *services.AuthService) gin.HandlerFunc +func RequireRole(role string) gin.HandlerFunc +``` + +- `AuthMiddleware`: Extracts token from header/cookie/query, sets `userID` and `role` in Gin context. +- `RequireRole`: Checks `role` against a **single** required role string, always allows `"admin"` as fallback. Currently used only for SMTP settings. + +**Gap:** Most admin-only endpoints in `user_handler.go` do inline `role != "admin"` checks via `requireAdmin(c)` in `permission_helpers.go` rather than using middleware. This works but is inconsistent. 
+ +#### 2.1.4 User Handler — `backend/internal/api/handlers/user_handler.go` + +Two disjoint groups of endpoints: + +| Group | Endpoints | Purpose | Auth | +|-------|-----------|---------|------| +| **Self-service** | `GET /user/profile`, `POST /user/profile`, `POST /user/api-key` | Current user edits own profile/API key | Any authenticated user | +| **Admin CRUD** | `GET /users`, `POST /users`, `POST /users/invite`, `GET /users/:id`, `PUT /users/:id`, `DELETE /users/:id`, `PUT /users/:id/permissions`, `POST /users/:id/resend-invite`, `POST /users/preview-invite-url` | Admin manages all users | Admin only (inline check) | + +**Key handler behaviors:** + +- `ListUsers()`: Returns all users with safe fields (excludes password hash, API key). Admin only. +- `UpdateUser()`: Can change `name`, `email`, `password`, `role`, `enabled`. **Does NOT call `InvalidateSessions()` on role change** — a demoted user retains their old-role JWT for up to 24 hours. This is a security gap that must be fixed in this spec (see Task 2.6). +- `DeleteUser()`: Prevents self-deletion. +- `GetProfile()`: Returns `{id, email, name, role, has_api_key, api_key_masked}` for the calling user. +- `UpdateProfile()`: Requires `current_password` verification when email changes. +- `RegenerateAPIKey()`: Generates new UUID-based API key for the calling user. 
+ +#### 2.1.5 Route Registration — `backend/internal/api/routes/routes.go` + +``` +Public: + POST /api/v1/auth/login + POST /api/v1/auth/register + GET /api/v1/auth/verify (forward auth for Caddy) + GET /api/v1/auth/status + GET /api/v1/setup/status + POST /api/v1/setup + GET /api/v1/invite/validate + POST /api/v1/invite/accept + +Protected (authMiddleware): + POST /api/v1/auth/logout + POST /api/v1/auth/refresh + GET /api/v1/auth/me + POST /api/v1/auth/change-password + GET /api/v1/user/profile ← Self-service group + POST /api/v1/user/profile ← + POST /api/v1/user/api-key ← + GET /api/v1/users ← Admin CRUD group + POST /api/v1/users ← + POST /api/v1/users/invite ← + POST /api/v1/users/preview-invite-url ← + GET /api/v1/users/:id ← + PUT /api/v1/users/:id ← + DELETE /api/v1/users/:id ← + PUT /api/v1/users/:id/permissions ← + POST /api/v1/users/:id/resend-invite ← +``` + +#### 2.1.6 Forward Auth — `backend/internal/api/handlers/auth_handler.go` + +The `Verify()` handler at `GET /api/v1/auth/verify` is the critical integration point for the pass-through tier: + +1. Extracts token from cookie/header/query +2. Authenticates user via `AuthenticateToken()` +3. Reads `X-Forwarded-Host` from Caddy +4. Checks `user.CanAccessHost(hostID)` against the permission model +5. Returns `200 OK` with `X-Auth-User` headers or `401`/`403` + +**Pass-through users will use this exact path** — they log in to Charon only to establish a session, then Caddy's `forward_auth` directive validates their access to proxied services. They never need to see the Charon management UI. + +### 2.2 Current Architecture: Frontend + +#### 2.2.1 Router — `frontend/src/App.tsx` + +```tsx +// Three routes serve user management: +<Route path="/users" element={<UsersPage />} /> // top-level +<Route path="/settings" element={<Settings />}> + <Route path="account" element={<Account />} /> // self-service + <Route path="account-management" element={<UsersPage />} /> // admin CRUD +</Route> +``` -### Scheduled path behavior -- Entry: `backend/internal/api/routes/routes.go` (background ticker, calls `uptimeService.CheckAll()`) -- `CheckAll()` calls `checkAllHosts()` first. 
- - File: `backend/internal/services/uptime_service.go:354` -- `checkAllHosts()` updates each `UptimeHost.Status` via TCP checks in `checkHost()`. - - File: `backend/internal/services/uptime_service.go:395` -- `checkHost()` dials `UptimeHost.Host` + monitor port (prefer `ProxyHost.ForwardPort`, fallback to URL port). - - File: `backend/internal/services/uptime_service.go:437` -- Back in `CheckAll()`, monitors are grouped by `UptimeHostID`. - - File: `backend/internal/services/uptime_service.go:367` -- If `UptimeHost.Status == "down"`, `markHostMonitorsDown()` is called and individual monitor checks are skipped. - - File: `backend/internal/services/uptime_service.go:381` - - File: `backend/internal/services/uptime_service.go:593` +The `UsersPage` component is mounted at **two** routes: `/users` and `/settings/account-management`. The `Account` component is only at `/settings/account`. -### Manual path behavior -- Entry: `POST /api/v1/uptime/monitors/:id/check`. - - Handler: `backend/internal/api/handlers/uptime_handler.go:107` -- Calls `service.CheckMonitor(*monitor)` asynchronously. - - File: `backend/internal/services/uptime_service.go:707` -- `checkMonitor()` performs direct HTTP/TCP monitor check and updates monitor + heartbeat. - - File: `backend/internal/services/uptime_service.go:711` +#### 2.2.2 Settings Page — `frontend/src/pages/Settings.tsx` -### Key divergence -- Scheduled: host-gated (precheck can override monitor) -- Manual: direct monitor check (no host gate) +Tab bar with 4 items: `System`, `Notifications`, `SMTP`, `Account`. Note that **Account Management is NOT in the tab bar** — it is only reachable via the sidebar. The `Account` tab links to `/settings/account` (the self-service profile page). + +#### 2.2.3 Sidebar Navigation — `frontend/src/components/Layout.tsx` -## 3. 
Root Cause With Evidence +Under the Settings section, two entries: -## 3.1 Primary Root Cause: Host Precheck Overrides HTTP Success in Scheduled Cycles +```tsx +{ name: t('navigation.adminAccount'), path: '/settings/account', icon: '🛡️' }, +{ name: t('navigation.accountManagement'), path: '/settings/account-management', icon: '👥' }, +``` -When `UptimeHost` is marked `down`, scheduled checks do not run `checkMonitor()` for that host's monitors. Instead they call `markHostMonitorsDown()` which: -- sets each monitor `Status = "down"` -- writes `UptimeHeartbeat{Status: "down", Message: "Host unreachable"}` -- maxes failure count (`FailureCount = MaxRetries`) - -Evidence: -- Short-circuit: `backend/internal/services/uptime_service.go:381` -- Forced down write: `backend/internal/services/uptime_service.go:610` -- Forced heartbeat message: `backend/internal/services/uptime_service.go:624` - -This exactly matches symptom pattern: -1. Manual refresh sets monitor `UP` via direct HTTP check. -2. Next scheduler cycle can force it back to `DOWN` from host precheck path. +#### 2.2.4 Account Page — `frontend/src/pages/Account.tsx` -## 3.2 Hypothesis Check: TCP precheck can fail while public URL HTTP check succeeds +Four cards: +1. **Profile** — name, email (with password verification for email changes) +2. **Certificate Email** — toggle between account email and custom email for ACME +3. **Password Change** — old/new/confirm password form +4. **API Key** — masked display + regenerate button -Confirmed as plausible by design: -- `checkHost()` tests upstream reachability (`ForwardHost:ForwardPort`) from Charon runtime. -- `checkMonitor()` tests monitor URL (public domain URL, often via Caddy/public routing). +Uses `api/user.ts` (singular) — endpoints: `GET /user/profile`, `POST /user/profile`, `POST /user/api-key`. -A service can be publicly reachable by monitor URL while upstream TCP precheck fails due to network namespace/routing/DNS/hairpin differences. 
+#### 2.2.5 Users Page — `frontend/src/pages/UsersPage.tsx` -This is especially likely for: -- self-referential routes (Charon monitoring Charon via public hostname) -- host/container networking asymmetry -- services reachable through proxy path but not directly on upstream socket from current runtime context +~900 lines. Contains: +- `InviteModal` — email, role select (`user`/`admin`), permission mode, host selection, URL preview +- `PermissionsModal` — permission mode + host checkboxes per user +- `UsersPage` (default export) — table listing all users with columns: User (name/email), Role (badge), Status (active/pending/expired), Permissions (whitelist/blacklist), Enabled (toggle), Actions (resend/permissions/delete) + +**Current limitations:** +- Admin users cannot be disabled (switch is `disabled` when `role === 'admin'`) +- Admin users cannot be deleted (delete button is `disabled` when `role === 'admin'`) +- No permission editing for admins (gear icon hidden when `role !== 'admin'`) +- Role selector in `InviteModal` only has `user` and `admin` options +- No inline profile editing — admins can't edit their own name/email/password from this page +- The admin's own row appears in the list but has limited interactivity + +#### 2.2.6 API Client Files + +**`frontend/src/api/user.ts`** (singular — self-service): + +```tsx +interface UserProfile { id, email, name, role, has_api_key, api_key_masked } +getProfile() → GET /user/profile +updateProfile(data) → POST /user/profile +regenerateApiKey() → POST /user/api-key +``` -## 3.3 Recent Change Correlation (Required) +**`frontend/src/api/users.ts`** (plural — admin CRUD): -### `SyncAndCheckForHost` (regression amplifier) -- Introduced in commit `2cd19d89` and called from proxy host create path. -- Files: - - `backend/internal/services/uptime_service.go:1195` - - `backend/internal/api/handlers/proxy_host_handler.go:418` -- Behavior: creates/syncs monitor and immediately runs `checkMonitor()`. 
+```tsx +interface User { id, uuid, email, name, role, enabled, last_login, invite_status, ... } +type PermissionMode = 'allow_all' | 'deny_all' +listUsers() → GET /users +getUser(id) → GET /users/:id +createUser(data) → POST /users +inviteUser(data) → POST /users/invite +updateUser(id, data) → PUT /users/:id +deleteUser(id) → DELETE /users/:id +updateUserPermissions(id, d) → PUT /users/:id/permissions +validateInvite(token) → GET /invite/validate +acceptInvite(data) → POST /invite/accept +previewInviteURL(email) → POST /users/preview-invite-url +resendInvite(id) → POST /users/:id/resend-invite +``` -Impact: makes monitors quickly show `UP` after create/manual, then scheduler can flip to `DOWN` if host precheck fails. This increased visibility of scheduled/manual inconsistency. +#### 2.2.7 Auth Context — `frontend/src/context/AuthContextValue.ts` -### `CleanupStaleFailureCounts` -- Introduced in `2cd19d89`, refined in `7a12ab79`. -- File: `backend/internal/services/uptime_service.go:1277` -- It runs at startup and resets stale monitor states only; not per-cycle override logic. -- Not root cause of recurring per-cycle flip. +```tsx +interface User { user_id: number; role: string; name?: string; email?: string; } +interface AuthContextType { user, login, logout, changePassword, isAuthenticated, isLoading } +``` -### Frontend effective status changes -- Latest commit `0241de69` refactors `effectiveStatus` handling. -- File: `frontend/src/pages/Uptime.tsx`. -- Backend evidence proves this is not visual-only: scheduler writes `down` heartbeats/messages directly in DB. +The `AuthProvider` in `AuthContext.tsx` fetches `/api/v1/auth/me` on mount and stores the user. Auto-logout after 15 minutes of inactivity. -## 3.4 Grouping Logic Analysis (`UptimeHost`/`UpstreamHost`) +#### 2.2.8 Route Protection — `frontend/src/components/RequireAuth.tsx` -Monitors are grouped by `UptimeHostID` in `CheckAll()`. `UptimeHost` is derived from `ProxyHost.ForwardHost` in sync flows. 
+Checks `isAuthenticated`, localStorage token, and `user !== null`. Redirects to `/login` if any check fails. **No role-based route protection exists** — all authenticated users see the same navigation and routes. -Relevant code: -- group map by `UptimeHostID`: `backend/internal/services/uptime_service.go:367` -- host linkage in sync: `backend/internal/services/uptime_service.go:189`, `backend/internal/services/uptime_service.go:226` -- sync single-host update path: `backend/internal/services/uptime_service.go:1023` +#### 2.2.9 Translation Keys — `frontend/src/locales/en/translation.json` -Risk: one host precheck failure can mark all grouped monitors down without URL-level validation. - -## 4. Technical Specification (Fix Plan) - -## 4.1 Minimal Proper Fix (First) - -Goal: eliminate false DOWN while preserving existing behavior as much as possible. - -Change `CheckAll()` host-down branch to avoid hard override for HTTP/HTTPS monitors. - -Mandatory hotfix rule: -- WHEN a host precheck is `down`, THE SYSTEM SHALL partition host monitors by type inside `CheckAll()`. -- `markHostMonitorsDown` MUST be invoked only for `tcp` monitors. -- `http`/`https` monitors MUST still run through `checkMonitor()` and MUST NOT be force-written `down` by the host precheck path. -- Host precheck outcomes MAY be recorded for optimization/telemetry/grouping, but MUST NOT be treated as final status for `http`/`https` monitors. - -Proposed rule: -1. If host is down: - - For `http`/`https` monitors: still run `checkMonitor()` (do not force down). - - For `tcp` monitors: keep current host-down fast-path (`markHostMonitorsDown`) or direct tcp check. -2. If host is not down: - - Keep existing behavior (run `checkMonitor()` for all monitors). - -Rationale: -- Aligns scheduled behavior with manual for URL-based monitors. -- Preserves reverse proxy product semantics where public URL availability is the source of truth. -- Minimal code delta in `CheckAll()` decision branch. 
-- Preserves optimization for true TCP-only monitors. - -### Exact file/function targets -- `backend/internal/services/uptime_service.go` - - `CheckAll()` - - add small helper (optional): `partitionMonitorsByType(...)` - -## 4.2 Long-Term Robust Fix (Deferred) - -Introduce host precheck as advisory signal, not authoritative override. - -Design: -1. Add `HostReachability` result to run context (not persisted as forced monitor status). -2. Always execute per-monitor checks, but use host precheck to: - - tune retries/backoff - - annotate failure reason - - optimize notification batching -3. Optionally add feature flag: - - `feature.uptime.strict_host_precheck` (default `false`) - - allows legacy strict gating in environments that want it. - -Benefits: -- Removes false DOWN caused by precheck mismatch. -- Keeps performance and batching controls. -- More explicit semantics for operators. - -## 5. API/Schema Impact - -No API contract change required for minimal fix. -No database migration required for minimal fix. - -Long-term fix may add one feature flag setting only. - -## 6. EARS Requirements - -### Ubiquitous -- THE SYSTEM SHALL evaluate HTTP/HTTPS monitor availability using URL-level checks as the authoritative signal. - -### Event-driven -- WHEN the scheduled uptime cycle runs, THE SYSTEM SHALL execute HTTP/HTTPS monitor checks regardless of internal host precheck state. -- WHEN the scheduled uptime cycle runs and host precheck is down, THE SYSTEM SHALL apply host-level forced-down logic only to TCP monitors. - -### State-driven -- WHILE a monitor type is `http` or `https`, THE SYSTEM SHALL NOT force monitor status to `down` solely from internal host precheck failure. -- WHILE a monitor type is `tcp`, THE SYSTEM SHALL evaluate status using endpoint socket reachability semantics. - -### Unwanted behavior -- IF internal host precheck is unreachable AND URL-level HTTP/HTTPS check returns success, THEN THE SYSTEM SHALL set monitor status to `up`. 
-- IF internal host precheck is reachable AND URL-level HTTP/HTTPS check fails, THEN THE SYSTEM SHALL set monitor status to `down`. - -### Optional -- WHERE host precheck telemetry is enabled, THE SYSTEM SHALL record host-level reachability for diagnostics and grouping without overriding HTTP/HTTPS monitor final state. - -## 7. Implementation Plan - -### Phase 1: Reproduction Lock-In (Tests First) -- Add backend service test proving current regression: - - host precheck fails - - monitor URL check would succeed - - scheduled `CheckAll()` currently writes down (existing behavior) -- File: `backend/internal/services/uptime_service_test.go` (new test block) - -### Phase 2: Minimal Backend Fix -- Update `CheckAll()` branch logic to run HTTP/HTTPS monitors even when host is down. -- Make monitor partitioning explicit and mandatory in `CheckAll()` host-down branch. -- Add an implementation guard before partitioning: normalize monitor type using - `strings.TrimSpace` + `strings.ToLower` to prevent `HTTP`/`HTTPS` case - regressions and whitespace-related misclassification. -- Ensure `markHostMonitorsDown` is called only for TCP monitor partitions. -- File: `backend/internal/services/uptime_service.go` - -### Phase 3: Backend Validation -- Add/adjust tests: - - scheduled path no longer forces down when HTTP succeeds - - manual and scheduled reach same final state for HTTP monitors - - internal host unreachable + public URL HTTP 200 => monitor is `UP` - - internal host reachable + public URL failure => monitor is `DOWN` - - TCP monitor behavior unchanged under host-down conditions -- Files: - - `backend/internal/services/uptime_service_test.go` - - `backend/internal/services/uptime_service_race_test.go` (if needed for concurrency side-effects) - -### Phase 4: Integration/E2E Coverage -- Add targeted API-level integration test for scheduler vs manual parity. 
-- Add Playwright scenario for: - - monitor set UP by manual check - - remains UP after scheduled cycle when URL is reachable -- Add parity scenario for: - - internal TCP precheck unreachable + URL returns 200 => `UP` - - internal TCP precheck reachable + URL failure => `DOWN` -- Files: - - `backend/internal/api/routes/routes_test.go` (or uptime handler integration suite) - - `tests/monitoring/uptime-monitoring.spec.ts` (or equivalent uptime spec file) - -Scope note: -- This hotfix plan is intentionally limited to backend behavior correction and - regression tests (unit/integration/E2E). -- Dedicated documentation-phase work is deferred and out of scope for this - hotfix PR. - -## 8. Test Plan (Unit / Integration / E2E) - -Duplicate notification definition (hotfix acceptance/testing): -- A duplicate notification means the same `(monitor_id, status, - scheduler_tick_id)` is emitted more than once within a single scheduler run. - -## Unit Tests -1. `CheckAll_HostDown_DoesNotForceDown_HTTPMonitor_WhenHTTPCheckSucceeds` -2. `CheckAll_HostDown_StillHandles_TCPMonitor_Conservatively` -3. `CheckAll_ManualAndScheduledParity_HTTPMonitor` -4. `CheckAll_InternalHostUnreachable_PublicURL200_HTTPMonitorEndsUp` (blocking) -5. `CheckAll_InternalHostReachable_PublicURLFail_HTTPMonitorEndsDown` (blocking) - -## Integration Tests -1. Scheduler endpoint (`/api/v1/system/uptime/check`) parity with monitor check endpoint. -2. Verify DB heartbeat message is real HTTP result (not `Host unreachable`) for HTTP monitors where URL is reachable. -3. Verify when host precheck is down, HTTP monitor heartbeat/notification output is derived from `checkMonitor()` (not synthetic host-path `Host unreachable`). -4. Verify no duplicate notifications are emitted from host+monitor paths for the same scheduler run, where duplicate is defined as repeated `(monitor_id, status, scheduler_tick_id)`. -5. Verify internal host precheck unreachable + public URL 200 still resolves monitor `UP`. -6. 
Verify internal host precheck reachable + public URL failure resolves monitor `DOWN`. - -## E2E Tests -1. Create/sync monitor scenario where manual refresh returns `UP`. -2. Wait one scheduler interval. -3. Assert monitor remains `UP` and latest heartbeat is not forced `Host unreachable` for reachable URL. -4. Assert scenario: internal host precheck unreachable + public URL 200 => monitor remains `UP`. -5. Assert scenario: internal host precheck reachable + public URL failure => monitor is `DOWN`. - -## Regression Guardrails -- Add a test explicitly asserting that host precheck must not unconditionally override HTTP monitor checks. -- Add explicit assertions that HTTP monitors under host-down precheck emit - check-derived heartbeat messages and do not produce duplicate notifications - under the `(monitor_id, status, scheduler_tick_id)` rule within a single - scheduler run. - -## 9. Risks and Rollback - -## Risks -1. More HTTP checks under true host outage may increase check volume. -2. Notification patterns may shift from single host-level event to monitor-level batched events. -3. Edge cases for mixed-type monitor groups (HTTP + TCP) need deterministic behavior. - -## Mitigations -1. Preserve batching (`queueDownNotification`) and existing retry thresholds. -2. Keep TCP strict path unchanged in minimal fix. -3. Add explicit log fields and targeted tests for mixed groups. - -## Rollback Plan -1. Revert the `CheckAll()` branch change only (single-file rollback). -2. Keep added tests; mark expected behavior as legacy if temporary rollback needed. -3. If necessary, introduce temporary feature toggle to switch between strict and tolerant host gating. - -## 10. 
PR Slicing Strategy - -Decision: Single focused PR (hotfix + tests) - -Trigger reasons: -- High-severity runtime behavior fix requiring minimal blast radius -- Fast review/rollback with behavior-only delta plus regression coverage -- Avoid scope creep into optional hardening/feature-flag work - -### PR-1 (Hotfix + Tests) -Scope: -- `CheckAll()` host-down branch adjustment for HTTP/HTTPS -- Unit/integration/E2E regression tests for URL-truth semantics - -Files: -- `backend/internal/services/uptime_service.go` -- `backend/internal/services/uptime_service_test.go` -- `backend/internal/api/routes/routes_test.go` (or equivalent) -- `tests/monitoring/uptime-monitoring.spec.ts` (or equivalent) - -Validation gates: -- backend unit tests pass -- targeted uptime integration tests pass -- targeted uptime E2E tests pass -- no behavior regression in existing `CheckAll` tests - -Rollback: -- single revert of PR-1 commit - -## 11. Acceptance Criteria (DoD) - -1. Scheduled and manual checks produce consistent status for HTTP/HTTPS monitors. -2. A reachable monitor URL is not forced to `DOWN` solely by host precheck failure. -3. New regression tests fail before fix and pass after fix. -4. No break in TCP monitor behavior expectations. -5. No new critical/high security findings in touched paths. -6. Blocking parity case passes: internal host precheck unreachable + public URL 200 => scheduled result is `UP`. -7. Blocking parity case passes: internal host precheck reachable + public URL failure => scheduled result is `DOWN`. -8. Under host-down precheck, HTTP monitors produce check-derived heartbeat messages (not synthetic `Host unreachable` from host path). -9. No duplicate notifications are produced by host+monitor paths within a - single scheduler run, where duplicate is defined as repeated - `(monitor_id, status, scheduler_tick_id)`. - -## 12. Implementation Risks - -1. Increased scheduler workload during host-precheck failures because HTTP/HTTPS checks continue to run. -2. 
Notification cadence may change due to check-derived monitor outcomes replacing host-forced synthetic downs. -3. Mixed monitor groups (TCP + HTTP/HTTPS) require strict ordering/partitioning to avoid regression. - -Mitigations: -- Keep change localized to `CheckAll()` host-down branch decisioning. -- Add explicit regression tests for both parity directions and mixed monitor types. -- Keep rollback path as single-commit revert. +Relevant keys: +```json +"navigation.adminAccount": "Admin Account", +"navigation.accountManagement": "Account Management", +"users.roleUser": "User", +"users.roleAdmin": "Admin" +``` + +No keys exist for `"roleViewer"` or `"rolePassthrough"`. + +### 2.3 Current Architecture: E2E Tests + +**No Playwright E2E tests exist** for user management or account pages. The `tests/` directory contains specs for DNS providers, modal dropdowns, and debug utilities only. + +### 2.4 Configuration Files Review + +| File | Relevance | Action Needed | +|------|-----------|---------------| +| `.gitignore` | May need entries for new test artifacts | Review during implementation | +| `codecov.yml` | Coverage thresholds may need adjustment | Review if new files change coverage ratios | +| `.dockerignore` | No impact expected | No changes | +| `Dockerfile` | No impact expected | No changes | + +--- + +## 3. 
Technical Specifications + +### 3.1 Privilege Tier System + +#### 3.1.1 Tier Definitions + +| Tier | Value | Charon UI Access | Forward Auth (Proxy) Access | Can Manage Others | +|------|-------|------------------|---------------------------|-------------------| +| **Admin** | `"admin"` | Full access to all pages and settings | All hosts (bypasses permission checks) | Yes — full CRUD on all users | +| **User** | `"user"` | Access to Charon UI except admin-only pages (user management, system settings) | Based on `permission_mode` + `permitted_hosts` | No | +| **Pass-through** | `"passthrough"` | Login page only — redirected away from all management pages after login | Based on `permission_mode` + `permitted_hosts` | No | + +#### 3.1.2 Behavioral Details + +**Admin:** +- Unchanged from current behavior. +- Can edit any user's role, permissions, name, email, enabled state. +- Can edit their own profile inline on the consolidated Users page. +- Cannot delete themselves. Cannot disable themselves. + +**User:** +- Can log in to the Charon management UI. +- Can view proxy hosts, certificates, DNS, uptime, and other read-oriented pages. +- Cannot access: User management, System settings, SMTP settings, Encryption, Plugins. +- Can edit their own profile (name, email, password, API key) via self-service section on the Users page or a dedicated "My Account" modal. +- Forward auth access is governed by `permission_mode` + `permitted_hosts`. + +**Pass-through:** +- Can log in to Charon (to obtain a session cookie/JWT). +- Immediately after login, is redirected to `/passthrough` landing page — **no access to the management UI**. +- The session cookie is used by Caddy's `forward_auth` to grant access to proxied services based on `permission_mode` + `permitted_hosts`. +- Can access **only**: `/auth/logout`, `/auth/refresh`, `/auth/me`, `/auth/change-password`. +- **Cannot access**: `/user/profile`, `/user/api-key`, or any management API endpoints. 
+- Cannot access any Charon management UI pages. + +#### 3.1.3 Backend Role Constants + +Add typed constants to `backend/internal/models/user.go`: + +```go +type UserRole string + +const ( + RoleAdmin UserRole = "admin" + RoleUser UserRole = "user" + RolePassthrough UserRole = "passthrough" +) + +func (r UserRole) IsValid() bool { + switch r { + case RoleAdmin, RoleUser, RolePassthrough: + return true + } + return false +} +``` + +Change `User.Role` from `string` to `UserRole` (which is still a `string` alias, so GORM and JSON work identically — zero migration friction). + +### 3.2 Database Schema Changes + +#### 3.2.1 User Table + +**No schema migration needed.** The `Role` column is already a `TEXT` field storing string values. Changing the Go type from `string` to `UserRole` (a `string` alias) requires no ALTER TABLE. New role values (`"passthrough"`) are immediately storable. + +#### 3.2.2 Data Migration + +A one-time migration function should: +1. Scan for any users with `role = "viewer"` and update them to `role = "passthrough"` (in case any were manually created via the unused viewer path). +2. Validate all existing roles are in the allowed set. + +This runs in the `AutoMigrate` block in `routes.go`. 
+ +The migration must be in a clearly named function (e.g., `migrateViewerToPassthrough()`) and must log when it runs and how many records it affected: + +```go +func migrateViewerToPassthrough(db *gorm.DB) { + result := db.Model(&User{}).Where("role = ?", "viewer").Update("role", string(RolePassthrough)) + if result.RowsAffected > 0 { + log.Printf("[migration] Updated %d users from 'viewer' to 'passthrough' role", result.RowsAffected) + } +} +``` + +### 3.3 API Changes + +#### 3.3.1 Consolidated Self-Service Endpoints + +Merge the current `/user/profile` and `/user/api-key` endpoints into the existing `/users/:id` group by allowing users to edit their own record: + +| Current Endpoint | Action | Proposed | +|-----------------|--------|----------| +| `GET /user/profile` | Get own profile | **Keep** (convenience alias) | +| `POST /user/profile` | Update own profile | **Keep** (convenience alias) | +| `POST /user/api-key` | Regenerate API key | **Keep** (convenience alias) | +| `PUT /users/:id` | Admin updates any user | **Extend**: allow authenticated user to update their own record (name, email with password verification) | + +The self-service endpoints (`/user/*`) remain as convenient aliases that internally resolve to the caller's own user ID and delegate to the same service logic. No breaking API changes. + +#### 3.3.2 Role Validation + +The `UpdateUser()` and `CreateUser()` handlers must validate the `role` field against `UserRole.IsValid()`. The invite endpoint `InviteUser()` must accept `"passthrough"` as a valid role. 
+ +#### 3.3.3 Pass-through Route Protection + +Add middleware to block pass-through users from accessing management endpoints: + +```go +func RequireManagementAccess() gin.HandlerFunc { + return func(c *gin.Context) { + role := c.GetString("role") + if role == string(models.RolePassthrough) { + c.AbortWithStatusJSON(http.StatusForbidden, gin.H{ + "error": "pass-through users cannot access management features", + }) + return + } + c.Next() + } +} +``` + +Apply this middleware by restructuring the protected routes into two sub-groups: + +**Exempt routes** (authMiddleware only — no management check): +``` +POST /auth/logout +POST /auth/refresh +GET /auth/me +POST /auth/change-password +GET /auth/accessible-hosts +GET /auth/check-host/:hostId +GET /user/profile +POST /user/profile +POST /user/api-key +``` + +**Management routes** (authMiddleware + `RequireManagementAccess`): +``` +GET /users +POST /users +POST /users/invite +POST /users/preview-invite-url +GET /users/:id +PUT /users/:id +DELETE /users/:id +PUT /users/:id/permissions +POST /users/:id/resend-invite +All /backup/* routes +All /logs/* routes +All /settings/* routes +All /system/* routes +All /security/* routes +All /features/* routes +All /proxy/* routes +All /certificates/* routes +All /dns/* routes +All /uptime/* routes +All /plugins/* routes +``` + +The route split in `routes.go` should look like: + +```go +// Self-service + auth routes — accessible to all authenticated users (including passthrough) +exempt := protected.Group("") +{ + exempt.POST("/auth/logout", ...) + exempt.POST("/auth/refresh", ...) + exempt.GET("/auth/me", ...) + exempt.POST("/auth/change-password", ...) + exempt.GET("/auth/accessible-hosts", ...) + exempt.GET("/auth/check-host/:hostId", ...) + exempt.GET("/user/profile", ...) + exempt.POST("/user/profile", ...) + exempt.POST("/user/api-key", ...) 
+} + +// Management routes — blocked for passthrough users +management := protected.Group("", middleware.RequireManagementAccess()) +{ + // All other routes registered here +} +``` + +#### 3.3.4 New Endpoint: Pass-through Landing + +No new backend endpoint needed. The forward auth flow (`GET /auth/verify`) already works for pass-through users. The minimal landing page is purely a frontend concern. + +### 3.4 Frontend Changes + +#### 3.4.1 Consolidated Users Page + +Transform `UsersPage.tsx` into the single source of truth for all user management: + +**New capabilities:** +1. **Admin's own row is fully interactive** — edit name/email/password, regenerate API key, view cert email settings. +2. **"My Profile" section** at the top (or accessible via a button) for the currently logged-in user to manage their own account, replacing the `Account.tsx` page entirely. +3. **Role selector** includes three options: `Admin`, `User`, `Pass-through`. +4. **Permission controls** are shown for `User` and `Pass-through` roles (hidden for `Admin`). +5. **Inline editing** or a detail drawer for editing individual users. + +**Components to modify:** + +| Component | File | Change | +|-----------|------|--------| +| `InviteModal` | `UsersPage.tsx` | Add `"passthrough"` to role ` setName(e.target.value)} - required - /> - -
- - setEmail(e.target.value)} - required - error={emailValid === false ? t('errors.invalidEmail') : undefined} - /> -
- - - - - - - - {/* Certificate Email Settings */} - - -
- - {t('account.certificateEmail')} -
- - {t('account.certificateEmailDescription')} - -
-
- -
- { - setUseUserEmail(checked === true) - if (checked && profile) { - setCertEmail(profile.email) - } - }} - /> - -
- - {!useUserEmail && ( -
- - setCertEmail(e.target.value)} - required={!useUserEmail} - error={certEmailValid === false ? t('errors.invalidEmail') : undefined} - errorTestId="cert-email-error" - aria-invalid={certEmailValid === false} - /> -
- )} -
- - - -
-
- - {/* Password Change */} - - -
- - {t('account.changePassword')} -
- {t('account.changePasswordDescription')} -
-
- -
- - setOldPassword(e.target.value)} - required - autoComplete="current-password" - /> -
-
- - setNewPassword(e.target.value)} - required - autoComplete="new-password" - /> - -
-
- - setConfirmPassword(e.target.value)} - required - error={confirmPassword && newPassword !== confirmPassword ? t('account.passwordsDoNotMatch') : undefined} - autoComplete="new-password" - /> -
-
- - - -
-
- - {/* API Key */} - - -
- - {t('account.apiKey')} -
- - {t('account.apiKeyDescription')} - -
- -
- - -
-
-
- - - {t('account.securityNoticeMessage')} - - - {/* Password Prompt Modal */} - {showPasswordPrompt && ( -
- - -
- - {t('account.confirmPassword')} -
- - {t('account.confirmPasswordDescription')} - -
-
- -
- - setConfirmPasswordForUpdate(e.target.value)} - required - autoFocus - /> -
-
- - - - -
-
-
- )} - - {/* Email Update Confirmation Modal */} - {showEmailConfirmModal && ( -
- - -
- - {t('account.updateCertEmailTitle')} -
- - {t('account.updateCertEmailDescription', { email })} - -
- - - - - -
-
- )} - - ) -} diff --git a/frontend/src/pages/PassthroughLanding.tsx b/frontend/src/pages/PassthroughLanding.tsx new file mode 100644 index 000000000..fab15f6c6 --- /dev/null +++ b/frontend/src/pages/PassthroughLanding.tsx @@ -0,0 +1,63 @@ +import { useEffect, useRef } from 'react' +import { useTranslation } from 'react-i18next' +import { useAuth } from '../hooks/useAuth' +import { Button } from '../components/ui/Button' +import { Card } from '../components/ui/Card' +import { Shield, LogOut } from 'lucide-react' + +export default function PassthroughLanding() { + const { t } = useTranslation() + const { user, logout } = useAuth() + const headingRef = useRef(null) + + useEffect(() => { + headingRef.current?.focus() + }, []) + + return ( +
+
+ +
+
+
+
+ +
+

+ {t('passthrough.title')} +

+ {user?.name && ( +

+ {user.name} +

+ )} +
+ +

+ {t('passthrough.description')} +

+ +

+ {t('passthrough.noAccessToManagement')} +

+ + +
+
+
+ ) +} diff --git a/frontend/src/pages/Settings.tsx b/frontend/src/pages/Settings.tsx index 973438aba..2b591b4e1 100644 --- a/frontend/src/pages/Settings.tsx +++ b/frontend/src/pages/Settings.tsx @@ -2,11 +2,13 @@ import { Link, Outlet, useLocation } from 'react-router-dom' import { useTranslation } from 'react-i18next' import { PageShell } from '../components/layout/PageShell' import { cn } from '../utils/cn' -import { Settings as SettingsIcon, Server, Mail, User, Bell } from 'lucide-react' +import { Settings as SettingsIcon, Server, Mail, Bell, Users } from 'lucide-react' +import { useAuth } from '../hooks/useAuth' export default function Settings() { const { t } = useTranslation() const location = useLocation() + const { user } = useAuth() const isActive = (path: string) => location.pathname === path @@ -14,7 +16,7 @@ export default function Settings() { { path: '/settings/system', label: t('settings.system'), icon: Server }, { path: '/settings/notifications', label: t('navigation.notifications'), icon: Bell }, { path: '/settings/smtp', label: t('settings.smtp'), icon: Mail }, - { path: '/settings/account', label: t('settings.account'), icon: User }, + ...(user?.role === 'admin' ? 
[{ path: '/settings/users', label: t('navigation.users'), icon: Users }] : []), ] return ( diff --git a/frontend/src/pages/UsersPage.tsx b/frontend/src/pages/UsersPage.tsx index adfdc4fc8..36d0f966f 100644 --- a/frontend/src/pages/UsersPage.tsx +++ b/frontend/src/pages/UsersPage.tsx @@ -1,4 +1,5 @@ -import { useState, useEffect, useCallback } from 'react' +import { useState, useEffect, useCallback, useRef } from 'react' +import { useFocusTrap } from '../hooks/useFocusTrap' import { useTranslation } from 'react-i18next' import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query' import { Link } from 'react-router-dom' @@ -17,10 +18,14 @@ import { updateUser, updateUserPermissions, resendInvite, + getProfile, + updateProfile, + regenerateApiKey, } from '../api/users' import type { User, InviteUserRequest, PermissionMode, UpdateUserPermissionsRequest } from '../api/users' import { getProxyHosts } from '../api/proxyHosts' import type { ProxyHost } from '../api/proxyHosts' +import { useAuth } from '../hooks/useAuth' import { Users, UserPlus, @@ -36,6 +41,10 @@ import { Loader2, ExternalLink, AlertTriangle, + Pencil, + Key, + Lock, + UserCircle, } from 'lucide-react' interface InviteModalProps { @@ -47,9 +56,10 @@ interface InviteModalProps { function InviteModal({ isOpen, onClose, proxyHosts }: InviteModalProps) { const { t } = useTranslation() const queryClient = useQueryClient() + const dialogRef = useRef(null) const [email, setEmail] = useState('') const [emailError, setEmailError] = useState(null) - const [role, setRole] = useState<'user' | 'admin'>('user') + const [role, setRole] = useState<'user' | 'admin' | 'passthrough'>('user') const [permissionMode, setPermissionMode] = useState('allow_all') const [selectedHosts, setSelectedHosts] = useState([]) const [inviteResult, setInviteResult] = useState<{ @@ -84,19 +94,7 @@ function InviteModal({ isOpen, onClose, proxyHosts }: InviteModalProps) { return true } - // Keyboard navigation - close on Escape 
- useEffect(() => { - const handleKeyDown = (e: KeyboardEvent) => { - if (e.key === 'Escape') { - onClose() - } - } - - if (isOpen) { - document.addEventListener('keydown', handleKeyDown) - return () => document.removeEventListener('keydown', handleKeyDown) - } - }, [isOpen, onClose]) + useFocusTrap(dialogRef, isOpen, onClose) // Fetch preview when email changes useEffect(() => { @@ -170,7 +168,7 @@ function InviteModal({ isOpen, onClose, proxyHosts }: InviteModalProps) { const handleClose = () => { setEmail('') setEmailError(null) - setRole('user') + setRole('user' as 'user' | 'admin' | 'passthrough') setPermissionMode('allow_all') setSelectedHosts([]) setInviteResult(null) @@ -196,6 +194,7 @@ function InviteModal({ isOpen, onClose, proxyHosts }: InviteModalProps) { {/* Layer 3: Form content (pointer-events-auto) */} - {role === 'user' && ( + {(role === 'user' || role === 'passthrough') && ( <>