diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 9fb835a..505c3dd 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,2 +1,2 @@
# All files require review from at least one maintainer before merge.
-* @ori129 @aarbiv
+* @ori129
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 73a570a..4444894 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -8,6 +8,20 @@ jobs:
backend:
name: Backend — lint & test
runs-on: ubuntu-latest
+ services:
+ postgres:
+ image: pgvector/pgvector:pg16
+ env:
+ POSTGRES_USER: agentsorg
+ POSTGRES_PASSWORD: changeme
+ POSTGRES_DB: agentsorg_test
+ options: >-
+ --health-cmd pg_isready
+ --health-interval 10s
+ --health-timeout 5s
+ --health-retries 5
+ ports:
+ - 5432:5432
steps:
- uses: actions/checkout@v6
@@ -18,7 +32,7 @@ jobs:
cache-dependency-path: backend/requirements.txt
- name: Install dependencies
- run: pip install ruff pytest && pip install -r backend/requirements.txt
+ run: pip install ruff pytest pytest-asyncio && pip install -r backend/requirements.txt
- name: Lint
run: ruff check backend/
@@ -27,8 +41,9 @@ jobs:
run: ruff format --check backend/
- name: Tests
- # exit code 5 = no tests collected; treat as pass until test suite is built out
- run: cd backend && python -m pytest --tb=short -q || [ $? -eq 5 ]
+ env:
+ TEST_DATABASE_URL: postgresql+asyncpg://agentsorg:changeme@localhost:5432/agentsorg_test
+ run: cd backend && python -m pytest --tb=short -q
frontend:
name: Frontend — type check
diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml
index 784ca4f..ce09a46 100644
--- a/.github/workflows/cla.yml
+++ b/.github/workflows/cla.yml
@@ -30,7 +30,7 @@ jobs:
path-to-signatures: '.github/cla-signatures.json'
path-to-document: 'CLA-individual.md'
branch: 'main'
- allowlist: bot*,ori129,aarbiv
+ allowlist: bot*,ori129
# Message posted on PRs where the contributor has not signed
custom-notsigned-prcomment: |
diff --git a/.gitignore b/.gitignore
index 4f08b0c..e18fb32 100644
--- a/.gitignore
+++ b/.gitignore
@@ -43,7 +43,12 @@ backups/
.DS_Store
Thumbs.db
+# Local dev notes
+TODOS.md
+frontend/tsconfig.tsbuildinfo
+
# Site repo (separate repo)
site/
package-lock.json
package.json
+.claude/skills/gstack/
diff --git a/CLA-corporate.md b/CLA-corporate.md
index e556f84..a597f7a 100644
--- a/CLA-corporate.md
+++ b/CLA-corporate.md
@@ -1,10 +1,10 @@
# Corporate Contributor License Agreement
Thank you for your organization's interest in contributing to AgentsOrg.ai
-(the "Project"), maintained by Ori Simantov (ori129) and Alon Arbiv (aarbiv) (collectively, the "Project Owners").
+(the "Project"), maintained by Ori Simantov (ori129) (the "Project Owner").
This Corporate Contributor License Agreement ("Agreement") documents the rights
-your organization grants to the Project Owners when your employees or authorized
+your organization grants to the Project Owner when your employees or authorized
representatives submit Contributions.
---
@@ -16,7 +16,7 @@ behalf Contributions are submitted.
**"Contribution"** means any original work of authorship, including modifications
or additions to existing works, that the Corporation or its authorized employees
-intentionally submit to the Project Owners for inclusion in the Project.
+intentionally submit to the Project Owner for inclusion in the Project.
**"Authorized Employee"** means an employee or contractor of the Corporation who
has been authorized by the Corporation to submit Contributions on its behalf.
@@ -26,13 +26,13 @@ has been authorized by the Corporation to submit Contributions on its behalf.
## 2. Copyright License
Subject to the terms of this Agreement, the Corporation hereby grants to the Project
-Owner and to recipients of software distributed by the Project Owners a perpetual,
+Owner and to recipients of software distributed by the Project Owner a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to
reproduce, prepare derivative works of, publicly display, publicly perform,
sublicense, and distribute Contributions and such derivative works.
-The Corporation also grants the Project Owners the right to relicense Contributions
-under any license the Project Owners chooses, including proprietary and commercial
+The Corporation also grants the Project Owner the right to relicense Contributions
+under any license the Project Owner chooses, including proprietary and commercial
licenses.
---
@@ -40,7 +40,7 @@ licenses.
## 3. Patent License
Subject to the terms of this Agreement, the Corporation hereby grants to the Project
-Owner and to recipients of software distributed by the Project Owners a perpetual,
+Owner and to recipients of software distributed by the Project Owner a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable patent license to
make, have made, use, offer to sell, sell, import, and otherwise transfer the
Project, where such license applies only to those patent claims licensable by the
@@ -70,7 +70,7 @@ c. The Corporation has authority to bind its employees and contractors to this
Agreement.
d. The Corporation will maintain a list of Authorized Employees who may submit
- Contributions under this Agreement and will promptly notify the Project Owners
+ Contributions under this Agreement and will promptly notify the Project Owner
if an individual's authorization is revoked.
---
@@ -89,7 +89,7 @@ Agreement:
## 7. No Obligation to Include
-This Agreement does not obligate the Project Owners to include any Contribution in
+This Agreement does not obligate the Project Owner to include any Contribution in
the Project.
---
diff --git a/CLA-individual.md b/CLA-individual.md
index 2fcef24..19a30ba 100644
--- a/CLA-individual.md
+++ b/CLA-individual.md
@@ -1,10 +1,10 @@
# Individual Contributor License Agreement
Thank you for your interest in contributing to AgentsOrg.ai (the "Project"),
-maintained by Ori Simantov (ori129) and Alon Arbiv (aarbiv) (collectively, the "Project Owners").
+maintained by Ori Simantov (ori129) (the "Project Owner").
This Individual Contributor License Agreement ("Agreement") documents the rights
-you grant to the Project Owners when you submit a Contribution. Please read it
+you grant to the Project Owner when you submit a Contribution. Please read it
carefully before signing.
---
@@ -14,10 +14,10 @@ carefully before signing.
**"You"** means the individual who submits a Contribution to the Project.
**"Contribution"** means any original work of authorship, including modifications
-or additions to existing works, that you intentionally submit to the Project Owners
+or additions to existing works, that you intentionally submit to the Project Owner
for inclusion in the Project.
-**"Submit"** means any form of communication sent to the Project Owners or its
+**"Submit"** means any form of communication sent to the Project Owner or its
representatives, including but not limited to pull requests, patches, issues, or
email.
@@ -25,21 +25,21 @@ email.
## 2. Copyright License
-Subject to the terms of this Agreement, you hereby grant to the Project Owners and
-to recipients of software distributed by the Project Owners a perpetual, worldwide,
+Subject to the terms of this Agreement, you hereby grant to the Project Owner and
+to recipients of software distributed by the Project Owner a perpetual, worldwide,
non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce,
prepare derivative works of, publicly display, publicly perform, sublicense, and
distribute your Contributions and such derivative works.
-You also grant the Project Owners the right to relicense your Contributions under
-any license the Project Owners chooses, including proprietary and commercial licenses.
+You also grant the Project Owner the right to relicense your Contributions under
+any license the Project Owner chooses, including proprietary and commercial licenses.
---
## 3. Patent License
-Subject to the terms of this Agreement, you hereby grant to the Project Owners and
-to recipients of software distributed by the Project Owners a perpetual, worldwide,
+Subject to the terms of this Agreement, you hereby grant to the Project Owner and
+to recipients of software distributed by the Project Owner a perpetual, worldwide,
non-exclusive, no-charge, royalty-free, irrevocable patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Project, where
such license applies only to those patent claims licensable by you that are
@@ -77,7 +77,7 @@ d. If your employer has rights to intellectual property that you create, you hav
## 6. No Obligation to Include
-This Agreement does not obligate the Project Owners to include your Contribution in
+This Agreement does not obligate the Project Owner to include your Contribution in
the Project.
---
diff --git a/README.md b/README.md
index 9097e66..1570368 100644
--- a/README.md
+++ b/README.md
@@ -1,259 +1,228 @@
+ Open-source AI governance platform for ChatGPT Enterprise
+ Discover every GPT in your workspace. Score them. Flag risks. Develop your people.
+
-Enterprise AI governance platform for organizations running Custom GPTs on OpenAI. Connects to the OpenAI Compliance API to discover, classify, and semantically enrich every GPT in your workspace — then surfaces actionable intelligence through a leader dashboard, an employee portal, and a learning & development module.
+
+
+
+
+
+
+
+
-## Views
+---
-### Leader Dashboard
-The primary governance interface. Sidebar navigation across:
+## What is AgentsOrg.ai?
-- **Overview** — KPI strip, creation velocity, portfolio health at a glance. Five drill-down sub-pages (Builders, Processes, Departments, Maturity, Output Types) with full search, sort, and GPT-level slide-out detail.
-- **Enrichment** — Pipeline status, data source roadmap (GPTs → Conversations → Users → Audit Logs) with per-source unlock breakdown.
-- **Risk Panel** — GPTs flagged as high or critical risk, with per-flag breakdown.
-- **Duplicates** — pgvector-powered semantic clustering to detect redundant builds.
-- **Quality Scores** — Prompting quality distribution across the portfolio.
-- **Recognition** — Composite builder scores (quality 35 % · adoption 25 % · hygiene 25 % · volume 15 %).
-- **Learning** — LLM-driven course recommendations per builder, based on actual KPI gaps. Draws from a built-in OpenAI Academy catalog; custom courses can be added via URL.
-- **Workshops** — CRUD for in-person/virtual sessions with participant lists, GPT tagging, and time-based quality impact correlation.
+AgentsOrg.ai connects to the **OpenAI Compliance API** and gives your organization a complete picture of every Custom GPT it has built — scored, risk-flagged, and mapped to business processes.
-### Employee Portal
-Read-only GPT discovery for non-admin users. Search and browse GPTs available to them without accessing governance data.
+OpenAI's built-in analytics tells you **how much** people use ChatGPT. AgentsOrg.ai tells you **how good** your GPTs are — and what to do about it.
-### Setup Wizard
-4-step guided setup: API credentials → filter rules → categories → run pipeline.
+> **Self-hosted. Your data never leaves your infrastructure.**
---
-## Architecture
+## Features
-```
-┌─────────────────┐ ┌─────────────────┐ ┌──────────────┐
-│ Frontend │────▶│ Backend │────▶│ PostgreSQL │
-│ React + Vite │ │ FastAPI │ │ + pgvector │
-│ Port 3000 │ │ Port 8000 │ │ Port 5433 │
-└─────────────────┘ └─────────────────┘ └──────────────┘
- │
- ┌───────────┼─────────────┐
- ▼ ▼ ▼
- OpenAI OpenAI OpenAI
- Compliance GPT API Embeddings
- API (classify + (vectors +
- enrich) clustering)
-```
+### 🔍 GPT Registry
+Automatically discovers all Custom GPTs across your ChatGPT Enterprise workspace via the OpenAI Compliance API. Full-text search, filters, and a slide-out detail panel for every GPT.
+
+### 🧠 Semantic Enrichment (9 KPIs per GPT)
+An LLM reads each GPT's system prompt and extracts:
+
+| Signal | What it captures |
+|--------|-----------------|
+| `risk_level` + `risk_flags` | Data exposure, compliance concerns |
+| `sophistication_score` | Depth of prompt and tooling (1–5) |
+| `prompting_quality_score` | Prompt engineering quality (1–5) |
+| `business_process` | Which workflow this GPT automates |
+| `roi_potential_score` | Estimated business value (1–5) |
+| `intended_audience` | Who the GPT is built for |
+| `integration_flags` | External systems connected |
+| `output_type` | Document, Analysis, Code, Conversation, etc. |
+| `adoption_friction_score` | How easy it is for others to adopt (1–5) |
+
+### 📊 Leader Dashboard
+- **Overview** — portfolio KPIs, creation velocity, department breakdown, maturity tiers. Five drill-down pages (Builders, Processes, Departments, Maturity, Output Types).
+- **Risk Panel** — GPTs flagged high or critical, with per-flag breakdown.
+- **Duplicates** — pgvector semantic clustering to detect redundant builds before they proliferate.
+- **Quality Scores** — Prompting quality distribution across the portfolio.
-### Tech Stack
+### 🎓 Learning & Development
+- **Recognition** — Composite builder scores: quality 35% · adoption 25% · hygiene 25% · volume 15%.
+- **Learning** — LLM-driven course recommendations per builder, grounded in actual KPI gaps. Built-in OpenAI Academy catalog; custom courses via URL.
+- **Workshops** — CRUD for sessions with participant lists, GPT tagging, and time-based quality impact correlation.
-- **Frontend**: React 18, TypeScript, Tailwind CSS, TanStack Query, Vite
-- **Backend**: FastAPI, SQLAlchemy 2.0 (async), Alembic, Pydantic
-- **Database**: PostgreSQL 16 with pgvector extension
-- **Deployment**: Docker Compose (3 services)
+### 👤 Employee Portal
+Read-only GPT discovery for non-admin users — search and browse what's available without accessing governance data.
+
+### 🎯 Demo Mode
+Run the full pipeline with realistic mock data — no API keys needed. 500 GPTs across 10 departments, fully enriched with scores and rationale. One click from the onboarding screen.
---
-## Quick Start
+## Screenshots
+
+
+
+
+
+
+
+
Onboarding — Try Demo or connect your real workspace
+
AI Portfolio Overview — 500 GPTs, KPIs, department breakdown
+
+
+
+
+
+
+
Risk Panel — high/critical GPTs flagged by issue type
+
Quality Scores — sophistication, prompting quality, ROI per GPT
+
+
+
+
+
+
+
Builder Recognition — composite scores across your team
+
Employee Portal — read-only GPT discovery for the whole org
+
+
-### Prerequisites
+---
-- Docker Desktop
+## Quick Start
-### Setup
+**Prerequisites:** [Docker Desktop](https://www.docker.com/products/docker-desktop/)
```bash
-# 1. Clone and enter the repo
+# 1. Clone the repo
git clone https://github.com/ori129/agentsorg.git
cd agentsorg
# 2. Create your .env file
cp .env.example .env
-# 3. Generate a Fernet key and paste it into FERNET_KEY= in .env
+# 3. Generate a Fernet encryption key and paste it into FERNET_KEY= in .env
make fernet-key
# 4. Start all services (migrations run automatically on first boot)
make up
-# 5. Open the app
+# 5. Open the app (macOS)
open http://localhost:3000
+# or just visit http://localhost:3000 in your browser
```
-Run `make help` to see all available commands.
+Register with any email → choose **Try Demo** on the onboarding screen to explore with 500 realistic GPTs instantly, or **Connect to Production** to enter your OpenAI credentials and scan your real workspace.
-> **No API keys needed** — enable Demo mode in the header to run the full pipeline with realistic mock data.
+Run `make help` to see all available commands.
---
-## Pipeline
+## How It Works
-The pipeline runs in stages and reports live progress to the frontend:
+```
+┌─────────────────┐ ┌─────────────────┐ ┌──────────────┐
+│ Frontend │────▶│ Backend │────▶│ PostgreSQL │
+│ React + Vite │ │ FastAPI │ │ + pgvector │
+│ Port 3000 │ │ Port 8000 │ │ Port 5433 │
+└─────────────────┘ └─────────────────┘ └──────────────┘
+ │
+ ┌───────────┼─────────────┐
+ ▼ ▼ ▼
+ OpenAI OpenAI OpenAI
+ Compliance Chat API Embeddings
+ API (classify + (vectors +
+ enrich) clustering)
+```
+
+### Pipeline Stages
```
-Fetch (5–30%) → Filter (35%) → Change Detection → Classify (40–65%) → Enrich (65–72%) → Embed (75–85%) → Store (90%) → Done (100%)
+Fetch (5–30%) → Filter (35%) → Classify (40–65%) → Enrich (65–72%) → Embed (75–85%) → Store (90%) → Done (100%)
```
-**Incremental processing**: The pipeline computes a content hash (SHA-256) of each GPT's classifiable fields (name, description, instructions, tools, categories). On subsequent runs, unchanged GPTs skip classification, enrichment, and embedding — their cached results are carried forward. This avoids unnecessary OpenAI API costs.
+**Incremental processing** — GPTs are content-hashed (SHA-256) on each run. Unchanged GPTs skip classification, enrichment, and embedding, carrying forward their cached results. This avoids redundant OpenAI API costs on subsequent syncs.
-| Stage | Real Mode | Demo Mode |
+| Stage | Production | Demo |
|----------|-------------------------------|------------------------------|
| Fetch | OpenAI Compliance API | Template-based generator |
-| Classify | OpenAI GPT model | Keyword matching |
+| Classify | OpenAI Chat model | Keyword matching |
| Enrich | 9× LLM calls per GPT | Deterministic mock enricher |
| Embed | OpenAI Embeddings API | Deterministic vectors |
-Everything else (filtering, DB storage, categories, clustering, L&D) runs the same code path in both modes.
-
-### Semantic Enrichment — 9 KPIs per GPT
+### Maturity Tiers
-Each enriched GPT receives scores and rationale for:
-
-| KPI | What it captures |
-|-----|-----------------|
-| `business_process` | Which workflow this GPT automates |
-| `risk_level` + `risk_flags` | Data exposure, compliance concerns |
-| `sophistication_score` | Depth of system prompt and tooling (1–5) |
-| `prompting_quality_score` | Prompt engineering quality (1–5) |
-| `roi_potential_score` | Estimated business impact (1–5) |
-| `intended_audience` | Who the GPT is built for |
-| `integration_flags` | External systems connected |
-| `output_type` | Document, Analysis, Code, Conversation, etc. |
-| `adoption_friction_score` | How easy it is for others to use (1–5) |
-
-### Portfolio Maturity Tiers
-
-| Tier | Sophistication score | Description |
-|------|----------------------|-------------|
+| Tier | Sophistication | Description |
+|------|---------------|-------------|
| Production | ≥ 4 | Full system prompts, integrations, tested |
| Functional | 3 | Useful, room to grow |
| Experimental | ≤ 2 | Early-stage or abandoned |
-Demo mode distributes ~60 % Experimental / ~25 % Functional / ~15 % Production.
-
---
-## Demo Mode
+## Tech Stack
-- Toggle **Demo** in the header (turns amber when active)
-- Choose a preset size: Small (50), Medium (500), Large (2K), Enterprise (5K)
-- Generates realistic GPTs across 10 SaaS departments (Marketing, Sales, CS, Finance, HR, Engineering, Product, Legal, Data, IT/Security)
-- Full semantic enrichment with mock scores and rationale
-- No API keys required
+| Layer | Technology |
+|-------|-----------|
+| Frontend | React 18, TypeScript, Tailwind CSS, TanStack Query, Vite |
+| Backend | FastAPI, SQLAlchemy 2.0 (async), Alembic, Pydantic v2 |
+| Database | PostgreSQL 16 + pgvector |
+| Auth | Session-based, role-aware (`system-admin`, `ai-leader`, `employee`) |
+| Deployment | Docker Compose (3 services, zero external dependencies) |
---
-## Project Structure
+## Environment Variables
-```
-├── backend/
-│ ├── alembic/
-│ │ └── versions/ # 001–009 migrations (auto-applied on startup)
-│ ├── app/
-│ │ ├── config.py # Environment settings
-│ │ ├── database.py # Async SQLAlchemy engine
-│ │ ├── encryption.py # Fernet encrypt/decrypt for API keys
-│ │ ├── main.py # FastAPI app entry point
-│ │ ├── models/models.py # ORM models
-│ │ ├── schemas/schemas.py # Pydantic request/response models
-│ │ ├── routers/
-│ │ │ ├── admin.py # POST /admin/reset
-│ │ │ ├── categories.py # CRUD /categories
-│ │ │ ├── clustering.py # Duplicate detection via pgvector
-│ │ │ ├── configuration.py # GET/PUT /config
-│ │ │ ├── demo.py # GET/PUT /demo
-│ │ │ ├── learning.py # Recognition, recommendations, workshops
-│ │ │ ├── pipeline.py # Run, status, GPTs, history
-│ │ ├── services/
-│ │ │ ├── classifier.py # OpenAI LLM classifier
-│ │ │ ├── compliance_api.py # OpenAI Compliance API client
-│ │ │ ├── demo_state.py # In-memory demo toggle
-│ │ │ ├── embedder.py # OpenAI embeddings
-│ │ │ ├── filter_engine.py # Visibility / email / shared-user filters
-│ │ │ ├── mock_classifier.py
-│ │ │ ├── mock_data.py # ~90 GPT templates across 10 departments
-│ │ │ ├── mock_embedder.py
-│ │ │ ├── mock_fetcher.py
-│ │ │ ├── mock_semantic_enricher.py # Deterministic KPI scores for demo
-│ │ │ ├── pipeline.py # Orchestrates all stages
-│ │ │ └── semantic_enricher.py # 9 LLM calls per GPT
-│ ├── Dockerfile
-│ └── requirements.txt
-├── frontend/
-│ ├── src/
-│ │ ├── api/
-│ │ │ ├── client.ts # Fetch wrapper
-│ │ │ └── learning.ts # Typed wrappers for L&D endpoints
-│ │ ├── App.tsx # Root — Leader / Employee / Wizard views
-│ │ ├── components/
-│ │ │ ├── employee/Portal.tsx # Read-only GPT discovery
-│ │ │ ├── layout/ # Header, Stepper, NavButtons
-│ │ │ ├── leader/
-│ │ │ │ ├── Duplicates.tsx
-│ │ │ │ ├── Enrichment.tsx
-│ │ │ │ ├── GPTDrawer.tsx # Slide-out GPT detail panel
-│ │ │ │ ├── LeaderLayout.tsx
-│ │ │ │ ├── Learning.tsx
-│ │ │ │ ├── Overview.tsx
-│ │ │ │ ├── QualityScores.tsx
-│ │ │ │ ├── Recognition.tsx
-│ │ │ │ ├── RiskPanel.tsx
-│ │ │ │ ├── Sidebar.tsx
-│ │ │ │ ├── Workshops.tsx
-│ │ │ │ └── sub/ # Drill-down full-page views
-│ │ │ │ ├── BuildersPage.tsx
-│ │ │ │ ├── DepartmentsPage.tsx
-│ │ │ │ ├── MaturityPage.tsx
-│ │ │ │ ├── OutputTypesPage.tsx
-│ │ │ │ └── ProcessesPage.tsx
-│ │ │ ├── steps/ # Wizard Step 1–4
-│ │ │ └── ui/ # ResultsView, shared UI
-│ │ ├── contexts/ThemeContext.tsx
-│ │ ├── hooks/ # React Query hooks
-│ │ └── types/index.ts # TypeScript interfaces
-│ ├── Dockerfile
-│ ├── nginx.conf
-│ └── package.json
-├── scripts/
-│ ├── db_queries.sql # Useful SQL queries
-│ └── reset_registry.py # CLI tool to clear GPT data
-├── docs/
-│ └── erd.mmd # Entity-relationship diagram (Mermaid)
-├── docker-compose.yml
-└── .env.example
-```
+| Variable | Description | Default |
+|----------|-------------|---------|
+| `FERNET_KEY` | Encryption key for API credentials stored in DB | **required** — run `make fernet-key` |
+| `POSTGRES_USER` | Database user | `agentsorg` |
+| `POSTGRES_PASSWORD` | Database password | `changeme` |
+| `POSTGRES_DB` | Database name | `agentsorg` |
+| `DATABASE_URL` | Full async connection string | composed from above |
+| `BACKEND_CORS_ORIGINS` | Allowed CORS origins | `http://localhost:3000` |
---
-## API Endpoints
+## API Reference
+
+
+Configuration
-### Configuration
| Method | Endpoint | Description |
|--------|----------|-------------|
| GET | `/api/v1/config` | Get configuration |
| PUT | `/api/v1/config` | Update configuration |
| POST | `/api/v1/config/test-connection` | Test Compliance API connection |
| POST | `/api/v1/config/test-openai-connection` | Test OpenAI API connection |
+
+
+
+Pipeline
-### Pipeline
| Method | Endpoint | Description |
|--------|----------|-------------|
| POST | `/api/v1/pipeline/run` | Start pipeline |
-| GET | `/api/v1/pipeline/status` | Live progress |
+| GET | `/api/v1/pipeline/status` | Live progress + stage |
| GET | `/api/v1/pipeline/summary` | Results summary |
| GET | `/api/v1/pipeline/gpts` | List all GPTs |
| GET | `/api/v1/pipeline/history` | Sync history |
| GET | `/api/v1/pipeline/logs/{id}` | Logs for a sync run |
+
+
+
+Categories
-### Categories
| Method | Endpoint | Description |
|--------|----------|-------------|
| GET | `/api/v1/categories` | List categories |
@@ -261,15 +230,21 @@ Demo mode distributes ~60 % Experimental / ~25 % Functional / ~15 % Production.
| PUT | `/api/v1/categories/{id}` | Update category |
| DELETE | `/api/v1/categories/{id}` | Delete category |
| POST | `/api/v1/categories/seed` | Seed default categories |
+
+
+
+Clustering
-### Clustering
| Method | Endpoint | Description |
|--------|----------|-------------|
| POST | `/api/v1/clustering/run` | Run duplicate detection |
-| GET | `/api/v1/clustering/status` | Clustering status |
+| GET | `/api/v1/clustering/status` | Clustering job status |
| GET | `/api/v1/clustering/results` | Cluster groups |
+
+
+
+Learning & Development
-### Learning & Development
| Method | Endpoint | Description |
|--------|----------|-------------|
| GET | `/api/v1/learning/recognition` | Builder recognition scores |
@@ -287,33 +262,74 @@ Demo mode distributes ~60 % Experimental / ~25 % Functional / ~15 % Production.
| POST | `/api/v1/learning/workshops/{id}/tag-gpt` | Tag a GPT to workshop |
| DELETE | `/api/v1/learning/workshops/{id}/tag-gpt/{gpt_id}` | Untag a GPT |
| GET | `/api/v1/learning/workshops/{id}/impact` | Time-based quality impact |
+
+
+
+Users & Admin
-### Demo & Admin
| Method | Endpoint | Description |
|--------|----------|-------------|
+| GET | `/api/v1/users` | List workspace users |
+| POST | `/api/v1/users/import` | Import users from Compliance API |
+| PATCH | `/api/v1/users/{id}/role` | Update user system role |
| GET | `/api/v1/demo` | Get demo state |
| PUT | `/api/v1/demo` | Toggle demo mode / set size |
-| POST | `/api/v1/admin/reset` | Clear GPTs and logs |
+| POST | `/api/v1/admin/reset` | Full reset — clears GPTs, logs, categories, workshops |
+
---
-## Environment Variables
+## Project Structure
-| Variable | Description | Default |
-|----------|-------------|---------|
-| `FERNET_KEY` | Encryption key for API keys stored in DB | **required** — run `make fernet-key` |
-| `POSTGRES_USER` | Database user | `gpt_registry` |
-| `POSTGRES_PASSWORD` | Database password | `changeme` |
-| `POSTGRES_DB` | Database name | `gpt_registry` |
-| `DATABASE_URL` | Full async connection string | composed from above |
-| `BACKEND_CORS_ORIGINS` | Allowed CORS origins | `http://localhost:3000` |
+```
+├── backend/
+│ ├── alembic/versions/ # DB migrations (auto-applied on startup)
+│ ├── app/
+│ │ ├── main.py # FastAPI app entry point
+│ │ ├── models/models.py # ORM models
+│ │ ├── schemas/schemas.py # Pydantic request/response models
+│ │ ├── routers/ # One file per domain (pipeline, learning, users…)
+│ │ └── services/
+│ │ ├── pipeline.py # Orchestrates all pipeline stages
+│ │ ├── semantic_enricher.py # 9 LLM calls per GPT
+│ │ ├── compliance_api.py # OpenAI Compliance API client
+│ │ ├── embedder.py # OpenAI embeddings
+│ │ ├── classifier.py # OpenAI LLM classifier
+│ │ ├── filter_engine.py # Visibility / email / shared-user filters
+│ │ └── mock_*/ # Demo mode equivalents (no API calls)
+├── frontend/
+│ └── src/
+│ ├── App.tsx # Root — Leader / Employee views + onboarding
+│ ├── components/
+│ │ ├── auth/ # Register, Login, Onboarding screens
+│ │ ├── leader/ # Dashboard views (Overview, Risk, L&D…)
+│ │ ├── employee/ # Read-only GPT portal
+│ │ ├── steps/ # Pipeline setup wizard (Steps 1–4)
+│ │ └── layout/ # Header, Sidebar, DemoBanner
+│ ├── hooks/ # React Query hooks
+│ └── types/index.ts # TypeScript interfaces
+├── docs/
+│ ├── erd.mmd # Entity-relationship diagram (Mermaid)
+│ └── screenshots/ # README screenshots (demo data)
+├── docker-compose.yml
+├── Makefile
+└── .env.example
+```
---
## Contributing
-See [CONTRIBUTING.md](CONTRIBUTING.md) for development setup and guidelines.
+Contributions are welcome. Please open an issue first to discuss what you'd like to change.
+
+1. Fork the repo and create a branch: `git checkout -b feat/your-feature`
+2. Make your changes and ensure the app builds: `make up`
+3. Open a pull request with a clear description
+
+Run `make help` to see all available dev commands.
+
+---
## License
-This project is licensed under the **Apache License 2.0** — see [LICENSE](LICENSE) for details.
+Licensed under the **Apache License 2.0** — see [LICENSE](LICENSE) for details.
diff --git a/SECURITY.md b/SECURITY.md
index 5fda064..4605c58 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -10,8 +10,7 @@ before it is disclosed publicly.
### How to report
-Email both maintainers directly:
-- Ori Simantov — ori129@gmail.com
-- Alon Arbiv — (add email)
+Contact the maintainer directly:
+- Ori Simantov — via [GitHub](https://github.com/ori129)
Please do not use GitHub Issues or Discussions for security reports.
diff --git a/backend/alembic/versions/010_add_password.py b/backend/alembic/versions/010_add_password.py
new file mode 100644
index 0000000..b27724b
--- /dev/null
+++ b/backend/alembic/versions/010_add_password.py
@@ -0,0 +1,38 @@
+"""Add password_hash and password_temp to workspace_users
+
+Revision ID: 010
+Revises: 009
+Create Date: 2026-03-15
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+revision: str = "010"
+down_revision: Union[str, None] = "009"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ op.add_column(
+ "workspace_users",
+ sa.Column("password_hash", sa.Text(), nullable=True),
+ )
+ op.add_column(
+ "workspace_users",
+ sa.Column(
+ "password_temp",
+ sa.Boolean(),
+ nullable=False,
+ server_default=sa.text("false"),
+ ),
+ )
+
+
+def downgrade() -> None:
+ op.drop_column("workspace_users", "password_temp")
+ op.drop_column("workspace_users", "password_hash")
diff --git a/backend/alembic/versions/011_add_login_sessions.py b/backend/alembic/versions/011_add_login_sessions.py
new file mode 100644
index 0000000..3d089f2
--- /dev/null
+++ b/backend/alembic/versions/011_add_login_sessions.py
@@ -0,0 +1,48 @@
+"""Add login_sessions table
+
+Revision ID: 011
+Revises: 010
+Create Date: 2026-03-15
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+revision: str = "011"
+down_revision: Union[str, None] = "010"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ op.create_table(
+ "login_sessions",
+ sa.Column("token", sa.String(64), nullable=False),
+ sa.Column("user_id", sa.String(255), nullable=False),
+ sa.Column("expires_at", sa.DateTime(timezone=True), nullable=False),
+ sa.Column(
+ "created_at",
+ sa.DateTime(timezone=True),
+ server_default=sa.text("NOW()"),
+ nullable=False,
+ ),
+ sa.ForeignKeyConstraint(
+ ["user_id"],
+ ["workspace_users.id"],
+ ondelete="CASCADE",
+ ),
+ sa.PrimaryKeyConstraint("token"),
+ )
+ op.create_index(
+ "ix_login_sessions_expires_at",
+ "login_sessions",
+ ["expires_at"],
+ )
+
+
+def downgrade() -> None:
+ op.drop_index("ix_login_sessions_expires_at", table_name="login_sessions")
+ op.drop_table("login_sessions")
diff --git a/backend/app/auth_deps.py b/backend/app/auth_deps.py
new file mode 100644
index 0000000..83c00cf
--- /dev/null
+++ b/backend/app/auth_deps.py
@@ -0,0 +1,48 @@
+"""Shared FastAPI auth dependencies used across routers."""
+
+from datetime import datetime, timezone
+
+from fastapi import HTTPException
+from sqlalchemy import select
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm import selectinload
+
+from app.models.models import LoginSession, WorkspaceUser
+
+
+async def require_system_admin(
+ authorization: str | None, db: AsyncSession
+) -> WorkspaceUser:
+ """Validate Bearer token and assert caller is system-admin."""
+ if not authorization or not authorization.startswith("Bearer "):
+ raise HTTPException(status_code=401, detail="Authentication required")
+ token = authorization[7:]
+ result = await db.execute(
+ select(LoginSession)
+ .options(selectinload(LoginSession.user))
+ .where(LoginSession.token == token)
+ )
+ session = result.scalar_one_or_none()
+ if not session or session.expires_at < datetime.now(timezone.utc):
+ raise HTTPException(status_code=401, detail="Invalid or expired session")
+ if session.user.system_role != "system-admin":
+ raise HTTPException(
+ status_code=403, detail="Only system admins can perform this action"
+ )
+ return session.user
+
+
+async def require_auth(authorization: str | None, db: AsyncSession) -> WorkspaceUser:
+ """Validate Bearer token — any authenticated user."""
+ if not authorization or not authorization.startswith("Bearer "):
+ raise HTTPException(status_code=401, detail="Authentication required")
+ token = authorization[7:]
+ result = await db.execute(
+ select(LoginSession)
+ .options(selectinload(LoginSession.user))
+ .where(LoginSession.token == token)
+ )
+ session = result.scalar_one_or_none()
+ if not session or session.expires_at < datetime.now(timezone.utc):
+ raise HTTPException(status_code=401, detail="Invalid or expired session")
+ return session.user
diff --git a/backend/app/auth_utils.py b/backend/app/auth_utils.py
new file mode 100644
index 0000000..2413215
--- /dev/null
+++ b/backend/app/auth_utils.py
@@ -0,0 +1,20 @@
+"""Password hashing helpers (bcrypt)."""
+
+import bcrypt as _bcrypt
+
+
+def hash_password(password: str) -> str:
+    """Hash a plaintext password with a fresh per-call random salt."""
+    return _bcrypt.hashpw(password.encode(), _bcrypt.gensalt()).decode()
+
+
+def verify_password(password: str, hashed: str) -> bool:
+    """Check a plaintext password against a stored bcrypt hash.
+
+    Returns False instead of raising when the stored hash is malformed
+    (bcrypt.checkpw raises ValueError on an invalid salt), so auth
+    endpoints fail closed with 401 rather than an unhandled 500.
+    """
+    try:
+        return _bcrypt.checkpw(password.encode(), hashed.encode())
+    except ValueError:
+        return False
diff --git a/backend/app/models/models.py b/backend/app/models/models.py
index ee14c75..f4b2e01 100644
--- a/backend/app/models/models.py
+++ b/backend/app/models/models.py
@@ -223,3 +223,23 @@ class WorkspaceUser(Base):
imported_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True), server_default=func.now()
)
+ password_hash: Mapped[str | None] = mapped_column(Text, nullable=True)
+ password_temp: Mapped[bool] = mapped_column(Boolean, default=False)
+
+
+class LoginSession(Base):
+ __tablename__ = "login_sessions"
+
+ token: Mapped[str] = mapped_column(String(64), primary_key=True)
+ user_id: Mapped[str] = mapped_column(
+ String(255),
+ ForeignKey("workspace_users.id", ondelete="CASCADE"),
+ nullable=False,
+ )
+ expires_at: Mapped[datetime] = mapped_column(
+ DateTime(timezone=True), nullable=False
+ )
+ created_at: Mapped[datetime] = mapped_column(
+ DateTime(timezone=True), server_default=func.now()
+ )
+ user: Mapped["WorkspaceUser"] = relationship()
diff --git a/backend/app/routers/admin.py b/backend/app/routers/admin.py
index 1d06212..111a8fb 100644
--- a/backend/app/routers/admin.py
+++ b/backend/app/routers/admin.py
@@ -1,18 +1,38 @@
-from fastapi import APIRouter, Depends
+from fastapi import APIRouter, Depends, Header
from sqlalchemy import delete
from sqlalchemy.ext.asyncio import AsyncSession
+from app.auth_deps import require_system_admin
from app.database import get_db
-from app.models.models import GPT, PipelineLogEntry
+from app.models.models import (
+ GPT,
+ Category,
+ PipelineLogEntry,
+ SyncLog,
+ Workshop,
+ WorkshopGPTTag,
+ WorkshopParticipant,
+)
+from app.services.demo_state import set_demo_state
router = APIRouter(tags=["admin"])
@router.post("/admin/reset")
-async def reset_registry(db: AsyncSession = Depends(get_db)):
+async def reset_registry(
+ authorization: str | None = Header(default=None),
+ db: AsyncSession = Depends(get_db),
+):
+ await require_system_admin(authorization, db)
+ # Delete in dependency order to avoid FK violations
+ await db.execute(delete(WorkshopGPTTag))
+ await db.execute(delete(WorkshopParticipant))
+ await db.execute(delete(Workshop))
await db.execute(delete(GPT))
await db.execute(delete(PipelineLogEntry))
+ await db.execute(delete(SyncLog))
+ await db.execute(delete(Category))
await db.commit()
- return {
- "message": "Registry reset. GPTs and pipeline logs cleared. Sync history and categories preserved."
- }
+ # Reset in-memory demo flag so auto-restore doesn't re-enable it
+ set_demo_state(False, "medium")
+ return {"message": "Full reset complete."}
diff --git a/backend/app/routers/auth.py b/backend/app/routers/auth.py
index 0814f6c..906bc35 100644
--- a/backend/app/routers/auth.py
+++ b/backend/app/routers/auth.py
@@ -1,15 +1,22 @@
-import uuid
import logging
+import secrets
+import uuid
+from datetime import datetime, timedelta, timezone
-from fastapi import APIRouter, Depends, HTTPException
-from sqlalchemy import select, func
+from fastapi import APIRouter, Depends, Header, HTTPException
+from sqlalchemy import func, select
from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm import selectinload
+from app.auth_utils import hash_password, verify_password
from app.database import get_db
-from app.models.models import WorkspaceUser
+from app.models.models import LoginSession, WorkspaceUser
from app.schemas.schemas import (
AuthStatus,
+ ChangePasswordRequest,
+ CheckEmailResponse,
LoginRequest,
+ LoginResponse,
RegisterRequest,
WorkspaceUserRead,
)
@@ -17,6 +24,51 @@
router = APIRouter(tags=["auth"])
logger = logging.getLogger(__name__)
+SESSION_TTL_DAYS = 30
+
+
+# ---------------------------------------------------------------------------
+# Internal helpers
+# ---------------------------------------------------------------------------
+
+
+def _extract_bearer(authorization: str | None) -> str:
+ if not authorization or not authorization.startswith("Bearer "):
+ raise HTTPException(
+ status_code=401, detail="Missing or invalid Authorization header"
+ )
+ return authorization[7:]
+
+
+async def _get_valid_session(token: str, db: AsyncSession) -> LoginSession:
+ result = await db.execute(
+ select(LoginSession)
+ .options(selectinload(LoginSession.user))
+ .where(LoginSession.token == token)
+ )
+ session = result.scalar_one_or_none()
+ if not session:
+ raise HTTPException(status_code=401, detail="Invalid session")
+ if session.expires_at < datetime.now(timezone.utc):
+ await db.delete(session)
+ await db.commit()
+ raise HTTPException(status_code=401, detail="Session expired")
+ return session
+
+
+async def _create_session(user_id: str, db: AsyncSession) -> str:
+ token = secrets.token_urlsafe(32)
+ expires_at = datetime.now(timezone.utc) + timedelta(days=SESSION_TTL_DAYS)
+ session = LoginSession(token=token, user_id=user_id, expires_at=expires_at)
+ db.add(session)
+ await db.flush()
+ return token
+
+
+# ---------------------------------------------------------------------------
+# Endpoints
+# ---------------------------------------------------------------------------
+
@router.get("/auth/status", response_model=AuthStatus)
async def auth_status(db: AsyncSession = Depends(get_db)):
@@ -24,12 +76,21 @@ async def auth_status(db: AsyncSession = Depends(get_db)):
return AuthStatus(initialized=count > 0)
-@router.post("/auth/register", response_model=WorkspaceUserRead)
+@router.post("/auth/register", response_model=LoginResponse)
async def register(body: RegisterRequest, db: AsyncSession = Depends(get_db)):
count = await db.scalar(select(func.count()).select_from(WorkspaceUser))
if count > 0:
raise HTTPException(status_code=409, detail="System already initialized")
-
+ if len(body.password) < 8:
+ raise HTTPException(
+ status_code=422, detail="Password must be at least 8 characters"
+ )
+ try:
+ password_hash = hash_password(body.password)
+ except Exception:
+ raise HTTPException(
+ status_code=500, detail="Registration failed — contact admin"
+ )
user = WorkspaceUser(
id=f"local-{uuid.uuid4().hex[:12]}",
email=body.email.strip().lower(),
@@ -37,15 +98,30 @@ async def register(body: RegisterRequest, db: AsyncSession = Depends(get_db)):
role="account-owner",
status="active",
system_role="system-admin",
+ password_hash=password_hash,
+ password_temp=False,
)
db.add(user)
+ await db.flush()
+ token = await _create_session(user.id, db)
await db.commit()
await db.refresh(user)
logger.info(f"First admin registered: {user.email}")
- return user
+ return LoginResponse(user=WorkspaceUserRead.model_validate(user), token=token)
-@router.post("/auth/login", response_model=WorkspaceUserRead)
+@router.post("/auth/check-email", response_model=CheckEmailResponse)
+async def check_email(body: LoginRequest, db: AsyncSession = Depends(get_db)):
+ result = await db.execute(
+ select(WorkspaceUser).where(WorkspaceUser.email == body.email.strip().lower())
+ )
+ user = result.scalar_one_or_none()
+ # Always return 200 — never reveal whether email exists
+ requires_password = bool(user and user.password_hash)
+ return CheckEmailResponse(requires_password=requires_password)
+
+
+@router.post("/auth/login", response_model=LoginResponse)
async def login(body: LoginRequest, db: AsyncSession = Depends(get_db)):
result = await db.execute(
select(WorkspaceUser).where(WorkspaceUser.email == body.email.strip().lower())
@@ -53,4 +129,80 @@ async def login(body: LoginRequest, db: AsyncSession = Depends(get_db)):
user = result.scalar_one_or_none()
if not user:
raise HTTPException(status_code=404, detail="User not found")
+ if user.password_hash:
+ if not body.password:
+ raise HTTPException(status_code=401, detail="Password required")
+ if not verify_password(body.password, user.password_hash):
+ raise HTTPException(status_code=401, detail="Incorrect password")
+ token = await _create_session(user.id, db)
+ await db.commit()
+ return LoginResponse(user=WorkspaceUserRead.model_validate(user), token=token)
+
+
+@router.get("/auth/me", response_model=WorkspaceUserRead)
+async def get_me(
+ authorization: str | None = Header(default=None),
+ db: AsyncSession = Depends(get_db),
+):
+ token = _extract_bearer(authorization)
+ session = await _get_valid_session(token, db)
+ # Roll the expiry on each /me call
+ try:
+ session.expires_at = datetime.now(timezone.utc) + timedelta(
+ days=SESSION_TTL_DAYS
+ )
+ await db.commit()
+ await db.refresh(session.user)
+ except Exception:
+ logger.warning("Failed to roll session expiry, continuing anyway")
+ return session.user
+
+
+@router.delete("/auth/session", status_code=204)
+async def logout(
+ authorization: str | None = Header(default=None),
+ db: AsyncSession = Depends(get_db),
+):
+ if not authorization or not authorization.startswith("Bearer "):
+ return # Already logged out / no session
+ token = authorization[7:]
+ session = await db.get(LoginSession, token)
+ if session:
+ await db.delete(session)
+ await db.commit()
+
+
+@router.post("/auth/change-password", response_model=WorkspaceUserRead)
+async def change_password(
+ body: ChangePasswordRequest,
+ authorization: str | None = Header(default=None),
+ db: AsyncSession = Depends(get_db),
+):
+ token = _extract_bearer(authorization)
+ session = await _get_valid_session(token, db)
+ user = session.user
+
+ if not user.password_hash:
+ raise HTTPException(status_code=400, detail="No password set for this account")
+
+ # Skip old-password check when account is in forced-change mode
+ if not user.password_temp:
+ if not body.old_password:
+ raise HTTPException(status_code=422, detail="Current password is required")
+ if not verify_password(body.old_password, user.password_hash):
+ raise HTTPException(status_code=401, detail="Current password is incorrect")
+
+ if len(body.new_password) < 8:
+ raise HTTPException(
+ status_code=422, detail="New password must be at least 8 characters"
+ )
+ try:
+ user.password_hash = hash_password(body.new_password)
+ except Exception:
+ raise HTTPException(
+ status_code=500, detail="Password change failed — contact admin"
+ )
+ user.password_temp = False
+ await db.commit()
+ await db.refresh(user)
return user
diff --git a/backend/app/routers/categories.py b/backend/app/routers/categories.py
index 8ee7503..57a43f9 100644
--- a/backend/app/routers/categories.py
+++ b/backend/app/routers/categories.py
@@ -1,7 +1,8 @@
-from fastapi import APIRouter, Depends, HTTPException
+from fastapi import APIRouter, Depends, Header, HTTPException
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
+from app.auth_deps import require_system_admin
from app.database import get_db
from app.models.models import Category
from app.schemas.schemas import CategoryCreate, CategoryRead, CategoryUpdate
@@ -81,7 +82,12 @@ async def list_categories(db: AsyncSession = Depends(get_db)):
@router.post("/categories", response_model=CategoryRead, status_code=201)
-async def create_category(data: CategoryCreate, db: AsyncSession = Depends(get_db)):
+async def create_category(
+ data: CategoryCreate,
+ authorization: str | None = Header(default=None),
+ db: AsyncSession = Depends(get_db),
+):
+ await require_system_admin(authorization, db)
cat = Category(**data.model_dump())
db.add(cat)
await db.commit()
@@ -91,8 +97,12 @@ async def create_category(data: CategoryCreate, db: AsyncSession = Depends(get_d
@router.put("/categories/{category_id}", response_model=CategoryRead)
async def update_category(
- category_id: int, data: CategoryUpdate, db: AsyncSession = Depends(get_db)
+ category_id: int,
+ data: CategoryUpdate,
+ authorization: str | None = Header(default=None),
+ db: AsyncSession = Depends(get_db),
):
+ await require_system_admin(authorization, db)
result = await db.execute(select(Category).where(Category.id == category_id))
cat = result.scalar_one_or_none()
if not cat:
@@ -105,7 +115,12 @@ async def update_category(
@router.delete("/categories/{category_id}", status_code=204)
-async def delete_category(category_id: int, db: AsyncSession = Depends(get_db)):
+async def delete_category(
+ category_id: int,
+ authorization: str | None = Header(default=None),
+ db: AsyncSession = Depends(get_db),
+):
+ await require_system_admin(authorization, db)
result = await db.execute(select(Category).where(Category.id == category_id))
cat = result.scalar_one_or_none()
if not cat:
@@ -115,7 +130,11 @@ async def delete_category(category_id: int, db: AsyncSession = Depends(get_db)):
@router.post("/categories/seed", response_model=list[CategoryRead])
-async def seed_categories(db: AsyncSession = Depends(get_db)):
+async def seed_categories(
+ authorization: str | None = Header(default=None),
+ db: AsyncSession = Depends(get_db),
+):
+ await require_system_admin(authorization, db)
for i, cat_data in enumerate(DEFAULT_CATEGORIES):
existing = await db.execute(
select(Category).where(Category.name == cat_data["name"])
diff --git a/backend/app/routers/demo.py b/backend/app/routers/demo.py
index 7c8c128..71c1823 100644
--- a/backend/app/routers/demo.py
+++ b/backend/app/routers/demo.py
@@ -1,6 +1,10 @@
-from fastapi import APIRouter
+from fastapi import APIRouter, Depends
from pydantic import BaseModel
+from sqlalchemy import select
+from sqlalchemy.ext.asyncio import AsyncSession
+from app.database import get_db
+from app.models.models import SyncLog
from app.services.demo_state import SIZE_MAP, get_demo_state, set_demo_state
router = APIRouter(tags=["demo"])
@@ -10,6 +14,7 @@ class DemoStateRead(BaseModel):
enabled: bool
size: str
gpt_count: int
+ last_sync_was_demo: bool = False
class DemoStateUpdate(BaseModel):
@@ -17,21 +22,42 @@ class DemoStateUpdate(BaseModel):
size: str = "medium"
+async def _last_sync_was_demo(db: AsyncSession) -> bool:
+ result = await db.execute(
+ select(SyncLog)
+ .where(SyncLog.status.in_(["completed", "failed"]))
+ .order_by(SyncLog.finished_at.desc())
+ .limit(1)
+ )
+ last_sync = result.scalar_one_or_none()
+ if not last_sync or not last_sync.configuration_snapshot:
+ return False
+ return bool(last_sync.configuration_snapshot.get("demo_mode", False))
+
+
@router.get("/demo", response_model=DemoStateRead)
-async def get_demo():
+async def get_demo(db: AsyncSession = Depends(get_db)):
state = get_demo_state()
+ was_demo = await _last_sync_was_demo(db)
+ # Auto-restore in-memory state if server restarted mid-demo
+ if was_demo and not state["enabled"]:
+ set_demo_state(True, state["size"])
+ state = get_demo_state()
return DemoStateRead(
enabled=state["enabled"],
size=state["size"],
gpt_count=SIZE_MAP[state["size"]],
+ last_sync_was_demo=was_demo,
)
@router.put("/demo", response_model=DemoStateRead)
-async def update_demo(body: DemoStateUpdate):
+async def update_demo(body: DemoStateUpdate, db: AsyncSession = Depends(get_db)):
state = set_demo_state(body.enabled, body.size)
+ was_demo = await _last_sync_was_demo(db)
return DemoStateRead(
enabled=state["enabled"],
size=state["size"],
gpt_count=SIZE_MAP[state["size"]],
+ last_sync_was_demo=was_demo,
)
diff --git a/backend/app/routers/learning.py b/backend/app/routers/learning.py
index 6509c7f..cbfa182 100644
--- a/backend/app/routers/learning.py
+++ b/backend/app/routers/learning.py
@@ -551,8 +551,10 @@ async def recommend_employee(body: dict, db: AsyncSession = Depends(get_db)):
else "Domain context: not yet enriched"
)
+ safe_email = email.replace("\n", " ").replace("\r", " ")
+ safe_name = (scores.name or "unknown").replace("\n", " ").replace("\r", " ")
profile = f"""
-Builder: {email} (display name: {scores.name or "unknown"})
+Builder: {safe_email} (display name: {safe_name})
GPTs built: {scores.gpt_count}
Domain context (what this person actually builds):
diff --git a/backend/app/routers/pipeline.py b/backend/app/routers/pipeline.py
index 81d5702..f2aecb6 100644
--- a/backend/app/routers/pipeline.py
+++ b/backend/app/routers/pipeline.py
@@ -104,10 +104,24 @@ def _extract_keywords(query: str) -> list[str]:
@router.post("/pipeline/run")
-async def start_pipeline():
+async def start_pipeline(db: AsyncSession = Depends(get_db)):
+ from app.services.demo_state import is_demo_mode
+
status = get_pipeline_status()
if status["running"]:
raise HTTPException(status_code=409, detail="Pipeline is already running")
+ if not is_demo_mode():
+ config = await db.get(Configuration, 1)
+ if not config or not config.workspace_id:
+ raise HTTPException(
+ status_code=400,
+ detail="Workspace ID not configured. Go to Step 1 (API Configuration) and enter your OpenAI Workspace ID before running the pipeline.",
+ )
+ if not config.compliance_api_key:
+ raise HTTPException(
+ status_code=400,
+ detail="No Compliance API key configured. Go to Step 1 (API Configuration) and add your OpenAI Compliance API key before running the pipeline.",
+ )
# Start pipeline as a concurrent task (not BackgroundTasks which runs after response)
asyncio.create_task(run_pipeline())
# Wait for the pipeline to initialize and create sync_log
@@ -346,9 +360,9 @@ async def search_gpts(
text(
"SELECT id FROM gpts "
"WHERE visibility != 'just_me' AND embedding IS NOT NULL "
- f"ORDER BY embedding <=> '{vec_str}'::vector "
+ "ORDER BY embedding <=> :vec::vector "
"LIMIT 20"
- )
+ ).bindparams(vec=vec_str)
)
top_ids = [row[0] for row in vec_result.fetchall()]
if top_ids:
diff --git a/backend/app/routers/users.py b/backend/app/routers/users.py
index cba74b2..caa7a5f 100644
--- a/backend/app/routers/users.py
+++ b/backend/app/routers/users.py
@@ -1,13 +1,23 @@
import logging
+import secrets
-from fastapi import APIRouter, Depends, HTTPException
+from fastapi import APIRouter, Depends, Header, HTTPException
from sqlalchemy import func, select
from sqlalchemy.ext.asyncio import AsyncSession
+from app.auth_deps import require_auth, require_system_admin
+from app.auth_utils import hash_password
from app.database import get_db
from app.encryption import decrypt
from app.models.models import Configuration, WorkspaceUser
-from app.schemas.schemas import SystemRoleUpdate, UserImportResult, WorkspaceUserRead
+from app.schemas.schemas import (
+ InviteUserRequest,
+ InviteUserResponse,
+ ResetPasswordResponse,
+ SystemRoleUpdate,
+ UserImportResult,
+ WorkspaceUserRead,
+)
from app.services.demo_state import is_demo_mode
router = APIRouter(tags=["users"])
@@ -16,11 +26,67 @@
VALID_SYSTEM_ROLES = {"system-admin", "ai-leader", "employee"}
+@router.post("/users/invite", response_model=InviteUserResponse)
+async def invite_user(
+ body: InviteUserRequest,
+ authorization: str | None = Header(default=None),
+ db: AsyncSession = Depends(get_db),
+):
+ caller = await require_system_admin(authorization, db)
+
+ email = body.email.strip().lower()
+ if not email:
+ raise HTTPException(status_code=422, detail="Email is required")
+
+ if body.system_role not in VALID_SYSTEM_ROLES:
+ raise HTTPException(
+ status_code=422,
+ detail=f"Invalid role. Must be one of: {', '.join(VALID_SYSTEM_ROLES)}",
+ )
+
+ existing = await db.execute(
+ select(WorkspaceUser).where(WorkspaceUser.email == email)
+ )
+ if existing.scalar_one_or_none():
+ raise HTTPException(
+ status_code=409, detail="A user with this email already exists"
+ )
+
+ user_id = f"local-{secrets.token_hex(8)}"
+ temp_password: str | None = None
+ password_hash: str | None = None
+ password_temp = False
+
+ if body.system_role in ("system-admin", "ai-leader"):
+ temp_password = secrets.token_urlsafe(12)
+ password_hash = hash_password(temp_password)
+ password_temp = True
+
+ user = WorkspaceUser(
+ id=user_id,
+ email=email,
+ name=body.name,
+ role="standard-user",
+ status="active",
+ system_role=body.system_role,
+ password_hash=password_hash,
+ password_temp=password_temp,
+ )
+ db.add(user)
+ await db.commit()
+ await db.refresh(user)
+ logger.info(f"User {email} invited as {body.system_role} by {caller.email}")
+ return InviteUserResponse(user=user, temp_password=temp_password)
+
+
@router.post("/users/import", response_model=UserImportResult)
async def import_users(db: AsyncSession = Depends(get_db)):
config = await db.get(Configuration, 1)
if not config or not config.workspace_id:
- raise ValueError("Configuration not set — configure workspace ID first")
+ raise HTTPException(
+ status_code=400,
+ detail="Workspace ID not configured. Go to Pipeline Setup → API Configuration and enter your Workspace ID first.",
+ )
if is_demo_mode():
from app.services.mock_fetcher import MockComplianceAPIClient
@@ -29,9 +95,12 @@ async def import_users(db: AsyncSession = Depends(get_db)):
else:
from app.services.compliance_api import ComplianceAPIClient
- api_key = (
- decrypt(config.compliance_api_key) if config.compliance_api_key else ""
- )
+ if not config.compliance_api_key:
+ raise HTTPException(
+ status_code=400,
+ detail="No Compliance API key configured. Go to Pipeline Setup → API Configuration and add your OpenAI Compliance API key first.",
+ )
+ api_key = decrypt(config.compliance_api_key)
client = ComplianceAPIClient(api_key=api_key, base_url=config.base_url)
try:
@@ -104,7 +173,11 @@ async def import_users(db: AsyncSession = Depends(get_db)):
@router.get("/users", response_model=list[WorkspaceUserRead])
-async def list_users(db: AsyncSession = Depends(get_db)):
+async def list_users(
+ authorization: str | None = Header(default=None),
+ db: AsyncSession = Depends(get_db),
+):
+ await require_auth(authorization, db)
result = await db.execute(select(WorkspaceUser).order_by(WorkspaceUser.email))
return result.scalars().all()
@@ -113,8 +186,10 @@ async def list_users(db: AsyncSession = Depends(get_db)):
async def update_user_role(
user_id: str,
body: SystemRoleUpdate,
+ authorization: str | None = Header(default=None),
db: AsyncSession = Depends(get_db),
):
+ await require_system_admin(authorization, db)
if body.system_role not in VALID_SYSTEM_ROLES:
raise HTTPException(
status_code=422,
@@ -143,3 +218,23 @@ async def update_user_role(
await db.refresh(user)
logger.info(f"Updated system_role for {user.email} to {body.system_role}")
return user
+
+
+@router.post("/users/{user_id}/reset-password", response_model=ResetPasswordResponse)
+async def reset_user_password(
+ user_id: str,
+ authorization: str | None = Header(default=None),
+ db: AsyncSession = Depends(get_db),
+):
+ caller = await require_system_admin(authorization, db)
+
+ user = await db.get(WorkspaceUser, user_id)
+ if not user:
+ raise HTTPException(status_code=404, detail="User not found")
+
+ temp_password = secrets.token_urlsafe(12)
+ user.password_hash = hash_password(temp_password)
+ user.password_temp = True
+ await db.commit()
+ logger.info(f"Password reset for {user.email} by {caller.email}")
+ return ResetPasswordResponse(temp_password=temp_password)
diff --git a/backend/app/schemas/schemas.py b/backend/app/schemas/schemas.py
index feeef33..5e322d8 100644
--- a/backend/app/schemas/schemas.py
+++ b/backend/app/schemas/schemas.py
@@ -259,6 +259,7 @@ class WorkspaceUserRead(BaseModel):
status: str
system_role: str
imported_at: datetime
+ password_temp: bool = False
model_config = ConfigDict(from_attributes=True)
@@ -275,11 +276,42 @@ class AuthStatus(BaseModel):
class RegisterRequest(BaseModel):
email: str
+ password: str
class LoginRequest(BaseModel):
email: str
+ password: str | None = None
+
+
+class CheckEmailResponse(BaseModel):
+ requires_password: bool
+
+
+class LoginResponse(BaseModel):
+ user: WorkspaceUserRead
+ token: str
+
+
+class ChangePasswordRequest(BaseModel):
+ old_password: str | None = None
+ new_password: str
+
+
+class ResetPasswordResponse(BaseModel):
+ temp_password: str
class SystemRoleUpdate(BaseModel):
system_role: str # system-admin | ai-leader | employee
+
+
+class InviteUserRequest(BaseModel):
+ email: str
+ name: str | None = None
+ system_role: str = "employee"
+
+
+class InviteUserResponse(BaseModel):
+ user: WorkspaceUserRead
+ temp_password: str | None = None
diff --git a/backend/app/services/mock_semantic_enricher.py b/backend/app/services/mock_semantic_enricher.py
index d5e2da1..fd5503e 100644
--- a/backend/app/services/mock_semantic_enricher.py
+++ b/backend/app/services/mock_semantic_enricher.py
@@ -175,7 +175,6 @@ def _tier(gpt: dict) -> int:
def _enrich_single(gpt: dict) -> dict:
seed = _seed(gpt)
tier = _tier(gpt)
- instr_len = _instruction_len(gpt)
tool_count = _tool_count(gpt)
cat = _category(gpt)
name = _name_lower(gpt)
@@ -213,7 +212,11 @@ def _enrich_single(gpt: dict) -> dict:
if pq == 1
else "Basic role assignment; no format spec or constraints defined."
)
- pq_flags = ["no_output_format", "no_constraints", "no_persona"] if pq == 1 else ["no_output_format", "no_constraints", "no_examples"]
+ pq_flags = (
+ ["no_output_format", "no_constraints", "no_persona"]
+ if pq == 1
+ else ["no_output_format", "no_constraints", "no_examples"]
+ )
elif tier == 2:
pq = 2 if seed % 4 == 0 else 3
pq_rationale = (
diff --git a/backend/app/services/pipeline.py b/backend/app/services/pipeline.py
index 46b5687..052f71c 100644
--- a/backend/app/services/pipeline.py
+++ b/backend/app/services/pipeline.py
@@ -42,6 +42,7 @@ def _content_hash(gpt_data: dict) -> str:
]
return hashlib.sha256("|".join(parts).encode()).hexdigest()
+
_lock = asyncio.Lock()
_current_status: dict = {
"running": False,
@@ -207,6 +208,7 @@ async def on_page(gpts: list[dict], page: int):
# Snapshot existing GPTs for change detection
from sqlalchemy.orm import selectinload
+
existing_result = await db.execute(
select(GPT).options(
selectinload(GPT.primary_category),
@@ -247,8 +249,12 @@ async def on_page(gpts: list[dict], page: int):
for idx in unchanged_indices:
prev = prev_gpts[filtered_gpts[idx]["id"]]
classifications[idx] = {
- "primary_category": prev.primary_category.name if prev.primary_category else None,
- "secondary_category": prev.secondary_category.name if prev.secondary_category else None,
+ "primary_category": prev.primary_category.name
+ if prev.primary_category
+ else None,
+ "secondary_category": prev.secondary_category.name
+ if prev.secondary_category
+ else None,
"confidence": prev.classification_confidence,
"summary": prev.llm_summary,
"use_case_description": prev.use_case_description,
@@ -349,7 +355,9 @@ async def on_page(gpts: list[dict], page: int):
"output_type": prev.output_type,
"adoption_friction_score": prev.adoption_friction_score,
"adoption_friction_rationale": prev.adoption_friction_rationale,
- "semantic_enriched_at": prev.semantic_enriched_at.isoformat() if prev.semantic_enriched_at else None,
+ "semantic_enriched_at": prev.semantic_enriched_at.isoformat()
+ if prev.semantic_enriched_at
+ else None,
}
if classification_enabled and has_openai_key and changed_indices:
@@ -371,7 +379,9 @@ async def on_page(gpts: list[dict], page: int):
enricher = SemanticEnricher(openai_key, config.classification_model)
changed_gpts_for_enrich = [filtered_gpts[i] for i in changed_indices]
changed_cls_for_enrich = [classifications[i] for i in changed_indices]
- changed_enrichments = await enricher.enrich_batch(changed_gpts_for_enrich, changed_cls_for_enrich)
+ changed_enrichments = await enricher.enrich_batch(
+ changed_gpts_for_enrich, changed_cls_for_enrich
+ )
for ci, enr in enumerate(changed_enrichments):
enrichments[changed_indices[ci]] = enr
enriched_count = sum(1 for e in enrichments if e is not None)
@@ -392,7 +402,7 @@ async def on_page(gpts: list[dict], page: int):
db,
sync_log.id,
"info",
- f"All GPTs unchanged, reusing cached enrichment data",
+ "All GPTs unchanged, reusing cached enrichment data",
)
# Step 3.6: Normalize business process names (real mode only — demo strings are already consistent)
@@ -492,7 +502,7 @@ async def on_page(gpts: list[dict], page: int):
db,
sync_log.id,
"info",
- f"All GPTs unchanged, reusing cached embeddings",
+ "All GPTs unchanged, reusing cached embeddings",
)
# Step 5: Store GPTs
diff --git a/backend/pytest.ini b/backend/pytest.ini
new file mode 100644
index 0000000..78c5011
--- /dev/null
+++ b/backend/pytest.ini
@@ -0,0 +1,3 @@
+[pytest]
+asyncio_mode = auto
+testpaths = tests
diff --git a/backend/requirements-test.txt b/backend/requirements-test.txt
new file mode 100644
index 0000000..2779062
--- /dev/null
+++ b/backend/requirements-test.txt
@@ -0,0 +1,4 @@
+pytest==8.3.5
+pytest-asyncio==0.24.0
+httpx==0.28.1
+aiosqlite==0.20.0
diff --git a/backend/requirements.txt b/backend/requirements.txt
index 87e3bd3..9116ad5 100644
--- a/backend/requirements.txt
+++ b/backend/requirements.txt
@@ -9,3 +9,4 @@ cryptography==46.0.5
httpx==0.28.1
openai>=1.66.0
python-multipart==0.0.22
+bcrypt==4.3.0
diff --git a/backend/tests/__init__.py b/backend/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/backend/tests/conftest.py b/backend/tests/conftest.py
new file mode 100644
index 0000000..d73324c
--- /dev/null
+++ b/backend/tests/conftest.py
@@ -0,0 +1,54 @@
+import os
+
+import pytest_asyncio
+from httpx import ASGITransport, AsyncClient
+from sqlalchemy import text
+from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
+
+from app.main import app
+from app.database import get_db
+
+# ---------------------------------------------------------------------------
+# Database setup — use real PostgreSQL (pgvector-enabled)
+# ---------------------------------------------------------------------------
+TEST_DATABASE_URL = os.getenv(
+ "TEST_DATABASE_URL",
+ "postgresql+asyncpg://agentsorg:changeme@postgres:5432/agentsorg_test",
+)
+
+
+def _build_engine():
+ return create_async_engine(TEST_DATABASE_URL, echo=False)
+
+
+@pytest_asyncio.fixture(scope="function")
+async def db_session():
+ # Import models *after* the pgvector patch so Base.metadata is clean
+ from app.models.models import Base # noqa: PLC0415
+
+ engine = _build_engine()
+ async with engine.begin() as conn:
+ await conn.execute(text("CREATE EXTENSION IF NOT EXISTS vector"))
+ await conn.run_sync(Base.metadata.create_all)
+
+ factory = async_sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
+ async with factory() as session:
+ yield session
+ await session.rollback()
+
+ async with engine.begin() as conn:
+ await conn.run_sync(Base.metadata.drop_all)
+ await engine.dispose()
+
+
+@pytest_asyncio.fixture(scope="function")
+async def client(db_session: AsyncSession):
+ async def _override_get_db():
+ yield db_session
+
+ app.dependency_overrides[get_db] = _override_get_db
+ async with AsyncClient(
+ transport=ASGITransport(app=app), base_url="http://test"
+ ) as ac:
+ yield ac
+ app.dependency_overrides.clear()
diff --git a/backend/tests/test_auth.py b/backend/tests/test_auth.py
new file mode 100644
index 0000000..14ec0d1
--- /dev/null
+++ b/backend/tests/test_auth.py
@@ -0,0 +1,314 @@
+"""
+Auth endpoint tests — T1 through T20.
+
+Runs against a real PostgreSQL database (pgvector-enabled) — see conftest.py.
+Set TEST_DATABASE_URL to point the suite at a different PostgreSQL instance.
+"""
+
+from httpx import AsyncClient
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+ADMIN_EMAIL = "admin@example.com"
+ADMIN_PASSWORD = "supersecret1"
+
+EMPLOYEE_EMAIL = "employee@example.com"
+
+
+async def _register(
+ client: AsyncClient, email: str = ADMIN_EMAIL, password: str = ADMIN_PASSWORD
+):
+ resp = await client.post(
+ "/api/v1/auth/register",
+ json={"email": email, "password": password},
+ )
+ assert resp.status_code == 200, resp.text
+ return resp.json()
+
+
+async def _login(
+ client: AsyncClient, email: str = ADMIN_EMAIL, password: str = ADMIN_PASSWORD
+):
+ resp = await client.post(
+ "/api/v1/auth/login",
+ json={"email": email, "password": password},
+ )
+ assert resp.status_code == 200, resp.text
+ return resp.json()
+
+
+def _auth_header(token: str) -> dict:
+ return {"Authorization": f"Bearer {token}"}
+
+
+# ---------------------------------------------------------------------------
+# T1 — /auth/status returns initialized=false on empty DB
+# ---------------------------------------------------------------------------
+async def test_T1_status_uninitialized(client: AsyncClient):
+ resp = await client.get("/api/v1/auth/status")
+ assert resp.status_code == 200
+ assert resp.json()["initialized"] is False
+
+
+# ---------------------------------------------------------------------------
+# T2 — /auth/register creates the first admin and returns token + user
+# ---------------------------------------------------------------------------
+async def test_T2_register_first_admin(client: AsyncClient):
+ data = await _register(client)
+ assert "token" in data
+ assert data["user"]["email"] == ADMIN_EMAIL
+ assert data["user"]["system_role"] == "system-admin"
+ assert data["user"]["password_temp"] is False
+
+
+# ---------------------------------------------------------------------------
+# T3 — /auth/status returns initialized=true after registration
+# ---------------------------------------------------------------------------
+async def test_T3_status_initialized_after_register(client: AsyncClient):
+ await _register(client)
+ resp = await client.get("/api/v1/auth/status")
+ assert resp.status_code == 200
+ assert resp.json()["initialized"] is True
+
+
+# ---------------------------------------------------------------------------
+# T4 — Second /auth/register call returns 409
+# ---------------------------------------------------------------------------
+async def test_T4_register_conflict(client: AsyncClient):
+ await _register(client)
+ resp = await client.post(
+ "/api/v1/auth/register",
+ json={"email": "other@example.com", "password": "password123"},
+ )
+ assert resp.status_code == 409
+
+
+# ---------------------------------------------------------------------------
+# T5 — Register with short password returns 422
+# ---------------------------------------------------------------------------
+async def test_T5_register_short_password(client: AsyncClient):
+ resp = await client.post(
+ "/api/v1/auth/register",
+ json={"email": ADMIN_EMAIL, "password": "short"},
+ )
+ assert resp.status_code == 422
+
+
+# ---------------------------------------------------------------------------
+# T6 — /auth/login with correct password returns token + user
+# ---------------------------------------------------------------------------
+async def test_T6_login_success(client: AsyncClient):
+ await _register(client)
+ data = await _login(client)
+ assert "token" in data
+ assert data["user"]["email"] == ADMIN_EMAIL
+
+
+# ---------------------------------------------------------------------------
+# T7 — /auth/login with wrong password returns 401
+# ---------------------------------------------------------------------------
+async def test_T7_login_wrong_password(client: AsyncClient):
+ await _register(client)
+ resp = await client.post(
+ "/api/v1/auth/login",
+ json={"email": ADMIN_EMAIL, "password": "wrongpassword"},
+ )
+ assert resp.status_code == 401
+
+
+# ---------------------------------------------------------------------------
+# T8 — /auth/login for unknown email returns 404
+# ---------------------------------------------------------------------------
+async def test_T8_login_unknown_email(client: AsyncClient):
+ await _register(client)
+ resp = await client.post(
+ "/api/v1/auth/login",
+ json={"email": "nobody@example.com", "password": "anything"},
+ )
+ assert resp.status_code == 404
+
+
+# ---------------------------------------------------------------------------
+# T9 — /auth/check-email returns requires_password=true for password user
+# ---------------------------------------------------------------------------
+async def test_T9_check_email_with_password(client: AsyncClient):
+ await _register(client)
+ resp = await client.post(
+ "/api/v1/auth/check-email",
+ json={"email": ADMIN_EMAIL},
+ )
+ assert resp.status_code == 200
+ assert resp.json()["requires_password"] is True
+
+
+# ---------------------------------------------------------------------------
+# T10 — /auth/check-email returns requires_password=false for unknown email
+# ---------------------------------------------------------------------------
+async def test_T10_check_email_unknown(client: AsyncClient):
+ await _register(client)
+ resp = await client.post(
+ "/api/v1/auth/check-email",
+ json={"email": "ghost@example.com"},
+ )
+ assert resp.status_code == 200
+ assert resp.json()["requires_password"] is False
+
+
+# ---------------------------------------------------------------------------
+# T11 — /auth/me returns user when valid token is provided
+# ---------------------------------------------------------------------------
+async def test_T11_get_me_valid_token(client: AsyncClient):
+ data = await _register(client)
+ token = data["token"]
+ resp = await client.get("/api/v1/auth/me", headers=_auth_header(token))
+ assert resp.status_code == 200
+ assert resp.json()["email"] == ADMIN_EMAIL
+
+
+# ---------------------------------------------------------------------------
+# T12 — /auth/me returns 401 without Authorization header
+# ---------------------------------------------------------------------------
+async def test_T12_get_me_no_token(client: AsyncClient):
+ await _register(client)
+ resp = await client.get("/api/v1/auth/me")
+ assert resp.status_code == 401
+
+
+# ---------------------------------------------------------------------------
+# T13 — /auth/me returns 401 with invalid token
+# ---------------------------------------------------------------------------
+async def test_T13_get_me_invalid_token(client: AsyncClient):
+ await _register(client)
+ resp = await client.get(
+ "/api/v1/auth/me", headers={"Authorization": "Bearer totally-fake-token"}
+ )
+ assert resp.status_code == 401
+
+
+# ---------------------------------------------------------------------------
+# T14 — DELETE /auth/session logs out (token becomes invalid)
+# ---------------------------------------------------------------------------
+async def test_T14_logout(client: AsyncClient):
+ data = await _register(client)
+ token = data["token"]
+ # Confirm we're logged in
+ me = await client.get("/api/v1/auth/me", headers=_auth_header(token))
+ assert me.status_code == 200
+ # Logout
+ logout = await client.delete("/api/v1/auth/session", headers=_auth_header(token))
+ assert logout.status_code == 204
+ # Token should now be invalid
+ me2 = await client.get("/api/v1/auth/me", headers=_auth_header(token))
+ assert me2.status_code == 401
+
+
+# ---------------------------------------------------------------------------
+# T15 — DELETE /auth/session without token returns 204 (idempotent)
+# ---------------------------------------------------------------------------
+async def test_T15_logout_no_token(client: AsyncClient):
+ resp = await client.delete("/api/v1/auth/session")
+ assert resp.status_code == 204
+
+
+# ---------------------------------------------------------------------------
+# T16 — POST /auth/change-password works with correct old password
+# ---------------------------------------------------------------------------
+async def test_T16_change_password_success(client: AsyncClient):
+ data = await _register(client)
+ token = data["token"]
+ resp = await client.post(
+ "/api/v1/auth/change-password",
+ json={"old_password": ADMIN_PASSWORD, "new_password": "newpassword99"},
+ headers=_auth_header(token),
+ )
+ assert resp.status_code == 200
+ # Can now log in with new password
+ login_data = await _login(client, password="newpassword99")
+ assert "token" in login_data
+
+
+# ---------------------------------------------------------------------------
+# T17 — POST /auth/change-password fails with wrong old password
+# ---------------------------------------------------------------------------
+async def test_T17_change_password_wrong_old(client: AsyncClient):
+ data = await _register(client)
+ token = data["token"]
+ resp = await client.post(
+ "/api/v1/auth/change-password",
+ json={"old_password": "wrongpassword", "new_password": "newpassword99"},
+ headers=_auth_header(token),
+ )
+ assert resp.status_code == 401
+
+
+# ---------------------------------------------------------------------------
+# T18 — POST /auth/change-password skips old-password check when temp=True
+# ---------------------------------------------------------------------------
+async def test_T18_change_password_temp_skips_old(client: AsyncClient):
+ # Register admin
+ reg_data = await _register(client)
+ admin_token = reg_data["token"]
+ admin_user_id = reg_data["user"]["id"]
+
+ # Rather than creating a second user, reuse the admin account: resetting
+ # the admin's own password via the admin endpoint marks it as temporary
+ # (password_temp=True), which is exactly the state this test needs.
+ # This avoids requiring direct DB access from the test.
+ # Reset admin's own password (as admin):
+ reset_resp = await client.post(
+ f"/api/v1/users/{admin_user_id}/reset-password",
+ headers=_auth_header(admin_token),
+ )
+ assert reset_resp.status_code == 200
+ temp_pw = reset_resp.json()["temp_password"]
+
+ # Log in with temp password — get new token
+ new_login = await client.post(
+ "/api/v1/auth/login",
+ json={"email": ADMIN_EMAIL, "password": temp_pw},
+ )
+ assert new_login.status_code == 200
+ new_token = new_login.json()["token"]
+ assert new_login.json()["user"]["password_temp"] is True
+
+ # Change password WITHOUT providing old_password (allowed because temp=True)
+ change_resp = await client.post(
+ "/api/v1/auth/change-password",
+ json={"new_password": "brandnew123"},
+ headers=_auth_header(new_token),
+ )
+ assert change_resp.status_code == 200
+ assert change_resp.json()["password_temp"] is False
+
+
+# ---------------------------------------------------------------------------
+# T19 — POST /users/{id}/reset-password requires system-admin
+# ---------------------------------------------------------------------------
+async def test_T19_reset_password_requires_admin(client: AsyncClient):
+ # Register admin, then add a non-admin user manually
+ reg_data = await _register(client)
+ admin_token = reg_data["token"]
+ admin_id = reg_data["user"]["id"]
+
+ # A non-admin caller would require creating a second user; instead verify
+ # the positive path — the endpoint succeeds when called by the system-admin.
+ # The unauthenticated 401 path is covered separately by T20.
+ reset = await client.post(
+ f"/api/v1/users/{admin_id}/reset-password",
+ headers=_auth_header(admin_token),
+ )
+ assert reset.status_code == 200
+ assert "temp_password" in reset.json()
+
+
+# ---------------------------------------------------------------------------
+# T20 — POST /users/{id}/reset-password without auth returns 401
+# ---------------------------------------------------------------------------
+async def test_T20_reset_password_no_auth(client: AsyncClient):
+ data = await _register(client)
+ user_id = data["user"]["id"]
+ resp = await client.post(f"/api/v1/users/{user_id}/reset-password")
+ assert resp.status_code == 401
diff --git a/docs/screenshots/ss_duplicates.png b/docs/screenshots/ss_duplicates.png
new file mode 100644
index 0000000..110e0cd
Binary files /dev/null and b/docs/screenshots/ss_duplicates.png differ
diff --git a/docs/screenshots/ss_employee.png b/docs/screenshots/ss_employee.png
new file mode 100644
index 0000000..de9b205
Binary files /dev/null and b/docs/screenshots/ss_employee.png differ
diff --git a/docs/screenshots/ss_learning.png b/docs/screenshots/ss_learning.png
new file mode 100644
index 0000000..a72ee0c
Binary files /dev/null and b/docs/screenshots/ss_learning.png differ
diff --git a/docs/screenshots/ss_onboarding.png b/docs/screenshots/ss_onboarding.png
new file mode 100644
index 0000000..4f6725c
Binary files /dev/null and b/docs/screenshots/ss_onboarding.png differ
diff --git a/docs/screenshots/ss_overview.png b/docs/screenshots/ss_overview.png
new file mode 100644
index 0000000..71ffc8f
Binary files /dev/null and b/docs/screenshots/ss_overview.png differ
diff --git a/docs/screenshots/ss_quality.png b/docs/screenshots/ss_quality.png
new file mode 100644
index 0000000..5fd4351
Binary files /dev/null and b/docs/screenshots/ss_quality.png differ
diff --git a/docs/screenshots/ss_recognition.png b/docs/screenshots/ss_recognition.png
new file mode 100644
index 0000000..8d51baa
Binary files /dev/null and b/docs/screenshots/ss_recognition.png differ
diff --git a/docs/screenshots/ss_register.png b/docs/screenshots/ss_register.png
new file mode 100644
index 0000000..47b0100
Binary files /dev/null and b/docs/screenshots/ss_register.png differ
diff --git a/docs/screenshots/ss_risk.png b/docs/screenshots/ss_risk.png
new file mode 100644
index 0000000..b4cec44
Binary files /dev/null and b/docs/screenshots/ss_risk.png differ
diff --git a/docs/screenshots/ss_workshops.png b/docs/screenshots/ss_workshops.png
new file mode 100644
index 0000000..b9f1387
Binary files /dev/null and b/docs/screenshots/ss_workshops.png differ
diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx
index 1b0be75..cb47a29 100644
--- a/frontend/src/App.tsx
+++ b/frontend/src/App.tsx
@@ -1,8 +1,10 @@
import { useState, useEffect } from "react";
+import { useQueryClient } from "@tanstack/react-query";
import { ThemeProvider } from "./contexts/ThemeContext";
import { AuthProvider, useAuth } from "./contexts/AuthContext";
import { useGlobalPipelineWatcher } from "./hooks/usePipeline";
import { useDemoState, useUpdateDemoState } from "./hooks/useDemo";
+import { api } from "./api/client";
import Header from "./components/layout/Header";
import DemoBanner from "./components/layout/DemoBanner";
import LeaderLayout from "./components/leader/LeaderLayout";
@@ -10,6 +12,7 @@ import Portal from "./components/employee/Portal";
import RegisterScreen from "./components/auth/RegisterScreen";
import LoginScreen from "./components/auth/LoginScreen";
import OnboardingScreen from "./components/auth/OnboardingScreen";
+import ForceChangePassword from "./components/auth/ForceChangePassword";
export type TopView = "leader" | "employee";
@@ -22,6 +25,7 @@ function AppInner() {
const { data: demoState } = useDemoState();
const updateDemo = useUpdateDemoState();
+ const queryClient = useQueryClient();
useGlobalPipelineWatcher();
@@ -50,6 +54,11 @@ function AppInner() {
if (state === "register") return <RegisterScreen />;
if (state === "login") return <LoginScreen />;
+ // Force password change when account is using a temporary password
+ if (typeof state === "object" && state.password_temp) {
+ return <ForceChangePassword />;
+ }
+
// Onboarding choice screen — shown right after first registration
if (showOnboarding) {
return (
@@ -71,9 +80,13 @@ function AppInner() {
const canSeeLeader = systemRole === "system-admin" || systemRole === "ai-leader";
const isDemoActive = demoState?.enabled ?? false;
- const handleSwitchToProduction = () => {
+ const handleSwitchToProduction = async () => {
+ await api.resetRegistry();
updateDemo.mutate({ enabled: false, size: demoState?.size ?? "medium" });
- setGoToSetup(true);
+ queryClient.clear();
+ setComingFromDemo(false);
+ setGoToSetup(false);
+ setShowOnboarding(true);
};
return (
@@ -95,6 +108,7 @@ function AppInner() {
{ setGoToSetup(false); setComingFromDemo(false); }}
+ onSwitchToProduction={handleSwitchToProduction}
/>
)}
diff --git a/frontend/src/api/client.ts b/frontend/src/api/client.ts
index b1cc4d3..08126b4 100644
--- a/frontend/src/api/client.ts
+++ b/frontend/src/api/client.ts
@@ -1,87 +1,179 @@
+import type {
+ AuthStatus,
+ Category,
+ CheckEmailResponse,
+ Configuration,
+ DemoState,
+ GPTItem,
+ InviteUserResponse,
+ LoginResponse,
+ PipelineLogEntry,
+ PipelineStatus,
+ PipelineSummary,
+ SyncLog,
+ TestConnectionResult,
+ UserImportResult,
+ WorkspaceUser,
+} from "../types";
+
const BASE = "/api/v1";
-async function request(
- path: string,
- options?: RequestInit,
-): Promise {
+const SESSION_KEY = "session_token";
+
+function getStoredToken(): string | null {
+ return localStorage.getItem(SESSION_KEY);
+}
+
+async function request<T>(path: string, options?: RequestInit): Promise<T> {
+ const token = getStoredToken();
+ const authHeader: Record<string, string> =
+ token ? { Authorization: `Bearer ${token}` } : {};
+
const res = await fetch(`${BASE}${path}`, {
- headers: { "Content-Type": "application/json", ...options?.headers },
+ headers: {
+ "Content-Type": "application/json",
+ ...authHeader,
+ ...(options?.headers as Record<string, string> | undefined),
+ },
...options,
});
+
if (!res.ok) {
const body = await res.json().catch(() => ({}));
throw new Error(body.detail || `Request failed: ${res.status}`);
}
+
+ // 204 No Content — return undefined cast to T
+ if (res.status === 204) return undefined as unknown as T;
+
return res.json();
}
export const api = {
- getAuthStatus: () => request("/auth/status"),
- register: (email: string) =>
- request("/auth/register", {
+ // ------------------------------------------------------------------
+ // Auth
+ // ------------------------------------------------------------------
+ getAuthStatus: () => request<AuthStatus>("/auth/status"),
+
+ register: (email: string, password: string) =>
+ request<LoginResponse>("/auth/register", {
method: "POST",
- body: JSON.stringify({ email }),
+ body: JSON.stringify({ email, password }),
}),
- login: (email: string) =>
- request("/auth/login", {
+
+ checkEmail: (email: string) =>
+ request<CheckEmailResponse>("/auth/check-email", {
method: "POST",
body: JSON.stringify({ email }),
}),
+
+ login: (email: string, password?: string) =>
+ request<LoginResponse>("/auth/login", {
+ method: "POST",
+ body: JSON.stringify({ email, password: password ?? null }),
+ }),
+
+ getMe: () => request<WorkspaceUser>("/auth/me"),
+
+ logoutSession: () =>
+ request<void>("/auth/session", { method: "DELETE" }),
+
+ changePassword: (oldPassword: string | undefined, newPassword: string) =>
+ request<WorkspaceUser>("/auth/change-password", {
+ method: "POST",
+ body: JSON.stringify({
+ old_password: oldPassword ?? null,
+ new_password: newPassword,
+ }),
+ }),
+
+ resetUserPassword: (userId: string) =>
+ request<{ temp_password: string }>(`/users/${userId}/reset-password`, {
+ method: "POST",
+ }),
+
+ // ------------------------------------------------------------------
+ // Users
+ // ------------------------------------------------------------------
updateUserRole: (userId: string, system_role: string) =>
- request(`/users/${userId}/role`, {
+ request(`/users/${userId}/role`, {
method: "PATCH",
body: JSON.stringify({ system_role }),
}),
- getConfig: () => request("/config"),
- updateConfig: (data: Partial) =>
- request("/config", {
+ getUsers: () => request<WorkspaceUser[]>("/users"),
+
+ importUsers: () =>
+ request<UserImportResult>("/users/import", { method: "POST" }),
+
+ inviteUser: (email: string, name: string | undefined, system_role: string) =>
+ request<InviteUserResponse>("/users/invite", {
+ method: "POST",
+ body: JSON.stringify({ email, name: name || null, system_role }),
+ }),
+
+ // ------------------------------------------------------------------
+ // Configuration
+ // ------------------------------------------------------------------
+ getConfig: () => request<Configuration>("/config"),
+ updateConfig: (data: Partial<Configuration>) =>
+ request<Configuration>("/config", {
method: "PUT",
body: JSON.stringify(data),
}),
testConnection: () =>
- request("/config/test-connection", {
+ request<TestConnectionResult>("/config/test-connection", {
method: "POST",
}),
testOpenaiConnection: () =>
- request("/config/test-openai-connection", {
+ request<TestConnectionResult>("/config/test-openai-connection", {
method: "POST",
}),
- getCategories: () => request("/categories"),
- createCategory: (data: Partial) =>
- request("/categories", {
+ // ------------------------------------------------------------------
+ // Categories
+ // ------------------------------------------------------------------
+ getCategories: () => request<Category[]>("/categories"),
+ createCategory: (data: Partial<Category>) =>
+ request<Category>("/categories", {
method: "POST",
body: JSON.stringify(data),
}),
- updateCategory: (id: number, data: Partial) =>
- request(`/categories/${id}`, {
+ updateCategory: (id: number, data: Partial<Category>) =>
+ request<Category>(`/categories/${id}`, {
method: "PUT",
body: JSON.stringify(data),
}),
deleteCategory: (id: number) =>
request(`/categories/${id}`, { method: "DELETE" }),
seedCategories: () =>
- request("/categories/seed", { method: "POST" }),
+ request("/categories/seed", { method: "POST" }),
- runPipeline: () => request("/pipeline/run", { method: "POST" }),
- getPipelineStatus: () => request("/pipeline/status"),
+ // ------------------------------------------------------------------
+ // Pipeline
+ // ------------------------------------------------------------------
+ runPipeline: () => request("/pipeline/run", { method: "POST" }),
+ getPipelineStatus: () => request<PipelineStatus>("/pipeline/status"),
getPipelineLogs: (syncLogId: number) =>
- request(`/pipeline/logs/${syncLogId}`),
- getPipelineSummary: () => request("/pipeline/summary"),
- getPipelineGPTs: () => request("/pipeline/gpts"),
- getPipelineHistory: () => request("/pipeline/history"),
+ request<PipelineLogEntry[]>(`/pipeline/logs/${syncLogId}`),
+ getPipelineSummary: () => request<PipelineSummary>("/pipeline/summary"),
+ getPipelineGPTs: () => request<GPTItem[]>("/pipeline/gpts"),
+ getPipelineHistory: () => request<SyncLog[]>("/pipeline/history"),
+ // ------------------------------------------------------------------
+ // Admin
+ // ------------------------------------------------------------------
resetRegistry: () => request<{ message: string }>("/admin/reset", { method: "POST" }),
- getUsers: () => request("/users"),
- importUsers: () =>
- request("/users/import", { method: "POST" }),
-
- getDemoState: () => request("/demo"),
+ // ------------------------------------------------------------------
+ // Demo
+ // ------------------------------------------------------------------
+ getDemoState: () => request<DemoState>("/demo"),
updateDemoState: (data: { enabled: boolean; size: string }) =>
- request("/demo", {
+ request<DemoState>("/demo", {
method: "PUT",
body: JSON.stringify(data),
}),
};
+
+export { SESSION_KEY };
diff --git a/frontend/src/components/auth/ForceChangePassword.tsx b/frontend/src/components/auth/ForceChangePassword.tsx
new file mode 100644
index 0000000..12046ee
--- /dev/null
+++ b/frontend/src/components/auth/ForceChangePassword.tsx
@@ -0,0 +1,158 @@
+import { useState } from "react";
+import { api } from "../../api/client";
+import { useAuth } from "../../contexts/AuthContext";
+
+/**
+ * Full-screen blocking overlay shown when the current user has a temporary
+ * password (password_temp === true). They must set a new permanent password
+ * before they can use the application.
+ */
+export default function ForceChangePassword() {
+ const { refreshUser, logout } = useAuth();
+ const [newPassword, setNewPassword] = useState("");
+ const [confirmPassword, setConfirmPassword] = useState("");
+ const [error, setError] = useState("");
+ const [loading, setLoading] = useState(false);
+
+ const handleSubmit = async (e: React.FormEvent) => {
+ e.preventDefault();
+ setError("");
+
+ if (newPassword.length < 8) {
+ setError("Password must be at least 8 characters");
+ return;
+ }
+ if (newPassword !== confirmPassword) {
+ setError("Passwords do not match");
+ return;
+ }
+
+ setLoading(true);
+ try {
+ // old_password is undefined — backend skips the check when password_temp=true
+ await api.changePassword(undefined, newPassword);
+ // Refresh the user object so password_temp becomes false and gate lifts
+ await refreshUser();
+ } catch (err) {
+ setError((err as Error).message);
+ } finally {
+ setLoading(false);
+ }
+ };
+
+ return (
+
+
+ {/* Logo */}
+
+
+
+ AgentsOrg.ai
+
+
+
+
+ {/* Warning badge */}
+
+
+ Temporary password — action required
+
+
+
+ Set a new password
+
+
+ Your account is using a temporary password. Please set a permanent
+ password to continue.
+
- Enter your email to access the AI Transformation Intelligence dashboard.
+ {step === "email"
+ ? "Enter your email to access the AI Transformation Intelligence dashboard."
+ : `Enter your password for ${email}.`}
-
+ ) : (
+
+
+
+ )}
-
+
Self-hosted · Your data stays on your infrastructure
diff --git a/frontend/src/components/auth/RegisterScreen.tsx b/frontend/src/components/auth/RegisterScreen.tsx
index dc49379..0c0ef09 100644
--- a/frontend/src/components/auth/RegisterScreen.tsx
+++ b/frontend/src/components/auth/RegisterScreen.tsx
@@ -4,20 +4,32 @@ import { useAuth } from "../../contexts/AuthContext";
export default function RegisterScreen() {
const { register } = useAuth();
const [email, setEmail] = useState("");
+ const [password, setPassword] = useState("");
+ const [confirmPassword, setConfirmPassword] = useState("");
const [error, setError] = useState("");
const [loading, setLoading] = useState(false);
const handleSubmit = async (e: React.FormEvent) => {
e.preventDefault();
setError("");
- const trimmed = email.trim().toLowerCase();
- if (!trimmed || !trimmed.includes("@")) {
+
+ const trimmedEmail = email.trim().toLowerCase();
+ if (!trimmedEmail || !trimmedEmail.includes("@")) {
setError("Please enter a valid email address");
return;
}
+ if (password.length < 8) {
+ setError("Password must be at least 8 characters");
+ return;
+ }
+ if (password !== confirmPassword) {
+ setError("Passwords do not match");
+ return;
+ }
+
setLoading(true);
try {
- await register(trimmed);
+ await register(trimmedEmail, password);
} catch (err) {
setError((err as Error).message);
} finally {
@@ -26,25 +38,54 @@ export default function RegisterScreen() {
};
return (
-
+
-
{/* Logo + brand */}
-
-
-
+
+
+
- AgentsOrg.ai
+
+ AgentsOrg.ai
+
{/* Card */}
-
+
Create your admin account
- You're the first user — this account becomes the System Admin. Once set up, you can connect to the OpenAI Compliance API and invite your team.
+ You're the first user — this account becomes the System Admin. Once set up,
+ you can connect to the OpenAI Compliance API and invite your team.
-
+
Self-hosted · Your data stays on your infrastructure
diff --git a/frontend/src/components/leader/Duplicates.tsx b/frontend/src/components/leader/Duplicates.tsx
index b00fb12..33fc48f 100644
--- a/frontend/src/components/leader/Duplicates.tsx
+++ b/frontend/src/components/leader/Duplicates.tsx
@@ -17,7 +17,10 @@ export default function Duplicates({ gpts }: DuplicatesProps) {
setError(null);
try {
const runRes = await fetch(`${API}/run`, { method: "POST" });
- if (!runRes.ok) throw new Error("Failed to start clustering");
+ if (!runRes.ok) {
+ const body = await runRes.json().catch(() => ({}));
+ throw new Error(body.detail || "Failed to start duplicate detection. Make sure the pipeline has run and GPTs are loaded.");
+ }
// Poll for results
let attempts = 0;
@@ -34,7 +37,7 @@ export default function Duplicates({ gpts }: DuplicatesProps) {
setTimeout(poll, 1500);
} else {
setStatus("idle");
- setError("Clustering timed out or failed");
+ setError("Duplicate detection timed out. This can happen with large datasets — try again, or run the pipeline first to ensure GPTs are loaded.");
}
};
setTimeout(poll, 1000);
diff --git a/frontend/src/components/leader/LeaderLayout.tsx b/frontend/src/components/leader/LeaderLayout.tsx
index 7a43b9a..9830980 100644
--- a/frontend/src/components/leader/LeaderLayout.tsx
+++ b/frontend/src/components/leader/LeaderLayout.tsx
@@ -21,9 +21,10 @@ import { useQueryClient } from "@tanstack/react-query";
interface LeaderLayoutProps {
initialPage?: LeaderPage;
onSetupNavigated?: () => void;
+ onSwitchToProduction?: () => void;
}
-export default function LeaderLayout({ initialPage, onSetupNavigated }: LeaderLayoutProps) {
+export default function LeaderLayout({ initialPage, onSetupNavigated, onSwitchToProduction }: LeaderLayoutProps) {
const { systemRole } = useAuth();
const isAdmin = systemRole === "system-admin";
const queryClient = useQueryClient();
@@ -72,7 +73,7 @@ export default function LeaderLayout({ initialPage, onSetupNavigated }: LeaderLa
isAdmin={isAdmin}
/>
- {page === "overview" && }
+ {page === "overview" && }
{page === "overview:builders" && setPage("overview")} />}
{page === "overview:processes" && setPage("overview")} />}
{page === "overview:departments" && setPage("overview")} />}
diff --git a/frontend/src/components/leader/Overview.tsx b/frontend/src/components/leader/Overview.tsx
index 6e84af3..5effaa2 100644
--- a/frontend/src/components/leader/Overview.tsx
+++ b/frontend/src/components/leader/Overview.tsx
@@ -3,10 +3,12 @@ import { useQuery } from "@tanstack/react-query";
import type { ClusterGroup, GPTItem } from "../../types";
import GPTDrawer, { type DrawerFilter } from "./GPTDrawer";
import type { LeaderPage } from "./Sidebar";
+import { useDemoState } from "../../hooks/useDemo";
interface OverviewProps {
gpts: GPTItem[];
onSetPage: (p: LeaderPage) => void;
+ onSwitchToProduction?: () => void;
}
// ── Data derivation ───────────────────────────────────────────────────────────
@@ -271,9 +273,12 @@ function ViewAllLink({ label, onClick }: { label: string; onClick: () => void })
// ── Main ──────────────────────────────────────────────────────────────────────
-export default function Overview({ gpts, onSetPage }: OverviewProps) {
+export default function Overview({ gpts, onSetPage, onSwitchToProduction }: OverviewProps) {
const d = useOverviewData(gpts);
const [drawer, setDrawer] = useState(null);
+ const { data: demoState } = useDemoState();
+ const isDemoActive = demoState?.enabled ?? false;
+
const { data: clusters = [] } = useQuery({
queryKey: ["clustering-results"],
queryFn: () => fetch("/api/v1/clustering/results").then((r) => r.json()),
@@ -308,6 +313,33 @@ export default function Overview({ gpts, onSetPage }: OverviewProps) {
+ You are promoting{" "}
+ {user.name || user.email}{" "}
+ to AI Leader. A temporary password will be generated so they can log in.
+ You will need to share it with them.
+
+ {error && (
+
{error}
+ )}
+
+
+
+
+ >
+ ) : (
+ <>
+
+ {user.name || user.email} has been
+ promoted. Share this one-time password with them:
+
+
+ {tempPassword}
+
+
+
+ This password is shown once. The user will be prompted to change it on first login.
+
+ Add a user directly without importing from the OpenAI Compliance API.
+ {needsPassword && " A temporary password will be generated for privileged roles."}
+
+
+ >
+ ) : (
+ <>
+
+
+ {inviteMutation.data.user.name || inviteMutation.data.user.email}
+ {" "}
+ has been added as{" "}
+
+ {SYSTEM_ROLE_LABELS[inviteMutation.data.user.system_role as SystemRole]}
+ .
+
+ {inviteMutation.data.temp_password && (
+ <>
+
+ Share this one-time password with them:
+
+
+
+ {inviteMutation.data.temp_password}
+
+
+
+
+ This password is shown once. The user will be prompted to change it on first login.
+
+ These signals power the Risk Panel, Quality Scores, Maturity breakdown, and L&D recommendations. Requires ~9 API calls per GPT.
+
+
+
- Enable Classification
+
+ Enable deep analysis
+
+ {classificationEnabled ? "On — each GPT will be analyzed during the pipeline run." : "Off — GPTs will be fetched and categorized, but not analyzed. You can enable this later."}
+