Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
2a97ce1
chore: setup baseline for PR1 migration
mlx93 Dec 4, 2025
3b43090
feat(PR1): Vercel AI SDK Foundation
mlx93 Dec 4, 2025
4f2e0cf
PR1 complete: Direct API support, EditorLayout integration, test page…
mlx93 Dec 4, 2025
83537b2
feat(PR1.5): Add AI SDK tools and Go HTTP server for tool execution
mlx93 Dec 4, 2025
473a6db
fix: Remove node-fetch import, use native fetch (Node.js 18+)
mlx93 Dec 4, 2025
943a009
PR1.61: Fix AI SDK body parameter stale data issue
mlx93 Dec 4, 2025
8a5e9cd
chore: Move PR1.5 docs to UnchartedTerritory repo
mlx93 Dec 4, 2025
fdfa765
PR1.65: UI Feature Parity Implementation
mlx93 Dec 5, 2025
377ad71
PR1.7 Prereq: Fix double processing + AddFileToChartPending
mlx93 Dec 5, 2025
5e5c3d0
PR1.7: Centrifugo integration, Pending UI, Commit/Discard
mlx93 Dec 5, 2025
abf69ce
PR2.0: Integrate AI SDK into main workspace path
mlx93 Dec 5, 2025
b3fa0a2
PR2.0: Deprecate test-ai-chat path, fix AI SDK integration for revisi…
mlx93 Dec 5, 2025
18e1b53
Fix Accept/Reject buttons visibility in AI SDK mode + unit test fixes
mlx93 Dec 5, 2025
1eef035
PR3.0: Implement full feature parity - plan workflow, intent routing,…
mlx93 Dec 6, 2025
72c2dfe
PR3.0: Wire proceedPlanAction to PlanChatMessage for AI SDK plans
mlx93 Dec 6, 2025
f534d5d
PR3.0: Add status validation and reset-on-failure to proceedPlanAction
mlx93 Dec 6, 2025
4b84c40
PR3.0: Fix Centrifugo to allow metadata updates for streaming messages
mlx93 Dec 6, 2025
bc6b607
PR3.0/3.1: AI SDK UX improvements and buffered tool calls fix
mlx93 Dec 6, 2025
e4ad96e
WIP: PR3.2 feature parity improvements (in progress)
mlx93 Dec 6, 2025
c2527a5
PR3.2: Two-phase plan/execute workflow parity
mlx93 Dec 7, 2025
affe86e
PR3.2: Centered chat layout until plan execution
mlx93 Dec 7, 2025
6bbba4d
PR3.3: AI SDK execution path for text-only plans
mlx93 Dec 7, 2025
271e595
PR3.3: Fix execute-via-ai-sdk and PlanChatMessage refinements
mlx93 Dec 7, 2025
09b1e98
PR3.3: Additional execute-via-ai-sdk refinements
mlx93 Dec 7, 2025
2ac9d2e
PR3.3: Update prompts and execute-via-ai-sdk
mlx93 Dec 7, 2025
16c81bc
PR3.3: Update execution prompt
mlx93 Dec 7, 2025
7e53f4c
PR3.3: Refine execute-via-ai-sdk action
mlx93 Dec 7, 2025
e453ddb
fix: E2E test improvements - fix login helper, placeholder text, stri…
mlx93 Dec 7, 2025
fe1dbcb
fix(PR3.4): Add text-only plan fallback for AI SDK path
mlx93 Dec 7, 2025
d3330d4
feat(PR4.0): Chart Validation Agent and Live Provider Switching
mlx93 Dec 7, 2025
e1e6c39
docs: Update ARCHITECTURE.md with latest changes
mlx93 Dec 7, 2025
1f5ab8b
docs: Add test results summary for PR submission
mlx93 Dec 8, 2025
955f307
docs: Add system overview diagram to ARCHITECTURE.md
mlx93 Dec 8, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -4,3 +4,8 @@ bin
test-results/
.envrc
.specstory/

# Backend env file
.env
.env.local
OpenAiKey
514 changes: 511 additions & 3 deletions chartsmith-app/ARCHITECTURE.md

Large diffs are not rendered by default.

230 changes: 230 additions & 0 deletions chartsmith-app/app/api/chat/__tests__/route.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,230 @@
/**
* Chat API Route Tests
*
* Tests for the /api/chat endpoint.
* These tests verify request validation and error handling
* without making actual API calls to OpenRouter.
*/

// Suppress console.error during error handling tests (expected errors)
const originalConsoleError = console.error;
beforeAll(() => {
console.error = jest.fn();
});
afterAll(() => {
console.error = originalConsoleError;
});

// Mock the AI SDK before importing the route
// Mock the AI SDK before importing the route.
// NOTE: jest.mock calls are hoisted above the imports below, so each factory
// must be self-contained (no references to outer variables).
jest.mock('ai', () => ({
  streamText: jest.fn(),
  convertToModelMessages: jest.fn((msgs) => msgs),
  stepCountIs: jest.fn((n) => ({ type: 'step-count', count: n })),
  UIMessage: {},
}));

// Mock our AI helper module with deterministic provider/model validators.
jest.mock('@/lib/ai', () => ({
  getModel: jest.fn(() => ({ modelId: 'test-model' })),
  isValidProvider: jest.fn((provider) => ['anthropic', 'openai'].includes(provider)),
  isValidModel: jest.fn(
    (model) => model.startsWith('anthropic/') || model.startsWith('openai/'),
  ),
  CHARTSMITH_SYSTEM_PROMPT: 'Test system prompt',
  MAX_STREAMING_DURATION: 60,
}));

import { POST } from '../route';
import { streamText } from 'ai';
import { getModel, isValidProvider, isValidModel } from '@/lib/ai';

describe('POST /api/chat', () => {
  beforeEach(() => {
    jest.clearAllMocks();

    // BUG FIX: jest.clearAllMocks() only clears call history — it does NOT
    // remove implementations installed with mockReturnValue/mockImplementation
    // in earlier tests (that would require jest.resetAllMocks()). Without the
    // re-establishment below, e.g. the "invalid provider" test leaves
    // isValidProvider stuck returning false for every later test, making the
    // suite order-dependent. Restore the default behavior for all mocks here
    // so each test starts from a known-good state.
    (isValidProvider as jest.Mock).mockImplementation(
      (p) => ['anthropic', 'openai'].includes(p),
    );
    (isValidModel as jest.Mock).mockImplementation(
      (m: string) => m.startsWith('anthropic/') || m.startsWith('openai/'),
    );
    (getModel as jest.Mock).mockReturnValue({ modelId: 'test-model' });

    // Mock streamText to return a mock response.
    // PR2.0: Route uses toUIMessageStreamResponse (AI SDK v5)
    (streamText as jest.Mock).mockReturnValue({
      toUIMessageStreamResponse: () => new Response('streamed response', {
        headers: { 'Content-Type': 'text/event-stream' },
      }),
      toTextStreamResponse: () => new Response('streamed response', {
        headers: { 'Content-Type': 'text/event-stream' },
      }),
    });
  });

  describe('Request Validation', () => {
    it('should return 400 if messages array is missing', async () => {
      const request = new Request('http://localhost/api/chat', {
        method: 'POST',
        body: JSON.stringify({}),
      });

      const response = await POST(request);
      const data = await response.json();

      expect(response.status).toBe(400);
      expect(data.error).toBe('Invalid request');
      expect(data.details).toContain('messages');
    });

    it('should return 400 if messages is not an array', async () => {
      const request = new Request('http://localhost/api/chat', {
        method: 'POST',
        body: JSON.stringify({ messages: 'not an array' }),
      });

      const response = await POST(request);
      const data = await response.json();

      expect(response.status).toBe(400);
      expect(data.error).toBe('Invalid request');
    });

    it('should return 400 for invalid provider', async () => {
      (isValidProvider as jest.Mock).mockReturnValue(false);

      const request = new Request('http://localhost/api/chat', {
        method: 'POST',
        body: JSON.stringify({
          messages: [{ role: 'user', content: 'Hello' }],
          provider: 'invalid-provider',
        }),
      });

      const response = await POST(request);
      const data = await response.json();

      expect(response.status).toBe(400);
      expect(data.error).toBe('Invalid provider');
    });

    it('should return 400 for invalid model', async () => {
      (isValidModel as jest.Mock).mockReturnValue(false);

      const request = new Request('http://localhost/api/chat', {
        method: 'POST',
        body: JSON.stringify({
          messages: [{ role: 'user', content: 'Hello' }],
          model: 'invalid/model',
        }),
      });

      const response = await POST(request);
      const data = await response.json();

      expect(response.status).toBe(400);
      expect(data.error).toBe('Invalid model');
    });
  });

  describe('Successful Requests', () => {
    // Explicitly pin the validators to "valid" for this group, independent of
    // the top-level defaults, so these tests document their own preconditions.
    beforeEach(() => {
      (isValidProvider as jest.Mock).mockReturnValue(true);
      (isValidModel as jest.Mock).mockReturnValue(true);
    });

    it('should accept valid request with messages only', async () => {
      const request = new Request('http://localhost/api/chat', {
        method: 'POST',
        body: JSON.stringify({
          messages: [{ role: 'user', content: 'Hello' }],
        }),
      });

      const response = await POST(request);

      expect(response.status).toBe(200);
      expect(streamText).toHaveBeenCalled();
    });

    it('should accept valid request with provider', async () => {
      const request = new Request('http://localhost/api/chat', {
        method: 'POST',
        body: JSON.stringify({
          messages: [{ role: 'user', content: 'Hello' }],
          provider: 'anthropic',
        }),
      });

      const response = await POST(request);

      expect(response.status).toBe(200);
      expect(getModel).toHaveBeenCalledWith('anthropic', undefined);
    });

    it('should accept valid request with provider and model', async () => {
      const request = new Request('http://localhost/api/chat', {
        method: 'POST',
        body: JSON.stringify({
          messages: [{ role: 'user', content: 'Hello' }],
          provider: 'anthropic',
          model: 'anthropic/claude-sonnet-4',
        }),
      });

      const response = await POST(request);

      expect(response.status).toBe(200);
      expect(getModel).toHaveBeenCalledWith('anthropic', 'anthropic/claude-sonnet-4');
    });

    it('should return streaming response', async () => {
      const request = new Request('http://localhost/api/chat', {
        method: 'POST',
        body: JSON.stringify({
          messages: [{ role: 'user', content: 'Hello' }],
        }),
      });

      const response = await POST(request);

      expect(response.headers.get('Content-Type')).toBe('text/event-stream');
    });
  });

  describe('Error Handling', () => {
    it('should return 500 for missing API key error', async () => {
      (isValidProvider as jest.Mock).mockReturnValue(true);
      (getModel as jest.Mock).mockImplementation(() => {
        throw new Error('OPENROUTER_API_KEY environment variable is not set');
      });

      const request = new Request('http://localhost/api/chat', {
        method: 'POST',
        body: JSON.stringify({
          messages: [{ role: 'user', content: 'Hello' }],
        }),
      });

      const response = await POST(request);
      const data = await response.json();

      expect(response.status).toBe(500);
      expect(data.error).toBe('Configuration error');
    });

    it('should return 500 for unexpected errors', async () => {
      (isValidProvider as jest.Mock).mockReturnValue(true);
      (isValidModel as jest.Mock).mockReturnValue(true);
      // getModel succeeds (default from the top-level beforeEach), but
      // streamText fails — exercises the generic error path.
      (getModel as jest.Mock).mockReturnValue({ modelId: 'test-model' });
      (streamText as jest.Mock).mockImplementation(() => {
        throw new Error('Unexpected network error');
      });

      const request = new Request('http://localhost/api/chat', {
        method: 'POST',
        body: JSON.stringify({
          messages: [{ role: 'user', content: 'Hello' }],
        }),
      });

      const response = await POST(request);
      const data = await response.json();

      expect(response.status).toBe(500);
      expect(data.error).toBe('Failed to process request');
      expect(data.details).toContain('network error');
    });
  });
});

Loading