diff --git a/package.json b/package.json
index 97493e32..0a834841 100644
--- a/package.json
+++ b/package.json
@@ -72,7 +72,10 @@
"sharp",
"simple-git-hooks",
"workerd"
- ]
+ ],
+ "overrides": {
+ "@modelcontextprotocol/sdk": "^1.25.3"
+ }
},
"devDependencies": {
"@types/json-schema": "^7.0.15"
diff --git a/packages/mcp-cloudflare/package.json b/packages/mcp-cloudflare/package.json
index 0e027fa8..404e3e7e 100644
--- a/packages/mcp-cloudflare/package.json
+++ b/packages/mcp-cloudflare/package.json
@@ -4,9 +4,7 @@
"private": true,
"type": "module",
"license": "FSL-1.1-ALv2",
- "files": [
- "./dist/*"
- ],
+ "files": ["./dist/*"],
"exports": {
".": {
"types": "./dist/index.ts",
@@ -45,6 +43,7 @@
"wrangler": "^4.45.0"
},
"dependencies": {
+ "@ai-sdk/mcp": "catalog:",
"@ai-sdk/openai": "catalog:",
"@ai-sdk/react": "catalog:",
"@cloudflare/workers-oauth-provider": "catalog:",
diff --git a/packages/mcp-cloudflare/src/client/components/chat/chat-message.tsx b/packages/mcp-cloudflare/src/client/components/chat/chat-message.tsx
index 79c78802..d6a74ee3 100644
--- a/packages/mcp-cloudflare/src/client/components/chat/chat-message.tsx
+++ b/packages/mcp-cloudflare/src/client/components/chat/chat-message.tsx
@@ -127,6 +127,52 @@ const ToolPart = memo(function ToolPart({
);
});
+// Helper to check if a part is an AI SDK 6 tool part (type starts with "tool-")
+const isToolPart = (part: { type: string }): part is {
+ type: `tool-${string}`;
+} & ChatToolInvocation => {
+ return part.type.startsWith("tool-") && part.type !== "tool-invocation";
+};
+
+// Helper to check if a part is a legacy tool-invocation part (AI SDK 4/5 format)
+// Legacy format: { type: "tool-invocation", toolInvocation: ChatToolInvocation }
+const isLegacyToolInvocation = (part: { type: string }): part is {
+ type: "tool-invocation";
+ toolInvocation: ChatToolInvocation;
+} => {
+ return (
+ part.type === "tool-invocation" &&
+ "toolInvocation" in part &&
+    typeof (part as any).toolInvocation === "object" && (part as any).toolInvocation !== null
+ );
+};
+
+// Helper to convert tool output to proper content format
+const convertToolOutput = (
+ output: unknown,
+): { content: Array<{ type: "text"; text: string }> } | undefined => {
+ if (output === undefined || output === null) {
+ return undefined;
+ }
+
+ // If output is already in MCP format with content array
+ if (
+ typeof output === "object" &&
+ "content" in (output as object) &&
+ Array.isArray((output as { content: unknown }).content)
+ ) {
+ return output as { content: Array<{ type: "text"; text: string }> };
+ }
+
+ // If output is a string, wrap it
+ if (typeof output === "string") {
+ return { content: [{ type: "text", text: output }] };
+ }
+
+ // For other objects, JSON stringify
+ return { content: [{ type: "text", text: JSON.stringify(output, null, 2) }] };
+};
+
// Main component for rendering individual message parts
const MessagePart = memo(function MessagePart({
part,
@@ -137,30 +183,62 @@ const MessagePart = memo(function MessagePart({
messageData,
onSlashCommand,
}: MessagePartProps) {
- switch (part.type) {
- case "text":
- return (
-
- );
- case "tool-invocation":
- return (
-
- );
- default:
- // Fallback for unknown part types
- return null;
+ // Handle text parts
+ if (part.type === "text") {
+ return (
+
+ );
+ }
+
+ // Handle legacy tool-invocation parts (AI SDK 4/5 format from persisted messages)
+ // Legacy format: { type: "tool-invocation", toolInvocation: {...} }
+ if (isLegacyToolInvocation(part)) {
+ return (
+
+ );
}
+
+ // Handle tool parts (AI SDK 6 format: type is "tool-${toolName}")
+ if (isToolPart(part)) {
+    // Map AI SDK 6 state ("input-streaming"/"input-available"/"output-available") to ours
+    const partState = (part as any).state;
+    const mappedState: "partial-call" | "call" | "result" =
+      partState === "output-available"
+        ? "result"
+        : partState === "input-streaming"
+          ? "partial-call"
+          : "call";
+
+ // Convert AI SDK 6 tool part to our ChatToolInvocation format
+ const toolInvocation: ChatToolInvocation = {
+ toolCallId: part.toolCallId,
+ toolName: part.type.replace(/^tool-/, ""),
+ args: (part as any).input ?? {},
+ state: mappedState,
+ result: convertToolOutput((part as any).output),
+ };
+ return (
+
+ );
+ }
+
+ // Fallback for unknown part types
+ return null;
});
// Export the memoized components
diff --git a/packages/mcp-cloudflare/src/client/components/chat/chat-messages.tsx b/packages/mcp-cloudflare/src/client/components/chat/chat-messages.tsx
index fadf5672..8f76e02a 100644
--- a/packages/mcp-cloudflare/src/client/components/chat/chat-messages.tsx
+++ b/packages/mcp-cloudflare/src/client/components/chat/chat-messages.tsx
@@ -3,15 +3,15 @@ import { Loader2, AlertCircle } from "lucide-react";
import { Button } from "../ui/button";
import { MessagePart } from ".";
import { ToolActions } from "../ui/tool-actions";
-import type { Message, ProcessedMessagePart, ChatMessagesProps } from "./types";
+import type { ProcessedMessagePart, ChatMessagesProps } from "./types";
import { isAuthError, getErrorMessage } from "../../utils/chat-error-handler";
import { useAuth } from "../../contexts/auth-context";
-// Cache for stable part objects to avoid recreating them
-const partCache = new WeakMap();
+// Use the AI SDK 5+ UIMessage type directly for parts-based messages
+import type { UIMessage } from "@ai-sdk/react";
function processMessages(
- messages: Message[],
+ messages: UIMessage[],
isChatLoading: boolean,
isLocalStreaming?: boolean,
isMessageStreaming?: (messageId: string) => boolean,
@@ -28,42 +28,25 @@ function processMessages(
if (message.parts && message.parts.length > 0) {
const lastPartIndex = message.parts.length - 1;
- message.parts.forEach((part, partIndex) => {
- const isLastPartOfLastMessage =
- isLastMessage && partIndex === lastPartIndex;
-
- allParts.push({
- part,
- messageId: message.id,
- messageRole: message.role,
- partIndex,
- // Stream if it's AI response OR local streaming simulation
- isStreaming:
- (isLastPartOfLastMessage &&
- isChatLoading &&
- part.type === "text") ||
- (part.type === "text" && !!isMessageStreaming?.(message.id)),
- });
- });
- } else if (message.content) {
- // Use cached part object to maintain stable references
- let part = partCache.get(message);
- if (!part) {
- part = { type: "text", text: message.content };
- partCache.set(message, part);
- }
-
- allParts.push({
- part,
- messageId: message.id,
- messageRole: message.role,
- partIndex: 0,
- // Stream if it's AI response OR local streaming simulation
- isStreaming:
- (isLastMessage && isChatLoading) ||
- isMessageStreaming?.(message.id) ||
- false,
- });
+ message.parts.forEach(
+ (part: UIMessage["parts"][number], partIndex: number) => {
+ const isLastPartOfLastMessage =
+ isLastMessage && partIndex === lastPartIndex;
+
+ allParts.push({
+ part,
+ messageId: message.id,
+ messageRole: message.role,
+ partIndex,
+ // Stream if it's AI response OR local streaming simulation
+ isStreaming:
+ (isLastPartOfLastMessage &&
+ isChatLoading &&
+ part.type === "text") ||
+ (part.type === "text" && !!isMessageStreaming?.(message.id)),
+ });
+ },
+ );
}
});
@@ -132,7 +115,7 @@ export function ChatMessages({
const originalMessage = messages.find(
(m) => m.id === item.messageId,
);
- const messageData = originalMessage?.data as any;
+ const messageData = originalMessage?.metadata as any;
const hasToolActions =
messageData?.type === "tools-list" &&
messageData?.toolsDetailed &&
@@ -146,7 +129,7 @@ export function ChatMessages({
messageRole={item.messageRole}
partIndex={item.partIndex}
isStreaming={item.isStreaming}
- messageData={originalMessage?.data}
+ messageData={originalMessage?.metadata}
onSlashCommand={onSlashCommand}
/>
{/* Show tool actions list for tools-list messages */}
diff --git a/packages/mcp-cloudflare/src/client/components/chat/chat-ui.tsx b/packages/mcp-cloudflare/src/client/components/chat/chat-ui.tsx
index 520a27f1..8993a12a 100644
--- a/packages/mcp-cloudflare/src/client/components/chat/chat-ui.tsx
+++ b/packages/mcp-cloudflare/src/client/components/chat/chat-ui.tsx
@@ -6,7 +6,7 @@
import ScrollToBottom from "react-scroll-to-bottom";
import { Button } from "../ui/button";
import { ChatInput, ChatMessages } from ".";
-import type { Message } from "ai/react";
+import type { UIMessage } from "@ai-sdk/react";
// Constant empty function to avoid creating new instances on every render
const EMPTY_FUNCTION = () => {};
@@ -28,7 +28,7 @@ const SAMPLE_PROMPTS = [
] as const;
interface ChatUIProps {
- messages: Message[];
+ messages: UIMessage[];
input: string;
error?: Error | null;
isChatLoading: boolean;
diff --git a/packages/mcp-cloudflare/src/client/components/chat/chat.tsx b/packages/mcp-cloudflare/src/client/components/chat/chat.tsx
index 12d4b1bd..50064f83 100644
--- a/packages/mcp-cloudflare/src/client/components/chat/chat.tsx
+++ b/packages/mcp-cloudflare/src/client/components/chat/chat.tsx
@@ -1,7 +1,8 @@
"use client";
import { useChat } from "@ai-sdk/react";
-import { useEffect, useRef, useCallback } from "react";
+import { DefaultChatTransport, type UIMessage } from "ai";
+import { useState, useEffect, useRef, useCallback, useMemo } from "react";
import { AuthForm, ChatUI } from ".";
import { useAuth } from "../../contexts/auth-context";
import { Bot, Loader2, LogOut, PanelLeftOpen, Sparkles } from "lucide-react";
@@ -42,32 +43,54 @@ export function Chat({ isOpen, onClose, onLogout }: ChatProps) {
isMessageStreaming,
} = useStreamingSimulation();
+ // Manage input state manually (AI SDK 5+ removed built-in input management)
+ const [input, setInput] = useState("");
+
+ // Create transport with endpoint mode in body - memoize to avoid recreation
+ const transport = useMemo(
+ () =>
+ new DefaultChatTransport({
+ api: "/api/chat",
+ body: { endpointMode },
+ }),
+ [endpointMode],
+ );
+
const {
messages,
- input,
- handleInputChange,
- handleSubmit,
+ sendMessage,
status,
stop,
error,
- reload,
+ regenerate,
setMessages,
- setInput,
- append,
} = useChat({
- api: "/api/chat",
// No auth header needed - server reads from cookie
- // No ID to disable useChat's built-in persistence
// We handle persistence manually via usePersistedChat hook
- initialMessages,
- // Enable sending the data field with messages for custom message types
- sendExtraMessageFields: true,
- // Pass endpoint mode to the API
- body: {
- endpointMode,
- },
+ messages: initialMessages,
+ transport,
});
+ // Handle input change (manually managed in AI SDK 5+)
+ const handleInputChange = useCallback(
+ (e: React.ChangeEvent) => {
+ setInput(e.target.value);
+ },
+ [],
+ );
+
+ // Handle form submission (manually managed in AI SDK 5+)
+ const handleSubmit = useCallback(
+ (e: React.FormEvent) => {
+ e.preventDefault();
+ if (!input.trim()) return;
+
+ sendMessage({ text: input });
+ setInput("");
+ },
+ [input, sendMessage],
+ );
+
// No need for custom scroll handling - react-scroll-to-bottom handles it
// Clear messages function - used locally for /clear command and logout
@@ -175,21 +198,23 @@ Try asking me things like:
};
}, []);
- // Track previous auth state to detect logout events
- const prevAuthStateRef = useRef(isAuthenticated);
+ // Track previous auth and endpoint mode to detect changes requiring message clearing
+ const prevStateRef = useRef({ isAuthenticated, endpointMode });
- // Clear messages when user logs out (auth state changes from authenticated to not)
+ // Clear messages when user logs out or endpoint mode changes
useEffect(() => {
- const wasAuthenticated = prevAuthStateRef.current;
+ const prev = prevStateRef.current;
+
+ // Clear on logout (was authenticated but now isn't) or endpoint mode change
+ const didLogout = prev.isAuthenticated && !isAuthenticated;
+ const didChangeMode = prev.endpointMode !== endpointMode;
- // Detect logout: was authenticated but now isn't
- if (wasAuthenticated && !isAuthenticated) {
+ if (didLogout || didChangeMode) {
clearMessages();
}
- // Update the ref for next comparison
- prevAuthStateRef.current = isAuthenticated;
- }, [isAuthenticated, clearMessages]);
+ prevStateRef.current = { isAuthenticated, endpointMode };
+ }, [isAuthenticated, endpointMode, clearMessages]);
// Save messages when they change
useEffect(() => {
@@ -215,7 +240,7 @@ Try asking me things like:
) {
hadAuthErrorRef.current = false;
// Retry the failed message
- reload();
+ regenerate();
}
// Reset retry state on successful completion (no error)
@@ -225,7 +250,23 @@ Try asking me things like:
// Update auth state ref
wasAuthenticatedRef.current = isAuthenticated;
- }, [isAuthenticated, error, reload]);
+ }, [isAuthenticated, error, regenerate]);
+
+ // Helper to create a UIMessage with parts (AI SDK 5+ format)
+ const createUIMessage = useCallback(
+ (
+ id: string,
+ role: "user" | "assistant" | "system",
+ text: string,
+ metadata?: Record,
+ ): UIMessage => ({
+ id,
+ role,
+ parts: [{ type: "text", text }],
+ ...(metadata && { metadata }),
+ }),
+ [],
+ );
// Handle slash commands
const handleSlashCommand = useCallback(
@@ -234,34 +275,32 @@ Try asking me things like:
setInput("");
// Add the slash command as a user message first
- const userMessage = {
- id: Date.now().toString(),
- role: "user" as const,
- content: `/${command}`,
- createdAt: new Date(),
- };
+ const userMessage = createUIMessage(
+ Date.now().toString(),
+ "user",
+ `/${command}`,
+ );
if (command === "clear") {
// Clear everything
clearMessages();
} else if (command === "logout") {
// Add message, then logout
- setMessages((prev: any[]) => [...prev, userMessage]);
+ setMessages((prev) => [...prev, userMessage]);
onLogout();
} else if (command === "help") {
// Add user message first
- setMessages((prev: any[]) => [...prev, userMessage]);
+ setMessages((prev) => [...prev, userMessage]);
// Create help message with metadata and add after a brief delay for better UX
setTimeout(() => {
const helpMessageData = createHelpMessage();
- const helpMessage = {
- id: (Date.now() + 1).toString(),
- role: "system" as const,
- content: helpMessageData.content,
- createdAt: new Date(),
- data: { ...helpMessageData.data, simulateStreaming: true },
- };
+ const helpMessage = createUIMessage(
+ (Date.now() + 1).toString(),
+ "system",
+ helpMessageData.content,
+ { ...helpMessageData.data, simulateStreaming: true },
+ );
setMessages((prev) => [...prev, helpMessage]);
// Start streaming simulation
@@ -269,40 +308,38 @@ Try asking me things like:
}, 100);
} else if (command === "tools") {
// Add user message first
- setMessages((prev: any[]) => [...prev, userMessage]);
+ setMessages((prev) => [...prev, userMessage]);
// Create tools message
setTimeout(() => {
const toolsMessageData = createToolsMessage();
- const toolsMessage = {
- id: (Date.now() + 1).toString(),
- role: "system" as const,
- content: toolsMessageData.content,
- createdAt: new Date(),
- data: { ...toolsMessageData.data, simulateStreaming: true },
- };
+ const toolsMessage = createUIMessage(
+ (Date.now() + 1).toString(),
+ "system",
+ toolsMessageData.content,
+ { ...toolsMessageData.data, simulateStreaming: true },
+ );
setMessages((prev) => [...prev, toolsMessage]);
startStreaming(toolsMessage.id, 600);
}, 100);
} else {
// Handle unknown slash commands - add user message and error
- const errorMessage = {
- id: (Date.now() + 1).toString(),
- role: "system" as const,
- content: `Unknown command: /${command}. Available commands: /help, /tools, /clear, /logout`,
- createdAt: new Date(),
- };
+ const errorMessage = createUIMessage(
+ (Date.now() + 1).toString(),
+ "system",
+ `Unknown command: /${command}. Available commands: /help, /tools, /clear, /logout`,
+ );
setMessages((prev) => [...prev, userMessage, errorMessage]);
}
},
[
clearMessages,
onLogout,
- setInput,
setMessages,
createHelpMessage,
createToolsMessage,
+ createUIMessage,
startStreaming,
],
);
@@ -317,18 +354,10 @@ Try asking me things like:
return;
}
- // Clear the input and directly send the message using append
- append({ role: "user", content: prompt });
- },
- [append, handleSlashCommand],
- );
-
- // Wrap form submission to ensure scrolling
- const handleFormSubmit = useCallback(
- (e: React.FormEvent) => {
- handleSubmit(e);
+ // Send the message using sendMessage (AI SDK 5+ API)
+ sendMessage({ text: prompt });
},
- [handleSubmit],
+ [sendMessage, handleSlashCommand],
);
// Show loading state while checking auth session
@@ -439,9 +468,9 @@ Try asking me things like:
isMessageStreaming={isMessageStreaming}
isOpen={isOpen}
onInputChange={handleInputChange}
- onSubmit={handleFormSubmit}
+ onSubmit={handleSubmit}
onStop={stop}
- onRetry={reload}
+ onRetry={regenerate}
onSlashCommand={handleSlashCommand}
onSendPrompt={handleSendPrompt}
/>
diff --git a/packages/mcp-cloudflare/src/client/components/chat/types.ts b/packages/mcp-cloudflare/src/client/components/chat/types.ts
index a22d8b15..6a3dee8a 100644
--- a/packages/mcp-cloudflare/src/client/components/chat/types.ts
+++ b/packages/mcp-cloudflare/src/client/components/chat/types.ts
@@ -3,13 +3,13 @@
*/
import type React from "react";
-import type { Message } from "ai/react";
+import type { UIMessage } from "@ai-sdk/react";
-// Re-export AI SDK types for convenience
-export type { Message } from "ai/react";
+// Re-export AI SDK types for convenience - Message renamed to UIMessage in AI SDK 5+
+export type { UIMessage as Message } from "@ai-sdk/react";
// Extended message type that includes our custom metadata
-export interface ExtendedMessage extends Message {
+export interface ExtendedMessage extends UIMessage {
data?: {
type?: string;
prompts?: any[];
@@ -91,7 +91,7 @@ export interface ChatToolInvocation {
// Message processing types
export interface ProcessedMessagePart {
- part: NonNullable[number];
+ part: NonNullable[number];
messageId: string;
messageRole: string;
partIndex: number;
@@ -107,7 +107,7 @@ export interface ChatProps {
}
export interface ChatUIProps {
- messages: Message[];
+ messages: UIMessage[];
input: string;
error?: Error | null;
isChatLoading: boolean;
@@ -124,7 +124,7 @@ export interface ChatUIProps {
}
export interface ChatMessagesProps {
- messages: Message[];
+ messages: UIMessage[];
isChatLoading: boolean;
isLocalStreaming?: boolean;
isMessageStreaming?: (messageId: string) => boolean;
@@ -153,7 +153,7 @@ export interface PanelBackdropProps {
}
export interface MessagePartProps {
- part: NonNullable[number];
+ part: NonNullable[number];
messageId: string;
messageRole: string;
partIndex: number;
diff --git a/packages/mcp-cloudflare/src/client/hooks/use-persisted-chat.ts b/packages/mcp-cloudflare/src/client/hooks/use-persisted-chat.ts
index 6281f826..10b0ef08 100644
--- a/packages/mcp-cloudflare/src/client/hooks/use-persisted-chat.ts
+++ b/packages/mcp-cloudflare/src/client/hooks/use-persisted-chat.ts
@@ -1,11 +1,49 @@
import { useCallback, useMemo } from "react";
-import type { Message } from "ai";
+import type { UIMessage } from "ai";
const CHAT_STORAGE_KEY = "sentry_chat_messages";
const TIMESTAMP_STORAGE_KEY = "sentry_chat_timestamp";
const MAX_STORED_MESSAGES = 100; // Limit storage size
const CACHE_EXPIRY_MS = 60 * 60 * 1000; // 1 hour in milliseconds
+// Legacy AI SDK 4.x message format (before migration to parts-based format)
+interface LegacyMessage {
+ id: string;
+ role: "user" | "assistant" | "system";
+ content?: string;
+ parts?: UIMessage["parts"];
+ // Legacy used 'data', new SDK uses 'metadata'
+ data?: Record;
+ metadata?: Record;
+}
+
+// Migrate legacy messages (AI SDK 4.x format with `content`) to new format (with `parts`)
+function migrateMessage(msg: LegacyMessage): UIMessage {
+ // Preserve metadata from either 'metadata' or legacy 'data' property
+ const metadata = msg.metadata ?? msg.data;
+
+ // If message already has parts, use it as-is (but ensure metadata is preserved)
+ if (msg.parts && Array.isArray(msg.parts) && msg.parts.length > 0) {
+ if (metadata) {
+ return { ...msg, metadata } as UIMessage;
+ }
+ return msg as UIMessage;
+ }
+
+ // If message has legacy content string, convert to parts format
+ if (typeof msg.content === "string" && msg.content.length > 0) {
+ return {
+ id: msg.id,
+ role: msg.role,
+ parts: [{ type: "text", text: msg.content }],
+ ...(metadata && { metadata }),
+ } as UIMessage;
+ }
+
+ // Return as-is if neither format matches (will be filtered by validation)
+ return msg as UIMessage;
+}
+
export function usePersistedChat(isAuthenticated: boolean) {
// Check if cache is expired
const isCacheExpired = useCallback(() => {
@@ -31,42 +69,58 @@ export function usePersistedChat(isAuthenticated: boolean) {
}, []);
// Validate a message to ensure it won't cause conversion errors
- const isValidMessage = useCallback((msg: Message): boolean => {
- // Check if message has parts (newer structure)
- if (msg.parts && Array.isArray(msg.parts)) {
- // Check each part for validity
- return msg.parts.every((part) => {
- // Text parts are always valid
- if (part.type === "text") {
- return true;
- }
+ const isValidMessage = useCallback((msg: UIMessage): boolean => {
+ // UIMessage always has parts array in AI SDK 6+
+ if (!msg.parts || !Array.isArray(msg.parts) || msg.parts.length === 0) {
+ return false;
+ }
- // Tool invocation parts must be complete (have result) if state is "call" or "result"
- if (part.type === "tool-invocation") {
- const invocation = part as any;
- // If it's in "call" or "result" state, it must have a result
- if (invocation.state === "call" || invocation.state === "result") {
- const content = invocation.result?.content;
- // Ensure content exists and is not an empty array
- return (
- content && (Array.isArray(content) ? content.length > 0 : true)
- );
- }
- // partial-call state is okay without result
- return true;
- }
+ // Invalid states that indicate incomplete tool calls
+ const incompleteToolStates = new Set([
+ "input-streaming",
+ "input-available",
+ "approval-requested",
+ ]);
- // Other part types are assumed valid
+ return msg.parts.every((part) => {
+ // Text parts are always valid
+ if (part.type === "text") {
return true;
- });
- }
+ }
- // Check if message has content (legacy structure)
- if (msg.content && typeof msg.content === "string") {
- return msg.content.trim() !== "";
- }
+ // AI SDK 6.x uses "tool-" format (e.g., "tool-whoami")
+ // Filter out incomplete tool calls that shouldn't be persisted
+ if (part.type.startsWith("tool-")) {
+ const { state } = part as { state?: string };
+ return !incompleteToolStates.has(state ?? "");
+ }
+
+ // Legacy "tool-invocation" format (AI SDK 4.x)
+ // Structure: { type: "tool-invocation", toolInvocation: { state, result, ... } }
+ if (part.type === "tool-invocation") {
+ const invocation = (
+ part as {
+ toolInvocation?: {
+ state?: string;
+ result?: { content?: unknown };
+ };
+ }
+ ).toolInvocation;
+ if (!invocation) return true; // No invocation data, allow it
+ // "call" or "result" state requires valid content
+ if (invocation.state === "call" || invocation.state === "result") {
+ const content = invocation.result?.content;
+ return (
+ content != null && (!Array.isArray(content) || content.length > 0)
+ );
+ }
+ // partial-call state is okay without result
+ return true;
+ }
- return false;
+ // Other part types (reasoning, file, source-url, etc.) are valid
+ return true;
+ });
}, []);
// Load initial messages from localStorage
@@ -84,11 +138,12 @@ export function usePersistedChat(isAuthenticated: boolean) {
try {
const stored = localStorage.getItem(CHAT_STORAGE_KEY);
if (stored) {
- const parsed = JSON.parse(stored) as Message[];
+ const parsed = JSON.parse(stored) as LegacyMessage[];
// Validate the data structure
if (Array.isArray(parsed) && parsed.length > 0) {
- // Filter out any invalid or incomplete messages
- const validMessages = parsed.filter(isValidMessage);
+ // Migrate legacy messages and filter out any invalid ones
+ const migratedMessages = parsed.map(migrateMessage);
+ const validMessages = migratedMessages.filter(isValidMessage);
if (validMessages.length > 0) {
// Update timestamp since we're loading existing messages
updateTimestamp();
@@ -108,7 +163,7 @@ export function usePersistedChat(isAuthenticated: boolean) {
// Function to save messages
const saveMessages = useCallback(
- (messages: Message[]) => {
+ (messages: UIMessage[]) => {
if (!isAuthenticated || messages.length === 0) return;
try {
diff --git a/packages/mcp-cloudflare/src/server/routes/chat.ts b/packages/mcp-cloudflare/src/server/routes/chat.ts
index edb054a7..a32b11cc 100644
--- a/packages/mcp-cloudflare/src/server/routes/chat.ts
+++ b/packages/mcp-cloudflare/src/server/routes/chat.ts
@@ -1,7 +1,12 @@
import { Hono, type Context } from "hono";
import { openai } from "@ai-sdk/openai";
-import { streamText, type ToolSet } from "ai";
-import { experimental_createMCPClient } from "ai";
+import {
+ streamText,
+ type ToolSet,
+ stepCountIs,
+ convertToModelMessages,
+} from "ai";
+import { experimental_createMCPClient } from "@ai-sdk/mcp";
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
import type { Env } from "../types";
import { logInfo, logIssue, logWarn } from "@sentry/mcp-core/telem/logging";
@@ -299,9 +304,15 @@ export default new Hono<{ Bindings: Env }>().post("/", async (c) => {
}
}
+ // Convert UIMessage[] (parts-based) to ModelMessage[] (content-based) for streamText
+ const modelMessages = await convertToModelMessages(messages, {
+ tools,
+ ignoreIncompleteToolCalls: true,
+ });
+
const result = streamText({
model: openai("gpt-4o"),
- messages,
+ messages: modelMessages,
tools,
system: `You are an AI assistant designed EXCLUSIVELY for testing the Sentry MCP service. Your sole purpose is to help users test MCP functionality with their real Sentry account data - nothing more, nothing less.
@@ -325,15 +336,15 @@ Start conversations by exploring what's available in their account. Use tools li
- \`get_issue_details\` to dive deep into specific errors
Remember: You're a test assistant, not a general-purpose helper. Stay focused on testing the MCP integration with their real data.`,
- maxTokens: 2000,
- maxSteps: 10,
+ maxOutputTokens: 2000,
+ stopWhen: stepCountIs(10),
experimental_telemetry: {
isEnabled: true,
},
});
// Clean up MCP client when the response stream ends
- const response = result.toDataStreamResponse();
+ const response = result.toUIMessageStreamResponse();
// Note: In a production environment, you might want to implement proper cleanup
// This is a simplified approach for the demo
diff --git a/packages/mcp-cloudflare/src/server/routes/metadata.ts b/packages/mcp-cloudflare/src/server/routes/metadata.ts
index edfc85b5..707360a0 100644
--- a/packages/mcp-cloudflare/src/server/routes/metadata.ts
+++ b/packages/mcp-cloudflare/src/server/routes/metadata.ts
@@ -5,7 +5,7 @@
* without requiring a chat stream to be initialized.
*/
import { Hono } from "hono";
-import { experimental_createMCPClient } from "ai";
+import { experimental_createMCPClient } from "@ai-sdk/mcp";
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
import type { Env } from "../types";
import { logIssue, logWarn } from "@sentry/mcp-core/telem/logging";
diff --git a/packages/mcp-cloudflare/src/server/types/chat.ts b/packages/mcp-cloudflare/src/server/types/chat.ts
index 810d8633..987acb64 100644
--- a/packages/mcp-cloudflare/src/server/types/chat.ts
+++ b/packages/mcp-cloudflare/src/server/types/chat.ts
@@ -2,6 +2,7 @@
* Type definitions for Chat API
*/
import { z } from "zod";
+import type { UIMessage } from "ai";
// Shared schemas for authentication data across chat routes
export const AuthDataSchema = z.object({
@@ -51,13 +52,9 @@ export interface ErrorResponse {
eventId?: string;
}
-// Request types
+// Request types - uses UIMessage format from AI SDK 6.x (parts-based)
export interface ChatRequest {
- messages: Array<{
- role: "user" | "assistant" | "system";
- content: string;
- data?: any; // Additional metadata for messages
- }>;
+ messages: UIMessage[];
}
// MCP types
diff --git a/packages/mcp-core/package.json b/packages/mcp-core/package.json
index 06d3cc25..fa478a67 100644
--- a/packages/mcp-core/package.json
+++ b/packages/mcp-core/package.json
@@ -11,9 +11,7 @@
"author": "Sentry",
"description": "Sentry MCP Core - Shared code for MCP transports",
"homepage": "https://github.com/getsentry/sentry-mcp",
- "keywords": [
- "sentry"
- ],
+ "keywords": ["sentry"],
"bugs": {
"url": "https://github.com/getsentry/sentry-mcp/issues"
},
@@ -21,9 +19,7 @@
"type": "git",
"url": "git@github.com:getsentry/sentry-mcp.git"
},
- "files": [
- "./dist/*"
- ],
+ "files": ["./dist/*"],
"exports": {
"./api-client": {
"types": "./dist/api-client/index.ts",
@@ -144,6 +140,8 @@
"validate-skills": "tsx scripts/validate-skills-mapping.ts"
},
"devDependencies": {
+ "@ai-sdk/provider": "^3.0.6",
+ "@ai-sdk/provider-utils": "^4.0.11",
"@sentry/mcp-server-mocks": "workspace:*",
"@sentry/mcp-server-tsconfig": "workspace:*",
"msw": "catalog:",
@@ -153,6 +151,7 @@
},
"dependencies": {
"@ai-sdk/anthropic": "catalog:",
+ "@ai-sdk/mcp": "catalog:",
"@ai-sdk/openai": "catalog:",
"@logtape/logtape": "^1.1.1",
"@logtape/sentry": "^1.1.1",
diff --git a/packages/mcp-core/src/internal/agents/anthropic-provider.ts b/packages/mcp-core/src/internal/agents/anthropic-provider.ts
index eee7292d..f2627381 100644
--- a/packages/mcp-core/src/internal/agents/anthropic-provider.ts
+++ b/packages/mcp-core/src/internal/agents/anthropic-provider.ts
@@ -1,5 +1,5 @@
import { createAnthropic } from "@ai-sdk/anthropic";
-import type { LanguageModelV1 } from "ai";
+import type { LanguageModel } from "ai";
import { USER_AGENT } from "../../version";
// Default configuration constants
@@ -24,7 +24,7 @@ export function setAnthropicBaseUrl(baseUrl: string | undefined): void {
* - ANTHROPIC_MODEL: Model to use (default: "claude-sonnet-4-5") - env var OK
* - Base URL: Must be set via setAnthropicBaseUrl() - NOT from env vars (security risk)
*/
-export function getAnthropicModel(model?: string): LanguageModelV1 {
+export function getAnthropicModel(model?: string): LanguageModel {
const defaultModel = process.env.ANTHROPIC_MODEL || DEFAULT_ANTHROPIC_MODEL;
const factory = createAnthropic({
diff --git a/packages/mcp-core/src/internal/agents/callEmbeddedAgent.ts b/packages/mcp-core/src/internal/agents/callEmbeddedAgent.ts
index 22a1d389..5609f367 100644
--- a/packages/mcp-core/src/internal/agents/callEmbeddedAgent.ts
+++ b/packages/mcp-core/src/internal/agents/callEmbeddedAgent.ts
@@ -1,4 +1,4 @@
-import { generateText, Output, type Tool, APICallError } from "ai";
+import { generateText, Output, type Tool, APICallError, stepCountIs } from "ai";
import { getAgentProvider } from "./provider-factory";
import { UserInputError, LLMProviderError } from "../../errors";
import type { z } from "zod";
@@ -45,7 +45,7 @@ export async function callEmbeddedAgent<
system,
prompt,
tools,
- maxSteps: 5,
+ stopWhen: stepCountIs(5),
// Only include temperature if provider specifies one (e.g., GPT-5 requires temperature=1)
...(provider.getTemperature() !== undefined && {
temperature: provider.getTemperature(),
@@ -63,7 +63,7 @@ export async function callEmbeddedAgent<
for (const toolCall of event.toolCalls) {
capturedToolCalls.push({
toolName: toolCall.toolName,
- args: toolCall.args,
+ args: toolCall.input,
});
}
}
diff --git a/packages/mcp-core/src/internal/agents/openai-provider.test.ts b/packages/mcp-core/src/internal/agents/openai-provider.test.ts
index 852201e5..6b914bc6 100644
--- a/packages/mcp-core/src/internal/agents/openai-provider.test.ts
+++ b/packages/mcp-core/src/internal/agents/openai-provider.test.ts
@@ -1,4 +1,5 @@
import { describe, it, expect, beforeEach, afterEach } from "vitest";
+import type { LanguageModelV3 } from "@ai-sdk/provider";
import { getOpenAIModel, setOpenAIBaseUrl } from "./openai-provider.js";
describe("openai-provider", () => {
@@ -21,7 +22,7 @@ describe("openai-provider", () => {
describe("base URL configuration", () => {
it("uses default base URL when not configured", () => {
- const model = getOpenAIModel();
+ const model = getOpenAIModel() as LanguageModelV3;
expect(model).toBeDefined();
expect(model.modelId).toBe("gpt-5");
@@ -30,7 +31,7 @@ describe("openai-provider", () => {
it("uses configured base URL", () => {
setOpenAIBaseUrl("https://custom-openai.example.com");
- const model = getOpenAIModel();
+ const model = getOpenAIModel() as LanguageModelV3;
expect(model).toBeDefined();
expect(model.modelId).toBe("gpt-5");
@@ -39,13 +40,13 @@ describe("openai-provider", () => {
describe("model override", () => {
it("uses default model when not specified", () => {
- const model = getOpenAIModel();
+ const model = getOpenAIModel() as LanguageModelV3;
expect(model.modelId).toBe("gpt-5");
});
it("uses specified model when provided", () => {
- const model = getOpenAIModel("gpt-4");
+ const model = getOpenAIModel("gpt-4") as LanguageModelV3;
expect(model.modelId).toBe("gpt-4");
});
@@ -53,7 +54,7 @@ describe("openai-provider", () => {
it("uses OPENAI_MODEL env var when set", () => {
process.env.OPENAI_MODEL = "gpt-4-turbo";
- const model = getOpenAIModel();
+ const model = getOpenAIModel() as LanguageModelV3;
expect(model.modelId).toBe("gpt-4-turbo");
});
diff --git a/packages/mcp-core/src/internal/agents/openai-provider.ts b/packages/mcp-core/src/internal/agents/openai-provider.ts
index b4a2260e..f14e62a2 100644
--- a/packages/mcp-core/src/internal/agents/openai-provider.ts
+++ b/packages/mcp-core/src/internal/agents/openai-provider.ts
@@ -1,5 +1,5 @@
import { createOpenAI } from "@ai-sdk/openai";
-import type { LanguageModelV1 } from "ai";
+import type { LanguageModel } from "ai";
import { USER_AGENT } from "../../version";
// Default configuration constants
@@ -33,7 +33,7 @@ export function setOpenAIBaseUrl(baseUrl: string | undefined): void {
* - Vercel AI Gateway: "openai/gpt-4o", "anthropic/claude-sonnet-4.5", etc.
* - Other providers: Check their documentation
*/
-export function getOpenAIModel(model?: string): LanguageModelV1 {
+export function getOpenAIModel(model?: string): LanguageModel {
const defaultModel = process.env.OPENAI_MODEL || DEFAULT_OPENAI_MODEL;
const factory = createOpenAI({
diff --git a/packages/mcp-core/src/internal/agents/tools/utils.ts b/packages/mcp-core/src/internal/agents/tools/utils.ts
index dd90fdc4..5b051491 100644
--- a/packages/mcp-core/src/internal/agents/tools/utils.ts
+++ b/packages/mcp-core/src/internal/agents/tools/utils.ts
@@ -126,12 +126,12 @@ export function agentTool(config: {
return tool({
description: config.description,
- parameters: config.parameters,
+ inputSchema: config.parameters,
execute: async (
- params: TParameters,
+ input: TParameters,
): Promise> => {
try {
- const result = await config.execute(params);
+ const result = await config.execute(input);
return { result };
} catch (error) {
return handleAgentToolError(error);
diff --git a/packages/mcp-core/src/internal/agents/types.ts b/packages/mcp-core/src/internal/agents/types.ts
index df7fa72f..c6166dcb 100644
--- a/packages/mcp-core/src/internal/agents/types.ts
+++ b/packages/mcp-core/src/internal/agents/types.ts
@@ -1,4 +1,4 @@
-import type { LanguageModelV1, JSONValue } from "ai";
+import type { LanguageModel, JSONValue } from "ai";
/**
* Provider options type matching AI SDK's ProviderOptions (LanguageModelV1ProviderMetadata)
@@ -19,7 +19,7 @@ export interface EmbeddedAgentProvider {
readonly type: AgentProviderType;
/** Get a language model instance, optionally with a model override */
- getModel(modelOverride?: string): LanguageModelV1;
+ getModel(modelOverride?: string): LanguageModel;
/** Get provider-specific options for generateText calls */
getProviderOptions(): ProviderOptions;
diff --git a/packages/mcp-core/src/tools/search-events.test.ts b/packages/mcp-core/src/tools/search-events.test.ts
index 9cbad67f..2650758d 100644
--- a/packages/mcp-core/src/tools/search-events.test.ts
+++ b/packages/mcp-core/src/tools/search-events.test.ts
@@ -14,11 +14,15 @@ vi.mock("@ai-sdk/openai", () => {
};
});
-vi.mock("ai", () => ({
- generateText: vi.fn(),
- tool: vi.fn(() => ({ execute: vi.fn() })),
- Output: { object: vi.fn(() => ({})) },
-}));
+vi.mock("ai", async (importOriginal) => {
+ const actual = await importOriginal();
+ return {
+ ...actual,
+ generateText: vi.fn(),
+ tool: vi.fn(() => ({ execute: vi.fn() })),
+ Output: { object: vi.fn(() => ({})) },
+ };
+});
describe("search_events", () => {
const mockGenerateText = vi.mocked(generateText);
diff --git a/packages/mcp-core/src/tools/search-issue-events.test.ts b/packages/mcp-core/src/tools/search-issue-events.test.ts
index 5c2226f9..08452cb4 100644
--- a/packages/mcp-core/src/tools/search-issue-events.test.ts
+++ b/packages/mcp-core/src/tools/search-issue-events.test.ts
@@ -15,11 +15,15 @@ vi.mock("@ai-sdk/openai", () => {
};
});
-vi.mock("ai", () => ({
- generateText: vi.fn(),
- tool: vi.fn(() => ({ execute: vi.fn() })),
- Output: { object: vi.fn(() => ({})) },
-}));
+vi.mock("ai", async (importOriginal) => {
+ const actual = await importOriginal();
+ return {
+ ...actual,
+ generateText: vi.fn(),
+ tool: vi.fn(() => ({ execute: vi.fn() })),
+ Output: { object: vi.fn(() => ({})) },
+ };
+});
describe("search_issue_events", () => {
const mockGenerateText = vi.mocked(generateText);
diff --git a/packages/mcp-core/src/tools/search-issues.test.ts b/packages/mcp-core/src/tools/search-issues.test.ts
index f6eddb81..b1271316 100644
--- a/packages/mcp-core/src/tools/search-issues.test.ts
+++ b/packages/mcp-core/src/tools/search-issues.test.ts
@@ -14,11 +14,15 @@ vi.mock("@ai-sdk/openai", () => {
};
});
-vi.mock("ai", () => ({
- generateText: vi.fn(),
- tool: vi.fn(() => ({ execute: vi.fn() })),
- Output: { object: vi.fn(() => ({})) },
-}));
+vi.mock("ai", async (importOriginal) => {
+ const actual = await importOriginal();
+ return {
+ ...actual,
+ generateText: vi.fn(),
+ tool: vi.fn(() => ({ execute: vi.fn() })),
+ Output: { object: vi.fn(() => ({})) },
+ };
+});
describe("search_issues", () => {
const mockGenerateText = vi.mocked(generateText);
diff --git a/packages/mcp-core/src/tools/use-sentry/handler.ts b/packages/mcp-core/src/tools/use-sentry/handler.ts
index 4d5b65c9..9e9ef9b6 100644
--- a/packages/mcp-core/src/tools/use-sentry/handler.ts
+++ b/packages/mcp-core/src/tools/use-sentry/handler.ts
@@ -1,6 +1,6 @@
import { z } from "zod";
import { InMemoryTransport } from "@modelcontextprotocol/sdk/inMemory.js";
-import { experimental_createMCPClient } from "ai";
+import { experimental_createMCPClient } from "@ai-sdk/mcp";
import { defineTool } from "../../internal/tool-helpers/define";
import type { ServerContext } from "../../types";
import { useSentryAgent } from "./agent";
diff --git a/packages/mcp-core/src/tools/use-sentry/tool-wrapper.test.ts b/packages/mcp-core/src/tools/use-sentry/tool-wrapper.test.ts
index 00b6906f..4cf0ef11 100644
--- a/packages/mcp-core/src/tools/use-sentry/tool-wrapper.test.ts
+++ b/packages/mcp-core/src/tools/use-sentry/tool-wrapper.test.ts
@@ -1,9 +1,16 @@
import { describe, it, expect } from "vitest";
import { z } from "zod";
+import type { ToolExecutionOptions } from "@ai-sdk/provider-utils";
import { wrapToolForAgent } from "./tool-wrapper";
import type { ServerContext } from "../../types";
import type { ToolConfig } from "../types";
+// Helper to create minimal ToolExecutionOptions for testing
+const createToolOptions = (): ToolExecutionOptions => ({
+ toolCallId: "test-call-id",
+ messages: [],
+});
+
// Create a simple mock tool for testing
const mockTool: ToolConfig<{
organizationSlug: z.ZodOptional;
@@ -46,14 +53,14 @@ describe("wrapToolForAgent", () => {
const wrappedTool = wrapToolForAgent(mockTool, { context });
// Call the wrapped tool
- // AI SDK tools expect a toolContext parameter (messages, abortSignal, etc.)
- const result = await wrappedTool.execute(
+ // AI SDK 6: execute takes (input, options) directly
+ const result = (await wrappedTool.execute!(
{
organizationSlug: "my-org",
someParam: "test-value",
},
- {} as any, // Empty tool context for testing
- );
+ createToolOptions(),
+ )) as { result?: unknown; error?: string };
// Verify the tool was called with correct params
expect(result.result).toBeDefined();
@@ -76,12 +83,12 @@ describe("wrapToolForAgent", () => {
const wrappedTool = wrapToolForAgent(mockTool, { context });
// Call without providing organizationSlug
- const result = await wrappedTool.execute(
+ const result = (await wrappedTool.execute!(
{
someParam: "test-value",
},
- {} as any,
- );
+ createToolOptions(),
+ )) as { result?: unknown; error?: string };
// Verify the constraint was injected
expect(result.result).toBeDefined();
@@ -105,12 +112,12 @@ describe("wrapToolForAgent", () => {
const wrappedTool = wrapToolForAgent(mockTool, { context });
// Call without providing projectSlug
- const result = await wrappedTool.execute(
+ const result = (await wrappedTool.execute!(
{
someParam: "test-value",
},
- {} as any,
- );
+ createToolOptions(),
+ )) as { result?: unknown; error?: string };
// Verify both constraints were injected
expect(result.result).toBeDefined();
@@ -134,13 +141,13 @@ describe("wrapToolForAgent", () => {
const wrappedTool = wrapToolForAgent(mockTool, { context });
// Provide organizationSlug explicitly (should NOT override since constraint injection doesn't override)
- const result = await wrappedTool.execute(
+ const result = (await wrappedTool.execute!(
{
organizationSlug: "agent-provided-org",
someParam: "test-value",
},
- {} as any,
- );
+ createToolOptions(),
+ )) as { result?: unknown; error?: string };
expect(result.result).toBeDefined();
const parsed = JSON.parse(result.result as string);
@@ -177,7 +184,10 @@ describe("wrapToolForAgent", () => {
const wrappedTool = wrapToolForAgent(errorTool, { context });
// Call the tool that throws an error
- const result = await wrappedTool.execute({ param: "test" }, {} as any);
+ const result = (await wrappedTool.execute!(
+ { param: "test" },
+ createToolOptions(),
+ )) as { result?: unknown; error?: string };
// Verify the error was caught and returned in structured format
// Generic errors are wrapped as "System Error" by agentTool for security
diff --git a/packages/mcp-server-evals/package.json b/packages/mcp-server-evals/package.json
index f81d4105..388f0ec0 100644
--- a/packages/mcp-server-evals/package.json
+++ b/packages/mcp-server-evals/package.json
@@ -15,6 +15,7 @@
"eval:ci": "vitest run --coverage --reporter=vitest-evals/reporter --reporter=junit --reporter=json --outputFile.json=eval-results.json --outputFile.junit=eval.junit.xml"
},
"dependencies": {
+ "@ai-sdk/mcp": "catalog:",
"@ai-sdk/openai": "catalog:",
"@modelcontextprotocol/sdk": "catalog:",
"@sentry/mcp-core": "workspace:*",
diff --git a/packages/mcp-server-evals/src/evals/utils/toolPredictionScorer.ts b/packages/mcp-server-evals/src/evals/utils/toolPredictionScorer.ts
index cf45d39e..dcfaf1bb 100644
--- a/packages/mcp-server-evals/src/evals/utils/toolPredictionScorer.ts
+++ b/packages/mcp-server-evals/src/evals/utils/toolPredictionScorer.ts
@@ -1,8 +1,8 @@
import { openai } from "@ai-sdk/openai";
import { generateObject, type LanguageModel } from "ai";
import { z } from "zod";
-import { experimental_createMCPClient } from "ai";
-import { Experimental_StdioMCPTransport } from "ai/mcp-stdio";
+import { experimental_createMCPClient } from "@ai-sdk/mcp";
+import { Experimental_StdioMCPTransport } from "@ai-sdk/mcp/mcp-stdio";
// Cache for available tools to avoid reconnecting for each test
let cachedTools: string[] | null = null;
diff --git a/packages/mcp-test-client/package.json b/packages/mcp-test-client/package.json
index be6a4c17..180ac410 100644
--- a/packages/mcp-test-client/package.json
+++ b/packages/mcp-test-client/package.json
@@ -17,6 +17,7 @@
"typecheck": "tsc --noEmit"
},
"dependencies": {
+ "@ai-sdk/mcp": "catalog:",
"@ai-sdk/openai": "catalog:",
"@modelcontextprotocol/sdk": "catalog:",
"@sentry/core": "catalog:",
diff --git a/packages/mcp-test-client/src/agent.ts b/packages/mcp-test-client/src/agent.ts
index e6b18574..de0b22d8 100644
--- a/packages/mcp-test-client/src/agent.ts
+++ b/packages/mcp-test-client/src/agent.ts
@@ -1,5 +1,5 @@
import { openai } from "@ai-sdk/openai";
-import { streamText } from "ai";
+import { streamText, stepCountIs } from "ai";
import { startNewTrace, startSpan } from "@sentry/core";
import type { MCPConnection } from "./types.js";
import { DEFAULT_MODEL } from "./constants.js";
@@ -73,7 +73,7 @@ export async function runAgent(
system: SYSTEM_PROMPT,
messages: [{ role: "user", content: userPrompt }],
tools,
- maxSteps,
+ stopWhen: stepCountIs(maxSteps),
experimental_telemetry: {
isEnabled: true,
},
@@ -90,7 +90,7 @@ export async function runAgent(
const toolCall = toolCalls[i];
const toolResult = toolResults?.[i];
- logTool(toolCall.toolName, toolCall.args);
+ logTool(toolCall.toolName, toolCall.input);
// Show the actual tool result if available
if (toolResult?.result) {
diff --git a/packages/mcp-test-client/src/mcp-test-client-remote.ts b/packages/mcp-test-client/src/mcp-test-client-remote.ts
index 126e5ccd..8ee394a8 100644
--- a/packages/mcp-test-client/src/mcp-test-client-remote.ts
+++ b/packages/mcp-test-client/src/mcp-test-client-remote.ts
@@ -1,4 +1,4 @@
-import { experimental_createMCPClient } from "ai";
+import { experimental_createMCPClient } from "@ai-sdk/mcp";
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
import { startNewTrace, startSpan } from "@sentry/core";
import { OAuthClient } from "./auth/oauth.js";
diff --git a/packages/mcp-test-client/src/mcp-test-client.ts b/packages/mcp-test-client/src/mcp-test-client.ts
index b6edfc88..e7e22019 100644
--- a/packages/mcp-test-client/src/mcp-test-client.ts
+++ b/packages/mcp-test-client/src/mcp-test-client.ts
@@ -1,5 +1,5 @@
-import { experimental_createMCPClient } from "ai";
-import { Experimental_StdioMCPTransport } from "ai/mcp-stdio";
+import { experimental_createMCPClient } from "@ai-sdk/mcp";
+import { Experimental_StdioMCPTransport } from "@ai-sdk/mcp/mcp-stdio";
import { fileURLToPath } from "node:url";
import { dirname, join } from "node:path";
import { startNewTrace, startSpan } from "@sentry/core";
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index df449dd7..de110f35 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -7,14 +7,17 @@ settings:
catalogs:
default:
'@ai-sdk/anthropic':
- specifier: ^1.2.12
- version: 1.2.12
+ specifier: ^3.0.33
+ version: 3.0.33
+ '@ai-sdk/mcp':
+ specifier: ^1.0.16
+ version: 1.0.16
'@ai-sdk/openai':
- specifier: ^1.3.22
- version: 1.3.22
+ specifier: ^3.0.23
+ version: 3.0.23
'@ai-sdk/react':
- specifier: ^1.2.12
- version: 1.2.12
+ specifier: ^3.0.66
+ version: 3.0.66
'@biomejs/biome':
specifier: ^1.9.4
version: 1.9.4
@@ -24,9 +27,6 @@ catalogs:
'@cloudflare/workers-types':
specifier: ^4.20251014.0
version: 4.20251014.0
- '@modelcontextprotocol/sdk':
- specifier: ^1.21.0
- version: 1.22.0
'@radix-ui/react-accordion':
specifier: ^1.2.11
version: 1.2.11
@@ -70,11 +70,11 @@ catalogs:
specifier: ^3.2.4
version: 3.2.4
agents:
- specifier: ^0.2.23
- version: 0.2.23
+ specifier: ^0.3.6
+ version: 0.3.6
ai:
- specifier: ^4.3.16
- version: 4.3.16
+ specifier: ^6.0.64
+ version: 6.0.64
better-sqlite3:
specifier: ^11.10.0
version: 11.10.0
@@ -166,6 +166,9 @@ catalogs:
specifier: ^3.24.6
version: 3.25.0
+overrides:
+ '@modelcontextprotocol/sdk': ^1.25.3
+
importers:
.:
@@ -216,18 +219,21 @@ importers:
packages/mcp-cloudflare:
dependencies:
+ '@ai-sdk/mcp':
+ specifier: 'catalog:'
+ version: 1.0.16(zod@3.25.76)
'@ai-sdk/openai':
specifier: 'catalog:'
- version: 1.3.22(zod@3.25.76)
+ version: 3.0.23(zod@3.25.76)
'@ai-sdk/react':
specifier: 'catalog:'
- version: 1.2.12(react@19.1.0)(zod@3.25.76)
+ version: 3.0.66(react@19.1.0)(zod@3.25.76)
'@cloudflare/workers-oauth-provider':
specifier: 'catalog:'
version: 0.0.12
'@modelcontextprotocol/sdk':
- specifier: 'catalog:'
- version: 1.22.0(@cfworker/json-schema@4.1.1)
+ specifier: ^1.25.3
+ version: 1.25.3(@cfworker/json-schema@4.1.1)(hono@4.11.4)(zod@3.25.76)
'@radix-ui/react-accordion':
specifier: 'catalog:'
version: 1.2.11(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
@@ -242,10 +248,10 @@ importers:
version: 10.35.0(react@19.1.0)
agents:
specifier: 'catalog:'
- version: 0.2.23(@cloudflare/workers-types@4.20251014.0)(react@19.1.0)(typescript@5.8.3)
+ version: 0.3.6(@ai-sdk/openai@3.0.23(zod@3.25.76))(@ai-sdk/react@3.0.66(react@19.1.0)(zod@3.25.76))(@cloudflare/ai-chat@0.0.4)(@cloudflare/codemode@0.0.5)(@cloudflare/workers-types@4.20251014.0)(ai@6.0.64(zod@3.25.76))(hono@4.11.4)(react@19.1.0)(zod@3.25.76)
ai:
specifier: 'catalog:'
- version: 4.3.16(react@19.1.0)(zod@3.25.76)
+ version: 6.0.64(zod@3.25.76)
asciinema-player:
specifier: ^3.10.0
version: 3.12.1
@@ -290,7 +296,7 @@ importers:
version: 1.3.5
workers-mcp:
specifier: 'catalog:'
- version: 0.1.0-3(@cfworker/json-schema@4.1.1)
+ version: 0.1.0-3(@cfworker/json-schema@4.1.1)(zod@3.25.76)
zod:
specifier: 'catalog:'
version: 3.25.76
@@ -351,10 +357,13 @@ importers:
dependencies:
'@ai-sdk/anthropic':
specifier: 'catalog:'
- version: 1.2.12(zod@3.25.76)
+ version: 3.0.33(zod@3.25.76)
+ '@ai-sdk/mcp':
+ specifier: 'catalog:'
+ version: 1.0.16(zod@3.25.76)
'@ai-sdk/openai':
specifier: 'catalog:'
- version: 1.3.22(zod@3.25.76)
+ version: 3.0.23(zod@3.25.76)
'@logtape/logtape':
specifier: ^1.1.1
version: 1.1.1
@@ -362,14 +371,14 @@ importers:
specifier: ^1.1.1
version: 1.1.1(@logtape/logtape@1.1.1)
'@modelcontextprotocol/sdk':
- specifier: 'catalog:'
- version: 1.22.0(@cfworker/json-schema@4.1.1)
+ specifier: ^1.25.3
+ version: 1.25.3(@cfworker/json-schema@4.1.1)(hono@4.11.4)(zod@3.25.76)
'@sentry/core':
specifier: 'catalog:'
version: 10.35.0
ai:
specifier: 'catalog:'
- version: 4.3.16(react@19.1.0)(zod@3.25.76)
+ version: 6.0.64(zod@3.25.76)
dotenv:
specifier: 'catalog:'
version: 16.6.1
@@ -377,6 +386,12 @@ importers:
specifier: 'catalog:'
version: 3.25.76
devDependencies:
+ '@ai-sdk/provider':
+ specifier: ^3.0.6
+ version: 3.0.6
+ '@ai-sdk/provider-utils':
+ specifier: ^4.0.11
+ version: 4.0.11(zod@3.25.76)
'@sentry/mcp-server-mocks':
specifier: workspace:*
version: link:../mcp-server-mocks
@@ -399,8 +414,8 @@ importers:
packages/mcp-server:
dependencies:
'@modelcontextprotocol/sdk':
- specifier: 'catalog:'
- version: 1.22.0(@cfworker/json-schema@4.1.1)
+ specifier: ^1.25.3
+ version: 1.25.3(@cfworker/json-schema@4.1.1)(hono@4.11.4)(zod@3.25.76)
'@sentry/core':
specifier: 'catalog:'
version: 10.35.0
@@ -441,12 +456,15 @@ importers:
packages/mcp-server-evals:
dependencies:
+ '@ai-sdk/mcp':
+ specifier: 'catalog:'
+ version: 1.0.16(zod@3.25.76)
'@ai-sdk/openai':
specifier: 'catalog:'
- version: 1.3.22(zod@3.25.76)
+ version: 3.0.23(zod@3.25.76)
'@modelcontextprotocol/sdk':
- specifier: 'catalog:'
- version: 1.22.0(@cfworker/json-schema@4.1.1)
+ specifier: ^1.25.3
+ version: 1.25.3(@cfworker/json-schema@4.1.1)(hono@4.11.4)(zod@3.25.76)
'@sentry/mcp-core':
specifier: workspace:*
version: link:../mcp-core
@@ -461,7 +479,7 @@ importers:
version: link:../mcp-server-tsconfig
ai:
specifier: 'catalog:'
- version: 4.3.16(react@19.1.0)(zod@3.25.76)
+ version: 6.0.64(zod@3.25.76)
dotenv:
specifier: 'catalog:'
version: 16.6.1
@@ -498,12 +516,15 @@ importers:
packages/mcp-test-client:
dependencies:
+ '@ai-sdk/mcp':
+ specifier: 'catalog:'
+ version: 1.0.16(zod@3.25.76)
'@ai-sdk/openai':
specifier: 'catalog:'
- version: 1.3.22(zod@3.25.76)
+ version: 3.0.23(zod@3.25.76)
'@modelcontextprotocol/sdk':
- specifier: 'catalog:'
- version: 1.22.0(@cfworker/json-schema@4.1.1)
+ specifier: ^1.25.3
+ version: 1.25.3(@cfworker/json-schema@4.1.1)(hono@4.11.4)(zod@3.25.76)
'@sentry/core':
specifier: 'catalog:'
version: 10.35.0
@@ -515,7 +536,7 @@ importers:
version: 10.35.0
ai:
specifier: 'catalog:'
- version: 4.3.16(react@19.1.0)(zod@3.25.76)
+ version: 6.0.64(zod@3.25.76)
chalk:
specifier: 'catalog:'
version: 5.4.1
@@ -553,65 +574,45 @@ importers:
packages:
- '@ai-sdk/anthropic@1.2.12':
- resolution: {integrity: sha512-YSzjlko7JvuiyQFmI9RN1tNZdEiZxc+6xld/0tq/VkJaHpEzGAb1yiNxxvmYVcjvfu/PcvCxAAYXmTYQQ63IHQ==}
- engines: {node: '>=18'}
- peerDependencies:
- zod: ^3.0.0
-
- '@ai-sdk/gateway@2.0.7':
- resolution: {integrity: sha512-/AI5AKi4vOK9SEb8Z1dfXkhsJ5NAfWsoJQc96B/mzn2KIrjw5occOjIwD06scuhV9xWlghCoXJT1sQD9QH/tyg==}
+ '@ai-sdk/anthropic@3.0.33':
+ resolution: {integrity: sha512-MvQzipOAK99Ct3nX6d8oz2AJ9c8kbiU2uSpGYK3F5oBBa8Gy04+LbYBoB9oH8bbZJxFafkgwBq1+k8xpU7vVxQ==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.25.76 || ^4.1.8
- '@ai-sdk/openai@1.3.22':
- resolution: {integrity: sha512-QwA+2EkG0QyjVR+7h6FE7iOu2ivNqAVMm9UJZkVxxTk5OIq5fFJDTEI/zICEMuHImTTXR2JjsL6EirJ28Jc4cw==}
+ '@ai-sdk/gateway@3.0.29':
+ resolution: {integrity: sha512-zf6yXT+7DcVGWG7ntxVCYC48X/opsWlO5ePvgH8W9DaEVUtkemqKUEzBqowQ778PkZo8sqMnRfD0+fi9HamRRQ==}
engines: {node: '>=18'}
peerDependencies:
- zod: ^3.0.0
+ zod: ^3.25.76 || ^4.1.8
- '@ai-sdk/openai@2.0.64':
- resolution: {integrity: sha512-+1mqxn42uB32DPZ6kurSyGAmL3MgCaDpkYU7zNDWI4NLy3Zg97RxTsI1jBCGIqkEVvRZKJlIMYtb89OvMnq3AQ==}
+ '@ai-sdk/mcp@1.0.16':
+ resolution: {integrity: sha512-4doH7H4o7N4UQZy4+yzGZg+Lzl71wveKvkJE8wux1nOG9BRUghQALo/P3OtyPMLuLgMODVcUzsex5fcRAZr8Yg==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.25.76 || ^4.1.8
- '@ai-sdk/provider-utils@2.2.8':
- resolution: {integrity: sha512-fqhG+4sCVv8x7nFzYnFo19ryhAa3w096Kmc3hWxMQfW/TubPOmt3A6tYZhl4mUfQWWQMsuSkLrtjlWuXBVSGQA==}
+ '@ai-sdk/openai@3.0.23':
+ resolution: {integrity: sha512-vFfFadJH+hbrgI4lhC9H/r8qPzuFJFUwZNS8oMI8KujO/woovbE1EWOOGMRGtNVL8PrhhxBfgJzvOKdux3c1gw==}
engines: {node: '>=18'}
peerDependencies:
- zod: ^3.23.8
+ zod: ^3.25.76 || ^4.1.8
- '@ai-sdk/provider-utils@3.0.16':
- resolution: {integrity: sha512-lsWQY9aDXHitw7C1QRYIbVGmgwyT98TF3MfM8alNIXKpdJdi+W782Rzd9f1RyOfgRmZ08gJ2EYNDhWNK7RqpEA==}
+ '@ai-sdk/provider-utils@4.0.11':
+ resolution: {integrity: sha512-y/WOPpcZaBjvNaogy83mBsCRPvbtaK0y1sY9ckRrrbTGMvG2HC/9Y/huqNXKnLAxUIME2PGa2uvF2CDwIsxoXQ==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.25.76 || ^4.1.8
- '@ai-sdk/provider@1.1.3':
- resolution: {integrity: sha512-qZMxYJ0qqX/RfnuIaab+zp8UAeJn/ygXXAffR5I4N0n1IrvA6qBsjc8hXLmBiMV2zoXlifkacF7sEFnYnjBcqg==}
- engines: {node: '>=18'}
-
- '@ai-sdk/provider@2.0.0':
- resolution: {integrity: sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA==}
- engines: {node: '>=18'}
-
- '@ai-sdk/react@1.2.12':
- resolution: {integrity: sha512-jK1IZZ22evPZoQW3vlkZ7wvjYGYF+tRBKXtrcolduIkQ/m/sOAVcVeVDUDvh1T91xCnWCdUGCPZg2avZ90mv3g==}
+ '@ai-sdk/provider@3.0.6':
+ resolution: {integrity: sha512-hSfoJtLtpMd7YxKM+iTqlJ0ZB+kJ83WESMiWuWrNVey3X8gg97x0OdAAaeAeclZByCX3UdPOTqhvJdK8qYA3ww==}
engines: {node: '>=18'}
- peerDependencies:
- react: ^18 || ^19 || ^19.0.0-rc
- zod: ^3.23.8
- peerDependenciesMeta:
- zod:
- optional: true
- '@ai-sdk/ui-utils@1.2.11':
- resolution: {integrity: sha512-3zcwCc8ezzFlwp3ZD15wAPjf2Au4s3vAbKsXQVyhxODHcmu0iyPO2Eua6D/vicq/AUm/BAo60r97O6HU+EI0+w==}
+ '@ai-sdk/react@3.0.66':
+ resolution: {integrity: sha512-bYxfXaNErVDiUaNlvNXaX+3oKLAeEyHiReJd54i+JTD3HEgRuazHfggzL0MidPnFJmlSZDHRRTGXxhMYh426QA==}
engines: {node: '>=18'}
peerDependencies:
- zod: ^3.23.8
+ react: ^18 || ~19.0.1 || ~19.1.2 || ^19.2.1
'@ampproject/remapping@2.3.0':
resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==}
@@ -797,6 +798,21 @@ packages:
'@clack/prompts@0.8.2':
resolution: {integrity: sha512-6b9Ab2UiZwJYA9iMyboYyW9yJvAO9V753ZhS+DHKEjZRKAxPPOb7MXXu84lsPFG+vZt6FRFniZ8rXi+zCIw4yQ==}
+ '@cloudflare/ai-chat@0.0.4':
+ resolution: {integrity: sha512-NGQRt34X/UI+mx9fss7LmTTNBDVlFrtu+7JFoaykLUI738w9gjDplsMbOenCRbSr7UW4ngHGnv3YWUmV99eEnQ==}
+ peerDependencies:
+ agents: ^0.3.4
+ ai: ^6.0.0
+ react: ^19.0.0
+ zod: ^3.25.0 || ^4.0.0
+
+ '@cloudflare/codemode@0.0.5':
+ resolution: {integrity: sha512-00KNtk0tJBkhJ+DdfDqCqWNDyQ9O3pGOrMZSutXF0MQDCjFIjLO3ZDZ5c4FlMmhsysvcj8TyJYzetaJAY4H+cA==}
+ peerDependencies:
+ agents: ^0.3.6
+ ai: ^6.0.0
+ zod: ^3.25.0 || ^4.0.0
+
'@cloudflare/kv-asset-handler@0.4.0':
resolution: {integrity: sha512-+tv3z+SPp+gqTIcImN9o0hqE9xyfQjI1XD9pL6NuKjua9B1y7mNYv0S9cP+QEbA4ppVgGZEmKOvHX5G5Ei1CVA==}
engines: {node: '>=18.0.0'}
@@ -1200,6 +1216,12 @@ packages:
cpu: [x64]
os: [win32]
+ '@hono/node-server@1.19.9':
+ resolution: {integrity: sha512-vHL6w3ecZsky+8P5MD+eFfaGTyCeOHUIFYMGpQGbrBTSmNNoxv0if69rEZ5giu36weC5saFuznL411gRX7bJDw==}
+ engines: {node: '>=18.14.1'}
+ peerDependencies:
+ hono: ^4
+
'@img/sharp-darwin-arm64@0.33.5':
resolution: {integrity: sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ==}
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
@@ -1379,11 +1401,12 @@ packages:
peerDependencies:
'@logtape/logtape': ^1.1.1
- '@modelcontextprotocol/sdk@1.22.0':
- resolution: {integrity: sha512-VUpl106XVTCpDmTBil2ehgJZjhyLY2QZikzF8NvTXtLRF1CvO5iEE2UNZdVIUer35vFOwMKYeUGbjJtvPWan3g==}
+ '@modelcontextprotocol/sdk@1.25.3':
+ resolution: {integrity: sha512-vsAMBMERybvYgKbg/l4L1rhS7VXV1c0CtyJg72vwxONVX0l4ZfKVAnZEWTQixJGTzKnELjQ59e4NbdFDALRiAQ==}
engines: {node: '>=18'}
peerDependencies:
'@cfworker/json-schema': ^4.1.1
+ zod: ^3.25 || ^4.0
peerDependenciesMeta:
'@cfworker/json-schema':
optional: true
@@ -2096,8 +2119,8 @@ packages:
'@speed-highlight/core@1.2.7':
resolution: {integrity: sha512-0dxmVj4gxg3Jg879kvFS/msl4s9F3T9UXC1InxgOf7t5NvcPD97u/WTA5vL/IxWHMn7qSxBozqrnnE2wvl1m8g==}
- '@standard-schema/spec@1.0.0':
- resolution: {integrity: sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==}
+ '@standard-schema/spec@1.1.0':
+ resolution: {integrity: sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==}
'@tailwindcss/node@4.1.11':
resolution: {integrity: sha512-yzhzuGRmv5QyU9qLNg4GTlYI6STedBWRE7NjxP45CsFYYq9taI0zJXZBMqIC/c8fViNLhmrbpSFS57EoxUmD6Q==}
@@ -2224,9 +2247,6 @@ packages:
'@types/deep-eql@4.0.2':
resolution: {integrity: sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==}
- '@types/diff-match-patch@1.0.36':
- resolution: {integrity: sha512-xFdR6tkm0MWvBfO8xXCSsinYxHcqkQUlcHeSpMC2ukzOb6lwQAfDmW+Qt0AvlGd8HpsS28qKsB+oPeJn9I39jg==}
-
'@types/estree-jsx@1.0.5':
resolution: {integrity: sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==}
@@ -2304,8 +2324,8 @@ packages:
'@ungap/structured-clone@1.3.0':
resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==}
- '@vercel/oidc@3.0.3':
- resolution: {integrity: sha512-yNEQvPcVrK9sIe637+I0jD6leluPxzwJKx/Haw6F4H77CdDsszUn5V3o96LPziXkSNE2B83+Z3mjqGKBK/R6Gg==}
+ '@vercel/oidc@3.1.0':
+ resolution: {integrity: sha512-Fw28YZpRnA3cAHHDlkt7xQHiJ0fcL+NRcIqsocZQUSmbzeIKRpwttJjik5ZGanXP+vlA4SbTg+AbA3bP363l+w==}
engines: {node: '>= 20'}
'@vitejs/plugin-react@4.6.0':
@@ -2379,31 +2399,31 @@ packages:
resolution: {integrity: sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==}
engines: {node: '>= 6.0.0'}
- agents@0.2.23:
- resolution: {integrity: sha512-YctplbuIuLocBc/uwFDD+pFk0QMhFf3SZXcl3pkgLYvSL2y1KKC6vpJyWTPu8pqzayspfys9RmEtxzfTtvcDUg==}
+ agents@0.3.6:
+ resolution: {integrity: sha512-gJtXDGV2jhPI/WzZOYAM5GJleOq7U2o7fnenE89RJ3Y5klm29O1Pk4sQoLVASO2sTucPNtJRMztQCGL2352B+g==}
hasBin: true
peerDependencies:
- react: '*'
+ '@ai-sdk/openai': ^3.0.0
+ '@ai-sdk/react': ^3.0.0
+ '@cloudflare/ai-chat': ^0.0.4
+ '@cloudflare/codemode': ^0.0.5
+ ai: ^6.0.0
+ react: ^19.0.0
viem: '>=2.0.0'
x402: ^0.7.1
+ zod: ^3.25.0 || ^4.0.0
peerDependenciesMeta:
+ '@ai-sdk/openai':
+ optional: true
+ '@ai-sdk/react':
+ optional: true
viem:
optional: true
x402:
optional: true
- ai@4.3.16:
- resolution: {integrity: sha512-KUDwlThJ5tr2Vw0A1ZkbDKNME3wzWhuVfAOwIvFUzl1TPVDFAXDFTXio3p+jaKneB+dKNCvFFlolYmmgHttG1g==}
- engines: {node: '>=18'}
- peerDependencies:
- react: ^18 || ^19 || ^19.0.0-rc
- zod: ^3.23.8
- peerDependenciesMeta:
- react:
- optional: true
-
- ai@5.0.89:
- resolution: {integrity: sha512-8Nq+ZojGacQrupoJEQLrTDzT5VtR3gyp5AaqFSV3tzsAXlYQ9Igb7QE3yeoEdzOk5IRfDwWL7mDCUD+oBg1hDA==}
+ ai@6.0.64:
+ resolution: {integrity: sha512-eH+6FC2THf0rXamPOBAPGNBuMvqv4gE5+IKoWVBrO1TkrjwRiuP3+P6yl7/HcXzN9JLbVe3lL8yaTIfn0ZwvxQ==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.25.76 || ^4.1.8
@@ -2716,9 +2736,9 @@ packages:
resolution: {integrity: sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==}
engines: {node: '>=10'}
- cron-schedule@5.0.4:
- resolution: {integrity: sha512-nH0a49E/kSVk6BeFgKZy4uUsy6D2A16p120h5bYD9ILBhQu7o2sJFH+WI4R731TSBQ0dB1Ik7inB/dRAB4C8QQ==}
- engines: {node: '>=18'}
+ cron-schedule@6.0.0:
+ resolution: {integrity: sha512-BoZaseYGXOo5j5HUwTaegIog3JJbuH4BbrY9A1ArLjXpy+RWb3mV28F/9Gv1dDA7E2L8kngWva4NWisnLTyfgQ==}
+ engines: {node: '>=20'}
cross-spawn@7.0.6:
resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==}
@@ -2790,9 +2810,6 @@ packages:
devlop@1.1.0:
resolution: {integrity: sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==}
- diff-match-patch@1.0.5:
- resolution: {integrity: sha512-IayShXAgj/QMXgB0IWmKx+rOPuGMhqm5w6jvFxmVenXKIzRqTAAsbBPT3kWQeGANj3jGgvcvv4yK6SxqYmikgw==}
-
diff@8.0.2:
resolution: {integrity: sha512-sSuxWU5j5SR9QQji/o2qMvqRNYRDOcBTgsJ/DeCf4iSN4gW+gNMXM7wFIP+fdXZxoNiAnHUTGjCr+TSWXdRDKg==}
engines: {node: '>=0.3.1'}
@@ -3285,6 +3302,9 @@ packages:
resolution: {integrity: sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A==}
hasBin: true
+ jose@6.1.3:
+ resolution: {integrity: sha512-0TpaTfihd4QMNwrz/ob2Bp7X04yuxJkjRGi4aKmOqwhov54i6u79oCv7T+C7lo70MKH6BesI3vscD1yb/yzKXQ==}
+
js-base64@3.7.8:
resolution: {integrity: sha512-hNngCeKxIUQiEUN3GPJOkz4wF/YvdUdbNL9hsBcMQTkKzboD7T/q3OYOuuPZLUE6dBxSGpwhk5mwuDud7JVAow==}
@@ -3331,6 +3351,9 @@ packages:
json-schema-traverse@1.0.0:
resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==}
+ json-schema-typed@8.0.2:
+ resolution: {integrity: sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA==}
+
json-schema@0.4.0:
resolution: {integrity: sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==}
@@ -3339,11 +3362,6 @@ packages:
engines: {node: '>=6'}
hasBin: true
- jsondiffpatch@0.6.0:
- resolution: {integrity: sha512-3QItJOXp2AP1uv7waBkao5nCvhEv+QmJAd38Ybq7wNI74Q+BBmnLn4EDKz6yI9xGAIQoUF87qHt+kc1IVxB4zQ==}
- engines: {node: ^18.0.0 || >=20.0.0}
- hasBin: true
-
jsonfile@6.1.0:
resolution: {integrity: sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==}
@@ -3877,13 +3895,13 @@ packages:
resolution: {integrity: sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==}
engines: {node: '>= 0.8'}
- partyserver@0.0.75:
- resolution: {integrity: sha512-i/18vvdxuGjx+rpQ+fDdExlvQoRb7EfTF+6b+kA2ILEpHemtpLWV8NdgDrOPEklRNdCc/4WlzDtYn05d17aZAQ==}
+ partyserver@0.1.1:
+ resolution: {integrity: sha512-g23st+jmE9UgfmDW/EYvUMsyEXqdtRdsOGFkDkWopxYeY6IrW7EbGJ63Nxz8K27kWY4fbr/WWFuY+ocfa/TiDA==}
peerDependencies:
'@cloudflare/workers-types': ^4.20240729.0
- partysocket@1.1.6:
- resolution: {integrity: sha512-LkEk8N9hMDDsDT0iDK0zuwUDFVrVMUXFXCeN3850Ng8wtjPqPBeJlwdeY6ROlJSEh3tPoTTasXoSBYH76y118w==}
+ partysocket@1.1.10:
+ resolution: {integrity: sha512-ACfn0P6lQuj8/AqB4L5ZDFcIEbpnIteNNObrlxqV1Ge80GTGhjuJ2sNKwNQlFzhGi4kI7fP/C1Eqh8TR78HjDQ==}
path-exists@4.0.0:
resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==}
@@ -4196,9 +4214,6 @@ packages:
scheduler@0.26.0:
resolution: {integrity: sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA==}
- secure-json-parse@2.7.0:
- resolution: {integrity: sha512-6aU+Rwsezw7VR8/nyvKTx8QpWH9FrcYiXXlqC4z5d5XQBDRqtbfsRjnwGyqbi3gddNtWHuEk9OANUotL26qKUw==}
-
semver@6.3.1:
resolution: {integrity: sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==}
hasBin: true
@@ -4915,69 +4930,52 @@ packages:
snapshots:
- '@ai-sdk/anthropic@1.2.12(zod@3.25.76)':
- dependencies:
- '@ai-sdk/provider': 1.1.3
- '@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
- zod: 3.25.76
-
- '@ai-sdk/gateway@2.0.7(zod@3.25.76)':
+ '@ai-sdk/anthropic@3.0.33(zod@3.25.76)':
dependencies:
- '@ai-sdk/provider': 2.0.0
- '@ai-sdk/provider-utils': 3.0.16(zod@3.25.76)
- '@vercel/oidc': 3.0.3
+ '@ai-sdk/provider': 3.0.6
+ '@ai-sdk/provider-utils': 4.0.11(zod@3.25.76)
zod: 3.25.76
- '@ai-sdk/openai@1.3.22(zod@3.25.76)':
+ '@ai-sdk/gateway@3.0.29(zod@3.25.76)':
dependencies:
- '@ai-sdk/provider': 1.1.3
- '@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
+ '@ai-sdk/provider': 3.0.6
+ '@ai-sdk/provider-utils': 4.0.11(zod@3.25.76)
+ '@vercel/oidc': 3.1.0
zod: 3.25.76
- '@ai-sdk/openai@2.0.64(zod@3.25.76)':
+ '@ai-sdk/mcp@1.0.16(zod@3.25.76)':
dependencies:
- '@ai-sdk/provider': 2.0.0
- '@ai-sdk/provider-utils': 3.0.16(zod@3.25.76)
+ '@ai-sdk/provider': 3.0.6
+ '@ai-sdk/provider-utils': 4.0.11(zod@3.25.76)
+ pkce-challenge: 5.0.0
zod: 3.25.76
- '@ai-sdk/provider-utils@2.2.8(zod@3.25.76)':
+ '@ai-sdk/openai@3.0.23(zod@3.25.76)':
dependencies:
- '@ai-sdk/provider': 1.1.3
- nanoid: 3.3.11
- secure-json-parse: 2.7.0
+ '@ai-sdk/provider': 3.0.6
+ '@ai-sdk/provider-utils': 4.0.11(zod@3.25.76)
zod: 3.25.76
- '@ai-sdk/provider-utils@3.0.16(zod@3.25.76)':
+ '@ai-sdk/provider-utils@4.0.11(zod@3.25.76)':
dependencies:
- '@ai-sdk/provider': 2.0.0
- '@standard-schema/spec': 1.0.0
+ '@ai-sdk/provider': 3.0.6
+ '@standard-schema/spec': 1.1.0
eventsource-parser: 3.0.6
zod: 3.25.76
- '@ai-sdk/provider@1.1.3':
- dependencies:
- json-schema: 0.4.0
-
- '@ai-sdk/provider@2.0.0':
+ '@ai-sdk/provider@3.0.6':
dependencies:
json-schema: 0.4.0
- '@ai-sdk/react@1.2.12(react@19.1.0)(zod@3.25.76)':
+ '@ai-sdk/react@3.0.66(react@19.1.0)(zod@3.25.76)':
dependencies:
- '@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
- '@ai-sdk/ui-utils': 1.2.11(zod@3.25.76)
+ '@ai-sdk/provider-utils': 4.0.11(zod@3.25.76)
+ ai: 6.0.64(zod@3.25.76)
react: 19.1.0
swr: 2.3.4(react@19.1.0)
throttleit: 2.1.0
- optionalDependencies:
- zod: 3.25.76
-
- '@ai-sdk/ui-utils@1.2.11(zod@3.25.76)':
- dependencies:
- '@ai-sdk/provider': 1.1.3
- '@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
- zod: 3.25.76
- zod-to-json-schema: 3.25.0(zod@3.25.76)
+ transitivePeerDependencies:
+ - zod
'@ampproject/remapping@2.3.0':
dependencies:
@@ -5185,6 +5183,22 @@ snapshots:
picocolors: 1.1.1
sisteransi: 1.0.5
+ '@cloudflare/ai-chat@0.0.4(agents@0.3.6)(ai@6.0.64(zod@3.25.76))(react@19.1.0)(zod@3.25.76)':
+ dependencies:
+ agents: 0.3.6(@ai-sdk/openai@3.0.23(zod@3.25.76))(@ai-sdk/react@3.0.66(react@19.1.0)(zod@3.25.76))(@cloudflare/ai-chat@0.0.4)(@cloudflare/codemode@0.0.5)(@cloudflare/workers-types@4.20251014.0)(ai@6.0.64(zod@3.25.76))(hono@4.11.4)(react@19.1.0)(zod@3.25.76)
+ ai: 6.0.64(zod@3.25.76)
+ react: 19.1.0
+ zod: 3.25.76
+
+ '@cloudflare/codemode@0.0.5(agents@0.3.6)(ai@6.0.64(zod@3.25.76))(typescript@5.8.3)(zod@3.25.76)':
+ dependencies:
+ agents: 0.3.6(@ai-sdk/openai@3.0.23(zod@3.25.76))(@ai-sdk/react@3.0.66(react@19.1.0)(zod@3.25.76))(@cloudflare/ai-chat@0.0.4)(@cloudflare/codemode@0.0.5)(@cloudflare/workers-types@4.20251014.0)(ai@6.0.64(zod@3.25.76))(hono@4.11.4)(react@19.1.0)(zod@3.25.76)
+ ai: 6.0.64(zod@3.25.76)
+ zod: 3.25.76
+ zod-to-ts: 2.0.0(typescript@5.8.3)(zod@3.25.76)
+ transitivePeerDependencies:
+ - typescript
+
'@cloudflare/kv-asset-handler@0.4.0':
dependencies:
mime: 3.0.0
@@ -5457,6 +5471,10 @@ snapshots:
'@esbuild/win32-x64@0.25.5':
optional: true
+ '@hono/node-server@1.19.9(hono@4.11.4)':
+ dependencies:
+ hono: 4.11.4
+
'@img/sharp-darwin-arm64@0.33.5':
optionalDependencies:
'@img/sharp-libvips-darwin-arm64': 1.0.4
@@ -5632,8 +5650,9 @@ snapshots:
'@logtape/logtape': 1.1.1
'@sentry/core': 9.34.0
- '@modelcontextprotocol/sdk@1.22.0(@cfworker/json-schema@4.1.1)':
+ '@modelcontextprotocol/sdk@1.25.3(@cfworker/json-schema@4.1.1)(hono@4.11.4)(zod@3.25.76)':
dependencies:
+ '@hono/node-server': 1.19.9(hono@4.11.4)
ajv: 8.17.1
ajv-formats: 3.0.1(ajv@8.17.1)
content-type: 1.0.5
@@ -5643,6 +5662,8 @@ snapshots:
eventsource-parser: 3.0.6
express: 5.1.0
express-rate-limit: 7.5.1(express@5.1.0)
+ jose: 6.1.3
+ json-schema-typed: 8.0.2
pkce-challenge: 5.0.0
raw-body: 3.0.1
zod: 3.25.76
@@ -5650,6 +5671,7 @@ snapshots:
optionalDependencies:
'@cfworker/json-schema': 4.1.1
transitivePeerDependencies:
+ - hono
- supports-color
'@mswjs/interceptors@0.39.2':
@@ -6368,7 +6390,7 @@ snapshots:
'@speed-highlight/core@1.2.7': {}
- '@standard-schema/spec@1.0.0': {}
+ '@standard-schema/spec@1.1.0': {}
'@tailwindcss/node@4.1.11':
dependencies:
@@ -6491,8 +6513,6 @@ snapshots:
'@types/deep-eql@4.0.2': {}
- '@types/diff-match-patch@1.0.36': {}
-
'@types/estree-jsx@1.0.5':
dependencies:
'@types/estree': 1.0.8
@@ -6572,7 +6592,7 @@ snapshots:
'@ungap/structured-clone@1.3.0': {}
- '@vercel/oidc@3.0.3': {}
+ '@vercel/oidc@3.1.0': {}
'@vitejs/plugin-react@4.6.0(vite@6.3.5(@types/node@24.0.10)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.20.3)(yaml@2.8.0))':
dependencies:
@@ -6678,45 +6698,36 @@ snapshots:
transitivePeerDependencies:
- supports-color
- agents@0.2.23(@cloudflare/workers-types@4.20251014.0)(react@19.1.0)(typescript@5.8.3):
+ agents@0.3.6(@ai-sdk/openai@3.0.23(zod@3.25.76))(@ai-sdk/react@3.0.66(react@19.1.0)(zod@3.25.76))(@cloudflare/ai-chat@0.0.4)(@cloudflare/codemode@0.0.5)(@cloudflare/workers-types@4.20251014.0)(ai@6.0.64(zod@3.25.76))(hono@4.11.4)(react@19.1.0)(zod@3.25.76):
dependencies:
- '@ai-sdk/openai': 2.0.64(zod@3.25.76)
'@cfworker/json-schema': 4.1.1
- '@modelcontextprotocol/sdk': 1.22.0(@cfworker/json-schema@4.1.1)
- ai: 5.0.89(zod@3.25.76)
- cron-schedule: 5.0.4
+ '@cloudflare/ai-chat': 0.0.4(agents@0.3.6)(ai@6.0.64(zod@3.25.76))(react@19.1.0)(zod@3.25.76)
+ '@cloudflare/codemode': 0.0.5(agents@0.3.6)(ai@6.0.64(zod@3.25.76))(typescript@5.8.3)(zod@3.25.76)
+ '@modelcontextprotocol/sdk': 1.25.3(@cfworker/json-schema@4.1.1)(hono@4.11.4)(zod@3.25.76)
+ ai: 6.0.64(zod@3.25.76)
+ cron-schedule: 6.0.0
json-schema: 0.4.0
json-schema-to-typescript: 15.0.4
mimetext: 3.0.27
nanoid: 5.1.6
- partyserver: 0.0.75(@cloudflare/workers-types@4.20251014.0)
- partysocket: 1.1.6
+ partyserver: 0.1.1(@cloudflare/workers-types@4.20251014.0)
+ partysocket: 1.1.10
react: 19.1.0
yargs: 18.0.0
zod: 3.25.76
- zod-to-ts: 2.0.0(typescript@5.8.3)(zod@3.25.76)
+ optionalDependencies:
+ '@ai-sdk/openai': 3.0.23(zod@3.25.76)
+ '@ai-sdk/react': 3.0.66(react@19.1.0)(zod@3.25.76)
transitivePeerDependencies:
- '@cloudflare/workers-types'
+ - hono
- supports-color
- - typescript
-
- ai@4.3.16(react@19.1.0)(zod@3.25.76):
- dependencies:
- '@ai-sdk/provider': 1.1.3
- '@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
- '@ai-sdk/react': 1.2.12(react@19.1.0)(zod@3.25.76)
- '@ai-sdk/ui-utils': 1.2.11(zod@3.25.76)
- '@opentelemetry/api': 1.9.0
- jsondiffpatch: 0.6.0
- zod: 3.25.76
- optionalDependencies:
- react: 19.1.0
- ai@5.0.89(zod@3.25.76):
+ ai@6.0.64(zod@3.25.76):
dependencies:
- '@ai-sdk/gateway': 2.0.7(zod@3.25.76)
- '@ai-sdk/provider': 2.0.0
- '@ai-sdk/provider-utils': 3.0.16(zod@3.25.76)
+ '@ai-sdk/gateway': 3.0.29(zod@3.25.76)
+ '@ai-sdk/provider': 3.0.6
+ '@ai-sdk/provider-utils': 4.0.11(zod@3.25.76)
'@opentelemetry/api': 1.9.0
zod: 3.25.76
@@ -7014,7 +7025,7 @@ snapshots:
path-type: 4.0.0
yaml: 1.10.2
- cron-schedule@5.0.4: {}
+ cron-schedule@6.0.0: {}
cross-spawn@7.0.6:
dependencies:
@@ -7065,8 +7076,6 @@ snapshots:
dependencies:
dequal: 2.0.3
- diff-match-patch@1.0.5: {}
-
diff@8.0.2: {}
dotenv-cli@8.0.0:
@@ -7580,6 +7589,8 @@ snapshots:
jiti@2.4.2: {}
+ jose@6.1.3: {}
+
js-base64@3.7.8: {}
js-tokens@4.0.0: {}
@@ -7640,16 +7651,12 @@ snapshots:
json-schema-traverse@1.0.0: {}
+ json-schema-typed@8.0.2: {}
+
json-schema@0.4.0: {}
json5@2.2.3: {}
- jsondiffpatch@0.6.0:
- dependencies:
- '@types/diff-match-patch': 1.0.36
- chalk: 5.4.1
- diff-match-patch: 1.0.5
-
jsonfile@6.1.0:
dependencies:
universalify: 2.0.1
@@ -8400,12 +8407,12 @@ snapshots:
parseurl@1.3.3: {}
- partyserver@0.0.75(@cloudflare/workers-types@4.20251014.0):
+ partyserver@0.1.1(@cloudflare/workers-types@4.20251014.0):
dependencies:
'@cloudflare/workers-types': 4.20251014.0
nanoid: 5.1.6
- partysocket@1.1.6:
+ partysocket@1.1.10:
dependencies:
event-target-polyfill: 0.0.4
@@ -8772,8 +8779,6 @@ snapshots:
scheduler@0.26.0: {}
- secure-json-parse@2.7.0: {}
-
semver@6.3.1: {}
semver@7.7.2: {}
@@ -9478,10 +9483,10 @@ snapshots:
'@cloudflare/workerd-linux-arm64': 1.20251011.0
'@cloudflare/workerd-windows-64': 1.20251011.0
- workers-mcp@0.1.0-3(@cfworker/json-schema@4.1.1):
+ workers-mcp@0.1.0-3(@cfworker/json-schema@4.1.1)(zod@3.25.76):
dependencies:
'@clack/prompts': 0.8.2
- '@modelcontextprotocol/sdk': 1.22.0(@cfworker/json-schema@4.1.1)
+ '@modelcontextprotocol/sdk': 1.25.3(@cfworker/json-schema@4.1.1)(hono@4.11.4)(zod@3.25.76)
'@silvia-odwyer/photon-node': 0.3.4
chalk: 5.4.1
fs-extra: 11.3.0
@@ -9497,6 +9502,7 @@ snapshots:
- '@75lb/nature'
- '@cfworker/json-schema'
- supports-color
+ - zod
wrangler@4.45.0(@cloudflare/workers-types@4.20251014.0):
dependencies:
diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml
index 40107daf..4b684246 100644
--- a/pnpm-workspace.yaml
+++ b/pnpm-workspace.yaml
@@ -2,15 +2,16 @@ packages:
- packages/*
catalog:
- "@ai-sdk/anthropic": ^1.2.12
- "@ai-sdk/openai": ^1.3.22
- "@ai-sdk/react": ^1.2.12
+ "@ai-sdk/anthropic": ^3.0.33
+ "@ai-sdk/mcp": ^1.0.16
+ "@ai-sdk/openai": ^3.0.23
+ "@ai-sdk/react": ^3.0.66
"@biomejs/biome": ^1.9.4
"@cloudflare/vite-plugin": ^1.13.15
"@cloudflare/vitest-pool-workers": ^0.8.47
"@cloudflare/workers-oauth-provider": ^0.0.12
"@cloudflare/workers-types": ^4.20251014.0
- "@modelcontextprotocol/sdk": ^1.21.0
+ "@modelcontextprotocol/sdk": ^1.25.3
"@radix-ui/react-accordion": ^1.2.11
"@radix-ui/react-slot": ^1.2.3
"@sentry/cloudflare": 10.35.0
@@ -25,8 +26,8 @@ catalog:
"@types/react-dom": ^19.1.6
"@vitejs/plugin-react": ^4.6.0
"@vitest/coverage-v8": ^3.2.4
- agents: ^0.2.23
- ai: ^4.3.16
+ agents: ^0.3.6
+ ai: ^6.0.64
better-sqlite3: ^11.10.0
chalk: ^5.4.1
class-variance-authority: ^0.7.1