-        <h3>Chat Model</h3>
+        <div>
+          <h3>{title}</h3>
+          {description && (
+            <p>{description}</p>
+          )}
+        </div>
 
         {requiresApiKey && (
@@ -254,14 +269,14 @@ export function ChatModelWizard({
value={provider}
onChange={(event) => {
const nextProvider = event.target.value;
- updateSettings("chatModel.provider", nextProvider);
- updateSettings("chatModel.model", "");
+ updateSettings(`${settingsKey}.provider`, nextProvider);
+ updateSettings(`${settingsKey}.model`, "");
if (nextProvider === "ollama") {
- updateSettings("chatModel.baseUrl", "http://localhost:11434/v1");
- updateSettings("chatModel.apiKey", "");
+ updateSettings(`${settingsKey}.baseUrl`, "http://localhost:11434/v1");
+ updateSettings(`${settingsKey}.apiKey`, "");
} else {
- updateSettings("chatModel.baseUrl", "");
+ updateSettings(`${settingsKey}.baseUrl`, "");
}
}}
className="w-full rounded-md border bg-background px-3 py-2 text-sm"
@@ -287,7 +302,7 @@ export function ChatModelWizard({
updateSettings("chatModel.apiKey", event.target.value)}
+ onChange={(event) => updateSettings(`${settingsKey}.apiKey`, event.target.value)}
placeholder={
providerConfig?.envKey
? `Enter key or set ${providerConfig.envKey} in .env`
@@ -323,8 +338,8 @@ export function ChatModelWizard({
Base URL
updateSettings("chatModel.baseUrl", event.target.value)}
+ value={modelConfig.baseUrl || ""}
+ onChange={(event) => updateSettings(`${settingsKey}.baseUrl`, event.target.value)}
placeholder={
provider === "ollama"
? "http://localhost:11434/v1"
@@ -349,7 +364,7 @@ export function ChatModelWizard({
loading={loading}
error={error}
disabled={!hasApiKey}
- onChange={(value) => updateSettings("chatModel.model", value)}
+ onChange={(value) => updateSettings(`${settingsKey}.model`, value)}
placeholder="Select model..."
/>
@@ -367,9 +382,9 @@ export function ChatModelWizard({
step="0.1"
min="0"
max="2"
- value={settings.chatModel.temperature || 0.7}
+ value={modelConfig.temperature || 0.7}
onChange={(event) =>
- updateSettings("chatModel.temperature", parseFloat(event.target.value))
+ updateSettings(`${settingsKey}.temperature`, parseFloat(event.target.value))
}
disabled={!model}
className="max-w-[120px]"
@@ -379,6 +394,64 @@ export function ChatModelWizard({
);
}
+// ---------------------------------------------------------------------------
+// Exported wizards
+// ---------------------------------------------------------------------------
+
+export function ChatModelWizard({
+ settings,
+ updateSettings,
+}: {
+ settings: AppSettings;
+ updateSettings: UpdateSettingsFn;
+}) {
+  return (
+    <ModelWizard
+      settingsKey="chatModel"
+      title="Chat Model"
+      settings={settings}
+      updateSettings={updateSettings}
+    />
+  );
+}
+
+export function UtilityModelWizard({
+ settings,
+ updateSettings,
+}: {
+ settings: AppSettings;
+ updateSettings: UpdateSettingsFn;
+}) {
+  return (
+    <ModelWizard
+      settingsKey="utilityModel"
+      title="Utility Model"
+      settings={settings}
+      updateSettings={updateSettings}
+    />
+  );
+}
+
+export function MultimediaModelWizard({
+ settings,
+ updateSettings,
+}: {
+ settings: AppSettings;
+ updateSettings: UpdateSettingsFn;
+}) {
+  return (
+    <ModelWizard
+      settingsKey="multimediaModel"
+      title="Multimedia Model"
+      settings={settings}
+      updateSettings={updateSettings}
+    />
+  );
+}
+
export function EmbeddingsModelWizard({
settings,
updateSettings,
@@ -458,7 +531,10 @@ export function EmbeddingsModelWizard({
return (
-      <h3>Embeddings Model</h3>
+      <div>
+        <h3>Embeddings Model</h3>
+        <p>Model for vector embeddings (memory & RAG)</p>
+      </div>
{requiresApiKey && (
diff --git a/src/lib/storage/settings-store.ts b/src/lib/storage/settings-store.ts
index 5866489..a07bf60 100644
--- a/src/lib/storage/settings-store.ts
+++ b/src/lib/storage/settings-store.ts
@@ -16,20 +16,26 @@ async function ensureDir(dir: string) {
export const DEFAULT_SETTINGS: AppSettings = {
chatModel: {
- provider: "openai",
- model: "gpt-4o",
+ provider: "openrouter",
+ model: "anthropic/claude-opus-4-6",
temperature: 0.7,
maxTokens: 4096,
},
utilityModel: {
- provider: "openai",
- model: "gpt-4o-mini",
+ provider: "openrouter",
+ model: "anthropic/claude-sonnet-4-6",
temperature: 0.3,
maxTokens: 2048,
},
+ multimediaModel: {
+ provider: "openrouter",
+ model: "google/gemini-2.5-pro-preview-05-06",
+ temperature: 0.5,
+ maxTokens: 4096,
+ },
embeddingsModel: {
- provider: "openai",
- model: "text-embedding-3-small",
+ provider: "openrouter",
+ model: "openai/text-embedding-3-small",
dimensions: 1536,
},
codeExecution: {
diff --git a/src/lib/types.ts b/src/lib/types.ts
index efbced2..69c8d67 100644
--- a/src/lib/types.ts
+++ b/src/lib/types.ts
@@ -16,6 +16,7 @@ export interface ModelConfig {
export interface AppSettings {
chatModel: ModelConfig;
utilityModel: ModelConfig;
+ multimediaModel: ModelConfig;
embeddingsModel: {
provider: "openai" | "openrouter" | "google" | "ollama" | "custom" | "mock";
model: string;
@@ -192,6 +193,7 @@ export interface KnowledgeFile {
export interface AgentConfig {
chatModel: ModelConfig;
utilityModel: ModelConfig;
+ multimediaModel: ModelConfig;
embeddingsModel: AppSettings["embeddingsModel"];
memorySubdir: string;
knowledgeSubdirs: string[];
From 764997b461cd3885b132b0fa70654f5b805a04cf Mon Sep 17 00:00:00 2001
From: Claude
Date: Fri, 27 Feb 2026 19:13:21 +0000
Subject: [PATCH 16/40] feat: switch default embedding model to
qwen3-embedding-8b
13x cheaper than OpenAI's text-embedding-3-small, #1 on MTEB multilingual,
32K context. Uses 1536 dims via MRL (Matryoshka Representation Learning) as
a storage/quality balance.
Also add Qwen3 and Gemini embedding models to the known-dimensions map.
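
A sketch of the MRL truncation this relies on (hypothetical helper, not part
of this patch; the embeddings store may implement it differently):

    // Matryoshka-style truncation: keep the first k dimensions, then
    // re-normalize so cosine similarity stays meaningful at the new size.
    function truncateEmbedding(full: number[], k = 1536): number[] {
      const head = full.slice(0, k);
      const norm = Math.sqrt(head.reduce((s, x) => s + x * x, 0)) || 1;
      return head.map((x) => x / norm);
    }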
https://claude.ai/code/session_01XjFNsYgKjJrENstcSbcoVT
---
src/components/settings/model-wizards.tsx | 4 ++++
src/lib/storage/settings-store.ts | 2 +-
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/src/components/settings/model-wizards.tsx b/src/components/settings/model-wizards.tsx
index 9b23716..be57a7a 100644
--- a/src/components/settings/model-wizards.tsx
+++ b/src/components/settings/model-wizards.tsx
@@ -526,6 +526,10 @@ export function EmbeddingsModelWizard({
"gte-large": 1024,
"gte-base": 768,
"mpnet-base": 768,
+ "qwen3-embedding-8b": 4096,
+ "qwen3-embedding-4b": 2048,
+ "qwen3-embedding-0.6b": 1024,
+ "gemini-embedding": 3072,
};
return (
diff --git a/src/lib/storage/settings-store.ts b/src/lib/storage/settings-store.ts
index a07bf60..f4f168d 100644
--- a/src/lib/storage/settings-store.ts
+++ b/src/lib/storage/settings-store.ts
@@ -35,7 +35,7 @@ export const DEFAULT_SETTINGS: AppSettings = {
},
embeddingsModel: {
provider: "openrouter",
- model: "openai/text-embedding-3-small",
+ model: "qwen/qwen3-embedding-8b",
dimensions: 1536,
},
codeExecution: {
From 44f3edad718971149a62f3eadcaf0f28a50f3426 Mon Sep 17 00:00:00 2001
From: Claude
Date: Fri, 27 Feb 2026 19:21:31 +0000
Subject: [PATCH 17/40] feat: update multimedia model to gemini-3.1-pro-preview
Google's latest SOTA reasoning model with multimodal capabilities.
1M context, audio/image/video input support.
https://claude.ai/code/session_01XjFNsYgKjJrENstcSbcoVT
---
src/lib/storage/settings-store.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/lib/storage/settings-store.ts b/src/lib/storage/settings-store.ts
index f4f168d..24b3645 100644
--- a/src/lib/storage/settings-store.ts
+++ b/src/lib/storage/settings-store.ts
@@ -29,7 +29,7 @@ export const DEFAULT_SETTINGS: AppSettings = {
},
multimediaModel: {
provider: "openrouter",
- model: "google/gemini-2.5-pro-preview-05-06",
+ model: "google/gemini-3.1-pro-preview",
temperature: 0.5,
maxTokens: 4096,
},
From a3eca282b240e012a593685072eaa7ef88480102 Mon Sep 17 00:00:00 2001
From: Claude
Date: Fri, 27 Feb 2026 19:28:58 +0000
Subject: [PATCH 18/40] feat: swap chat/utility models — Sonnet as main, Opus
 for heavy tasks
- chatModel: claude-sonnet-4-6 (everyday conversations, ~5x cheaper)
- utilityModel: claude-opus-4-6 (complex reasoning, coding, 8K output)
https://claude.ai/code/session_01XjFNsYgKjJrENstcSbcoVT
---
src/lib/storage/settings-store.ts | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/lib/storage/settings-store.ts b/src/lib/storage/settings-store.ts
index 24b3645..59c93f9 100644
--- a/src/lib/storage/settings-store.ts
+++ b/src/lib/storage/settings-store.ts
@@ -17,15 +17,15 @@ async function ensureDir(dir: string) {
export const DEFAULT_SETTINGS: AppSettings = {
chatModel: {
provider: "openrouter",
- model: "anthropic/claude-opus-4-6",
+ model: "anthropic/claude-sonnet-4-6",
temperature: 0.7,
maxTokens: 4096,
},
utilityModel: {
provider: "openrouter",
- model: "anthropic/claude-sonnet-4-6",
+ model: "anthropic/claude-opus-4-6",
temperature: 0.3,
- maxTokens: 2048,
+ maxTokens: 8192,
},
multimediaModel: {
provider: "openrouter",
From ae00e6a1d5f73ca0525dba739beea759fce38c1b Mon Sep 17 00:00:00 2001
From: Claude
Date: Fri, 27 Feb 2026 20:06:14 +0000
Subject: [PATCH 19/40] feat: wire multi-model routing — Opus for subordinate
 agents, Gemini for images
- Subordinate agents now use utilityModel (Opus) instead of chatModel
- Image attachments auto-route to multimediaModel (Gemini) with vision
- Added multimodal message building (base64 images via AI SDK ImagePart)
- Chat API, external handler, and Telegram all forward attachments
- Regular text messages continue using chatModel (Sonnet) as before
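
For reference, a request that exercises the new routing might look like the
sketch below (illustrative payload; the ids and paths are made up):

    await fetch("/api/chat", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        chatId: "chat-123",
        message: "What is in this picture?",
        // an image/* attachment routes this run to settings.multimediaModel
        attachments: [
          { name: "photo.jpg", type: "image/jpeg", path: "/tmp/photo.jpg" },
        ],
      }),
    });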
https://claude.ai/code/session_01XjFNsYgKjJrENstcSbcoVT
---
src/app/api/chat/route.ts | 3 +-
src/app/api/integrations/telegram/route.ts | 19 ++++-
src/lib/agent/agent.ts | 91 ++++++++++++++++-----
src/lib/external/handle-external-message.ts | 4 +-
4 files changed, 92 insertions(+), 25 deletions(-)
diff --git a/src/app/api/chat/route.ts b/src/app/api/chat/route.ts
index 839a55a..7bfd5a2 100644
--- a/src/app/api/chat/route.ts
+++ b/src/app/api/chat/route.ts
@@ -15,7 +15,7 @@ export async function POST(req: NextRequest) {
try {
await ensureCronSchedulerStarted();
const body = await req.json();
- const { chatId, projectId, currentPath } = body;
+ const { chatId, projectId, currentPath, attachments } = body;
let message: string | undefined = body.message;
// Support AI SDK's DefaultChatTransport format which sends a `messages` array
@@ -82,6 +82,7 @@ export async function POST(req: NextRequest) {
userMessage: message,
projectId,
currentPath: typeof currentPath === "string" ? currentPath : undefined,
+ attachments: Array.isArray(attachments) ? attachments : undefined,
});
// Record usage stats (fire-and-forget, don't block the response)
diff --git a/src/app/api/integrations/telegram/route.ts b/src/app/api/integrations/telegram/route.ts
index 3de1276..275b965 100644
--- a/src/app/api/integrations/telegram/route.ts
+++ b/src/app/api/integrations/telegram/route.ts
@@ -663,6 +663,7 @@ export async function POST(req: NextRequest) {
name: string;
path: string;
size: number;
+ type: string;
}
| null = null;
@@ -684,6 +685,7 @@ export async function POST(req: NextRequest) {
name: saved.name,
path: saved.path,
size: saved.size,
+ type: saved.type,
};
}
@@ -736,10 +738,24 @@ export async function POST(req: NextRequest) {
}
try {
+ // Build image attachments for vision model routing
+ const imageAttachments =
+ incomingSavedFile && incomingSavedFile.type.startsWith("image/")
+ ? [
+ {
+ name: incomingSavedFile.name,
+ type: incomingSavedFile.type,
+ path: incomingSavedFile.path,
+ },
+ ]
+ : undefined;
+
const result = await handleExternalMessage({
sessionId,
message: incomingSavedFile
- ? `${incomingText}\n\nAttached file: ${incomingSavedFile.name}`
+ ? imageAttachments
+ ? incomingText || "Describe this image."
+ : `${incomingText}\n\nAttached file: ${incomingSavedFile.name}`
: incomingText,
projectId: externalContext?.projectId ?? defaultProjectId,
chatId: externalContext?.chatId,
@@ -751,6 +767,7 @@ export async function POST(req: NextRequest) {
replyToMessageId: messageId ?? null,
},
},
+ attachments: imageAttachments,
});
// Record usage stats for linked app user
diff --git a/src/lib/agent/agent.ts b/src/lib/agent/agent.ts
index 41af8fd..a8b0fbc 100644
--- a/src/lib/agent/agent.ts
+++ b/src/lib/agent/agent.ts
@@ -3,6 +3,7 @@ import {
generateText,
stepCountIs,
type ModelMessage,
+ type UserContent,
type ToolExecutionOptions,
type ToolSet,
} from "ai";
@@ -13,8 +14,9 @@ import { getChat, saveChat } from "@/lib/storage/chat-store";
import { createAgentTools } from "@/lib/tools/tool";
import { getProjectMcpTools } from "@/lib/mcp/client";
import type { AgentContext } from "@/lib/agent/types";
-import type { ChatMessage } from "@/lib/types";
+import type { Attachment, ChatMessage } from "@/lib/types";
import { publishUiSyncEvent } from "@/lib/realtime/event-bus";
+import fs from "fs/promises";
const LLM_LOG_BORDER = "═".repeat(60);
@@ -283,6 +285,41 @@ function convertModelMessageToChatMessages(msg: ModelMessage, now: string): Chat
}];
}
+/**
+ * Check whether the given attachments include any images.
+ */
+function hasImages(attachments?: Attachment[]): boolean {
+ return !!attachments?.some((a) => a.type.startsWith("image/"));
+}
+
+/**
+ * Build a multimodal user message content array from text + image attachments.
+ * Falls back to a plain string when there are no image attachments.
+ */
+async function buildUserContent(
+ text: string,
+ attachments?: Attachment[]
+): Promise<UserContent> {
+ if (!hasImages(attachments)) {
+ return text;
+ }
+
+ const parts: UserContent = [{ type: "text", text }];
+
+ for (const att of attachments!) {
+ if (att.type.startsWith("image/") && att.path) {
+ const imageData = await fs.readFile(att.path);
+ parts.push({
+ type: "image",
+ image: imageData,
+ mediaType: att.type,
+ });
+ }
+ }
+
+ return parts;
+}
+
function logLLMRequest(options: {
model: string;
system: string;
@@ -325,9 +362,13 @@ export async function runAgent(options: {
projectId?: string;
currentPath?: string;
agentNumber?: number;
+ attachments?: Attachment[];
}) {
const settings = await getSettings();
- const model = createModel(settings.chatModel);
+ const modelConfig = hasImages(options.attachments)
+ ? settings.multimediaModel
+ : settings.chatModel;
+ const model = createModel(modelConfig);
// Build context
const context: AgentContext = {
@@ -376,19 +417,20 @@ export async function runAgent(options: {
tools: toolNames,
});
- // Append user message to history
+ // Append user message to history (multimodal if image attachments present)
+ const userContent = await buildUserContent(options.userMessage, options.attachments);
const messages: ModelMessage[] = [
...context.history,
- { role: "user", content: options.userMessage },
+ { role: "user", content: userContent },
];
logLLMRequest({
- model: `${settings.chatModel.provider}/${settings.chatModel.model}`,
+ model: `${modelConfig.provider}/${modelConfig.model}`,
system: systemPrompt,
messages,
toolNames,
- temperature: settings.chatModel.temperature,
- maxTokens: settings.chatModel.maxTokens,
+ temperature: modelConfig.temperature,
+ maxTokens: modelConfig.maxTokens,
label: "LLM Request (stream)",
});
@@ -399,8 +441,8 @@ export async function runAgent(options: {
messages,
tools,
stopWhen: stepCountIs(15), // Allow up to 15 tool call rounds
- temperature: settings.chatModel.temperature ?? 0.7,
- maxOutputTokens: settings.chatModel.maxTokens ?? 4096,
+ temperature: modelConfig.temperature ?? 0.7,
+ maxOutputTokens: modelConfig.maxTokens ?? 4096,
onFinish: async (event) => {
if (mcpCleanup) {
try {
@@ -464,9 +506,13 @@ export async function runAgentText(options: {
currentPath?: string;
agentNumber?: number;
   runtimeData?: Record<string, unknown>;
+ attachments?: Attachment[];
 }): Promise<string> {
const settings = await getSettings();
- const model = createModel(settings.chatModel);
+ const modelConfig = hasImages(options.attachments)
+ ? settings.multimediaModel
+ : settings.chatModel;
+ const model = createModel(modelConfig);
const context: AgentContext = {
chatId: options.chatId,
@@ -507,18 +553,19 @@ export async function runAgentText(options: {
tools: toolNames,
});
+ const userContent = await buildUserContent(options.userMessage, options.attachments);
const messages: ModelMessage[] = [
...context.history,
- { role: "user", content: options.userMessage },
+ { role: "user", content: userContent },
];
logLLMRequest({
- model: `${settings.chatModel.provider}/${settings.chatModel.model}`,
+ model: `${modelConfig.provider}/${modelConfig.model}`,
system: systemPrompt,
messages,
toolNames,
- temperature: settings.chatModel.temperature,
- maxTokens: settings.chatModel.maxTokens,
+ temperature: modelConfig.temperature,
+ maxTokens: modelConfig.maxTokens,
label: "LLM Request (non-stream)",
});
@@ -529,8 +576,8 @@ export async function runAgentText(options: {
messages,
tools,
stopWhen: stepCountIs(15),
- temperature: settings.chatModel.temperature ?? 0.7,
- maxOutputTokens: settings.chatModel.maxTokens ?? 4096,
+ temperature: modelConfig.temperature ?? 0.7,
+ maxOutputTokens: modelConfig.maxTokens ?? 4096,
});
const text = generated.text ?? "";
@@ -598,7 +645,7 @@ export async function runSubordinateAgent(options: {
parentHistory: ModelMessage[];
 }): Promise<string> {
const settings = await getSettings();
- const model = createModel(settings.chatModel);
+ const model = createModel(settings.utilityModel);
const context: AgentContext = {
chatId: `subordinate-${Date.now()}`,
@@ -644,12 +691,12 @@ export async function runSubordinateAgent(options: {
];
logLLMRequest({
- model: `${settings.chatModel.provider}/${settings.chatModel.model}`,
+ model: `${settings.utilityModel.provider}/${settings.utilityModel.model}`,
system: systemPrompt,
messages,
toolNames,
- temperature: settings.chatModel.temperature,
- maxTokens: settings.chatModel.maxTokens,
+ temperature: settings.utilityModel.temperature,
+ maxTokens: settings.utilityModel.maxTokens,
label: "LLM Request (subordinate)",
});
@@ -660,8 +707,8 @@ export async function runSubordinateAgent(options: {
messages,
tools,
stopWhen: stepCountIs(10),
- temperature: settings.chatModel.temperature ?? 0.7,
- maxOutputTokens: settings.chatModel.maxTokens ?? 4096,
+ temperature: settings.utilityModel.temperature ?? 0.7,
+ maxOutputTokens: settings.utilityModel.maxTokens ?? 4096,
});
return text;
} finally {
diff --git a/src/lib/external/handle-external-message.ts b/src/lib/external/handle-external-message.ts
index 158c89a..a098d34 100644
--- a/src/lib/external/handle-external-message.ts
+++ b/src/lib/external/handle-external-message.ts
@@ -7,7 +7,7 @@ import {
saveExternalSession,
type ExternalSession,
} from "@/lib/storage/external-session-store";
-import type { ChatMessage } from "@/lib/types";
+import type { Attachment, ChatMessage } from "@/lib/types";
export interface HandleExternalMessageInput {
sessionId: string;
@@ -16,6 +16,7 @@ export interface HandleExternalMessageInput {
chatId?: string;
currentPath?: string;
   runtimeData?: Record<string, unknown>;
+ attachments?: Attachment[];
}
interface SwitchProjectSignal {
@@ -258,6 +259,7 @@ export async function handleExternalMessage(
projectId: resolvedProjectId,
currentPath: currentPath || undefined,
runtimeData: input.runtimeData,
+ attachments: input.attachments,
});
const afterChat = await getChat(resolvedChatId);
From 7ba250731f0926694d398f3f623c14b6649b45f0 Mon Sep 17 00:00:00 2001
From: Claude
Date: Sat, 28 Feb 2026 06:17:00 +0000
Subject: [PATCH 20/40] fix: send error messages to Telegram user for all
failure types
Previously, only ExternalMessageError exceptions were caught and
reported back to the Telegram user. All other errors (LLM API failures,
missing API keys, timeouts) were silently swallowed, causing the bot to
not respond at all. Now the catch-all block sends the error description
to the user in Telegram so they can diagnose the issue.
https://claude.ai/code/session_01XjFNsYgKjJrENstcSbcoVT
---
src/app/api/integrations/telegram/route.ts | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/src/app/api/integrations/telegram/route.ts b/src/app/api/integrations/telegram/route.ts
index 275b965..9be93c5 100644
--- a/src/app/api/integrations/telegram/route.ts
+++ b/src/app/api/integrations/telegram/route.ts
@@ -790,7 +790,23 @@ export async function POST(req: NextRequest) {
await sendTelegramMessage(botToken, chatId, `Ошибка: ${errorMessage}`, messageId);
return Response.json({ ok: true, handledError: true, status: error.status });
}
- throw error;
+
+ // Catch-all: send a user-visible error message for unexpected failures
+ // (e.g. LLM API errors, missing API keys, timeouts, etc.)
+ console.error("Telegram agent processing error:", error);
+ const fallbackMessage =
+ error instanceof Error ? error.message : "Внутренняя ошибка сервера.";
+ try {
+ await sendTelegramMessage(
+ botToken,
+ chatId,
+ `Ошибка обработки: ${fallbackMessage}`,
+ messageId
+ );
+ } catch (sendError) {
+ console.error("Failed to send error message to Telegram:", sendError);
+ }
+ return Response.json({ ok: true, handledError: true, internalError: true });
}
} catch (error) {
if (
From fa03ecd5c5ac517bf73adca74c109c30fc3e1fdb Mon Sep 17 00:00:00 2001
From: Claude
Date: Sat, 28 Feb 2026 06:33:37 +0000
Subject: [PATCH 21/40] fix: auto-register Telegram webhook on startup and
auto-allow first user
The Telegram bot was not responding because:
1. The webhook was never registered with Telegram API (setWebhook not called)
2. Empty allowedUserIds list blocked all users without any way to self-onboard
Changes:
- Add instrumentation.ts that auto-registers the Telegram webhook on app
startup when TELEGRAM_BOT_TOKEN, TELEGRAM_WEBHOOK_SECRET, and a base URL
are available. Also auto-detects base URL from deployment platforms
(Vercel, Railway, Render, Fly.io) when APP_BASE_URL is not set.
- Auto-allow the first user who messages the bot in a private chat when
the allowedUserIds list is completely empty (no security boundary exists
when nobody is configured).
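
To confirm the registration after boot, the same Bot API endpoint the startup
check uses can be queried by hand (a sketch; the token comes from your env):

    const res = await fetch(
      `https://api.telegram.org/bot${process.env.TELEGRAM_BOT_TOKEN}/getWebhookInfo`
    );
    const info = await res.json();
    // expected: "<base-url>/api/integrations/telegram"
    console.log(info.result?.url);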
https://claude.ai/code/session_01JuWqDF92mQPfXk7fVH4q3K
---
instrumentation.ts | 91 ++++++++++++++++++++++
src/app/api/integrations/telegram/route.ts | 64 ++++++++-------
2 files changed, 128 insertions(+), 27 deletions(-)
create mode 100644 instrumentation.ts
diff --git a/instrumentation.ts b/instrumentation.ts
new file mode 100644
index 0000000..c2a6f66
--- /dev/null
+++ b/instrumentation.ts
@@ -0,0 +1,91 @@
+export async function register() {
+ if (process.env.NEXT_RUNTIME !== "nodejs") return;
+
+ const botToken = (process.env.TELEGRAM_BOT_TOKEN ?? "").trim();
+ const webhookSecret = (process.env.TELEGRAM_WEBHOOK_SECRET ?? "").trim();
+
+ if (!botToken || !webhookSecret) return;
+
+ const baseUrl = inferBaseUrl();
+ if (!baseUrl) {
+ console.warn(
+ "[Telegram] Skipping auto-webhook: no APP_BASE_URL or deployment URL detected"
+ );
+ return;
+ }
+
+ const webhookUrl = `${baseUrl}/api/integrations/telegram`;
+
+ try {
+ const infoRes = await fetch(
+ `https://api.telegram.org/bot${botToken}/getWebhookInfo`
+ );
+ const info = (await infoRes.json()) as {
+ ok?: boolean;
+ result?: { url?: string };
+ };
+
+ if (info.ok && info.result?.url === webhookUrl) {
+ console.log("[Telegram] Webhook already registered:", webhookUrl);
+ return;
+ }
+
+ const res = await fetch(
+ `https://api.telegram.org/bot${botToken}/setWebhook`,
+ {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ url: webhookUrl,
+ secret_token: webhookSecret,
+ drop_pending_updates: false,
+ }),
+ }
+ );
+
+ const data = (await res.json()) as {
+ ok?: boolean;
+ description?: string;
+ };
+
+ if (data.ok) {
+ console.log("[Telegram] Webhook auto-registered:", webhookUrl);
+ } else {
+ console.warn(
+ "[Telegram] Webhook auto-registration failed:",
+ data.description
+ );
+ }
+ } catch (error) {
+ console.warn("[Telegram] Webhook auto-registration error:", error);
+ }
+}
+
+function inferBaseUrl(): string {
+ const explicit = (process.env.APP_BASE_URL ?? "").trim().replace(/\/+$/, "");
+ if (explicit) return explicit;
+
+ // Vercel
+ const vercelUrl = (process.env.VERCEL_URL ?? "").trim();
+ if (vercelUrl) return `https://${vercelUrl}`;
+
+ // Railway
+ const railwayUrl = (
+ process.env.RAILWAY_PUBLIC_DOMAIN ??
+ process.env.RAILWAY_STATIC_URL ??
+ ""
+ ).trim();
+ if (railwayUrl) {
+ return railwayUrl.startsWith("http") ? railwayUrl : `https://${railwayUrl}`;
+ }
+
+ // Render
+ const renderUrl = (process.env.RENDER_EXTERNAL_URL ?? "").trim();
+ if (renderUrl) return renderUrl;
+
+ // Fly.io
+ const flyApp = (process.env.FLY_APP_NAME ?? "").trim();
+ if (flyApp) return `https://${flyApp}.fly.dev`;
+
+ return "";
+}
diff --git a/src/app/api/integrations/telegram/route.ts b/src/app/api/integrations/telegram/route.ts
index 275b965..260972f 100644
--- a/src/app/api/integrations/telegram/route.ts
+++ b/src/app/api/integrations/telegram/route.ts
@@ -18,6 +18,7 @@ import {
consumeTelegramAccessCode,
getTelegramIntegrationRuntimeConfig,
normalizeTelegramUserId,
+ saveTelegramIntegrationStoredSettings,
} from "@/lib/storage/telegram-integration-store";
import { saveChatFile } from "@/lib/storage/chat-files-store";
import { createChat, getChat } from "@/lib/storage/chat-store";
@@ -563,44 +564,53 @@ export async function POST(req: NextRequest) {
}
if (!allowedUserIds.has(fromUserId)) {
- const accessCode = extractAccessCodeCandidate(text);
- const granted =
- accessCode &&
- (await consumeTelegramAccessCode({
- code: accessCode,
- userId: fromUserId,
- }));
+ // Auto-allow the first user in private chat when no users are configured
+ if (allowedUserIds.size === 0 && chatType === "private") {
+ await saveTelegramIntegrationStoredSettings({
+ allowedUserIds: [fromUserId],
+ });
+ allowedUserIds.add(fromUserId);
+ console.log(`[Telegram] Auto-allowed first user: ${fromUserId}`);
+ } else {
+ const accessCode = extractAccessCodeCandidate(text);
+ const granted =
+ accessCode &&
+ (await consumeTelegramAccessCode({
+ code: accessCode,
+ userId: fromUserId,
+ }));
+
+ if (granted) {
+ await sendTelegramMessage(
+ botToken,
+ chatId,
+ "Доступ выдан. Теперь можно отправлять сообщения агенту.",
+ messageId
+ );
+ return Response.json({
+ ok: true,
+ accessGranted: true,
+ userId: fromUserId,
+ });
+ }
- if (granted) {
await sendTelegramMessage(
botToken,
chatId,
- "Доступ выдан. Теперь можно отправлять сообщения агенту.",
+ [
+ "Доступ запрещён: ваш user_id не в списке разрешённых.",
+ "Отправьте код активации командой /code <код> или /start <код>.",
+ `Ваш user_id: ${fromUserId}`,
+ ].join("\n"),
messageId
);
return Response.json({
ok: true,
- accessGranted: true,
+ ignored: true,
+ reason: "user_not_allowed",
userId: fromUserId,
});
}
-
- await sendTelegramMessage(
- botToken,
- chatId,
- [
- "Доступ запрещён: ваш user_id не в списке разрешённых.",
- "Отправьте код активации командой /code <код> или /start <код>.",
- `Ваш user_id: ${fromUserId}`,
- ].join("\n"),
- messageId
- );
- return Response.json({
- ok: true,
- ignored: true,
- reason: "user_not_allowed",
- userId: fromUserId,
- });
}
// Resolve app user linked to this Telegram account (if any)
From 43930ae40bb2cfbf208b680eb1f1a4f200b5f5a7 Mon Sep 17 00:00:00 2001
From: Claude
Date: Sat, 28 Feb 2026 06:40:06 +0000
Subject: [PATCH 22/40] fix: eliminate duplicate chat responses and unnecessary
code execution
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Two issues fixed:
1. Simple questions (weather, jokes, etc.) triggered code_execution unnecessarily
because the system prompt pushed the model to always use tools. Updated prompts
to instruct the model to respond directly with text for simple questions.
2. The model produced duplicate response bubbles because the `response` tool caused
   an extra agent round-trip — the model would call the response tool, get the result
back, then generate another text message. Fixed by:
- Removing the response tool entirely (model now responds with text directly)
- Merging consecutive assistant messages in both storage (onFinish) and
rendering (chatMessagesToUIMessages) to prevent duplicate bubbles
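
The storage-side merge follows the same shape as the rendering merge; a
minimal sketch, assuming ChatMessage carries role and string content (the
onFinish code in agent.ts is the authoritative version):

    // Collapse consecutive assistant messages so one agent turn is stored
    // as one message instead of several partial bubbles.
    function mergeAssistantRuns(messages: ChatMessage[]): ChatMessage[] {
      const out: ChatMessage[] = [];
      for (const m of messages) {
        const prev = out[out.length - 1];
        if (prev && prev.role === "assistant" && m.role === "assistant") {
          prev.content = [prev.content, m.content].filter(Boolean).join("\n\n");
        } else {
          out.push({ ...m });
        }
      }
      return out;
    }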
https://claude.ai/code/session_016D6aN4HLJeTQkNSYRrprsv
---
src/components/chat/chat-panel.tsx | 16 +++++++++---
src/components/chat/tool-output.tsx | 4 ---
src/lib/agent/agent.ts | 39 ++++++++++++++++++++++++++---
src/lib/agent/prompts.ts | 15 +++++------
src/lib/tools/tool.ts | 14 -----------
src/prompts/system.md | 14 ++++++++---
src/prompts/tool-response.md | 11 --------
7 files changed, 67 insertions(+), 46 deletions(-)
delete mode 100644 src/prompts/tool-response.md
diff --git a/src/components/chat/chat-panel.tsx b/src/components/chat/chat-panel.tsx
index 895ffe4..261e9e4 100644
--- a/src/components/chat/chat-panel.tsx
+++ b/src/components/chat/chat-panel.tsx
@@ -11,7 +11,10 @@ import type { ChatMessage } from "@/lib/types";
import { useBackgroundSync } from "@/hooks/use-background-sync";
import { generateClientId } from "@/lib/utils";
-/** Convert stored ChatMessage to UIMessage (parts format for useChat) */
+/** Convert stored ChatMessage to UIMessage (parts format for useChat).
+ * Consecutive assistant messages are merged into a single UIMessage so the
+ * multi-step agent loop does not produce duplicate bubbles in the UI.
+ */
function chatMessagesToUIMessages(chatMessages: ChatMessage[]): UIMessage[] {
const result: UIMessage[] = [];
@@ -53,8 +56,15 @@ function chatMessagesToUIMessages(chatMessages: ChatMessage[]): UIMessage[] {
parts.push({ type: "text" as const, text: m.content });
}
- // Only add message if it has content
- if (parts.length > 0) {
+ if (parts.length === 0) continue;
+
+ // Merge into the previous assistant message when possible so that
+ // multi-step agent turns (tool call → result → next text) appear as a
+ // single message bubble instead of duplicated responses.
+ const prev = result.length > 0 ? result[result.length - 1] : null;
+ if (prev && prev.role === "assistant") {
+ prev.parts = [...prev.parts, ...parts];
+ } else {
result.push({
id: m.id,
role: "assistant",
diff --git a/src/components/chat/tool-output.tsx b/src/components/chat/tool-output.tsx
index 5ce8930..b6df061 100644
--- a/src/components/chat/tool-output.tsx
+++ b/src/components/chat/tool-output.tsx
@@ -65,7 +65,6 @@ const TOOL_LABELS: Record<string, string> = {
get_current_project: "Current Project",
switch_project: "Switch Project",
create_project: "Create Project",
- response: "Response",
};
export function ToolOutput({ toolName, args, result }: ToolOutputProps) {
@@ -73,9 +72,6 @@ export function ToolOutput({ toolName, args, result }: ToolOutputProps) {
const Icon = TOOL_ICONS[toolName] || Terminal;
const label = TOOL_LABELS[toolName] || toolName;
- // Don't render the response tool visually
- if (toolName === "response") return null;
-
return (