
Commit ccf2685

improvement(copilot): update copilot to match copilot repo (#2829)
* Ux
* Fix lint
* Clean up model options
* Codex

1 parent: 5eca660

10 files changed, 88 additions and 107 deletions

apps/sim/app/api/copilot/chat/route.ts

Lines changed: 3 additions & 0 deletions

@@ -52,6 +52,9 @@ const ChatMessageSchema = z.object({
       'gpt-5.1-high',
       'gpt-5-codex',
       'gpt-5.1-codex',
+      'gpt-5.2',
+      'gpt-5.2-codex',
+      'gpt-5.2-pro',
       'gpt-4o',
       'gpt-4.1',
       'o3',
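The three new ids are appended to the allowed-model list inside ChatMessageSchema, so requests naming them pass validation. A minimal sketch of the pattern, assuming a zod enum guards a model field on the request body (the field name and request shape are assumptions, and the id list is truncated to what this hunk shows):

import { z } from 'zod'

// Truncated to the ids visible in this hunk; the real schema lists more models.
const CopilotModelSchema = z.enum([
  'gpt-5.1-high',
  'gpt-5-codex',
  'gpt-5.1-codex',
  'gpt-5.2',
  'gpt-5.2-codex',
  'gpt-5.2-pro',
  'gpt-4o',
  'gpt-4.1',
  'o3',
])

// Hypothetical request shape: unknown model ids are rejected before reaching the copilot backend.
const ChatRequestSketch = z.object({
  message: z.string().min(1),
  model: CopilotModelSchema.optional(),
})

type ChatRequestSketch = z.infer<typeof ChatRequestSketch>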

apps/sim/app/api/copilot/user-models/route.ts

Lines changed: 6 additions & 3 deletions

@@ -15,11 +15,14 @@ const DEFAULT_ENABLED_MODELS: Record<string, boolean> = {
   'gpt-5-medium': false,
   'gpt-5-high': false,
   'gpt-5.1-fast': false,
-  'gpt-5.1': true,
-  'gpt-5.1-medium': true,
+  'gpt-5.1': false,
+  'gpt-5.1-medium': false,
   'gpt-5.1-high': false,
   'gpt-5-codex': false,
-  'gpt-5.1-codex': true,
+  'gpt-5.1-codex': false,
+  'gpt-5.2': false,
+  'gpt-5.2-codex': true,
+  'gpt-5.2-pro': true,
   o3: true,
   'claude-4-sonnet': false,
   'claude-4.5-haiku': true,
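With this change the GPT 5.1 family is off by default and gpt-5.2-codex and gpt-5.2-pro are on. A small sketch of how a defaults map like this is typically combined with per-user overrides; the merge helper and the override shape are assumptions, not code from this route:

// Hypothetical helper: explicit user overrides win, everything else falls back to the defaults.
function resolveEnabledModels(
  defaults: Record<string, boolean>,
  userOverrides: Record<string, boolean> | null
): Record<string, boolean> {
  return { ...defaults, ...(userOverrides ?? {}) }
}

// A user who never touched their settings now gets gpt-5.2-codex but not gpt-5.1-codex.
const enabled = resolveEnabledModels(
  { 'gpt-5.1-codex': false, 'gpt-5.2-codex': true, 'gpt-5.2-pro': true },
  null
)
console.log(enabled['gpt-5.2-codex']) // true
console.log(enabled['gpt-5.1-codex']) // false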

apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/smooth-streaming.tsx

Lines changed: 26 additions & 84 deletions

@@ -2,29 +2,9 @@ import { memo, useEffect, useRef, useState } from 'react'
 import CopilotMarkdownRenderer from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/markdown-renderer'
 
 /**
- * Minimum delay between characters (fast catch-up mode)
+ * Character animation delay in milliseconds
  */
-const MIN_DELAY = 1
-
-/**
- * Maximum delay between characters (when waiting for content)
- */
-const MAX_DELAY = 12
-
-/**
- * Default delay when streaming normally
- */
-const DEFAULT_DELAY = 4
-
-/**
- * How far behind (in characters) before we speed up
- */
-const CATCH_UP_THRESHOLD = 20
-
-/**
- * How close to content before we slow down
- */
-const SLOW_DOWN_THRESHOLD = 5
+const CHARACTER_DELAY = 3
 
 /**
  * StreamingIndicator shows animated dots during message streaming

@@ -54,50 +34,21 @@ interface SmoothStreamingTextProps {
   isStreaming: boolean
 }
 
-/**
- * Calculates adaptive delay based on how far behind animation is from actual content
- *
- * @param displayedLength - Current displayed content length
- * @param totalLength - Total available content length
- * @returns Delay in milliseconds
- */
-function calculateAdaptiveDelay(displayedLength: number, totalLength: number): number {
-  const charsRemaining = totalLength - displayedLength
-
-  if (charsRemaining > CATCH_UP_THRESHOLD) {
-    // Far behind - speed up to catch up
-    // Scale from MIN_DELAY to DEFAULT_DELAY based on how far behind
-    const catchUpFactor = Math.min(1, (charsRemaining - CATCH_UP_THRESHOLD) / 50)
-    return MIN_DELAY + (DEFAULT_DELAY - MIN_DELAY) * (1 - catchUpFactor)
-  }
-
-  if (charsRemaining <= SLOW_DOWN_THRESHOLD) {
-    // Close to content edge - slow down to feel natural
-    // The closer we are, the slower we go (up to MAX_DELAY)
-    const slowFactor = 1 - charsRemaining / SLOW_DOWN_THRESHOLD
-    return DEFAULT_DELAY + (MAX_DELAY - DEFAULT_DELAY) * slowFactor
-  }
-
-  // Normal streaming speed
-  return DEFAULT_DELAY
-}
-
 /**
  * SmoothStreamingText component displays text with character-by-character animation
- * Creates a smooth streaming effect for AI responses with adaptive speed
- *
- * Uses adaptive pacing: speeds up when catching up, slows down near content edge
+ * Creates a smooth streaming effect for AI responses
  *
  * @param props - Component props
  * @returns Streaming text with smooth animation
  */
 export const SmoothStreamingText = memo(
   ({ content, isStreaming }: SmoothStreamingTextProps) => {
-    const [displayedContent, setDisplayedContent] = useState('')
+    // Initialize with full content when not streaming to avoid flash on page load
+    const [displayedContent, setDisplayedContent] = useState(() => (isStreaming ? '' : content))
     const contentRef = useRef(content)
-    const rafRef = useRef<number | null>(null)
-    const indexRef = useRef(0)
-    const lastFrameTimeRef = useRef<number>(0)
+    const timeoutRef = useRef<NodeJS.Timeout | null>(null)
+    // Initialize index based on streaming state
+    const indexRef = useRef(isStreaming ? 0 : content.length)
    const isAnimatingRef = useRef(false)
 
    useEffect(() => {

@@ -110,51 +61,42 @@ export const SmoothStreamingText = memo(
      }
 
      if (isStreaming) {
-        if (indexRef.current < content.length && !isAnimatingRef.current) {
-          isAnimatingRef.current = true
-          lastFrameTimeRef.current = performance.now()
-
-          const animateText = (timestamp: number) => {
+        if (indexRef.current < content.length) {
+          const animateText = () => {
            const currentContent = contentRef.current
            const currentIndex = indexRef.current
-            const elapsed = timestamp - lastFrameTimeRef.current
 
-            // Calculate adaptive delay based on how far behind we are
-            const delay = calculateAdaptiveDelay(currentIndex, currentContent.length)
-
-            if (elapsed >= delay) {
-              if (currentIndex < currentContent.length) {
-                const newDisplayed = currentContent.slice(0, currentIndex + 1)
-                setDisplayedContent(newDisplayed)
-                indexRef.current = currentIndex + 1
-                lastFrameTimeRef.current = timestamp
-              }
-            }
-
-            if (indexRef.current < currentContent.length) {
-              rafRef.current = requestAnimationFrame(animateText)
+            if (currentIndex < currentContent.length) {
+              const newDisplayed = currentContent.slice(0, currentIndex + 1)
+              setDisplayedContent(newDisplayed)
+              indexRef.current = currentIndex + 1
+              timeoutRef.current = setTimeout(animateText, CHARACTER_DELAY)
            } else {
              isAnimatingRef.current = false
            }
          }
 
-          rafRef.current = requestAnimationFrame(animateText)
-        } else if (indexRef.current < content.length && isAnimatingRef.current) {
-          // Animation already running, it will pick up new content automatically
+          if (!isAnimatingRef.current) {
+            if (timeoutRef.current) {
+              clearTimeout(timeoutRef.current)
+            }
+            isAnimatingRef.current = true
+            animateText()
+          }
        }
      } else {
        // Streaming ended - show full content immediately
-        if (rafRef.current) {
-          cancelAnimationFrame(rafRef.current)
+        if (timeoutRef.current) {
+          clearTimeout(timeoutRef.current)
        }
        setDisplayedContent(content)
        indexRef.current = content.length
        isAnimatingRef.current = false
      }
 
      return () => {
-        if (rafRef.current) {
-          cancelAnimationFrame(rafRef.current)
+        if (timeoutRef.current) {
+          clearTimeout(timeoutRef.current)
        }
        isAnimatingRef.current = false
      }
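Condensed, the new behavior is a single setTimeout chain that reveals one character per tick at a fixed CHARACTER_DELAY, replacing the requestAnimationFrame loop with adaptive pacing. A self-contained sketch of the same pattern under those assumptions (prop and constant names follow the diff; the markdown renderer is replaced with a plain div, and the effect restarts the chain on each content chunk instead of tracking an isAnimating flag):

import { memo, useEffect, useRef, useState } from 'react'

const CHARACTER_DELAY = 3 // milliseconds between revealed characters

interface SmoothStreamingTextProps {
  content: string
  isStreaming: boolean
}

export const SmoothStreamingTextSketch = memo(
  ({ content, isStreaming }: SmoothStreamingTextProps) => {
    // When not streaming (e.g. history loaded from the DB), start fully revealed to avoid a flash.
    const [displayed, setDisplayed] = useState(() => (isStreaming ? '' : content))
    const contentRef = useRef(content)
    const indexRef = useRef(isStreaming ? 0 : content.length)
    const timeoutRef = useRef<ReturnType<typeof setTimeout> | null>(null)

    useEffect(() => {
      contentRef.current = content

      if (!isStreaming) {
        // Streaming ended: cancel any pending tick and show everything at once.
        if (timeoutRef.current) clearTimeout(timeoutRef.current)
        indexRef.current = content.length
        setDisplayed(content)
        return
      }

      const tick = () => {
        const full = contentRef.current
        if (indexRef.current < full.length) {
          indexRef.current += 1
          setDisplayed(full.slice(0, indexRef.current))
          timeoutRef.current = setTimeout(tick, CHARACTER_DELAY)
        }
      }

      if (indexRef.current < content.length) tick()

      // Clear the pending timeout when content changes or the component unmounts.
      return () => {
        if (timeoutRef.current) clearTimeout(timeoutRef.current)
      }
    }, [content, isStreaming])

    return <div>{displayed}</div>
  }
)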

apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/copilot-message/components/thinking-block.tsx

Lines changed: 4 additions & 2 deletions

@@ -46,12 +46,14 @@ interface SmoothThinkingTextProps {
  */
 const SmoothThinkingText = memo(
   ({ content, isStreaming }: SmoothThinkingTextProps) => {
-    const [displayedContent, setDisplayedContent] = useState('')
+    // Initialize with full content when not streaming to avoid flash on page load
+    const [displayedContent, setDisplayedContent] = useState(() => (isStreaming ? '' : content))
     const [showGradient, setShowGradient] = useState(false)
     const contentRef = useRef(content)
     const textRef = useRef<HTMLDivElement>(null)
     const rafRef = useRef<number | null>(null)
-    const indexRef = useRef(0)
+    // Initialize index based on streaming state
+    const indexRef = useRef(isStreaming ? 0 : content.length)
     const lastFrameTimeRef = useRef<number>(0)
     const isAnimatingRef = useRef(false)
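SmoothThinkingText gets the same two-line fix as SmoothStreamingText above: seed the state and the character index lazily from props so a message rendered from history paints its full text on the first render instead of animating from an empty string. The difference in miniature (a generic hook sketch, not code from the repo):

import { useState } from 'react'

function useRevealedText(content: string, isStreaming: boolean) {
  // Before: useState('') always starts empty, so finished messages flash in on mount.
  // After: the lazy initializer runs only on the first render and picks the right starting value.
  const [displayed, setDisplayed] = useState(() => (isStreaming ? '' : content))
  return [displayed, setDisplayed] as const
}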

apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/tool-call/tool-call.tsx

Lines changed: 6 additions & 1 deletion

@@ -1952,7 +1952,12 @@ export function ToolCall({ toolCall: toolCallProp, toolCallId, onStateChange }:
   }, [params])
 
   // Skip rendering some internal tools
-  if (toolCall.name === 'checkoff_todo' || toolCall.name === 'mark_todo_in_progress') return null
+  if (
+    toolCall.name === 'checkoff_todo' ||
+    toolCall.name === 'mark_todo_in_progress' ||
+    toolCall.name === 'tool_search_tool_regex'
+  )
+    return null
 
   // Special rendering for subagent tools - show as thinking text with tool calls at top level
   const SUBAGENT_TOOLS = [
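The skip check now covers a third internal tool, tool_search_tool_regex. One hedged alternative (not what the commit does) keeps the hidden names in a set so the condition stays one line as more internal tools are added:

// Hypothetical: internal tool names that should never render in the copilot transcript.
const HIDDEN_INTERNAL_TOOLS = new Set([
  'checkoff_todo',
  'mark_todo_in_progress',
  'tool_search_tool_regex',
])

function shouldRenderToolCall(toolName: string): boolean {
  return !HIDDEN_INTERNAL_TOOLS.has(toolName)
}

// In the component: if (!shouldRenderToolCall(toolCall.name)) return null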

apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/components/model-selector/model-selector.tsx

Lines changed: 0 additions & 12 deletions

@@ -32,13 +32,6 @@ function getModelIconComponent(modelValue: string) {
   return <IconComponent className='h-3.5 w-3.5' />
 }
 
-/**
- * Checks if a model should display the MAX badge
- */
-function isMaxModel(modelValue: string): boolean {
-  return modelValue === 'claude-4.5-sonnet' || modelValue === 'claude-4.5-opus'
-}
-
 /**
  * Model selector dropdown for choosing AI model.
  * Displays model icon and label.

@@ -139,11 +132,6 @@ export function ModelSelector({ selectedModel, isNearTop, onModelSelect }: Model
             >
               {getModelIconComponent(option.value)}
               <span>{option.label}</span>
-              {isMaxModel(option.value) && (
-                <Badge size='sm' className='ml-auto'>
-                  MAX
-                </Badge>
-              )}
             </PopoverItem>
           ))}
         </PopoverScrollArea>

apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/constants.ts

Lines changed: 2 additions & 2 deletions

@@ -238,8 +238,8 @@ export const MODEL_OPTIONS = [
   { value: 'claude-4.5-opus', label: 'Claude 4.5 Opus' },
   { value: 'claude-4.5-sonnet', label: 'Claude 4.5 Sonnet' },
   { value: 'claude-4.5-haiku', label: 'Claude 4.5 Haiku' },
-  { value: 'gpt-5.1-codex', label: 'GPT 5.1 Codex' },
-  { value: 'gpt-5.1-medium', label: 'GPT 5.1 Medium' },
+  { value: 'gpt-5.2-codex', label: 'GPT 5.2 Codex' },
+  { value: 'gpt-5.2-pro', label: 'GPT 5.2 Pro' },
   { value: 'gemini-3-pro', label: 'Gemini 3 Pro' },
 ] as const

apps/sim/lib/copilot/api.ts

Lines changed: 3 additions & 0 deletions

@@ -77,6 +77,9 @@ export interface SendMessageRequest {
     | 'gpt-5.1-high'
     | 'gpt-5-codex'
     | 'gpt-5.1-codex'
+    | 'gpt-5.2'
+    | 'gpt-5.2-codex'
+    | 'gpt-5.2-pro'
     | 'gpt-4o'
     | 'gpt-4.1'
     | 'o3'
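The same three ids are added to the string-literal union on SendMessageRequest here and on CopilotState in the store types below. A sketch of that union as a standalone type with a hypothetical runtime guard, truncated to the ids visible in this hunk:

// Only the ids shown in this hunk; the real union in api.ts is longer.
const COPILOT_MODELS = [
  'gpt-5.1-high',
  'gpt-5-codex',
  'gpt-5.1-codex',
  'gpt-5.2',
  'gpt-5.2-codex',
  'gpt-5.2-pro',
  'gpt-4o',
  'gpt-4.1',
  'o3',
] as const

type CopilotModel = (typeof COPILOT_MODELS)[number]

// Hypothetical guard for narrowing untyped input (e.g. a persisted preference) to the union.
function isCopilotModel(value: string): value is CopilotModel {
  return (COPILOT_MODELS as readonly string[]).includes(value)
}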

apps/sim/stores/panel/copilot/store.ts

Lines changed: 35 additions & 3 deletions

@@ -422,7 +422,8 @@ function abortAllInProgressTools(set: any, get: () => CopilotStore) {
  * Loads messages from DB for UI rendering.
  * Messages are stored exactly as they render, so we just need to:
  * 1. Register client tool instances for any tool calls
- * 2. Return the messages as-is
+ * 2. Clear any streaming flags (messages loaded from DB are never actively streaming)
+ * 3. Return the messages
  */
 function normalizeMessagesForUI(messages: CopilotMessage[]): CopilotMessage[] {
   try {

@@ -438,23 +439,54 @@ function normalizeMessagesForUI(messages: CopilotMessage[]): CopilotMessage[] {
       }
     }
 
-    // Register client tool instances for all tool calls so they can be looked up
+    // Register client tool instances and clear streaming flags for all tool calls
     for (const message of messages) {
       if (message.contentBlocks) {
         for (const block of message.contentBlocks as any[]) {
           if (block?.type === 'tool_call' && block.toolCall) {
             registerToolCallInstances(block.toolCall)
+            clearStreamingFlags(block.toolCall)
           }
         }
       }
+      // Also clear from toolCalls array (legacy format)
+      if (message.toolCalls) {
+        for (const toolCall of message.toolCalls) {
+          clearStreamingFlags(toolCall)
+        }
+      }
     }
-    // Return messages as-is - they're already in the correct format for rendering
     return messages
   } catch {
     return messages
   }
 }
 
+/**
+ * Recursively clears streaming flags from a tool call and its nested subagent tool calls.
+ * This ensures messages loaded from DB don't appear to be streaming.
+ */
+function clearStreamingFlags(toolCall: any): void {
+  if (!toolCall) return
+
+  // Always set subAgentStreaming to false - messages loaded from DB are never streaming
+  toolCall.subAgentStreaming = false
+
+  // Clear nested subagent tool calls
+  if (Array.isArray(toolCall.subAgentBlocks)) {
+    for (const block of toolCall.subAgentBlocks) {
+      if (block?.type === 'subagent_tool_call' && block.toolCall) {
+        clearStreamingFlags(block.toolCall)
+      }
+    }
+  }
+  if (Array.isArray(toolCall.subAgentToolCalls)) {
+    for (const subTc of toolCall.subAgentToolCalls) {
+      clearStreamingFlags(subTc)
+    }
+  }
+}
+
 /**
  * Recursively registers client tool instances for a tool call and its nested subagent tool calls.
  */
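A small usage sketch of the new clearStreamingFlags helper on a hand-built nested tool call (not real store data), showing that the flag is cleared at every level of subAgentBlocks and subAgentToolCalls:

// Example of a persisted tool call that was saved mid-stream with stale flags.
const loadedToolCall = {
  name: 'build_workflow',
  subAgentStreaming: true,
  subAgentBlocks: [
    {
      type: 'subagent_tool_call',
      toolCall: { name: 'edit_block', subAgentStreaming: true },
    },
  ],
  subAgentToolCalls: [{ name: 'run_tests', subAgentStreaming: true }],
}

clearStreamingFlags(loadedToolCall)

// Every level is now marked as not streaming, so nothing loaded from the DB animates.
console.log(loadedToolCall.subAgentStreaming) // false
console.log(loadedToolCall.subAgentBlocks[0].toolCall.subAgentStreaming) // false
console.log(loadedToolCall.subAgentToolCalls[0].subAgentStreaming) // false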

apps/sim/stores/panel/copilot/types.ts

Lines changed: 3 additions & 0 deletions

@@ -106,6 +106,9 @@ export interface CopilotState {
     | 'gpt-5.1-high'
     | 'gpt-5-codex'
     | 'gpt-5.1-codex'
+    | 'gpt-5.2'
+    | 'gpt-5.2-codex'
+    | 'gpt-5.2-pro'
     | 'gpt-4o'
     | 'gpt-4.1'
     | 'o3'
