Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions .changeset/fresh-doodles-relax.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
---
'@directus/ai': minor
'@directus/api': minor
'@directus/app': minor
---

Added support for attaching prompts, content items, and visual editor elements to the AI Assistant context

:::notice
To use this feature, update [@directus/visual-editing](https://github.com/directus/visual-editing) to v1.2.0+ on your website.
:::
5 changes: 5 additions & 0 deletions .changeset/olive-trains-eat.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
'@directus/env': patch
---

Fixed LDAP DN properties being incorrectly cast as arrays
5 changes: 5 additions & 0 deletions .changeset/solid-ghosts-see.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
'@directus/app': minor
---

Changed the appearance of permission-blocked fields from disabled to non-editable
5 changes: 3 additions & 2 deletions api/src/ai/chat/controllers/chat.post.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ export const aiChatPostHandler: RequestHandler = async (req, res, _next) => {
throw new InvalidPayloadError({ reason: fromZodError(parseResult.error).message });
}

const { provider, model, messages: rawMessages, tools: requestedTools, toolApprovals } = parseResult.data;
const { provider, model, messages: rawMessages, tools: requestedTools, toolApprovals, context } = parseResult.data;

const aiSettings = res.locals['ai'].settings;

Expand Down Expand Up @@ -68,9 +68,10 @@ export const aiChatPostHandler: RequestHandler = async (req, res, _next) => {
const stream = await createUiStream(validationResult.data, {
provider,
model,
tools: tools,
tools,
aiSettings,
systemPrompt: res.locals['ai'].systemPrompt,
...(context && { context }),
onUsage: (usage) => {
res.write(`data: ${JSON.stringify({ type: 'data-usage', data: usage })}\n\n`);
},
Expand Down
26 changes: 22 additions & 4 deletions api/src/ai/chat/lib/create-ui-stream.ts
Original file line number Diff line number Diff line change
Expand Up @@ -16,19 +16,22 @@ import {
getProviderOptions,
} from '../../providers/index.js';
import { SYSTEM_PROMPT } from '../constants/system-prompt.js';
import type { ChatContext } from '../models/chat-request.js';
import { formatContextForSystemPrompt } from '../utils/format-context.js';

/**
 * Options accepted by `createUiStream`.
 */
export interface CreateUiStreamOptions {
	/** Which configured AI provider to route the request to. */
	provider: ProviderType;
	/** Model identifier, combined with `provider` to resolve the language model. */
	model: string;
	/** Tools exposed to the model, keyed by tool name. */
	tools: { [x: string]: Tool };
	/** Project AI settings — presumably used to build provider configs; confirm in createUiStream. */
	aiSettings: AISettings;
	/** Optional system-prompt override; a built-in default is used when omitted. */
	systemPrompt?: string;
	/** Optional chat context (attachments / current page) appended to the system prompt. */
	context?: ChatContext;
	/** Called with token-usage figures when the stream finishes. */
	onUsage?: (usage: Pick<LanguageModelUsage, 'inputTokens' | 'outputTokens' | 'totalTokens'>) => void | Promise<void>;
}

export const createUiStream = async (
messages: UIMessage[],
{ provider, model, tools, aiSettings, systemPrompt, onUsage }: CreateUiStreamOptions,
{ provider, model, tools, aiSettings, systemPrompt, context, onUsage }: CreateUiStreamOptions,
): Promise<StreamTextResult<Record<string, Tool<any, any>>, any>> => {
const configs = buildProviderConfigs(aiSettings);
const providerConfig = configs.find((c) => c.type === provider);
Expand All @@ -39,17 +42,32 @@ export const createUiStream = async (

const registry = createAIProviderRegistry(configs, aiSettings);

systemPrompt ||= SYSTEM_PROMPT;

const baseSystemPrompt = systemPrompt || SYSTEM_PROMPT;
const contextBlock = context ? formatContextForSystemPrompt(context) : null;
const providerOptions = getProviderOptions(provider, model, aiSettings);
// Compute the full system prompt once to avoid re-computing on each step
const fullSystemPrompt = contextBlock ? baseSystemPrompt + contextBlock : baseSystemPrompt;

const stream = streamText({
system: systemPrompt,
system: baseSystemPrompt,
model: registry.languageModel(`${provider}:${model}`),
messages: await convertToModelMessages(messages),
stopWhen: [stepCountIs(10)],
providerOptions,
tools,
/**
* prepareStep is called before each AI step to prepare the system prompt.
* When context exists, we override the system prompt to include context attachments.
* This allows the initial system prompt to be simple while ensuring all steps
* (including tool continuation steps) receive the full context.
*/
prepareStep: () => {
if (contextBlock) {
return { system: fullSystemPrompt };
}

return {};
},
onFinish({ usage }) {
if (onUsage) {
const { inputTokens, outputTokens, totalTokens } = usage;
Expand Down
66 changes: 66 additions & 0 deletions api/src/ai/chat/models/chat-request.ts
Original file line number Diff line number Diff line change
Expand Up @@ -27,12 +27,78 @@ export type ChatRequestTool = z.infer<typeof ChatRequestTool>;
export const ToolApprovalMode = z.enum(['always', 'ask', 'disabled']);
export type ToolApprovalMode = z.infer<typeof ToolApprovalMode>;

// Reference to a single item: its collection name plus its key (string or numeric).
// NOTE(review): `key` is presumably the item's primary key — confirm against callers.
const ItemContextData = z.object({
	collection: z.string(),
	key: z.union([z.string(), z.number()]),
});

// An element selected in the visual editor: the element key, the collection/item it
// renders, optionally which fields it displays, and its on-screen bounding box.
// NOTE(review): `rect` units are presumably CSS pixels — confirm with @directus/visual-editing.
const VisualElementContextData = z.object({
	key: z.string(),
	collection: z.string(),
	item: z.union([z.string(), z.number()]),
	fields: z.array(z.string()).optional(),
	rect: z
		.object({
			top: z.number(),
			left: z.number(),
			width: z.number(),
			height: z.number(),
		})
		.optional(),
});

// A saved prompt attachment: the rendered text, the raw prompt record, and the
// string values filled in by the user (presumably substituted into prompt variables — confirm).
const PromptContextData = z.object({
	text: z.string(),
	prompt: z.record(z.string(), z.unknown()),
	values: z.record(z.string(), z.string()),
});

/**
 * Fields common to every attachment variant: a human-readable label (`display`)
 * and a raw `snapshot` of the attached record at the time it was attached.
 */
const attachmentBase = z.object({
	display: z.string(),
	snapshot: z.record(z.string(), z.unknown()),
});

/**
 * One piece of context attached to a chat request, discriminated by `type`:
 * a content item, a visual-editor element, or a saved prompt.
 */
export const ContextAttachment = z.discriminatedUnion('type', [
	attachmentBase.extend({
		type: z.literal('item'),
		data: ItemContextData,
	}),
	attachmentBase.extend({
		type: z.literal('visual-element'),
		data: VisualElementContextData,
	}),
	attachmentBase.extend({
		type: z.literal('prompt'),
		data: PromptContextData,
	}),
]);

export type ContextAttachment = z.infer<typeof ContextAttachment>;

/**
 * Where the user currently is in the app: the route path and, when applicable,
 * the collection/item/module that route points at.
 */
export const PageContext = z.object({
	path: z.string(),
	collection: z.string().optional(),
	item: z.union([z.string(), z.number()]).optional(),
	module: z.string().optional(),
});

export type PageContext = z.infer<typeof PageContext>;

/**
 * Context sent alongside a chat request: up to 10 explicit attachments plus
 * the page the user is viewing. Both parts are optional.
 */
export const ChatContext = z.object({
	attachments: z.array(ContextAttachment).max(10).optional(),
	page: PageContext.optional(),
});

export type ChatContext = z.infer<typeof ChatContext>;

/**
 * Full chat request body: provider-specific fields (discriminated on `provider`)
 * intersected with the fields common to all providers.
 * `messages` entries are deliberately loose objects — they are validated
 * downstream, not by this schema.
 */
export const ChatRequest = z.intersection(
	z.discriminatedUnion('provider', [ProviderOpenAi, ProviderAnthropic, ProviderGoogle, ProviderOpenAiCompatible]),
	z.object({
		tools: z.array(ChatRequestTool),
		messages: z.array(z.looseObject({})),
		toolApprovals: z.record(z.string(), ToolApprovalMode).optional(),
		context: ChatContext.optional(),
	}),
);

Expand Down
Loading
Loading