Skip to content

Commit 3626db3

Browse files
committed
feat: support configuring chat context in AI assistant
This is important for reports with many apps or many checks, where the model's context limits can easily be hit.
1 parent 3865a74 commit 3626db3

File tree

8 files changed

+489
-311
lines changed

8 files changed

+489
-311
lines changed

report-app/report-server.ts

Lines changed: 24 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,12 @@ import {fileURLToPath} from 'node:url';
1010
import {chatWithReportAI} from '../runner/reporting/report-ai-chat';
1111
import {convertV2ReportToV3Report} from '../runner/reporting/migrations/v2_to_v3';
1212
import {FetchedLocalReports, fetchReportsFromDisk} from '../runner/reporting/report-local-disk';
13-
import {AiChatRequest, AIConfigState, RunInfo} from '../runner/shared-interfaces';
13+
import {
14+
AiChatRequest,
15+
AIConfigState,
16+
IndividualAssessmentState,
17+
RunInfo,
18+
} from '../runner/shared-interfaces';
1419

1520
// This will result in a lot of loading and would slow down the serving,
1621
// so it's loaded lazily below.
@@ -55,8 +60,19 @@ async function fetchAndMigrateReports(id: string): Promise<RunInfo[] | null> {
5560
return null;
5661
}
5762

63+
let checkID = 0;
5864
// Convert potential older v2 reports.
59-
return result.map(r => convertV2ReportToV3Report(r));
65+
return result.map(run => {
66+
run = convertV2ReportToV3Report(run);
67+
run = {
68+
...run,
69+
results: run.results.map(check => ({
70+
id: `${id}-${checkID++}`,
71+
...check,
72+
})),
73+
};
74+
return run;
75+
});
6076
}
6177

6278
// Endpoint for fetching a specific report group.
@@ -89,16 +105,19 @@ app.post('/api/reports/:id/chat', async (req, res) => {
89105
}
90106

91107
try {
92-
const {prompt, pastMessages, model} = req.body as AiChatRequest;
93-
const assessments = reports.flatMap(run => run.results);
108+
const {prompt, pastMessages, model, context, openAppIDs} = req.body as AiChatRequest;
109+
const allAssessments = reports.flatMap(run => run.results);
110+
94111
const abortController = new AbortController();
95112
const summary = await chatWithReportAI(
96113
await (llm ?? getOrCreateGenkitLlmRunner()),
97114
prompt,
98115
abortController.signal,
99-
assessments,
116+
allAssessments,
100117
pastMessages,
101118
model,
119+
context,
120+
openAppIDs,
102121
);
103122
res.json(summary);
104123
} catch (e) {

0 commit comments

Comments
 (0)