From 3faaed571ea0771f9b0ced90eafd58cc7212c18f Mon Sep 17 00:00:00 2001 From: Stephen Toub Date: Sun, 1 Mar 2026 18:03:19 -0500 Subject: [PATCH 1/7] Add OpenTelemetry GenAI instrumentation to Copilot SDK Introduced built-in OpenTelemetry tracing and metrics for agent turns and tool executions, following GenAI semantic conventions (v1.40). --- README.md | 1 + docs/opentelemetry-instrumentation.md | 744 +++---- dotnet/README.md | 3 + dotnet/src/Client.cs | 39 +- dotnet/src/CopilotTelemetry.cs | 1958 +++++++++++++++++ dotnet/src/OpenTelemetryConsts.cs | 188 ++ dotnet/src/Session.cs | 88 +- dotnet/src/Types.cs | 59 + dotnet/test/GitHub.Copilot.SDK.Test.csproj | 1 + dotnet/test/Harness/E2ETestContext.cs | 3 +- dotnet/test/OpenTelemetryTests.cs | 804 +++++++ go/README.md | 3 + go/client.go | 75 +- go/copilot_telemetry.go | 1840 ++++++++++++++++ go/copilot_telemetry_consts.go | 122 + go/copilot_telemetry_test.go | 1343 +++++++++++ go/go.mod | 18 +- go/go.sum | 33 + go/session.go | 81 +- go/types.go | 13 + nodejs/README.md | 3 + nodejs/package-lock.json | 107 + nodejs/package.json | 14 + nodejs/src/client.ts | 74 +- nodejs/src/copilot-telemetry.ts | 1808 +++++++++++++++ nodejs/src/index.ts | 1 + nodejs/src/session.ts | 113 +- nodejs/src/types.ts | 41 + nodejs/test/client.test.ts | 4 +- nodejs/test/opentelemetry.test.ts | 1555 +++++++++++++ python/README.md | 3 + python/copilot/__init__.py | 2 + python/copilot/_copilot_telemetry.py | 1744 +++++++++++++++ python/copilot/client.py | 79 +- python/copilot/session.py | 124 +- python/copilot/types.py | 33 + python/pyproject.toml | 3 + python/test_opentelemetry.py | 1759 +++++++++++++++ test/scenarios/auth/byok-anthropic/go/go.mod | 15 +- test/scenarios/auth/byok-anthropic/go/go.sum | 31 + test/scenarios/auth/byok-azure/go/go.mod | 15 +- test/scenarios/auth/byok-azure/go/go.sum | 31 + test/scenarios/auth/byok-ollama/go/go.mod | 15 +- test/scenarios/auth/byok-ollama/go/go.sum | 31 + test/scenarios/auth/byok-openai/go/go.mod | 15 +- 
test/scenarios/auth/byok-openai/go/go.sum | 31 + test/scenarios/auth/gh-app/go/go.mod | 15 +- test/scenarios/auth/gh-app/go/go.sum | 31 + .../bundling/app-backend-to-server/go/go.mod | 15 +- .../bundling/app-backend-to-server/go/go.sum | 31 + .../bundling/app-direct-server/go/go.mod | 15 +- .../bundling/app-direct-server/go/go.sum | 31 + .../bundling/container-proxy/go/go.mod | 15 +- .../bundling/container-proxy/go/go.sum | 31 + .../bundling/fully-bundled/go/go.mod | 15 +- .../bundling/fully-bundled/go/go.sum | 31 + test/scenarios/callbacks/hooks/go/go.mod | 15 +- test/scenarios/callbacks/hooks/go/go.sum | 31 + .../scenarios/callbacks/permissions/go/go.mod | 15 +- .../scenarios/callbacks/permissions/go/go.sum | 31 + test/scenarios/callbacks/user-input/go/go.mod | 15 +- test/scenarios/callbacks/user-input/go/go.sum | 31 + test/scenarios/modes/default/go/go.mod | 15 +- test/scenarios/modes/default/go/go.sum | 31 + test/scenarios/modes/minimal/go/go.mod | 15 +- test/scenarios/modes/minimal/go/go.sum | 31 + test/scenarios/prompts/attachments/go/go.mod | 15 +- test/scenarios/prompts/attachments/go/go.sum | 31 + .../prompts/reasoning-effort/go/go.mod | 15 +- .../prompts/reasoning-effort/go/go.sum | 31 + .../prompts/system-message/go/go.mod | 15 +- .../prompts/system-message/go/go.sum | 31 + .../sessions/concurrent-sessions/go/go.mod | 15 +- .../sessions/concurrent-sessions/go/go.sum | 31 + .../sessions/infinite-sessions/go/go.mod | 15 +- .../sessions/infinite-sessions/go/go.sum | 31 + .../sessions/session-resume/go/go.mod | 15 +- .../sessions/session-resume/go/go.sum | 31 + test/scenarios/sessions/streaming/go/go.mod | 15 +- test/scenarios/sessions/streaming/go/go.sum | 31 + test/scenarios/tools/custom-agents/go/go.mod | 15 +- test/scenarios/tools/custom-agents/go/go.sum | 31 + test/scenarios/tools/mcp-servers/go/go.mod | 15 +- test/scenarios/tools/mcp-servers/go/go.sum | 31 + test/scenarios/tools/no-tools/go/go.mod | 15 +- test/scenarios/tools/no-tools/go/go.sum | 31 + 
test/scenarios/tools/skills/go/go.mod | 15 +- test/scenarios/tools/skills/go/go.sum | 31 + test/scenarios/tools/tool-filtering/go/go.mod | 15 +- test/scenarios/tools/tool-filtering/go/go.sum | 31 + .../tools/virtual-filesystem/go/go.mod | 15 +- .../tools/virtual-filesystem/go/go.sum | 31 + test/scenarios/transport/reconnect/go/go.mod | 15 +- test/scenarios/transport/reconnect/go/go.sum | 31 + test/scenarios/transport/stdio/go/go.mod | 15 +- test/scenarios/transport/stdio/go/go.sum | 31 + test/scenarios/transport/tcp/go/go.mod | 15 +- test/scenarios/transport/tcp/go/go.sum | 31 + ..._subagent_invoke_agent_and_chat_spans.yaml | 144 ++ 99 files changed, 15824 insertions(+), 583 deletions(-) create mode 100644 dotnet/src/CopilotTelemetry.cs create mode 100644 dotnet/src/OpenTelemetryConsts.cs create mode 100644 dotnet/test/OpenTelemetryTests.cs create mode 100644 go/copilot_telemetry.go create mode 100644 go/copilot_telemetry_consts.go create mode 100644 go/copilot_telemetry_test.go create mode 100644 nodejs/src/copilot-telemetry.ts create mode 100644 nodejs/test/opentelemetry.test.ts create mode 100644 python/copilot/_copilot_telemetry.py create mode 100644 python/test_opentelemetry.py create mode 100644 test/snapshots/otel/records_subagent_invoke_agent_and_chat_spans.yaml diff --git a/README.md b/README.md index be9b4694b..afa5e9f14 100644 --- a/README.md +++ b/README.md @@ -109,6 +109,7 @@ Please use the [GitHub Issues](https://github.com/github/copilot-sdk/issues) pag - **[Getting Started](./docs/getting-started.md)** – Tutorial to get up and running - **[Authentication](./docs/auth/index.md)** – GitHub OAuth, BYOK, and more +- **[OpenTelemetry Instrumentation](./docs/opentelemetry-instrumentation.md)** – Built-in tracing and metrics following GenAI semantic conventions - **[Cookbook](https://github.com/github/awesome-copilot/blob/main/cookbook/copilot-sdk)** – Practical recipes for common tasks across all languages - **[More 
Resources](https://github.com/github/awesome-copilot/blob/main/collections/copilot-sdk.md)** – Additional examples, tutorials, and community resources diff --git a/docs/opentelemetry-instrumentation.md b/docs/opentelemetry-instrumentation.md index f0e1b2556..5f8c1dfc9 100644 --- a/docs/opentelemetry-instrumentation.md +++ b/docs/opentelemetry-instrumentation.md @@ -1,570 +1,394 @@ -# OpenTelemetry Instrumentation for Copilot SDK +# OpenTelemetry Instrumentation -This guide shows how to add OpenTelemetry tracing to your Copilot SDK applications using GenAI semantic conventions. +The Copilot SDK includes built-in OpenTelemetry instrumentation following the [OpenTelemetry Semantic Conventions for Generative AI systems (v1.40)](https://opentelemetry.io/docs/specs/semconv/gen-ai/). Telemetry is **opt-in** — enable it by providing a `TelemetryConfig` when creating a client. The SDK automatically creates spans, records metrics, and emits span events for agent invocations and tool executions. -## Overview +## Quick Start -The Copilot SDK emits session events as your agent processes requests. You can instrument your application to convert these events into OpenTelemetry spans and attributes following the [OpenTelemetry GenAI Semantic Conventions v1.34.0](https://opentelemetry.io/docs/specs/semconv/gen-ai/). +
+Node.js / TypeScript -## Installation +Install the OpenTelemetry SDK packages (the `@opentelemetry/api` peer dependency is included with the Copilot SDK): ```bash -pip install opentelemetry-sdk opentelemetry-api +npm install @opentelemetry/sdk-trace-node @opentelemetry/sdk-trace-base @opentelemetry/sdk-metrics ``` -For exporting to observability backends: +```typescript +import { CopilotClient } from "@github/copilot-sdk"; +import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node"; +import { SimpleSpanProcessor, ConsoleSpanExporter } from "@opentelemetry/sdk-trace-base"; -```bash -# Console output -pip install opentelemetry-sdk +// 1. Set up OpenTelemetry (your exporter of choice) +const provider = new NodeTracerProvider(); +provider.addSpanProcessor(new SimpleSpanProcessor(new ConsoleSpanExporter())); +provider.register(); -# Azure Monitor -pip install azure-monitor-opentelemetry +// 2. Enable built-in telemetry on the client +const client = new CopilotClient({ + telemetry: {}, // defaults are fine — or customize below +}); +await client.start(); -# OTLP (Jaeger, Prometheus, etc.) -pip install opentelemetry-exporter-otlp +// 3. Use the SDK as usual — spans and metrics are emitted automatically +const session = await client.createSession({ model: "gpt-5" }); +const response = await session.sendAndWait({ prompt: "Hello!" }); + +await session.destroy(); +await client.stop(); ``` -## Basic Setup +
+ +
+Python -### 1. Initialize OpenTelemetry +Install the OpenTelemetry SDK packages: + +```bash +pip install opentelemetry-sdk opentelemetry-api +``` ```python +import asyncio +from copilot import CopilotClient from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter -# Setup tracer provider -tracer_provider = TracerProvider() -trace.set_tracer_provider(tracer_provider) +# 1. Set up OpenTelemetry +provider = TracerProvider() +provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter())) +trace.set_tracer_provider(provider) + +# 2. Enable built-in telemetry on the client +client = CopilotClient({"telemetry": {}}) +await client.start() -# Add exporter (console example) -span_exporter = ConsoleSpanExporter() -tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter)) +# 3. Use the SDK as usual +session = await client.create_session({"model": "gpt-5"}) +response = await session.send_and_wait({"prompt": "Hello!"}) -# Get a tracer -tracer = trace.get_tracer(__name__) +await session.destroy() +await client.stop() ``` -### 2. Create Spans Around Agent Operations +
-```python -from copilot import CopilotClient, PermissionHandler -from copilot.generated.session_events import SessionEventType -from opentelemetry import trace, context -from opentelemetry.trace import SpanKind +
+Go -# Initialize client and start the CLI server -client = CopilotClient() -await client.start() +Install the OpenTelemetry SDK packages: -tracer = trace.get_tracer(__name__) +```bash +go get go.opentelemetry.io/otel +go get go.opentelemetry.io/otel/sdk/trace +go get go.opentelemetry.io/otel/exporters/stdout/stdouttrace +``` -# Create a span for the agent invocation -span_attrs = { - "gen_ai.operation.name": "invoke_agent", - "gen_ai.provider.name": "github.copilot", - "gen_ai.agent.name": "my-agent", - "gen_ai.request.model": "gpt-5", -} +```go +package main -span = tracer.start_span( - name="invoke_agent my-agent", - kind=SpanKind.CLIENT, - attributes=span_attrs -) -token = context.attach(trace.set_span_in_context(span)) +import ( + "context" + "log" -try: - # Create a session (model is set here, not on the client) - session = await client.create_session({ - "model": "gpt-5", - "on_permission_request": PermissionHandler.approve_all, - }) + copilot "github.com/github/copilot-sdk/go" + "go.opentelemetry.io/otel" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" +) - # Subscribe to events via callback - def handle_event(event): - if event.type == SessionEventType.ASSISTANT_USAGE: - if event.data.model: - span.set_attribute("gen_ai.response.model", event.data.model) +func main() { + // 1. Set up OpenTelemetry + exporter, _ := stdouttrace.New() + tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exporter)) + otel.SetTracerProvider(tp) + defer tp.Shutdown(context.Background()) - unsubscribe = session.on(handle_event) + // 2. Enable built-in telemetry on the client + client := copilot.NewClient(&copilot.ClientOptions{ + Telemetry: &copilot.TelemetryConfig{}, + }) + if err := client.Start(context.Background()); err != nil { + log.Fatal(err) + } + defer client.Stop() - # Send a message (returns a message ID) - await session.send({"prompt": "Hello, world!"}) + // 3. 
Use the SDK as usual + session, _ := client.CreateSession(context.Background(), &copilot.SessionConfig{ + Model: "gpt-5", + }) + defer session.Destroy() - # Or send and wait for the session to become idle - response = await session.send_and_wait({"prompt": "Hello, world!"}) -finally: - context.detach(token) - span.end() - await client.stop() + session.SendAndWait(context.Background(), copilot.MessageOptions{ + Prompt: "Hello!", + }) +} ``` -## Copilot SDK Event to GenAI Attribute Mapping +
-The Copilot SDK emits `SessionEventType` events during agent execution. Subscribe to these events using `session.on(handler)`, which returns an unsubscribe function. Here's how to map these events to GenAI semantic convention attributes: +
+.NET -### Core Session Events +Install the OpenTelemetry SDK packages: -| SessionEventType | GenAI Attributes | Description | -|------------------|------------------|-------------| -| `SESSION_START` | - | Session initialization (mark span start) | -| `SESSION_IDLE` | - | Session completed (mark span end) | -| `SESSION_ERROR` | `error.type`, `error.message` | Error occurred | +```bash +dotnet add package OpenTelemetry +dotnet add package OpenTelemetry.Exporter.Console +``` -### Assistant Events + +```csharp +using GitHub.Copilot.SDK; +using OpenTelemetry; +using OpenTelemetry.Trace; + +// 1. Set up OpenTelemetry — add the SDK's ActivitySource +using var tracerProvider = Sdk.CreateTracerProviderBuilder() + .AddSource("github.copilot.sdk") // matches the default source name + .AddConsoleExporter() + .Build(); + +// 2. Enable built-in telemetry on the client +await using var client = new CopilotClient(new CopilotClientOptions +{ + Telemetry = new TelemetryConfig() +}); +await client.StartAsync(); + +// 3. Use the SDK as usual +await using var session = await client.CreateSessionAsync(new SessionConfig +{ + Model = "gpt-5" +}); +await session.SendAndWaitAsync(new MessageOptions { Prompt = "Hello!" }); +``` -| SessionEventType | GenAI Attributes | Description | -|------------------|------------------|-------------| -| `ASSISTANT_TURN_START` | - | Assistant begins processing | -| `ASSISTANT_TURN_END` | - | Assistant finished processing | -| `ASSISTANT_MESSAGE` | `gen_ai.output.messages` (event) | Final assistant message with complete content | -| `ASSISTANT_MESSAGE_DELTA` | - | Streaming message chunk (optional to trace) | -| `ASSISTANT_USAGE` | `gen_ai.usage.input_tokens`
`gen_ai.usage.output_tokens`
`gen_ai.response.model` | Token usage and model information | -| `ASSISTANT_REASONING` | - | Reasoning content (optional to trace) | -| `ASSISTANT_INTENT` | - | Assistant's understood intent | +
-### Tool Execution Events +## Configuration -| SessionEventType | GenAI Attributes / Span | Description | -|------------------|-------------------------|-------------| -| `TOOL_EXECUTION_START` | Create child span:
- `gen_ai.tool.name`
- `gen_ai.tool.call.id`
- `gen_ai.operation.name`: `execute_tool`
- `gen_ai.tool.call.arguments` (opt-in) | Tool execution begins | -| `TOOL_EXECUTION_COMPLETE` | On child span:
- `gen_ai.tool.call.result` (opt-in)
- `error.type` (if failed)
End child span | Tool execution finished | -| `TOOL_EXECUTION_PARTIAL_RESULT` | - | Streaming tool result | +All languages accept the same two options: -### Model and Context Events +| Option | Default | Description | +|--------|---------|-------------| +| `enableSensitiveData` | `false` | Include potentially sensitive data (message content, tool arguments/results, system instructions) in telemetry. Falls back to the `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT` environment variable when not set. | +| `sourceName` | `"github.copilot.sdk"` | Name used for the tracer and meter. Use this to distinguish multiple SDK instances or match your OpenTelemetry pipeline filters. | -| SessionEventType | GenAI Attributes | Description | -|------------------|------------------|-------------| -| `SESSION_MODEL_CHANGE` | `gen_ai.request.model` | Model changed during session | -| `SESSION_CONTEXT_CHANGED` | - | Context window modified | -| `SESSION_TRUNCATION` | - | Context truncated | +> **Language-specific option casing:** +> Node.js uses `enableSensitiveData` / `sourceName` (camelCase). +> Python uses `enable_sensitive_data` / `source_name` (snake_case). +> Go uses `EnableSensitiveData` / `SourceName` (PascalCase). +> .NET uses `EnableSensitiveData` / `SourceName` (PascalCase). -## Detailed Event Mapping Examples +### Enabling Sensitive Data -### ASSISTANT_USAGE Event +By default, message content, tool arguments, tool results, and system instructions are **not** included in telemetry to protect potentially sensitive data. 
To include them: -When you receive an `ASSISTANT_USAGE` event, extract token usage: +**Option 1 — Per-client configuration:** -```python -from copilot.generated.session_events import SessionEventType - -def handle_usage(event): - if event.type == SessionEventType.ASSISTANT_USAGE: - data = event.data - if data.model: - span.set_attribute("gen_ai.response.model", data.model) - if data.input_tokens is not None: - span.set_attribute("gen_ai.usage.input_tokens", int(data.input_tokens)) - if data.output_tokens is not None: - span.set_attribute("gen_ai.usage.output_tokens", int(data.output_tokens)) - -unsubscribe = session.on(handle_usage) -await session.send({"prompt": "Hello"}) + +```typescript +// Node.js +const client = new CopilotClient({ + telemetry: { enableSensitiveData: true }, +}); ``` -**Event Data Structure:** ```python -@dataclass -class Usage: - input_tokens: float - output_tokens: float - cache_read_tokens: float - cache_write_tokens: float +# Python +client = CopilotClient({"telemetry": {"enable_sensitive_data": True}}) ``` -**Maps to GenAI Attributes:** -- `input_tokens` → `gen_ai.usage.input_tokens` -- `output_tokens` → `gen_ai.usage.output_tokens` -- Response model → `gen_ai.response.model` + +```go +// Go +client := copilot.NewClient(&copilot.ClientOptions{ + Telemetry: &copilot.TelemetryConfig{ + EnableSensitiveData: copilot.Bool(true), + }, +}) +``` -### TOOL_EXECUTION_START / COMPLETE Events + +```csharp +// .NET +var client = new CopilotClient(new CopilotClientOptions +{ + Telemetry = new TelemetryConfig { EnableSensitiveData = true } +}); +``` -Create child spans for each tool execution: +**Option 2 — Environment variable (applies to all clients):** -```python -from opentelemetry.trace import SpanKind -import json - -# Dictionary to track active tool spans -tool_spans = {} - -def handle_tool_events(event): - data = event.data - - if event.type == SessionEventType.TOOL_EXECUTION_START and data: - call_id = data.tool_call_id or str(uuid.uuid4()) - 
tool_name = data.tool_name or "unknown" - - tool_attrs = { - "gen_ai.tool.name": tool_name, - "gen_ai.operation.name": "execute_tool", - } - - if call_id: - tool_attrs["gen_ai.tool.call.id"] = call_id - - # Optional: include tool arguments (may contain sensitive data) - if data.arguments is not None: - try: - tool_attrs["gen_ai.tool.call.arguments"] = json.dumps(data.arguments) - except Exception: - tool_attrs["gen_ai.tool.call.arguments"] = str(data.arguments) - - tool_span = tracer.start_span( - name=f"execute_tool {tool_name}", - kind=SpanKind.CLIENT, - attributes=tool_attrs - ) - tool_token = context.attach(trace.set_span_in_context(tool_span)) - tool_spans[call_id] = (tool_span, tool_token) - - elif event.type == SessionEventType.TOOL_EXECUTION_COMPLETE and data: - call_id = data.tool_call_id - entry = tool_spans.pop(call_id, None) if call_id else None - - if entry: - tool_span, tool_token = entry - - # Optional: include tool result (may contain sensitive data) - if data.result is not None: - try: - result_str = json.dumps(data.result) - except Exception: - result_str = str(data.result) - # Truncate to 512 chars to avoid huge spans - tool_span.set_attribute("gen_ai.tool.call.result", result_str[:512]) - - # Mark as error if tool failed - if hasattr(data, "success") and data.success is False: - tool_span.set_attribute("error.type", "tool_error") - - context.detach(tool_token) - tool_span.end() - -unsubscribe = session.on(handle_tool_events) -await session.send({"prompt": "What's the weather?"}) +```bash +export OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true ``` -**Tool Event Data:** -- `tool_call_id` → `gen_ai.tool.call.id` -- `tool_name` → `gen_ai.tool.name` -- `arguments` → `gen_ai.tool.call.arguments` (opt-in) -- `result` → `gen_ai.tool.call.result` (opt-in) - -### ASSISTANT_MESSAGE Event +## Agent Attribution -Capture the final message as a span event: +You can associate sessions with a named agent for telemetry attribution using `agentName` and 
`agentDescription` on the session config. When set, the `invoke_agent` span includes `gen_ai.agent.name` and `gen_ai.agent.description` attributes. -```python -def handle_message(event): - if event.type == SessionEventType.ASSISTANT_MESSAGE and event.data: - if event.data.content: - # Add as a span event (opt-in for content recording) - span.add_event( - "gen_ai.output.messages", - attributes={ - "gen_ai.event.content": json.dumps({ - "role": "assistant", - "content": event.data.content - }) - } - ) - -unsubscribe = session.on(handle_message) -await session.send({"prompt": "Tell me a joke"}) + +```typescript +// Node.js +const session = await client.createSession({ + model: "gpt-5", + agentName: "weather-bot", + agentDescription: "An agent that provides weather forecasts", +}); ``` -## Complete Example - + ```python -import asyncio -import json -import uuid -from copilot import CopilotClient, PermissionHandler -from copilot.generated.session_events import SessionEventType -from opentelemetry import trace, context -from opentelemetry.trace import SpanKind -from opentelemetry.sdk.trace import TracerProvider -from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter +# Python +session = await client.create_session({ + "model": "gpt-5", + "agent_name": "weather-bot", + "agent_description": "An agent that provides weather forecasts", +}) +``` -# Setup OpenTelemetry -tracer_provider = TracerProvider() -trace.set_tracer_provider(tracer_provider) -tracer_provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter())) -tracer = trace.get_tracer(__name__) - -async def invoke_agent(prompt: str): - """Invoke agent with full OpenTelemetry instrumentation.""" - - # Create main span - span_attrs = { - "gen_ai.operation.name": "invoke_agent", - "gen_ai.provider.name": "github.copilot", - "gen_ai.agent.name": "example-agent", - "gen_ai.request.model": "gpt-5", - } + +```go +// Go +session, _ := client.CreateSession(ctx, &copilot.SessionConfig{ + 
Model: "gpt-5", + AgentName: "weather-bot", + AgentDescription: "An agent that provides weather forecasts", +}) +``` - span = tracer.start_span( - name="invoke_agent example-agent", - kind=SpanKind.CLIENT, - attributes=span_attrs - ) - token = context.attach(trace.set_span_in_context(span)) - tool_spans = {} - - try: - client = CopilotClient() - await client.start() - - session = await client.create_session({ - "model": "gpt-5", - "on_permission_request": PermissionHandler.approve_all, - }) - - # Subscribe to events via callback - def handle_event(event): - data = event.data - - # Handle usage events - if event.type == SessionEventType.ASSISTANT_USAGE and data: - if data.model: - span.set_attribute("gen_ai.response.model", data.model) - if data.input_tokens is not None: - span.set_attribute("gen_ai.usage.input_tokens", int(data.input_tokens)) - if data.output_tokens is not None: - span.set_attribute("gen_ai.usage.output_tokens", int(data.output_tokens)) - - # Handle tool execution - elif event.type == SessionEventType.TOOL_EXECUTION_START and data: - call_id = data.tool_call_id or str(uuid.uuid4()) - tool_name = data.tool_name or "unknown" - - tool_attrs = { - "gen_ai.tool.name": tool_name, - "gen_ai.operation.name": "execute_tool", - "gen_ai.tool.call.id": call_id, - } - - tool_span = tracer.start_span( - name=f"execute_tool {tool_name}", - kind=SpanKind.CLIENT, - attributes=tool_attrs - ) - tool_token = context.attach(trace.set_span_in_context(tool_span)) - tool_spans[call_id] = (tool_span, tool_token) - - elif event.type == SessionEventType.TOOL_EXECUTION_COMPLETE and data: - call_id = data.tool_call_id - entry = tool_spans.pop(call_id, None) if call_id else None - if entry: - tool_span, tool_token = entry - context.detach(tool_token) - tool_span.end() - - # Capture final message - elif event.type == SessionEventType.ASSISTANT_MESSAGE and data: - if data.content: - print(f"Assistant: {data.content}") - - unsubscribe = session.on(handle_event) - - # Send message 
and wait for completion - response = await session.send_and_wait({"prompt": prompt}) - - span.set_attribute("gen_ai.response.finish_reasons", ["stop"]) - unsubscribe() - - except Exception as e: - span.set_attribute("error.type", type(e).__name__) - raise - finally: - # Clean up any unclosed tool spans - for call_id, (tool_span, tool_token) in tool_spans.items(): - tool_span.set_attribute("error.type", "stream_aborted") - context.detach(tool_token) - tool_span.end() - - context.detach(token) - span.end() - await client.stop() - -# Run -asyncio.run(invoke_agent("What's 2+2?")) + +```csharp +// .NET +var session = await client.CreateSessionAsync(new SessionConfig +{ + Model = "gpt-5", + AgentName = "weather-bot", + AgentDescription = "An agent that provides weather forecasts", +}); ``` -## Required Span Attributes +## Emitted Telemetry -According to OpenTelemetry GenAI semantic conventions, these attributes are **required** for agent invocation spans: +### Spans -| Attribute | Description | Example | -|-----------|-------------|---------| -| `gen_ai.operation.name` | Operation type | `invoke_agent`, `chat`, `execute_tool` | -| `gen_ai.provider.name` | Provider identifier | `github.copilot` | -| `gen_ai.request.model` | Model used for request | `gpt-5`, `gpt-4.1` | +The SDK automatically creates the following spans: -## Recommended Span Attributes +#### `invoke_agent` (Client span) -These attributes are **recommended** for better observability: +Created on the first `send` / `sendAndWait` call after the session becomes idle and reused across subsequent `send` / `sendAndWait` calls in the same turn. Ends when a turn-ending event is emitted (e.g., `session.idle` or `session.error`). Named `invoke_agent {model}` when a model is known, or just `invoke_agent`. 
-| Attribute | Description | -|-----------|-------------| -| `gen_ai.agent.id` | Unique agent identifier | -| `gen_ai.agent.name` | Human-readable agent name | -| `gen_ai.response.model` | Actual model used in response | -| `gen_ai.usage.input_tokens` | Input tokens consumed | -| `gen_ai.usage.output_tokens` | Output tokens generated | -| `gen_ai.response.finish_reasons` | Completion reasons (e.g., `["stop"]`) | +| Attribute | Description | Condition | +|-----------|-------------|-----------| +| `gen_ai.operation.name` | `"invoke_agent"` | Always | +| `gen_ai.provider.name` | Provider name (e.g., `"github"`, `"openai"`, `"azure.ai.openai"`, `"anthropic"`) | Always | +| `gen_ai.agent.id` | Session ID | Always | +| `gen_ai.conversation.id` | Session ID | Always | +| `gen_ai.request.model` | Requested model name | When model is set | +| `gen_ai.response.model` | Actual model used (from usage event) | When reported | +| `gen_ai.agent.name` | Agent name | When `agentName` is set | +| `gen_ai.agent.description` | Agent description | When `agentDescription` is set | +| `gen_ai.usage.input_tokens` | Input token count | When reported | +| `gen_ai.usage.output_tokens` | Output token count | When reported | +| `gen_ai.response.finish_reasons` | `["stop"]` or `["error"]` | At span end | +| `server.address` | Provider host | When using custom provider | +| `server.port` | Provider port | When using custom provider | +| `error.type` | Error type name | On error | +| `gen_ai.input.messages` | JSON input messages | When `enableSensitiveData` is true | +| `gen_ai.output.messages` | JSON output messages | When `enableSensitiveData` is true | +| `gen_ai.system_instructions` | System message content | When `enableSensitiveData` is true | +| `gen_ai.tool.definitions` | JSON tool definitions | Always (non-sensitive) | -## Content Recording +#### `execute_tool` (Internal span) -Recording message content and tool arguments/results is **optional** and should be opt-in since it may contain 
sensitive data. +Created as a child of `invoke_agent` for each custom tool call. Named `execute_tool {toolName}`. -### Environment Variable Control +| Attribute | Description | Condition | +|-----------|-------------|-----------| +| `gen_ai.operation.name` | `"execute_tool"` | Always | +| `gen_ai.tool.name` | Tool name | Always | +| `gen_ai.tool.call.id` | Unique call ID | Always | +| `gen_ai.tool.type` | `"function"` | Always | +| `gen_ai.tool.description` | Tool description | When available | +| `gen_ai.tool.call.arguments` | JSON arguments | When `enableSensitiveData` is true | +| `gen_ai.tool.call.result` | JSON result | When `enableSensitiveData` is true | +| `error.type` | Error type name | On error | -```bash -# Enable content recording -export OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true -``` +### Metrics -### Checking at Runtime +The SDK records the following metrics (all using the configured `sourceName` as the meter name): - -```python -import os - -def should_record_content(): - return os.getenv("OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT", "false").lower() == "true" +| Metric | Type | Unit | Description | +|--------|------|------|-------------| +| `gen_ai.client.operation.duration` | Histogram (float) | `s` | Duration of `invoke_agent` and `execute_tool` operations | +| `gen_ai.client.token.usage` | Histogram (int) | `{token}` | Token usage per operation, with `gen_ai.token.type` attribute (`"input"` or `"output"`) | -# Only add content if enabled -if should_record_content() and event.data.content: - span.add_event("gen_ai.output.messages", ...) -``` +## Exporter Setup -## MCP (Model Context Protocol) Tool Conventions +The SDK uses the standard OpenTelemetry API — configure any exporter compatible with your language's OpenTelemetry SDK. -For MCP-based tools, add these additional attributes following the [OpenTelemetry MCP semantic conventions](https://opentelemetry.io/docs/specs/semconv/gen-ai/mcp/): +### OTLP (Jaeger, Grafana, etc.) 
-```python -tool_attrs = { - # Required - "mcp.method.name": "tools/call", - - # Recommended - "mcp.server.name": data.mcp_server_name, - "mcp.session.id": session.session_id, - - # GenAI attributes - "gen_ai.tool.name": data.mcp_tool_name, - "gen_ai.operation.name": "execute_tool", - "network.transport": "pipe", # Copilot SDK uses stdio -} -``` - -## Span Naming Conventions - -Follow these patterns for span names: - -| Operation | Span Name Pattern | Example | -|-----------|-------------------|---------| -| Agent invocation | `invoke_agent {agent_name}` | `invoke_agent weather-bot` | -| Chat | `chat` | `chat` | -| Tool execution | `execute_tool {tool_name}` | `execute_tool fetch_weather` | -| MCP tool | `tools/call {tool_name}` | `tools/call read_file` | +```bash +# Node.js +npm install @opentelemetry/exporter-trace-otlp-http -## Metrics +# Python +pip install opentelemetry-exporter-otlp -You can also export metrics for token usage and operation duration: +# Go +go get go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp -```python -from opentelemetry import metrics -from opentelemetry.sdk.metrics import MeterProvider -from opentelemetry.sdk.metrics.export import ConsoleMetricExporter, PeriodicExportingMetricReader - -# Setup metrics -reader = PeriodicExportingMetricReader(ConsoleMetricExporter()) -provider = MeterProvider(metric_readers=[reader]) -metrics.set_meter_provider(provider) - -meter = metrics.get_meter(__name__) - -# Create metrics -operation_duration = meter.create_histogram( - name="gen_ai.client.operation.duration", - description="Duration of GenAI operations", - unit="ms" -) +# .NET +dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol +``` -token_usage = meter.create_counter( - name="gen_ai.client.token.usage", - description="Token usage count" -) +### Azure Monitor -# Record metrics -operation_duration.record(123.45, attributes={ - "gen_ai.operation.name": "invoke_agent", - "gen_ai.request.model": "gpt-5", -}) + +```bash +# 
Python +pip install azure-monitor-opentelemetry -token_usage.add(150, attributes={ - "gen_ai.token.type": "input", - "gen_ai.operation.name": "invoke_agent", -}) +# .NET +dotnet add package Azure.Monitor.OpenTelemetry.Exporter ``` -## Azure Monitor Integration - -For production observability with Azure Monitor: - + ```python +# Python — Azure Monitor from azure.monitor.opentelemetry import configure_azure_monitor - -# Enable Azure Monitor -connection_string = "InstrumentationKey=..." -configure_azure_monitor(connection_string=connection_string) - -# Your instrumented code here +configure_azure_monitor(connection_string="InstrumentationKey=...") ``` -View traces in the Azure Portal under your Application Insights resource → Tracing. - -## Best Practices - -1. **Always close spans**: Use try/finally blocks to ensure spans are ended even on errors -2. **Set error attributes**: On exceptions, set `error.type` and optionally `error.message` -3. **Use child spans for tools**: Create separate spans for each tool execution -4. **Opt-in for content**: Only record message content and tool arguments when explicitly enabled -5. **Truncate large values**: Limit tool results and arguments to reasonable sizes (e.g., 512 chars) -6. **Set finish reasons**: Always set `gen_ai.response.finish_reasons` when the operation completes successfully -7. **Include model info**: Capture both request and response model names + +```csharp +// .NET — Azure Monitor +using var tracerProvider = Sdk.CreateTracerProviderBuilder() + .AddSource("github.copilot.sdk") + .AddAzureMonitorTraceExporter(o => o.ConnectionString = "InstrumentationKey=...") + .Build(); +``` ## Troubleshooting ### No spans appearing -1. Verify tracer provider is set: `trace.set_tracer_provider(provider)` -2. Add a span processor: `provider.add_span_processor(SimpleSpanProcessor(exporter))` -3. 
Ensure spans are ended: Check for missing `span.end()` calls - -### Tool spans not showing as children - -Make sure to attach the tool span to the parent context: - -```python -tool_token = context.attach(trace.set_span_in_context(tool_span)) -``` +1. Verify the OpenTelemetry provider is registered before creating the `CopilotClient`. +2. Ensure your exporter's source/activity filter includes the SDK's source name (default: `"github.copilot.sdk"`). For .NET, this means calling `.AddSource("github.copilot.sdk")` on the tracer provider builder. +3. Confirm `telemetry` is set on the client options — when omitted, no telemetry is emitted. -### Context warnings in async code +### Missing message content or tool arguments -You may see "Failed to detach context" warnings in async streaming code. These are expected and don't affect tracing correctness. +Sensitive attributes are gated behind `enableSensitiveData`. Set it to `true` in the `TelemetryConfig` or set the `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true` environment variable. ## References - [OpenTelemetry GenAI Semantic Conventions](https://opentelemetry.io/docs/specs/semconv/gen-ai/) - [OpenTelemetry MCP Semantic Conventions](https://opentelemetry.io/docs/specs/semconv/gen-ai/mcp/) -- [OpenTelemetry Python SDK](https://opentelemetry.io/docs/instrumentation/python/) -- [GenAI Semantic Conventions v1.34.0](https://opentelemetry.io/schemas/1.34.0) - [Copilot SDK Documentation](https://github.com/github/copilot-sdk) diff --git a/dotnet/README.md b/dotnet/README.md index fe226f77f..fa38addfa 100644 --- a/dotnet/README.md +++ b/dotnet/README.md @@ -79,6 +79,7 @@ new CopilotClient(CopilotClientOptions? options = null) - `Logger` - `ILogger` instance for SDK logging - `GitHubToken` - GitHub token for authentication. When provided, takes priority over other auth methods. - `UseLoggedInUser` - Whether to use logged-in user for authentication (default: true, but false when `GitHubToken` is provided). 
Cannot be used with `CliUrl`. +- `Telemetry` - OpenTelemetry instrumentation configuration (`TelemetryConfig`). When provided, enables automatic tracing and metrics following [GenAI semantic conventions](../docs/opentelemetry-instrumentation.md). #### Methods @@ -110,6 +111,8 @@ Create a new conversation session. - `Provider` - Custom API provider configuration (BYOK) - `Streaming` - Enable streaming of response chunks (default: false) - `InfiniteSessions` - Configure automatic context compaction (see below) +- `AgentName` - Agent name for telemetry attribution. +- `AgentDescription` - Agent description for telemetry attribution. - `OnUserInputRequest` - Handler for user input requests from the agent (enables ask_user tool). See [User Input Requests](#user-input-requests) section. - `Hooks` - Hook handlers for session lifecycle events. See [Session Hooks](#session-hooks) section. diff --git a/dotnet/src/Client.cs b/dotnet/src/Client.cs index 1f3a7fb43..f8856ae70 100644 --- a/dotnet/src/Client.cs +++ b/dotnet/src/Client.cs @@ -56,6 +56,7 @@ public partial class CopilotClient : IDisposable, IAsyncDisposable private readonly ConcurrentDictionary _sessions = new(); private readonly CopilotClientOptions _options; private readonly ILogger _logger; + private readonly CopilotTelemetry? _telemetry; private Task? _connectionTask; private bool _disposed; private readonly int? _optionsPort; @@ -123,6 +124,9 @@ public CopilotClient(CopilotClientOptions? options = null) } _logger = _options.Logger ?? NullLogger.Instance; + _telemetry = _options.Telemetry is { } telemetryConfig ? 
+ new CopilotTelemetry(telemetryConfig) : + null; // Parse CliUrl if provided if (!string.IsNullOrEmpty(_options.CliUrl)) @@ -407,7 +411,9 @@ public async Task CreateSessionAsync(SessionConfig config, Cance var response = await InvokeRpcAsync( connection.Rpc, "session.create", [request], cancellationToken); - var session = new CopilotSession(response.SessionId, connection.Rpc, response.WorkspacePath); + var session = new CopilotSession(response.SessionId, connection.Rpc, _telemetry, response.WorkspacePath, + config.Model, config.Provider, config.SystemMessage, config.Tools, config.Streaming, + config.AgentName, config.AgentDescription); session.RegisterTools(config.Tools ?? []); session.RegisterPermissionHandler(config.OnPermissionRequest); if (config.OnUserInputRequest != null) @@ -499,7 +505,9 @@ public async Task ResumeSessionAsync(string sessionId, ResumeSes var response = await InvokeRpcAsync( connection.Rpc, "session.resume", [request], cancellationToken); - var session = new CopilotSession(response.SessionId, connection.Rpc, response.WorkspacePath); + var session = new CopilotSession(response.SessionId, connection.Rpc, _telemetry, response.WorkspacePath, + config.Model, config.Provider, config.SystemMessage, config.Tools, config.Streaming, + config.AgentName, config.AgentDescription); session.RegisterTools(config.Tools ?? []); session.RegisterPermissionHandler(config.OnPermissionRequest); if (config.OnUserInputRequest != null) @@ -1183,6 +1191,7 @@ public async ValueTask DisposeAsync() if (_disposed) return; _disposed = true; await ForceStopAsync(); + _telemetry?.Dispose(); } private class RpcHandler(CopilotClient client) @@ -1239,6 +1248,14 @@ public async Task OnToolCall(string sessionId, }); } + using var activity = client._telemetry?.StartExecuteToolActivity( + toolName, toolCallId, tool.Description, arguments, session.GetTelemetryToolCallParentContext(toolCallId)); + var telemetry = client._telemetry; + Stopwatch? 
stopwatch = telemetry is { OperationDurationHistogram.Enabled: true } ? + Stopwatch.StartNew() : + null; + Exception? operationError = null; + try { var invocation = new ToolInvocation @@ -1292,10 +1309,13 @@ public async Task OnToolCall(string sessionId, ? je.GetString()! : JsonSerializer.Serialize(result, tool.JsonSerializerOptions.GetTypeInfo(typeof(object))), }; + client._telemetry?.SetExecuteToolResult(activity, result); return new ToolCallResponse(toolResultObject); } catch (Exception ex) { + operationError = ex; + CopilotTelemetry.RecordError(activity, ex); return new ToolCallResponse(new() { // TODO: We should offer some way to control whether or not to expose detailed exception information to the LLM. @@ -1305,6 +1325,21 @@ public async Task OnToolCall(string sessionId, Error = ex.Message }); } + finally + { + if (stopwatch is not null && telemetry is not null) + { + telemetry.RecordOperationDuration( + stopwatch.Elapsed.TotalSeconds, + requestModel: null, + responseModel: null, + providerName: session.TelemetryProviderName, + serverAddress: session.TelemetryServerAddress, + serverPort: session.TelemetryServerPort, + error: operationError, + operationName: OpenTelemetryConsts.GenAI.ExecuteTool); + } + } } public async Task OnPermissionRequest(string sessionId, JsonElement permissionRequest) diff --git a/dotnet/src/CopilotTelemetry.cs b/dotnet/src/CopilotTelemetry.cs new file mode 100644 index 000000000..6afc17855 --- /dev/null +++ b/dotnet/src/CopilotTelemetry.cs @@ -0,0 +1,1958 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. 
+ *--------------------------------------------------------------------------------------------*/ + +using Microsoft.Extensions.AI; +using System.Diagnostics; +using System.Diagnostics.Metrics; +using System.Globalization; +using System.Reflection; +using System.Text; +using System.Text.Encodings.Web; +using System.Text.Json; + +namespace GitHub.Copilot.SDK; + +/// +/// Provides OpenTelemetry instrumentation for the Copilot SDK, implementing +/// the Semantic Conventions for Generative AI systems. +/// +/// +/// +/// This class provides an implementation of the Semantic Conventions for Generative AI systems, +/// defined at . +/// The specification is still experimental and subject to change; as such, the telemetry output +/// by this instrumentation is also subject to change. +/// +/// +/// Telemetry is emitted using for traces and +/// for metrics. No dependency on OpenTelemetry +/// libraries is required. To collect the telemetry, configure an +/// or use the OpenTelemetry SDK with the appropriate source name (default "github.copilot.sdk"). +/// +/// +internal sealed class CopilotTelemetry : IDisposable +{ + private static readonly JsonWriterOptions s_jsonWriterOptions = new() { Encoder = JavaScriptEncoder.UnsafeRelaxedJsonEscaping }; + + private static readonly string? s_sdkVersion = + typeof(CopilotTelemetry).Assembly.GetCustomAttribute()?.InformationalVersion; + + internal readonly ActivitySource ActivitySource; + private readonly Meter _meter; + + internal readonly Histogram OperationDurationHistogram; + internal readonly Histogram TokenUsageHistogram; + internal readonly Histogram TimeToFirstChunkHistogram; + internal readonly Histogram TimePerOutputChunkHistogram; + + /// + /// Gets or sets whether potentially sensitive data should be included in telemetry. + /// + public bool EnableSensitiveData { get; } + + public CopilotTelemetry(TelemetryConfig? config) + { + string sourceName = config?.SourceName ?? 
OpenTelemetryConsts.DefaultSourceName; + + EnableSensitiveData = config?.EnableSensitiveData ?? + string.Equals( + Environment.GetEnvironmentVariable(OpenTelemetryConsts.CaptureMessageContentEnvVar), + "true", + StringComparison.OrdinalIgnoreCase); + + ActivitySource = new ActivitySource(sourceName, s_sdkVersion); + _meter = new Meter(sourceName, s_sdkVersion); + + OperationDurationHistogram = _meter.CreateHistogram( + OpenTelemetryConsts.GenAI.Client.OperationDuration.Name, + OpenTelemetryConsts.SecondsUnit, + OpenTelemetryConsts.GenAI.Client.OperationDuration.Description, + advice: new() { HistogramBucketBoundaries = OpenTelemetryConsts.GenAI.Client.OperationDuration.ExplicitBucketBoundaries }); + + TokenUsageHistogram = _meter.CreateHistogram( + OpenTelemetryConsts.GenAI.Client.TokenUsage.Name, + OpenTelemetryConsts.TokensUnit, + OpenTelemetryConsts.GenAI.Client.TokenUsage.Description, + advice: new() { HistogramBucketBoundaries = OpenTelemetryConsts.GenAI.Client.TokenUsage.ExplicitBucketBoundaries }); + + TimeToFirstChunkHistogram = _meter.CreateHistogram( + OpenTelemetryConsts.GenAI.Client.TimeToFirstChunk.Name, + OpenTelemetryConsts.SecondsUnit, + OpenTelemetryConsts.GenAI.Client.TimeToFirstChunk.Description, + advice: new() { HistogramBucketBoundaries = OpenTelemetryConsts.GenAI.Client.TimeToFirstChunk.ExplicitBucketBoundaries }); + + TimePerOutputChunkHistogram = _meter.CreateHistogram( + OpenTelemetryConsts.GenAI.Client.TimePerOutputChunk.Name, + OpenTelemetryConsts.SecondsUnit, + OpenTelemetryConsts.GenAI.Client.TimePerOutputChunk.Description, + advice: new() { HistogramBucketBoundaries = OpenTelemetryConsts.GenAI.Client.TimePerOutputChunk.ExplicitBucketBoundaries }); + } + + /// Starts an invoke_agent activity for a session turn. + public Activity? StartInvokeAgentActivity( + string sessionId, + string? model, + string providerName, + string? serverAddress, + int? serverPort, + string? agentName = null, + string? 
agentDescription = null, + ActivityContext parentContext = default) + { + if (!ActivitySource.HasListeners()) + { + return null; + } + + string displayName = string.IsNullOrWhiteSpace(agentName) + ? OpenTelemetryConsts.GenAI.InvokeAgent + : $"{OpenTelemetryConsts.GenAI.InvokeAgent} {agentName}"; + + ActivityTagsCollection tags = new() + { + { OpenTelemetryConsts.GenAI.Operation.Name, OpenTelemetryConsts.GenAI.InvokeAgent }, + { OpenTelemetryConsts.GenAI.Provider.Name, providerName }, + { OpenTelemetryConsts.GenAI.Agent.Id, sessionId }, + { OpenTelemetryConsts.GenAI.Conversation.Id, sessionId }, + }; + + if (!string.IsNullOrWhiteSpace(model)) + { + tags.Add(OpenTelemetryConsts.GenAI.Request.Model, model); + } + + if (!string.IsNullOrWhiteSpace(agentName)) + { + tags.Add(OpenTelemetryConsts.GenAI.Agent.Name, agentName); + } + + if (!string.IsNullOrWhiteSpace(agentDescription)) + { + tags.Add(OpenTelemetryConsts.GenAI.Agent.Description, agentDescription); + } + + if (!string.IsNullOrWhiteSpace(serverAddress)) + { + tags.Add(OpenTelemetryConsts.Server.Address, serverAddress); + if (serverPort is int port) + { + tags.Add(OpenTelemetryConsts.Server.Port, port); + } + } + + return ActivitySource.StartActivity(displayName, ActivityKind.Client, parentContext, tags); + } + + /// Starts a chat activity for an individual LLM turn within an invoke_agent span. + public Activity? StartChatActivity( + string? model, + string providerName, + string? serverAddress, + int? serverPort, + ActivityContext parentContext, + string? conversationId = null) + { + if (!ActivitySource.HasListeners()) + { + return null; + } + + string displayName = string.IsNullOrWhiteSpace(model) + ? 
OpenTelemetryConsts.GenAI.Chat + : $"{OpenTelemetryConsts.GenAI.Chat} {model}"; + + ActivityTagsCollection tags = new() + { + { OpenTelemetryConsts.GenAI.Operation.Name, OpenTelemetryConsts.GenAI.Chat }, + { OpenTelemetryConsts.GenAI.Provider.Name, providerName }, + }; + + if (!string.IsNullOrWhiteSpace(model)) + { + tags.Add(OpenTelemetryConsts.GenAI.Request.Model, model); + } + + if (!string.IsNullOrWhiteSpace(conversationId)) + { + tags.Add(OpenTelemetryConsts.GenAI.Conversation.Id, conversationId); + } + + if (!string.IsNullOrWhiteSpace(serverAddress)) + { + tags.Add(OpenTelemetryConsts.Server.Address, serverAddress); + if (serverPort is int port) + { + tags.Add(OpenTelemetryConsts.Server.Port, port); + } + } + + return ActivitySource.StartActivity(displayName, ActivityKind.Client, parentContext, tags); + } + + /// Starts an execute_tool activity for a tool call. + public Activity? StartExecuteToolActivity(string toolName, string toolCallId, string? description, object? arguments, ActivityContext parentContext = default) + { + if (!ActivitySource.HasListeners()) + { + return null; + } + + string displayName = $"{OpenTelemetryConsts.GenAI.ExecuteTool} {toolName}"; + + ActivityTagsCollection tags = new() + { + { OpenTelemetryConsts.GenAI.Operation.Name, OpenTelemetryConsts.GenAI.ExecuteTool }, + { OpenTelemetryConsts.GenAI.Tool.Name, toolName }, + { OpenTelemetryConsts.GenAI.Tool.CallId, toolCallId }, + { OpenTelemetryConsts.GenAI.Tool.Type, "function" }, + }; + + if (!string.IsNullOrWhiteSpace(description)) + { + tags.Add(OpenTelemetryConsts.GenAI.Tool.Description, description); + } + + if (EnableSensitiveData && arguments is not null) + { + tags.Add(OpenTelemetryConsts.GenAI.Tool.CallArguments, SerializeTagValue(arguments)); + } + + return ActivitySource.StartActivity(displayName, ActivityKind.Internal, parentContext, tags); + } + + /// Records token usage metrics at turn completion (so error.type can be included). + public void RecordTokenUsageMetrics( + int? 
inputTokens, + int? outputTokens, + string? requestModel, + string? responseModel, + string providerName, + string? serverAddress, + int? serverPort, + Exception? error, + string operationName) + { + if (!TokenUsageHistogram.Enabled) + { + return; + } + + TagList baseTags = CreateMetricTags(operationName, requestModel, responseModel, providerName, serverAddress, serverPort); + + if (error is not null) + { + baseTags.Add(OpenTelemetryConsts.Error.Type, error.GetType().Name); + } + + if (inputTokens is int inputCount) + { + TagList tags = baseTags; + tags.Add(OpenTelemetryConsts.GenAI.Token.Type, OpenTelemetryConsts.TokenTypeInput); + TokenUsageHistogram.Record(inputCount, tags); + } + + if (outputTokens is int outputCount) + { + TagList tags = baseTags; + tags.Add(OpenTelemetryConsts.GenAI.Token.Type, OpenTelemetryConsts.TokenTypeOutput); + TokenUsageHistogram.Record(outputCount, tags); + } + } + + /// Records operation duration metric. + public void RecordOperationDuration( + double durationSeconds, + string? requestModel, + string? responseModel, + string providerName, + string? serverAddress, + int? serverPort, + Exception? error, + string operationName) + { + if (OperationDurationHistogram.Enabled) + { + TagList tags = CreateMetricTags(operationName, requestModel, responseModel, providerName, serverAddress, serverPort); + + if (error is not null) + { + tags.Add(OpenTelemetryConsts.Error.Type, error.GetType().Name); + } + + OperationDurationHistogram.Record(durationSeconds, tags); + } + } + + public void RecordTimeToFirstChunk( + double durationSeconds, + string? requestModel, + string? responseModel, + string providerName, + string? serverAddress, + int? 
serverPort) + { + if (TimeToFirstChunkHistogram.Enabled) + { + TimeToFirstChunkHistogram.Record( + durationSeconds, + CreateMetricTags( + OpenTelemetryConsts.GenAI.Chat, + requestModel, + responseModel, + providerName, + serverAddress, + serverPort)); + } + } + + public void RecordTimePerOutputChunk( + double durationSeconds, + string? requestModel, + string? responseModel, + string providerName, + string? serverAddress, + int? serverPort) + { + if (TimePerOutputChunkHistogram.Enabled) + { + TimePerOutputChunkHistogram.Record( + durationSeconds, + CreateMetricTags( + OpenTelemetryConsts.GenAI.Chat, + requestModel, + responseModel, + providerName, + serverAddress, + serverPort)); + } + } + + public void SetExecuteToolResult(Activity? activity, object? result) + { + if (EnableSensitiveData && + result is not null && + activity is { IsAllDataRequested: true }) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Tool.CallResult, SerializeTagValue(result)); + } + } + + /// Records an error on an activity. + public static void RecordError(Activity? activity, Exception error) + { + activity? + .SetTag(OpenTelemetryConsts.Error.Type, error.GetType().Name) + .SetStatus(ActivityStatusCode.Error, error.Message); + } + + /// + /// Normalizes a provider type string to its OpenTelemetry semantic convention name. + /// Only the providers supported by BYOK are mapped; all others default to "github". + /// + private static string NormalizeProviderName(string? providerType) + { + return providerType?.Trim().ToLowerInvariant() switch + { + "anthropic" => "anthropic", + "azure" => "azure.ai.openai", + "openai" => "openai", + _ => OpenTelemetryConsts.DefaultProviderName, + }; + } + + private static (string? Address, int? Port) ParseServerAddress(string? baseUrl) + { + if (!string.IsNullOrWhiteSpace(baseUrl) && + Uri.TryCreate(baseUrl, UriKind.Absolute, out var uri) && + !string.IsNullOrWhiteSpace(uri.Host)) + { + return (uri.Host, uri.Port > 0 ? 
uri.Port : null); + } + + return (null, null); + } + + private static TagList CreateMetricTags( + string operationName, + string? requestModel, + string? responseModel, + string providerName, + string? serverAddress, + int? serverPort) + { + TagList tags = default; + tags.Add(OpenTelemetryConsts.GenAI.Operation.Name, operationName); + tags.Add(OpenTelemetryConsts.GenAI.Provider.Name, providerName); + + if (!string.IsNullOrWhiteSpace(requestModel)) + { + tags.Add(OpenTelemetryConsts.GenAI.Request.Model, requestModel); + } + + if (!string.IsNullOrWhiteSpace(responseModel)) + { + tags.Add(OpenTelemetryConsts.GenAI.Response.Model, responseModel); + } + + if (!string.IsNullOrWhiteSpace(serverAddress)) + { + tags.Add(OpenTelemetryConsts.Server.Address, serverAddress); + if (serverPort is int port) + { + tags.Add(OpenTelemetryConsts.Server.Port, port); + } + } + + return tags; + } + + private static string SerializeTagValue(object value) + { + return value switch + { + JsonElement jsonElement => jsonElement.GetRawText(), + string text => text, + bool boolean => boolean ? "true" : "false", + float number => number.ToString("R", CultureInfo.InvariantCulture), + double number => number.ToString("R", CultureInfo.InvariantCulture), + IFormattable formattable => formattable.ToString(null, CultureInfo.InvariantCulture), + _ => value.ToString() ?? string.Empty, + }; + } + + public void Dispose() + { + ActivitySource.Dispose(); + _meter.Dispose(); + } + + /// + /// Tracks telemetry state for a single session, managing the invoke_agent span + /// lifecycle across Send / DispatchEvent / turn-completion boundaries. + /// All public methods are thread-safe. + /// + public sealed class AgentTurnTracker + { + private readonly CopilotTelemetry _telemetry; + private readonly string _sessionId; + private readonly object _lock = new(); + + private readonly string? _requestModel; + private readonly string? _agentName; + private readonly string? _agentDescription; + private readonly string? 
_systemInstructionsJson; + private readonly string? _toolDefinitionsJson; + private readonly bool _isStreaming; + + // Per-invoke_agent mutable state; guarded by _lock. + private Activity? _agentActivity; + private long _agentTimestamp; + private List? _agentInputMessages; + private List? _agentOutputMessages; + private Dictionary? _activeSubagents; + + // Pending tool call parent contexts; guarded by _lock. + // Tool execute_tool spans are created in OnToolCall (not from ToolExecutionStartEvent) + // because OnToolCall is where the AIFunction actually runs — creating the Activity + // there makes it Activity.Current during execution so child spans parent correctly, + // and the span measures actual SDK-side tool execution time. + // ToolExecutionStartEvent carries ParentToolCallId (which identifies the owning + // subagent), so we stash the correct parent context here for OnToolCall to consume. + private Dictionary? _pendingToolParents; + + // Tracks tool call IDs that originated from MCP server tools, mapping to + // the MCP server name, so that ToolExecutionCompleteEvent can emit the + // correct message type with the right server_tool_call_response discriminator. + private Dictionary? _serverToolCallIds; + + // Agent-level accumulated usage; guarded by _lock. + // Tracks totals across all chat turns for the invoke_agent span. + private string? _agentResponseModel; + private string? _agentResponseId; + private int _agentTotalInputTokens; + private int _agentTotalOutputTokens; + private int _agentTotalCacheReadTokens; + private int _agentTotalCacheCreationTokens; + private double _agentTotalCost; + private double _agentTotalAiu; + + // Per-chat-turn mutable state; guarded by _lock. + // Reset on each AssistantTurnStartEvent. + private Activity? _turnActivity; + private long _turnTimestamp; + private bool _firstOutputChunkRecorded; + private TimeSpan _lastOutputChunkElapsed; + private string? _responseModel; + private string? 
_responseId; + private int _inputTokens; + private int _outputTokens; + private int _cacheReadTokens; + private int _cacheCreationTokens; + private List? _inputMessages; + private List? _outputMessages; + + // Copilot-specific per-turn attributes from AssistantUsageData. + private double? _turnCost; + private double? _turnServerDuration; + private string? _turnInitiator; + private double? _turnAiu; + private string? _turnId; + private string? _turnInteractionId; + + internal AgentTurnTracker( + CopilotTelemetry telemetry, + string sessionId, + string? model, + ProviderConfig? provider, + SystemMessageConfig? systemMessage, + ICollection? tools, + bool streaming, + string? agentName = null, + string? agentDescription = null) + { + _telemetry = telemetry; + _sessionId = sessionId; + _requestModel = model; + _agentName = agentName; + _agentDescription = agentDescription; + ProviderName = NormalizeProviderName(provider?.Type); + (ServerAddress, ServerPort) = ParseServerAddress(provider?.BaseUrl); + _systemInstructionsJson = BuildSystemInstructionsJson(systemMessage); + _toolDefinitionsJson = BuildToolDefinitionsJson(tools); + _isStreaming = streaming; + } + + internal string ProviderName { get; } + + internal string? ServerAddress { get; } + + internal int? ServerPort { get; } + + /// Gets the of the current invoke_agent activity, if any. + internal ActivityContext GetActivityContext() + { + lock (_lock) + { + return _agentActivity?.Context ?? default; + } + } + + /// + /// Gets the parent for a tool call, which may differ + /// from the root invoke_agent when a subagent initiated the tool call. + /// Consumes the stored context (one-time use). + /// + internal ActivityContext GetToolCallParentContext(string toolCallId) + { + lock (_lock) + { + if (_pendingToolParents is not null && _pendingToolParents.Remove(toolCallId, out var ctx)) + { + return ctx; + } + + return _agentActivity?.Context ?? default; + } + } + + /// + /// Closes any active spans with an error status. 
Called when the session is disposed + /// while a turn may still be in progress, ensuring spans are not orphaned. + /// + internal void CompleteOnDispose() + { + lock (_lock) + { + if (_agentActivity is not null) + { + var disposeError = new ObjectDisposedException("Session disposed while agent turn was in progress"); + CompleteChatTurnLocked(disposeError); + CompleteAgentTurnLocked(disposeError); + } + } + } + + /// + /// Called at the start of each SendAsync. Starts a new invoke_agent span + /// if one isn't already active, and records a user message event. + /// + internal void BeginSend(string? prompt) + { + lock (_lock) + { + if (_agentActivity is null) + { + _agentActivity = _telemetry.StartInvokeAgentActivity( + _sessionId, + _requestModel, + ProviderName, + ServerAddress, + ServerPort, + _agentName, + _agentDescription); + _agentTimestamp = Stopwatch.GetTimestamp(); + _agentInputMessages = []; + } + + // Agent-level input = what the caller sent (all user prompts). + if (_agentInputMessages is not null && !string.IsNullOrWhiteSpace(prompt)) + { + _agentInputMessages.Add(new("user", [new("text", Content: prompt)])); + } + + // Record the user prompt as an input message on the first chat + // turn's message list when that turn starts. Stash it here for now. + _pendingUserPrompt = prompt; + } + } + + private string? _pendingUserPrompt; + + /// + /// Processes a dispatched session event, enriching the current span and + /// completing the turn on idle/error events. + /// + internal void ProcessEvent(SessionEvent sessionEvent) + { + lock (_lock) + { + // Route subagent events by ParentToolCallId. + var parentToolCallId = GetParentToolCallId(sessionEvent); + if (!string.IsNullOrEmpty(parentToolCallId)) + { + if (_activeSubagents?.TryGetValue(parentToolCallId, out var subagentState) == true) + { + ProcessSubagentEventLocked(subagentState, sessionEvent); + } + + return; + } + + // Handle subagent lifecycle events. 
+ switch (sessionEvent) + { + case SubagentStartedEvent started: + BeginSubagentLocked(started); + return; + case SubagentCompletedEvent completed when completed.Data is not null: + CompleteSubagentLocked(completed.Data.ToolCallId, error: null); + return; + case SubagentFailedEvent failed when failed.Data is not null: + CompleteSubagentLocked(failed.Data.ToolCallId, + new InvalidOperationException($"Subagent '{failed.Data.AgentName}' failed: {failed.Data.Error}")); + return; + } + + // Record chunk timing for main agent events during a turn. + RecordOutputChunkMetricLocked(); + + // Per-turn event processing (writes to the chat child span). + if (_turnActivity is not null) + { + switch (sessionEvent) + { + case AssistantMessageEvent messageEvent: + { + List parts = []; + if (!string.IsNullOrWhiteSpace(messageEvent.Data?.ReasoningText)) + { + parts.Add(new("reasoning", Content: messageEvent.Data.ReasoningText)); + } + + if (!string.IsNullOrWhiteSpace(messageEvent.Data?.Content)) + { + parts.Add(new("text", Content: messageEvent.Data.Content)); + } + + if (parts.Count > 0) + { + _outputMessages?.Add(new("assistant", parts)); + } + + break; + } + + case AssistantUsageEvent usageEvent: + _responseModel = usageEvent.Data.Model; + if (!string.IsNullOrWhiteSpace(usageEvent.Data.ApiCallId)) + { + _responseId = usageEvent.Data.ApiCallId; + } + else if (!string.IsNullOrWhiteSpace(usageEvent.Data.ProviderCallId)) + { + _responseId = usageEvent.Data.ProviderCallId; + } + if (usageEvent.Data.InputTokens is double inTok) + { + _inputTokens += (int)inTok; + } + if (usageEvent.Data.OutputTokens is double outTok) + { + _outputTokens += (int)outTok; + } + if (usageEvent.Data.CacheReadTokens is double cacheRead) + { + _cacheReadTokens += (int)cacheRead; + } + if (usageEvent.Data.CacheWriteTokens is double cacheWrite) + { + _cacheCreationTokens += (int)cacheWrite; + } + + // Copilot-specific vendor attributes + if (usageEvent.Data.Cost is double cost) + { + _turnCost = (_turnCost ?? 
0) + cost; + } + if (usageEvent.Data.Duration is double dur) + { + _turnServerDuration = (_turnServerDuration ?? 0) + dur; + } + if (!string.IsNullOrWhiteSpace(usageEvent.Data.Initiator)) + { + _turnInitiator = usageEvent.Data.Initiator; + } + if (usageEvent.Data.CopilotUsage is { } copilotUsage) + { + _turnAiu = (_turnAiu ?? 0) + copilotUsage.TotalNanoAiu; + } + break; + + case SessionModelChangeEvent modelChangeEvent: + _responseModel = modelChangeEvent.Data.NewModel; + break; + + case ToolExecutionStartEvent toolStartEvent: + { + if (toolStartEvent.Data is { } startData) + { + var isServerTool = startData.McpServerName is not null; + if (isServerTool && startData.ToolCallId is not null) + { + _serverToolCallIds ??= []; + _serverToolCallIds[startData.ToolCallId] = startData.McpServerName!; + } + + _outputMessages?.Add(new("assistant", + [ + new(isServerTool ? "server_tool_call" : "tool_call", + Id: startData.ToolCallId, + Name: startData.ToolName, + Arguments: startData.Arguments, + McpServerName: startData.McpServerName) + ])); + + // For main agent tool calls, parent is the root invoke_agent. + if (_agentActivity is not null && startData.ToolCallId is not null) + { + _pendingToolParents ??= []; + _pendingToolParents[startData.ToolCallId] = _agentActivity.Context; + } + } + + break; + } + + case ToolExecutionCompleteEvent toolCompleteEvent: + { + if (toolCompleteEvent.Data is { } toolData) + { + var resultContent = toolData.Result?.Content ?? toolData.Error?.Message; + string? serverName = null; + var isServerTool = _serverToolCallIds is not null + && _serverToolCallIds.Remove(toolData.ToolCallId, out serverName); + + _inputMessages?.Add(new("tool", + [ + new(isServerTool ? 
"server_tool_call_response" : "tool_call_response", + Id: toolData.ToolCallId, + Response: resultContent, + McpServerName: serverName) + ])); + } + + break; + } + } + } + + // Copilot-specific lifecycle events emitted as span events on the + // current activity (chat turn if active, otherwise invoke_agent). + { + var target = _turnActivity ?? _agentActivity; + if (target is not null) + { + switch (sessionEvent) + { + case SessionTruncationEvent { Data: { } trunc }: + { + ActivityTagsCollection truncTags = []; + if (trunc.TokenLimit != 0) + { + truncTags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.TokenLimit, (long)trunc.TokenLimit); + } + if (trunc.PreTruncationTokensInMessages != 0) + { + truncTags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.PreTokens, (long)trunc.PreTruncationTokensInMessages); + } + if (trunc.PostTruncationTokensInMessages != 0) + { + truncTags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.PostTokens, (long)trunc.PostTruncationTokensInMessages); + } + if (trunc.PreTruncationMessagesLength != 0) + { + truncTags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.PreMessages, (long)trunc.PreTruncationMessagesLength); + } + if (trunc.PostTruncationMessagesLength != 0) + { + truncTags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.PostMessages, (long)trunc.PostTruncationMessagesLength); + } + if (trunc.TokensRemovedDuringTruncation != 0) + { + truncTags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.TokensRemoved, (long)trunc.TokensRemovedDuringTruncation); + } + if (trunc.MessagesRemovedDuringTruncation != 0) + { + truncTags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.MessagesRemoved, (long)trunc.MessagesRemovedDuringTruncation); + } + if (trunc.PerformedBy is not null) + { + truncTags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.PerformedBy, trunc.PerformedBy); + } + target.AddEvent(new(OpenTelemetryConsts.GenAI.CopilotEvent.SessionTruncation, tags: truncTags)); + break; + } + + case SessionCompactionStartEvent: + 
target.AddEvent(new(OpenTelemetryConsts.GenAI.CopilotEvent.SessionCompactionStart)); + break; + + case SessionCompactionCompleteEvent { Data: { } compaction }: + { + ActivityTagsCollection tags = new() + { + { OpenTelemetryConsts.GenAI.CopilotEvent.Success, compaction.Success }, + }; + + if (_telemetry.EnableSensitiveData && compaction.Error is not null) + { + tags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.Message, compaction.Error); + } + + if (compaction.PreCompactionTokens is { } preTokens) + { + tags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.PreTokens, (long)preTokens); + } + + if (compaction.PostCompactionTokens is { } postTokens) + { + tags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.PostTokens, (long)postTokens); + } + + if (compaction.TokensRemoved is { } tokensRemoved) + { + tags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.TokensRemoved, (long)tokensRemoved); + } + + if (compaction.MessagesRemoved is { } messagesRemoved) + { + tags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.MessagesRemoved, (long)messagesRemoved); + } + + target.AddEvent(new(OpenTelemetryConsts.GenAI.CopilotEvent.SessionCompactionComplete, tags: tags)); + break; + } + + case SkillInvokedEvent { Data: { } skill }: + { + ActivityTagsCollection tags = new() + { + { OpenTelemetryConsts.GenAI.CopilotEvent.SkillName, skill.Name }, + { OpenTelemetryConsts.GenAI.CopilotEvent.SkillPath, skill.Path }, + }; + + if (skill.PluginName is not null) + { + tags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.SkillPluginName, skill.PluginName); + } + + if (skill.PluginVersion is not null) + { + tags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.SkillPluginVersion, skill.PluginVersion); + } + + if (_telemetry.EnableSensitiveData) + { + tags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.SkillContent, skill.Content); + } + + target.AddEvent(new(OpenTelemetryConsts.GenAI.CopilotEvent.SkillInvoked, tags: tags)); + break; + } + } + } + } + + // Lifecycle events that start/end turns or the overall agent operation. 
+ switch (sessionEvent) + { + case AssistantTurnStartEvent turnStartEvent: + BeginChatTurnLocked(); + if (turnStartEvent.Data is { } turnStartData) + { + _turnId = turnStartData.TurnId; + _turnInteractionId = turnStartData.InteractionId; + } + break; + + case AssistantTurnEndEvent: + CompleteChatTurnLocked(error: null); + break; + + case SessionIdleEvent: + CompleteChatTurnLocked(error: null); + CompleteAgentTurnLocked(error: null); + break; + + case SessionErrorEvent errorEvent: + var ex = new InvalidOperationException($"Session error: {errorEvent.Data?.Message ?? "unknown error"}"); + CompleteChatTurnLocked(ex); + CompleteAgentTurnLocked(ex); + break; + } + } + } + + /// + /// Completes the current turn with an error (e.g. from a failed SendAsync). + /// + internal void CompleteTurnWithError(Exception error) + { + lock (_lock) + { + CompleteChatTurnLocked(error); + CompleteAgentTurnLocked(error); + } + } + + /// + /// Starts a new chat child span for an LLM turn. + /// Caller must hold . + /// + private void BeginChatTurnLocked() + { + Debug.Assert(Monitor.IsEntered(_lock)); + + // If there's already an active turn, complete it first (shouldn't normally happen). + CompleteChatTurnLocked(error: null); + + _responseModel = null; + _responseId = null; + _inputTokens = 0; + _outputTokens = 0; + _cacheReadTokens = 0; + _cacheCreationTokens = 0; + _firstOutputChunkRecorded = false; + _lastOutputChunkElapsed = TimeSpan.Zero; + _inputMessages = []; + _outputMessages = []; + _turnCost = null; + _turnServerDuration = null; + _turnInitiator = null; + _turnAiu = null; + _turnId = null; + _turnInteractionId = null; + + // Add stashed user prompt as input message for the first turn. + if (!string.IsNullOrWhiteSpace(_pendingUserPrompt)) + { + _inputMessages.Add(new("user", [new("text", Content: _pendingUserPrompt)])); + _pendingUserPrompt = null; + } + + var parentContext = _agentActivity?.Context ?? 
default; + _turnActivity = _telemetry.StartChatActivity( + _requestModel, + ProviderName, + ServerAddress, + ServerPort, + parentContext, + _sessionId); + + _turnTimestamp = Stopwatch.GetTimestamp(); + } + + /// + /// Completes the current chat child span with per-turn attributes and metrics. + /// Caller must hold . + /// + private void CompleteChatTurnLocked(Exception? error) + { + Debug.Assert(Monitor.IsEntered(_lock)); + + var activity = _turnActivity; + if (activity is null) + { + return; + } + + var timestamp = _turnTimestamp; + var inputMessages = _inputMessages; + var outputMessages = _outputMessages; + var responseModel = _responseModel; + var responseId = _responseId; + var inputTokens = _inputTokens; + var outputTokens = _outputTokens; + var cacheReadTokens = _cacheReadTokens; + var cacheCreationTokens = _cacheCreationTokens; + var turnCost = _turnCost; + var turnServerDuration = _turnServerDuration; + var turnInitiator = _turnInitiator; + var turnAiu = _turnAiu; + var turnId = _turnId; + var turnInteractionId = _turnInteractionId; + + _turnActivity = null; + _turnTimestamp = 0; + _inputMessages = null; + _outputMessages = null; + _responseModel = null; + _responseId = null; + _inputTokens = 0; + _outputTokens = 0; + _cacheReadTokens = 0; + _cacheCreationTokens = 0; + _turnCost = null; + _turnServerDuration = null; + _turnInitiator = null; + _turnAiu = null; + _turnId = null; + _turnInteractionId = null; + + if (error is not null) + { + RecordError(activity, error); + } + + var finishReason = error is not null ? "error" : "stop"; + activity.SetTag(OpenTelemetryConsts.GenAI.Response.FinishReasons, new[] { finishReason }); + + // Agent-level output = only the final turn's output (what the agent + // returns to the caller). Each turn overwrites; the last one wins. 
+ if (outputMessages is { Count: > 0 }) + { + _agentOutputMessages = []; + foreach (var msg in outputMessages) + { + _agentOutputMessages.Add(msg with { FinishReason = finishReason }); + } + } + + // Accumulate agent-level usage across turns. + if (responseModel is not null) + { + _agentResponseModel = responseModel; + } + if (responseId is not null) + { + _agentResponseId = responseId; + } + _agentTotalInputTokens += inputTokens; + _agentTotalOutputTokens += outputTokens; + _agentTotalCacheReadTokens += cacheReadTokens; + _agentTotalCacheCreationTokens += cacheCreationTokens; + if (turnCost is double c) + { + _agentTotalCost += c; + } + if (turnAiu is double a) + { + _agentTotalAiu += a; + } + + // Set usage-related span attributes for this LLM turn + if (activity.IsAllDataRequested) + { + if (responseModel is not null) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Response.Model, responseModel); + } + if (responseId is not null) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Response.Id, responseId); + } + if (inputTokens > 0) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Usage.InputTokens, inputTokens); + } + if (outputTokens > 0) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Usage.OutputTokens, outputTokens); + } + if (cacheReadTokens > 0) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Usage.CacheReadInputTokens, cacheReadTokens); + } + if (cacheCreationTokens > 0) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Usage.CacheCreationInputTokens, cacheCreationTokens); + } + + // Copilot vendor-prefixed attributes on chat spans + if (turnCost is not null) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Copilot.Cost, turnCost.Value); + } + if (turnServerDuration is not null) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Copilot.ServerDuration, turnServerDuration.Value); + } + if (turnInitiator is not null) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Copilot.Initiator, turnInitiator); + } + if (turnAiu is not null) + { + 
activity.SetTag(OpenTelemetryConsts.GenAI.Copilot.Aiu, turnAiu.Value); + } + if (turnId is not null) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Copilot.TurnId, turnId); + } + if (turnInteractionId is not null) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Copilot.InteractionId, turnInteractionId); + } + } + + // Set input/output message content as span attributes (sensitive) + if (_telemetry.EnableSensitiveData) + { + var inputMessagesJson = BuildMessagesJson(inputMessages); + if (!string.IsNullOrWhiteSpace(inputMessagesJson)) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Input.Messages, inputMessagesJson); + } + + var outputMessagesJson = BuildMessagesJson(outputMessages, finishReason: finishReason); + if (!string.IsNullOrWhiteSpace(outputMessagesJson)) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Output.Messages, outputMessagesJson); + } + } + + // Token usage metrics (per-turn) + _telemetry.RecordTokenUsageMetrics( + inputTokens > 0 ? inputTokens : null, + outputTokens > 0 ? outputTokens : null, + _requestModel, + responseModel, + ProviderName, + ServerAddress, + ServerPort, + error, + OpenTelemetryConsts.GenAI.Chat); + + // Per-turn operation duration + if (_telemetry.OperationDurationHistogram.Enabled) + { + _telemetry.RecordOperationDuration( + Stopwatch.GetElapsedTime(timestamp).TotalSeconds, + _requestModel, + responseModel, + ProviderName, + ServerAddress, + ServerPort, + error: error, + operationName: OpenTelemetryConsts.GenAI.Chat); + } + + _firstOutputChunkRecorded = false; + _lastOutputChunkElapsed = TimeSpan.Zero; + activity.Dispose(); + } + + /// + /// Completes the invoke_agent span and records overall operation duration. + /// Caller must hold . + /// + private void CompleteAgentTurnLocked(Exception? 
error) + { + Debug.Assert(Monitor.IsEntered(_lock)); + + var activity = _agentActivity; + if (activity is null) + { + return; + } + + var timestamp = _agentTimestamp; + + _agentActivity = null; + _agentTimestamp = 0; + _pendingUserPrompt = null; + var agentInputMessages = _agentInputMessages; + var agentOutputMessages = _agentOutputMessages; + _agentInputMessages = null; + _agentOutputMessages = null; + + // Complete any remaining subagents before closing the parent. + if (_activeSubagents is { Count: > 0 }) + { + foreach (var key in _activeSubagents.Keys.ToList()) + { + CompleteSubagentLocked(key, error); + } + } + + _activeSubagents = null; + _pendingToolParents = null; + _serverToolCallIds = null; + + if (error is not null) + { + RecordError(activity, error); + } + + var finishReason = error is not null ? "error" : "stop"; + activity.SetTag(OpenTelemetryConsts.GenAI.Response.FinishReasons, new[] { finishReason }); + + // Set accumulated usage across all chat turns on the invoke_agent span. 
+ var agentResponseModel = _agentResponseModel; + var agentResponseId = _agentResponseId; + var agentTotalInputTokens = _agentTotalInputTokens; + var agentTotalOutputTokens = _agentTotalOutputTokens; + var agentTotalCacheReadTokens = _agentTotalCacheReadTokens; + var agentTotalCacheCreationTokens = _agentTotalCacheCreationTokens; + var agentTotalCost = _agentTotalCost; + var agentTotalAiu = _agentTotalAiu; + _agentResponseModel = null; + _agentResponseId = null; + _agentTotalInputTokens = 0; + _agentTotalOutputTokens = 0; + _agentTotalCacheReadTokens = 0; + _agentTotalCacheCreationTokens = 0; + _agentTotalCost = 0; + _agentTotalAiu = 0; + + if (activity.IsAllDataRequested) + { + if (agentResponseModel is not null) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Response.Model, agentResponseModel); + } + if (agentResponseId is not null) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Response.Id, agentResponseId); + } + if (agentTotalInputTokens > 0) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Usage.InputTokens, agentTotalInputTokens); + } + if (agentTotalOutputTokens > 0) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Usage.OutputTokens, agentTotalOutputTokens); + } + if (agentTotalCacheReadTokens > 0) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Usage.CacheReadInputTokens, agentTotalCacheReadTokens); + } + if (agentTotalCacheCreationTokens > 0) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Usage.CacheCreationInputTokens, agentTotalCacheCreationTokens); + } + + // Copilot vendor-prefixed attributes on invoke_agent span + if (agentTotalCost > 0) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Copilot.Cost, agentTotalCost); + } + if (agentTotalAiu > 0) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Copilot.Aiu, agentTotalAiu); + } + } + + // Agent-level input = caller's message; output = agent's final response. 
+ if (_telemetry.EnableSensitiveData) + { + var inputJson = BuildMessagesJson(agentInputMessages); + if (inputJson is not null) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Input.Messages, inputJson); + } + + var outputJson = BuildMessagesJson(agentOutputMessages); + if (outputJson is not null) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Output.Messages, outputJson); + } + + if (!string.IsNullOrWhiteSpace(_systemInstructionsJson)) + { + activity.SetTag(OpenTelemetryConsts.GenAI.SystemInstructions, _systemInstructionsJson); + } + } + + if (!string.IsNullOrWhiteSpace(_toolDefinitionsJson)) + { + activity.SetTag(OpenTelemetryConsts.GenAI.Tool.Definitions, _toolDefinitionsJson); + } + + if (_telemetry.OperationDurationHistogram.Enabled) + { + _telemetry.RecordOperationDuration( + Stopwatch.GetElapsedTime(timestamp).TotalSeconds, + _requestModel, + agentResponseModel, + ProviderName, + ServerAddress, + ServerPort, + error: error, + operationName: OpenTelemetryConsts.GenAI.InvokeAgent); + } + + activity.Dispose(); + } + + /// + /// Records streaming chunk timing metrics. + /// Caller must hold . + /// + private void RecordOutputChunkMetricLocked() + { + Debug.Assert(Monitor.IsEntered(_lock)); + + if (!_isStreaming) + { + return; + } + + var elapsed = Stopwatch.GetElapsedTime(_turnTimestamp); + + if (!_firstOutputChunkRecorded) + { + _firstOutputChunkRecorded = true; + _lastOutputChunkElapsed = elapsed; + _telemetry.RecordTimeToFirstChunk( + elapsed.TotalSeconds, + _requestModel, + null, // response model not yet known during streaming + ProviderName, + ServerAddress, + ServerPort); + return; + } + + var delta = elapsed - _lastOutputChunkElapsed; + _lastOutputChunkElapsed = elapsed; + _telemetry.RecordTimePerOutputChunk( + delta.TotalSeconds, + _requestModel, + null, // response model not yet known during streaming + ProviderName, + ServerAddress, + ServerPort); + } + + /// + /// Extracts ParentToolCallId from events that carry it. 
+ /// A non-null/non-empty value indicates the event belongs to a subagent. + /// + private static string? GetParentToolCallId(SessionEvent evt) => evt switch + { + AssistantUsageEvent e => e.Data?.ParentToolCallId, + AssistantMessageEvent e => e.Data?.ParentToolCallId, + AssistantMessageDeltaEvent e => e.Data?.ParentToolCallId, + ToolExecutionStartEvent e => e.Data?.ParentToolCallId, + ToolExecutionCompleteEvent e => e.Data?.ParentToolCallId, + _ => null, + }; + + /// + /// Creates a nested invoke_agent + chat span pair for a subagent. + /// Caller must hold . + /// + private void BeginSubagentLocked(SubagentStartedEvent started) + { + Debug.Assert(Monitor.IsEntered(_lock)); + + var data = started.Data; + if (data is null) + { + return; + } + + var parentContext = _agentActivity?.Context ?? default; + var invokeActivity = _telemetry.StartInvokeAgentActivity( + _sessionId, + _requestModel, + ProviderName, + ServerAddress, + ServerPort, + agentName: data.AgentName, + agentDescription: data.AgentDescription, + parentContext: parentContext); + + if (invokeActivity is null) + { + return; + } + + var chatActivity = _telemetry.StartChatActivity( + _requestModel, + ProviderName, + ServerAddress, + ServerPort, + invokeActivity.Context, + _sessionId); + + var state = new SubagentState + { + InvokeAgentActivity = invokeActivity, + InvokeAgentTimestamp = Stopwatch.GetTimestamp(), + ChatActivity = chatActivity, + AgentName = data.AgentName, + }; + + _activeSubagents ??= new(StringComparer.Ordinal); + _activeSubagents[data.ToolCallId] = state; + } + + /// + /// Routes an event to its owning subagent's spans. + /// Caller must hold . 
+ /// + private void ProcessSubagentEventLocked(SubagentState subagent, SessionEvent sessionEvent) + { + Debug.Assert(Monitor.IsEntered(_lock)); + + switch (sessionEvent) + { + case AssistantUsageEvent usageEvent: + subagent.ResponseModel = usageEvent.Data.Model; + + // Update response model on both spans if the subagent is using + // a different model than what was set at span creation time. + if (!string.IsNullOrWhiteSpace(usageEvent.Data.Model)) + { + subagent.ChatActivity?.SetTag(OpenTelemetryConsts.GenAI.Response.Model, usageEvent.Data.Model); + subagent.InvokeAgentActivity?.SetTag(OpenTelemetryConsts.GenAI.Response.Model, usageEvent.Data.Model); + } + + if (!string.IsNullOrWhiteSpace(usageEvent.Data.ApiCallId)) + { + subagent.ResponseId = usageEvent.Data.ApiCallId; + } + else if (!string.IsNullOrWhiteSpace(usageEvent.Data.ProviderCallId)) + { + subagent.ResponseId = usageEvent.Data.ProviderCallId; + } + + if (usageEvent.Data.InputTokens is double inTok) + { + subagent.InputTokens += (int)inTok; + } + + if (usageEvent.Data.OutputTokens is double outTok) + { + subagent.OutputTokens += (int)outTok; + } + + if (usageEvent.Data.CacheReadTokens is double cacheRead) + { + subagent.CacheReadTokens += (int)cacheRead; + } + + if (usageEvent.Data.CacheWriteTokens is double cacheWrite) + { + subagent.CacheCreationTokens += (int)cacheWrite; + } + + break; + + case AssistantMessageEvent messageEvent: + { + List parts = []; + if (!string.IsNullOrWhiteSpace(messageEvent.Data?.ReasoningText)) + { + parts.Add(new("reasoning", Content: messageEvent.Data.ReasoningText)); + } + + if (!string.IsNullOrWhiteSpace(messageEvent.Data?.Content)) + { + parts.Add(new("text", Content: messageEvent.Data.Content)); + } + + if (parts.Count > 0) + { + subagent.OutputMessages.Add(new("assistant", parts)); + } + + break; + } + + case ToolExecutionStartEvent toolStartEvent: + { + if (toolStartEvent.Data is { } startData) + { + var isServerTool = startData.McpServerName is not null; + if 
(isServerTool && startData.ToolCallId is not null) + { + _serverToolCallIds ??= []; + _serverToolCallIds[startData.ToolCallId] = startData.McpServerName!; + } + + subagent.OutputMessages.Add(new("assistant", + [ + new(isServerTool ? "server_tool_call" : "tool_call", + Id: startData.ToolCallId, + Name: startData.ToolName, + Arguments: startData.Arguments, + McpServerName: startData.McpServerName) + ])); + + // Store the parent context for OnToolCall to use. + // For subagent tool calls, parent is the subagent's invoke_agent. + if (subagent.InvokeAgentActivity is not null && startData.ToolCallId is not null) + { + _pendingToolParents ??= []; + _pendingToolParents[startData.ToolCallId] = subagent.InvokeAgentActivity.Context; + } + } + + break; + } + + case ToolExecutionCompleteEvent toolCompleteEvent: + { + if (toolCompleteEvent.Data is { } toolData) + { + var resultContent = toolData.Result?.Content ?? toolData.Error?.Message; + string? serverName = null; + var isServerTool = _serverToolCallIds is not null + && _serverToolCallIds.Remove(toolData.ToolCallId, out serverName); + + subagent.InputMessages.Add(new("tool", + [ + new(isServerTool ? "server_tool_call_response" : "tool_call_response", + Id: toolData.ToolCallId, + Response: resultContent, + McpServerName: serverName) + ])); + } + + break; + } + } + } + + /// + /// Completes a subagent's chat and invoke_agent spans. + /// Caller must hold . + /// + private void CompleteSubagentLocked(string toolCallId, Exception? error) + { + Debug.Assert(Monitor.IsEntered(_lock)); + + if (_activeSubagents is null || !_activeSubagents.Remove(toolCallId, out var subagent)) + { + return; + } + + var finishReason = error is not null ? 
"error" : "stop"; + + // -- Complete the chat child span -- + var chatActivity = subagent.ChatActivity; + if (chatActivity is not null) + { + if (error is not null) + { + RecordError(chatActivity, error); + } + + chatActivity.SetTag(OpenTelemetryConsts.GenAI.Response.FinishReasons, new[] { finishReason }); + + if (chatActivity.IsAllDataRequested) + { + if (subagent.ResponseModel is not null) + { + chatActivity.SetTag(OpenTelemetryConsts.GenAI.Response.Model, subagent.ResponseModel); + } + + if (subagent.ResponseId is not null) + { + chatActivity.SetTag(OpenTelemetryConsts.GenAI.Response.Id, subagent.ResponseId); + } + + if (subagent.InputTokens > 0) + { + chatActivity.SetTag(OpenTelemetryConsts.GenAI.Usage.InputTokens, subagent.InputTokens); + } + + if (subagent.OutputTokens > 0) + { + chatActivity.SetTag(OpenTelemetryConsts.GenAI.Usage.OutputTokens, subagent.OutputTokens); + } + + if (subagent.CacheReadTokens > 0) + { + chatActivity.SetTag(OpenTelemetryConsts.GenAI.Usage.CacheReadInputTokens, subagent.CacheReadTokens); + } + + if (subagent.CacheCreationTokens > 0) + { + chatActivity.SetTag(OpenTelemetryConsts.GenAI.Usage.CacheCreationInputTokens, subagent.CacheCreationTokens); + } + } + + if (_telemetry.EnableSensitiveData) + { + if (subagent.InputMessages.Count > 0) + { + var inputJson = BuildMessagesJson(subagent.InputMessages); + if (inputJson is not null) + { + chatActivity.SetTag(OpenTelemetryConsts.GenAI.Input.Messages, inputJson); + } + } + + if (subagent.OutputMessages.Count > 0) + { + var outputJson = BuildMessagesJson(subagent.OutputMessages, finishReason: finishReason); + if (outputJson is not null) + { + chatActivity.SetTag(OpenTelemetryConsts.GenAI.Output.Messages, outputJson); + } + } + } + + _telemetry.RecordTokenUsageMetrics( + subagent.InputTokens > 0 ? subagent.InputTokens : null, + subagent.OutputTokens > 0 ? subagent.OutputTokens : null, + subagent.ResponseModel ?? 
_requestModel, + subagent.ResponseModel, + ProviderName, + ServerAddress, + ServerPort, + error, + OpenTelemetryConsts.GenAI.Chat); + + chatActivity.Dispose(); + } + + // -- Complete the invoke_agent span -- + var invokeActivity = subagent.InvokeAgentActivity; + if (invokeActivity is not null) + { + if (error is not null) + { + RecordError(invokeActivity, error); + } + + invokeActivity.SetTag(OpenTelemetryConsts.GenAI.Response.FinishReasons, new[] { finishReason }); + + if (invokeActivity.IsAllDataRequested) + { + if (subagent.ResponseModel is not null) + { + invokeActivity.SetTag(OpenTelemetryConsts.GenAI.Response.Model, subagent.ResponseModel); + } + + if (subagent.ResponseId is not null) + { + invokeActivity.SetTag(OpenTelemetryConsts.GenAI.Response.Id, subagent.ResponseId); + } + + if (subagent.InputTokens > 0) + { + invokeActivity.SetTag(OpenTelemetryConsts.GenAI.Usage.InputTokens, subagent.InputTokens); + } + + if (subagent.OutputTokens > 0) + { + invokeActivity.SetTag(OpenTelemetryConsts.GenAI.Usage.OutputTokens, subagent.OutputTokens); + } + + if (subagent.CacheReadTokens > 0) + { + invokeActivity.SetTag(OpenTelemetryConsts.GenAI.Usage.CacheReadInputTokens, subagent.CacheReadTokens); + } + + if (subagent.CacheCreationTokens > 0) + { + invokeActivity.SetTag(OpenTelemetryConsts.GenAI.Usage.CacheCreationInputTokens, subagent.CacheCreationTokens); + } + } + + if (_telemetry.EnableSensitiveData && subagent.OutputMessages.Count > 0) + { + var outputJson = BuildMessagesJson( + subagent.OutputMessages.Select(m => m with { FinishReason = finishReason }).ToList()); + if (outputJson is not null) + { + invokeActivity.SetTag(OpenTelemetryConsts.GenAI.Output.Messages, outputJson); + } + } + + if (_telemetry.OperationDurationHistogram.Enabled) + { + _telemetry.RecordOperationDuration( + Stopwatch.GetElapsedTime(subagent.InvokeAgentTimestamp).TotalSeconds, + subagent.ResponseModel ?? 
_requestModel, + subagent.ResponseModel, + ProviderName, + ServerAddress, + ServerPort, + error: error, + operationName: OpenTelemetryConsts.GenAI.InvokeAgent); + } + + invokeActivity.Dispose(); + } + } + + /// Tracks mutable state for an active subagent's spans. + private sealed class SubagentState + { + public Activity? InvokeAgentActivity; + public long InvokeAgentTimestamp; + public Activity? ChatActivity; + public string? AgentName; + public string? ResponseModel; + public string? ResponseId; + public int InputTokens; + public int OutputTokens; + public int CacheReadTokens; + public int CacheCreationTokens; + public List InputMessages = []; + public List OutputMessages = []; + } + + private static string? BuildMessagesJson(List? messages, string? finishReason = null) + { + if (messages is not { Count: > 0 }) + { + return null; + } + + using var stream = new MemoryStream(); + using (var writer = new Utf8JsonWriter(stream, s_jsonWriterOptions)) + { + writer.WriteStartArray(); + foreach (var message in messages) + { + if (message.Parts.Count == 0) + { + continue; + } + + writer.WriteStartObject(); + writer.WriteString("role", message.Role); + writer.WritePropertyName("parts"); + writer.WriteStartArray(); + foreach (var part in message.Parts) + { + writer.WriteStartObject(); + writer.WriteString("type", part.Type); + + switch (part.Type) + { + case "server_tool_call": + if (part.Id is not null) + { + writer.WriteString("id", part.Id); + } + + if (part.Name is not null) + { + writer.WriteString("name", part.Name); + } + + // Spec requires a nested server_tool_call object with a type discriminator. + // MCP tools use type "mcp" with a server_name field per the MEAI convention. 
+ writer.WritePropertyName("server_tool_call"); + writer.WriteStartObject(); + writer.WriteString("type", "mcp"); + if (part.McpServerName is not null) + { + writer.WriteString("server_name", part.McpServerName); + } + if (part.Arguments is not null) + { + writer.WritePropertyName("arguments"); + WriteJsonValue(writer, part.Arguments); + } + + writer.WriteEndObject(); + break; + + case "server_tool_call_response": + if (part.Id is not null) + { + writer.WriteString("id", part.Id); + } + + // Spec requires a nested server_tool_call_response object with a type discriminator. + writer.WritePropertyName("server_tool_call_response"); + writer.WriteStartObject(); + writer.WriteString("type", "mcp"); + if (part.McpServerName is not null) + { + writer.WriteString("server_name", part.McpServerName); + } + if (part.Response is not null) + { + writer.WritePropertyName("response"); + WriteJsonValue(writer, part.Response); + } + + writer.WriteEndObject(); + break; + + default: + if (part.Content is not null) + { + writer.WriteString("content", part.Content); + } + + if (part.Id is not null) + { + writer.WriteString("id", part.Id); + } + + if (part.Name is not null) + { + writer.WriteString("name", part.Name); + } + + if (part.Arguments is not null) + { + writer.WritePropertyName("arguments"); + WriteJsonValue(writer, part.Arguments); + } + + if (part.Response is not null) + { + writer.WritePropertyName("response"); + WriteJsonValue(writer, part.Response); + } + + break; + } + + writer.WriteEndObject(); + } + + writer.WriteEndArray(); + var effectiveFinishReason = message.FinishReason ?? 
finishReason; + if (effectiveFinishReason is not null) + { + writer.WriteString("finish_reason", effectiveFinishReason); + } + + writer.WriteEndObject(); + } + + writer.WriteEndArray(); + writer.Flush(); + } + + return MemoryStreamToUtf8String(stream); + } + + private static void WriteJsonValue(Utf8JsonWriter writer, object value) + { + switch (value) + { + case JsonElement jsonElement: + jsonElement.WriteTo(writer); + break; + case string text: + writer.WriteStringValue(text); + break; + default: + writer.WriteStringValue(value.ToString()); + break; + } + } + + private sealed record OtelMsg( + string Role, + List Parts, + string? FinishReason = null); + + private sealed record OtelPart( + string Type, + string? Content = null, + string? Id = null, + string? Name = null, + object? Arguments = null, + object? Response = null, + string? McpServerName = null); + + internal static string? BuildSystemInstructionsJson(SystemMessageConfig? systemMessage) + { + if (string.IsNullOrWhiteSpace(systemMessage?.Content)) + { + return null; + } + + using var stream = new MemoryStream(); + using (var writer = new Utf8JsonWriter(stream, s_jsonWriterOptions)) + { + writer.WriteStartArray(); + writer.WriteStartObject(); + writer.WriteString("type", "text"); + writer.WriteString("content", systemMessage.Content); + writer.WriteEndObject(); + writer.WriteEndArray(); + writer.Flush(); + } + + return MemoryStreamToUtf8String(stream); + } + + internal static string? BuildToolDefinitionsJson(ICollection? 
tools) + { + if (tools is not { Count: > 0 }) + { + return null; + } + + using var stream = new MemoryStream(); + using (var writer = new Utf8JsonWriter(stream, s_jsonWriterOptions)) + { + writer.WriteStartArray(); + foreach (var tool in tools) + { + writer.WriteStartObject(); + writer.WriteString("type", "function"); + writer.WriteString("name", tool.Name); + + if (!string.IsNullOrWhiteSpace(tool.Description)) + { + writer.WriteString("description", tool.Description); + } + + if (tool.JsonSchema.ValueKind is not JsonValueKind.Undefined and not JsonValueKind.Null) + { + writer.WritePropertyName("parameters"); + tool.JsonSchema.WriteTo(writer); + } + + writer.WriteEndObject(); + } + + writer.WriteEndArray(); + writer.Flush(); + } + + return MemoryStreamToUtf8String(stream); + } + + private static string MemoryStreamToUtf8String(MemoryStream stream) => + Encoding.UTF8.GetString(stream.GetBuffer(), 0, (int)stream.Length); + } +} diff --git a/dotnet/src/OpenTelemetryConsts.cs b/dotnet/src/OpenTelemetryConsts.cs new file mode 100644 index 000000000..973aeea51 --- /dev/null +++ b/dotnet/src/OpenTelemetryConsts.cs @@ -0,0 +1,188 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. + *--------------------------------------------------------------------------------------------*/ + +namespace GitHub.Copilot.SDK; + +/// +/// String constants for OpenTelemetry Semantic Conventions for Generative AI systems. +/// +/// +/// Based on the Semantic Conventions for Generative AI systems v1.40, +/// defined at . +/// The specification is still experimental and subject to change. 
+/// +internal static class OpenTelemetryConsts +{ + public const string DefaultSourceName = "github.copilot.sdk"; + public const string DefaultProviderName = "github"; + public const string CaptureMessageContentEnvVar = "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"; + public const string SecondsUnit = "s"; + public const string TokensUnit = "{token}"; + + public const string TokenTypeInput = "input"; + public const string TokenTypeOutput = "output"; + + public static class Error + { + public const string Type = "error.type"; + } + + public static class Server + { + public const string Address = "server.address"; + public const string Port = "server.port"; + } + + public static class GenAI + { + public const string Chat = "chat"; + public const string InvokeAgent = "invoke_agent"; + public const string ExecuteTool = "execute_tool"; + + public static class Operation + { + public const string Name = "gen_ai.operation.name"; + } + + public static class Provider + { + public const string Name = "gen_ai.provider.name"; + } + + public static class Agent + { + public const string Id = "gen_ai.agent.id"; + public const string Name = "gen_ai.agent.name"; + public const string Description = "gen_ai.agent.description"; + } + + public static class Conversation + { + public const string Id = "gen_ai.conversation.id"; + } + + public static class Request + { + public const string Model = "gen_ai.request.model"; + } + + public static class Response + { + public const string Id = "gen_ai.response.id"; + public const string Model = "gen_ai.response.model"; + public const string FinishReasons = "gen_ai.response.finish_reasons"; + } + + public static class Usage + { + public const string InputTokens = "gen_ai.usage.input_tokens"; + public const string OutputTokens = "gen_ai.usage.output_tokens"; + public const string CacheReadInputTokens = "gen_ai.usage.cache_read.input_tokens"; + public const string CacheCreationInputTokens = "gen_ai.usage.cache_creation.input_tokens"; + } + + 
public static class Token + { + public const string Type = "gen_ai.token.type"; + } + + public static class Input + { + public const string Messages = "gen_ai.input.messages"; + } + + public static class Output + { + public const string Messages = "gen_ai.output.messages"; + } + + public const string SystemInstructions = "gen_ai.system_instructions"; + + public static class Tool + { + public const string Definitions = "gen_ai.tool.definitions"; + public const string CallId = "gen_ai.tool.call.id"; + public const string CallArguments = "gen_ai.tool.call.arguments"; + public const string CallResult = "gen_ai.tool.call.result"; + public const string Name = "gen_ai.tool.name"; + public const string Description = "gen_ai.tool.description"; + public const string Type = "gen_ai.tool.type"; + } + + public static class Client + { + public static class TokenUsage + { + public const string Name = "gen_ai.client.token.usage"; + public const string Description = "Number of input and output tokens used."; + public static readonly int[] ExplicitBucketBoundaries = + [1, 4, 16, 64, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864]; + } + + public static class OperationDuration + { + public const string Name = "gen_ai.client.operation.duration"; + public const string Description = "GenAI operation duration."; + public static readonly double[] ExplicitBucketBoundaries = + [0.01, 0.02, 0.04, 0.08, 0.16, 0.32, 0.64, 1.28, 2.56, 5.12, 10.24, 20.48, 40.96, 81.92]; + } + + public static class TimeToFirstChunk + { + public const string Name = "gen_ai.client.operation.time_to_first_chunk"; + public const string Description = "Time to receive the first chunk from a streaming response."; + public static double[] ExplicitBucketBoundaries => + OperationDuration.ExplicitBucketBoundaries; + } + + public static class TimePerOutputChunk + { + public const string Name = "gen_ai.client.operation.time_per_output_chunk"; + public const string Description = "Time elapsed 
between streamed output chunks after the first chunk."; + public static double[] ExplicitBucketBoundaries => + OperationDuration.ExplicitBucketBoundaries; + } + } + + // Vendor-prefixed span event names for Copilot-specific lifecycle events. + // These follow the {vendor}.{domain}.{event} convention. + public static class CopilotEvent + { + public const string SessionTruncation = "github.copilot.session.truncation"; + public const string SessionCompactionStart = "github.copilot.session.compaction_start"; + public const string SessionCompactionComplete = "github.copilot.session.compaction_complete"; + public const string SkillInvoked = "github.copilot.skill.invoked"; + + // Attribute keys for custom events (vendor-prefixed). + public const string Message = "github.copilot.message"; + public const string TokenLimit = "github.copilot.token_limit"; + public const string PreTokens = "github.copilot.pre_tokens"; + public const string PostTokens = "github.copilot.post_tokens"; + public const string PreMessages = "github.copilot.pre_messages"; + public const string PostMessages = "github.copilot.post_messages"; + public const string TokensRemoved = "github.copilot.tokens_removed"; + public const string MessagesRemoved = "github.copilot.messages_removed"; + public const string PerformedBy = "github.copilot.performed_by"; + public const string Success = "github.copilot.success"; + public const string SkillName = "github.copilot.skill.name"; + public const string SkillPath = "github.copilot.skill.path"; + public const string SkillContent = "github.copilot.skill.content"; + public const string SkillPluginName = "github.copilot.skill.plugin_name"; + public const string SkillPluginVersion = "github.copilot.skill.plugin_version"; + } + + // Vendor-prefixed span attributes for Copilot-specific data on standardized spans. 
+ public static class Copilot + { + // High-value: on chat spans (from AssistantUsageData) + public const string Cost = "github.copilot.cost"; + public const string ServerDuration = "github.copilot.server_duration"; + public const string Initiator = "github.copilot.initiator"; + public const string Aiu = "github.copilot.aiu"; + + public const string TurnId = "github.copilot.turn_id"; + public const string InteractionId = "github.copilot.interaction_id"; + } + } +} diff --git a/dotnet/src/Session.cs b/dotnet/src/Session.cs index 923b193cc..41f6cb545 100644 --- a/dotnet/src/Session.cs +++ b/dotnet/src/Session.cs @@ -4,6 +4,7 @@ using Microsoft.Extensions.AI; using StreamJsonRpc; +using System.Diagnostics; using System.Text.Json; using System.Text.Json.Nodes; using System.Text.Json.Serialization; @@ -47,6 +48,7 @@ public partial class CopilotSession : IAsyncDisposable private readonly HashSet _eventHandlers = new(); private readonly Dictionary _toolHandlers = new(); private readonly JsonRpc _rpc; + private readonly CopilotTelemetry.AgentTurnTracker? _turnTracker; private PermissionRequestHandler? _permissionHandler; private readonly SemaphoreSlim _permissionHandlerLock = new(1, 1); private UserInputHandler? _userInputHandler; @@ -76,19 +78,44 @@ public partial class CopilotSession : IAsyncDisposable /// public string? WorkspacePath { get; } + internal string TelemetryProviderName => _turnTracker?.ProviderName ?? OpenTelemetryConsts.DefaultProviderName; + internal string? TelemetryServerAddress => _turnTracker?.ServerAddress; + internal int? TelemetryServerPort => _turnTracker?.ServerPort; + internal ActivityContext TelemetryActivityContext => _turnTracker?.GetActivityContext() ?? default; + internal ActivityContext GetTelemetryToolCallParentContext(string toolCallId) => + _turnTracker?.GetToolCallParentContext(toolCallId) ?? default; + /// /// Initializes a new instance of the class. /// /// The unique identifier for this session. 
/// The JSON-RPC connection to the Copilot CLI. + /// The telemetry instance for this session, or null if telemetry is disabled. /// The workspace path if infinite sessions are enabled. + /// The request model for telemetry. + /// The provider configuration for telemetry. + /// The system message configuration for telemetry. + /// The tool definitions for telemetry. + /// Whether streaming is enabled, for telemetry. /// /// This constructor is internal. Use to create sessions. /// - internal CopilotSession(string sessionId, JsonRpc rpc, string? workspacePath = null) + internal CopilotSession( + string sessionId, + JsonRpc rpc, + CopilotTelemetry? telemetry = null, + string? workspacePath = null, + string? model = null, + ProviderConfig? provider = null, + SystemMessageConfig? systemMessage = null, + ICollection? tools = null, + bool streaming = false, + string? agentName = null, + string? agentDescription = null) { SessionId = sessionId; _rpc = rpc; + _turnTracker = telemetry is not null ? new CopilotTelemetry.AgentTurnTracker(telemetry, sessionId, model, provider, systemMessage, tools, streaming, agentName, agentDescription) : null; WorkspacePath = workspacePath; } @@ -125,18 +152,27 @@ private Task InvokeRpcAsync(string method, object?[]? 
args, CancellationTo /// public async Task SendAsync(MessageOptions options, CancellationToken cancellationToken = default) { - var request = new SendMessageRequest + _turnTracker?.BeginSend(options.Prompt); + try { - SessionId = SessionId, - Prompt = options.Prompt, - Attachments = options.Attachments, - Mode = options.Mode - }; + var request = new SendMessageRequest + { + SessionId = SessionId, + Prompt = options.Prompt, + Attachments = options.Attachments, + Mode = options.Mode + }; - var response = await InvokeRpcAsync( - "session.send", [request], cancellationToken); + var response = await InvokeRpcAsync( + "session.send", [request], cancellationToken); - return response.MessageId; + return response.MessageId; + } + catch (Exception ex) when (_turnTracker is { } tracker) + { + tracker.CompleteTurnWithError(ex); + throw; + } } /// @@ -198,17 +234,28 @@ void Handler(SessionEvent evt) await SendAsync(options, cancellationToken); - using var cts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); - cts.CancelAfter(effectiveTimeout); + try + { + using var cts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); + cts.CancelAfter(effectiveTimeout); + + using var registration = cts.Token.Register(() => + { + if (cancellationToken.IsCancellationRequested) + tcs.TrySetCanceled(cancellationToken); + else + tcs.TrySetException(new TimeoutException($"SendAndWaitAsync timed out after {effectiveTimeout}")); + }); - using var registration = cts.Token.Register(() => + return await tcs.Task; + } + catch (Exception ex) when (_turnTracker is { } tracker) { - if (cancellationToken.IsCancellationRequested) - tcs.TrySetCanceled(cancellationToken); - else - tcs.TrySetException(new TimeoutException($"SendAndWaitAsync timed out after {effectiveTimeout}")); - }); - return await tcs.Task; + // If timeout/cancellation occurs before DispatchEvent handles the turn-ending event, + // complete the telemetry span with the error (idempotent if already 
completed). + tracker.CompleteTurnWithError(ex); + throw; + } } /// @@ -258,6 +305,8 @@ public IDisposable On(SessionEventHandler handler) /// internal void DispatchEvent(SessionEvent sessionEvent) { + _turnTracker?.ProcessEvent(sessionEvent); + foreach (var handler in _eventHandlers.ToArray()) { // We allow handler exceptions to propagate so they are not lost @@ -588,6 +637,7 @@ await InvokeRpcAsync( _eventHandlers.Clear(); _toolHandlers.Clear(); + _turnTracker?.CompleteOnDispose(); await _permissionHandlerLock.WaitAsync(); try diff --git a/dotnet/src/Types.cs b/dotnet/src/Types.cs index 1b716cd41..2337986c7 100644 --- a/dotnet/src/Types.cs +++ b/dotnet/src/Types.cs @@ -48,6 +48,7 @@ protected CopilotClientOptions(CopilotClientOptions? other) Logger = other.Logger; LogLevel = other.LogLevel; Port = other.Port; + Telemetry = other.Telemetry; UseLoggedInUser = other.UseLoggedInUser; UseStdio = other.UseStdio; } @@ -92,6 +93,15 @@ public string? GithubToken /// public bool? UseLoggedInUser { get; set; } + /// + /// Gets or sets configuration for OpenTelemetry instrumentation. + /// + /// + /// When , no telemetry is emitted. To enable telemetry with default settings, + /// set this property to a new instance. + /// + public TelemetryConfig? Telemetry { get; set; } + /// /// Creates a shallow clone of this instance. /// @@ -104,6 +114,29 @@ public string? GithubToken public virtual CopilotClientOptions Clone() => new(this); } +/// +/// Configuration for OpenTelemetry instrumentation of the Copilot SDK. +/// +public class TelemetryConfig +{ + /// + /// Gets or sets whether to include potentially sensitive data (e.g. message content, tool arguments/results) in telemetry. + /// + /// + /// When , defaults to the value of the OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT + /// environment variable (if set to ), otherwise . + /// + public bool? EnableSensitiveData { get; set; } + + /// + /// Gets or sets the name used for the and . 
+ /// + /// + /// When , defaults to "github.copilot.sdk". + /// + public string? SourceName { get; set; } +} + public class ToolBinaryResult { [JsonPropertyName("data")] @@ -754,6 +787,8 @@ protected SessionConfig(SessionConfig? other) { if (other is null) return; + AgentName = other.AgentName; + AgentDescription = other.AgentDescription; AvailableTools = other.AvailableTools is not null ? [.. other.AvailableTools] : null; ClientName = other.ClientName; ConfigDir = other.ConfigDir; @@ -857,6 +892,18 @@ protected SessionConfig(SessionConfig? other) /// public List? DisabledSkills { get; set; } + /// + /// Name of the agent for telemetry attribution. + /// When set, the invoke_agent span includes a gen_ai.agent.name attribute. + /// + public string? AgentName { get; set; } + + /// + /// Description of the agent for telemetry attribution. + /// When set, the invoke_agent span includes a gen_ai.agent.description attribute. + /// + public string? AgentDescription { get; set; } + /// /// Infinite session configuration for persistent workspaces and automatic compaction. /// When enabled (default), sessions automatically manage context limits and persist state. @@ -891,6 +938,8 @@ protected ResumeSessionConfig(ResumeSessionConfig? other) { if (other is null) return; + AgentName = other.AgentName; + AgentDescription = other.AgentDescription; AvailableTools = other.AvailableTools is not null ? [.. other.AvailableTools] : null; ClientName = other.ClientName; ConfigDir = other.ConfigDir; @@ -1014,6 +1063,16 @@ protected ResumeSessionConfig(ResumeSessionConfig? other) /// public List? DisabledSkills { get; set; } + /// + /// Name of the agent for telemetry attribution. + /// + public string? AgentName { get; set; } + + /// + /// Description of the agent for telemetry attribution. + /// + public string? AgentDescription { get; set; } + /// /// Infinite session configuration for persistent workspaces and automatic compaction. 
/// diff --git a/dotnet/test/GitHub.Copilot.SDK.Test.csproj b/dotnet/test/GitHub.Copilot.SDK.Test.csproj index 654a988a0..34c0ba07c 100644 --- a/dotnet/test/GitHub.Copilot.SDK.Test.csproj +++ b/dotnet/test/GitHub.Copilot.SDK.Test.csproj @@ -20,6 +20,7 @@ + runtime; build; native; contentfiles; analyzers; buildtransitive diff --git a/dotnet/test/Harness/E2ETestContext.cs b/dotnet/test/Harness/E2ETestContext.cs index 00fc32075..25a8b26c4 100644 --- a/dotnet/test/Harness/E2ETestContext.cs +++ b/dotnet/test/Harness/E2ETestContext.cs @@ -89,12 +89,13 @@ public IReadOnlyDictionary GetEnvironment() return env!; } - public CopilotClient CreateClient() => new(new CopilotClientOptions + public CopilotClient CreateClient(TelemetryConfig? telemetry = null) => new(new CopilotClientOptions { Cwd = WorkDir, CliPath = GetCliPath(_repoRoot), Environment = GetEnvironment(), GitHubToken = !string.IsNullOrEmpty(Environment.GetEnvironmentVariable("CI")) ? "fake-token-for-e2e-tests" : null, + Telemetry = telemetry, }); public async ValueTask DisposeAsync() diff --git a/dotnet/test/OpenTelemetryTests.cs b/dotnet/test/OpenTelemetryTests.cs new file mode 100644 index 000000000..88e2e01f6 --- /dev/null +++ b/dotnet/test/OpenTelemetryTests.cs @@ -0,0 +1,804 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. + *--------------------------------------------------------------------------------------------*/ + +using System.Diagnostics; +using System.Diagnostics.Metrics; +using System.Linq; +using GitHub.Copilot.SDK.Test.Harness; +using Microsoft.Extensions.AI; +using System.ComponentModel; +using Xunit; + +namespace GitHub.Copilot.SDK.Test; + +/// +/// E2E tests for OpenTelemetry instrumentation that validate spans, events, +/// metrics, sensitive data gating, and configuration through public APIs. 
+/// All tests reuse the "invokes_custom_tool" snapshot since telemetry is +/// purely client-side and does not affect the proxy traffic. +/// +public class OpenTelemetryE2ETests(E2ETestFixture fixture) + : IClassFixture, IAsyncLifetime +{ + private E2ETestContext Ctx => fixture.Ctx; + + public Task InitializeAsync() => + Ctx.ConfigureForTestAsync("tools", "Invokes_Custom_Tool"); + + public Task DisposeAsync() => Task.CompletedTask; + + [Fact] + public async Task Records_Invoke_Agent_And_Execute_Tool_Spans() + { + var activities = new List(); + using var activityListener = CreateActivityListener("github.copilot.sdk", activities); + + var tokenRecordings = new List<(int Value, KeyValuePair[] Tags)>(); + var durationRecordings = new List<(double Value, KeyValuePair[] Tags)>(); + using var meterListener = CreateMeterListener("github.copilot.sdk", tokenRecordings, durationRecordings); + + await using var client = Ctx.CreateClient(new TelemetryConfig()); + var session = await client.CreateSessionAsync(new SessionConfig + { + Model = "gpt-4o", + Tools = [AIFunctionFactory.Create(EncryptString, "encrypt_string")], + OnPermissionRequest = PermissionHandler.ApproveAll, + }); + + var response = await session.SendAndWaitAsync(new MessageOptions + { + Prompt = "Use encrypt_string to encrypt this string: Hello" + }); + + Assert.NotNull(response); + Assert.Contains("HELLO", response!.Data.Content ?? 
string.Empty); + + // -- invoke_agent span -- + var invokeActivity = Assert.Single(activities, a => + a.GetTagItem("gen_ai.operation.name")?.ToString() == "invoke_agent"); + Assert.Equal(ActivityKind.Client, invokeActivity.Kind); + Assert.Equal(session.SessionId, invokeActivity.GetTagItem("gen_ai.conversation.id")?.ToString()); + Assert.Equal(session.SessionId, invokeActivity.GetTagItem("gen_ai.agent.id")?.ToString()); + Assert.NotNull(invokeActivity.GetTagItem("gen_ai.provider.name")); + + // -- gen_ai.request.model -- + Assert.Equal("gpt-4o", invokeActivity.GetTagItem("gen_ai.request.model")?.ToString()); + + // -- server.address (conditional — depends on provider config) -- + if (invokeActivity.GetTagItem("server.address") is not null) + { + Assert.IsType(invokeActivity.GetTagItem("server.address")); + } + + // -- chat child spans (one per LLM turn) -- + var chatActivities = activities.Where(a => + a.GetTagItem("gen_ai.operation.name")?.ToString() == "chat").ToList(); + Assert.NotEmpty(chatActivities); + Assert.All(chatActivities, chatActivity => + { + Assert.Equal(ActivityKind.Client, chatActivity.Kind); + Assert.Equal(invokeActivity.TraceId, chatActivity.TraceId); + Assert.Equal(invokeActivity.SpanId, chatActivity.ParentSpanId); + }); + + // -- gen_ai.response.model, gen_ai.response.id, token counts (on chat spans) -- + var firstChat = chatActivities[0]; + if (firstChat.GetTagItem("gen_ai.response.model") is not null) + { + Assert.IsType(firstChat.GetTagItem("gen_ai.response.model")); + } + if (firstChat.GetTagItem("gen_ai.response.id") is not null) + { + Assert.IsType(firstChat.GetTagItem("gen_ai.response.id")); + } + + // -- execute_tool span -- + var toolActivity = Assert.Single(activities, a => + a.GetTagItem("gen_ai.operation.name")?.ToString() == "execute_tool"); + Assert.Equal(ActivityKind.Internal, toolActivity.Kind); + Assert.Equal("encrypt_string", toolActivity.GetTagItem("gen_ai.tool.name")?.ToString()); + Assert.Equal("function", 
toolActivity.GetTagItem("gen_ai.tool.type")?.ToString()); + Assert.NotNull(toolActivity.GetTagItem("gen_ai.tool.call.id")); + + // -- execute_tool is a child of invoke_agent -- + Assert.Equal(invokeActivity.TraceId, toolActivity.TraceId); + Assert.Equal(invokeActivity.SpanId, toolActivity.ParentSpanId); + + // -- gen_ai.tool.description -- + Assert.NotNull(toolActivity.GetTagItem("gen_ai.tool.description")); + + // -- duration metrics -- + Assert.NotEmpty(durationRecordings); + Assert.Contains(durationRecordings, r => + r.Tags.Any(t => t.Key == "gen_ai.operation.name" && t.Value?.ToString() == "invoke_agent") && + r.Value > 0); + Assert.Contains(durationRecordings, r => + r.Tags.Any(t => t.Key == "gen_ai.operation.name" && t.Value?.ToString() == "execute_tool") && + r.Value > 0); + Assert.Contains(durationRecordings, r => + r.Tags.Any(t => t.Key == "gen_ai.operation.name" && t.Value?.ToString() == "chat") && + r.Value > 0); + + // Duration metrics should include provider tag + var invokeMetric = durationRecordings.First(r => + r.Tags.Any(t => t.Key == "gen_ai.operation.name" && t.Value?.ToString() == "invoke_agent")); + Assert.Contains(invokeMetric.Tags, t => t.Key == "gen_ai.provider.name" && t.Value is not null); + + // -- token usage metrics (conditional — depends on proxy response) -- + if (tokenRecordings.Count > 0) + { + Assert.All(tokenRecordings, r => + { + var tokenType = r.Tags.FirstOrDefault(t => t.Key == "gen_ai.token.type").Value?.ToString(); + Assert.True(tokenType is "input" or "output", + $"Expected token type 'input' or 'output', got '{tokenType}'"); + Assert.Contains(r.Tags, t => t.Key == "gen_ai.operation.name" && t.Value?.ToString() == "chat"); + Assert.Contains(r.Tags, t => t.Key == "gen_ai.provider.name" && t.Value is not null); + }); + } + } + + [Fact] + public async Task Sensitive_Data_Includes_Tool_Content() + { + var activities = new List(); + using var activityListener = CreateActivityListener("github.copilot.sdk", activities); + + await 
using var client = Ctx.CreateClient(new TelemetryConfig { EnableSensitiveData = true }); + var session = await client.CreateSessionAsync(new SessionConfig + { + Tools = [AIFunctionFactory.Create(EncryptString, "encrypt_string")], + OnPermissionRequest = PermissionHandler.ApproveAll, + }); + + var response = await session.SendAndWaitAsync(new MessageOptions + { + Prompt = "Use encrypt_string to encrypt this string: Hello" + }); + + Assert.NotNull(response); + + // Tool arguments are set on the execute_tool span + var toolActivity = Assert.Single(activities, a => + a.GetTagItem("gen_ai.operation.name")?.ToString() == "execute_tool"); + var toolArgs = toolActivity.GetTagItem("gen_ai.tool.call.arguments")?.ToString(); + Assert.NotNull(toolArgs); + Assert.Contains("Hello", toolArgs); + + // Tool result is set + var toolResult = toolActivity.GetTagItem("gen_ai.tool.call.result")?.ToString(); + Assert.NotNull(toolResult); + Assert.Contains("HELLO", toolResult); + } + + [Fact] + public async Task No_Telemetry_When_Config_Is_Null() + { + var activities = new List(); + using var activityListener = CreateActivityListener("github.copilot.sdk", activities); + + var tokenRecordings = new List<(int Value, KeyValuePair[] Tags)>(); + var durationRecordings = new List<(double Value, KeyValuePair[] Tags)>(); + using var meterListener = CreateMeterListener("github.copilot.sdk", tokenRecordings, durationRecordings); + + await using var client = Ctx.CreateClient(telemetry: null); + var session = await client.CreateSessionAsync(new SessionConfig + { + Tools = [AIFunctionFactory.Create(EncryptString, "encrypt_string")], + OnPermissionRequest = PermissionHandler.ApproveAll, + }); + + var response = await session.SendAndWaitAsync(new MessageOptions + { + Prompt = "Use encrypt_string to encrypt this string: Hello" + }); + + Assert.NotNull(response); + Assert.Contains("HELLO", response!.Data.Content ?? 
string.Empty); + + // No activities or metrics emitted + Assert.Empty(activities); + Assert.Empty(durationRecordings); + Assert.Empty(tokenRecordings); + } + + [Fact] + public async Task Custom_Source_Name_Used() + { + const string customSourceName = "my.custom.source"; + + var activities = new List(); + using var activityListener = CreateActivityListener(customSourceName, activities); + + var durationRecordings = new List<(double Value, KeyValuePair[] Tags)>(); + using var meterListener = CreateMeterListener(customSourceName, + new List<(int, KeyValuePair[])>(), durationRecordings); + + await using var client = Ctx.CreateClient(new TelemetryConfig { SourceName = customSourceName }); + var session = await client.CreateSessionAsync(new SessionConfig + { + Tools = [AIFunctionFactory.Create(EncryptString, "encrypt_string")], + OnPermissionRequest = PermissionHandler.ApproveAll, + }); + + await session.SendAndWaitAsync(new MessageOptions + { + Prompt = "Use encrypt_string to encrypt this string: Hello" + }); + + Assert.NotEmpty(activities); + Assert.All(activities, a => Assert.Equal(customSourceName, a.Source.Name)); + Assert.NotEmpty(durationRecordings); + } + + [Fact] + public async Task Default_Source_Name_Not_Captured_When_Custom_Name_Used() + { + // Verify that the default source name does NOT emit when a custom name is configured + var defaultActivities = new List(); + using var defaultListener = CreateActivityListener("github.copilot.sdk", defaultActivities); + + await using var client = Ctx.CreateClient(new TelemetryConfig { SourceName = "custom.telemetry" }); + var session = await client.CreateSessionAsync(new SessionConfig + { + Tools = [AIFunctionFactory.Create(EncryptString, "encrypt_string")], + OnPermissionRequest = PermissionHandler.ApproveAll, + }); + + await session.SendAndWaitAsync(new MessageOptions + { + Prompt = "Use encrypt_string to encrypt this string: Hello" + }); + + Assert.Empty(defaultActivities); + } + + [Fact] + public async Task 
Successful_Turn_Has_Finish_Reasons() + { + var activities = new List(); + using var activityListener = CreateActivityListener("github.copilot.sdk", activities); + + await using var client = Ctx.CreateClient(new TelemetryConfig()); + var session = await client.CreateSessionAsync(new SessionConfig + { + Tools = [AIFunctionFactory.Create(EncryptString, "encrypt_string")], + OnPermissionRequest = PermissionHandler.ApproveAll, + }); + + var response = await session.SendAndWaitAsync(new MessageOptions + { + Prompt = "Use encrypt_string to encrypt this string: Hello" + }); + + Assert.NotNull(response); + + // Verify finish_reasons are set on the invoke_agent span + var invokeActivity = Assert.Single(activities, a => + a.GetTagItem("gen_ai.operation.name")?.ToString() == "invoke_agent"); + var finishReasons = invokeActivity.GetTagItem("gen_ai.response.finish_reasons") as string[]; + Assert.NotNull(finishReasons); + Assert.Contains("stop", finishReasons); + } + + [Fact] + public async Task Env_Var_Enables_Sensitive_Data() + { + try + { + Environment.SetEnvironmentVariable("OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT", "true"); + + var activities = new List(); + using var activityListener = CreateActivityListener("github.copilot.sdk", activities); + + // Client created with default TelemetryConfig (no explicit EnableSensitiveData) + // but the env var should enable it + await using var client = Ctx.CreateClient(new TelemetryConfig()); + var session = await client.CreateSessionAsync(new SessionConfig + { + Tools = [AIFunctionFactory.Create(EncryptString, "encrypt_string")], + OnPermissionRequest = PermissionHandler.ApproveAll, + }); + + var response = await session.SendAndWaitAsync(new MessageOptions + { + Prompt = "Use encrypt_string to encrypt this string: Hello" + }); + + Assert.NotNull(response); + + // Tool arguments should be present due to env var enabling sensitive data + var toolActivity = Assert.Single(activities, a => + 
a.GetTagItem("gen_ai.operation.name")?.ToString() == "execute_tool"); + var toolArgs = toolActivity.GetTagItem("gen_ai.tool.call.arguments"); + Assert.NotNull(toolArgs); + } + finally + { + Environment.SetEnvironmentVariable("OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT", null); + } + } + + [Fact] + public async Task Sensitive_Data_Disabled_Excludes_Tool_Content() + { + var activities = new List(); + using var activityListener = CreateActivityListener("github.copilot.sdk", activities); + + await using var client = Ctx.CreateClient(new TelemetryConfig { EnableSensitiveData = false }); + var session = await client.CreateSessionAsync(new SessionConfig + { + Tools = [AIFunctionFactory.Create(EncryptString, "encrypt_string")], + OnPermissionRequest = PermissionHandler.ApproveAll, + }); + + var response = await session.SendAndWaitAsync(new MessageOptions + { + Prompt = "Use encrypt_string to encrypt this string: Hello" + }); + + Assert.NotNull(response); + + // Tool arguments and results should NOT be present + var toolActivity = Assert.Single(activities, a => + a.GetTagItem("gen_ai.operation.name")?.ToString() == "execute_tool"); + Assert.Null(toolActivity.GetTagItem("gen_ai.tool.call.arguments")); + Assert.Null(toolActivity.GetTagItem("gen_ai.tool.call.result")); + } + + [Fact] + public async Task Sensitive_Data_Includes_Message_Content() + { + var activities = new List(); + using var activityListener = CreateActivityListener("github.copilot.sdk", activities); + + await using var client = Ctx.CreateClient(new TelemetryConfig { EnableSensitiveData = true }); + var session = await client.CreateSessionAsync(new SessionConfig + { + Model = "gpt-4o", + SystemMessage = new SystemMessageConfig { Content = "You are a helpful assistant." 
}, + Tools = [AIFunctionFactory.Create(EncryptString, "encrypt_string")], + OnPermissionRequest = PermissionHandler.ApproveAll, + }); + + var response = await session.SendAndWaitAsync(new MessageOptions + { + Prompt = "Use encrypt_string to encrypt this string: Hello" + }); + + Assert.NotNull(response); + + var invokeActivity = Assert.Single(activities, a => + a.GetTagItem("gen_ai.operation.name")?.ToString() == "invoke_agent"); + + // Messages are on chat child spans, not invoke_agent + var chatActivities = activities.Where(a => + a.GetTagItem("gen_ai.operation.name")?.ToString() == "chat").ToList(); + Assert.NotEmpty(chatActivities); + + // Input messages should be present on at least one chat span + var chatWithInput = chatActivities.FirstOrDefault(a => + a.GetTagItem("gen_ai.input.messages")?.ToString() is not null); + Assert.NotNull(chatWithInput); + var inputMessages = chatWithInput!.GetTagItem("gen_ai.input.messages")!.ToString()!; + Assert.Contains("Hello", inputMessages); + Assert.Contains("\"role\":\"user\"", inputMessages); + + // Tool results should appear as input messages with role "tool" + var allInputMessages = string.Join(" ", chatActivities + .Select(a => a.GetTagItem("gen_ai.input.messages")?.ToString() ?? "")); + Assert.Contains("\"role\":\"tool\"", allInputMessages); + Assert.Contains("\"type\":\"tool_call_response\"", allInputMessages); + + // Output messages should be present on at least one chat span + var allOutputMessages = string.Join(" ", chatActivities + .Select(a => a.GetTagItem("gen_ai.output.messages")?.ToString() ?? 
"")); + Assert.Contains("\"role\":\"assistant\"", allOutputMessages); + + // Tool call requests should appear in output messages + Assert.Contains("\"type\":\"tool_call\"", allOutputMessages); + Assert.Contains("encrypt_string", allOutputMessages); + + // System instructions should be present on invoke_agent + var sysInstructions = invokeActivity.GetTagItem("gen_ai.system_instructions")?.ToString(); + Assert.NotNull(sysInstructions); + Assert.Contains("You are a helpful assistant.", sysInstructions); + + // Tool definitions should be present on invoke_agent + var toolDefs = invokeActivity.GetTagItem("gen_ai.tool.definitions")?.ToString(); + Assert.NotNull(toolDefs); + Assert.Contains("encrypt_string", toolDefs); + } + + [Fact] + public async Task Sensitive_Data_Disabled_Excludes_Message_Content() + { + var activities = new List(); + using var activityListener = CreateActivityListener("github.copilot.sdk", activities); + + await using var client = Ctx.CreateClient(new TelemetryConfig { EnableSensitiveData = false }); + var session = await client.CreateSessionAsync(new SessionConfig + { + Model = "gpt-4o", + SystemMessage = new SystemMessageConfig { Content = "You are a helpful assistant." 
}, + Tools = [AIFunctionFactory.Create(EncryptString, "encrypt_string")], + OnPermissionRequest = PermissionHandler.ApproveAll, + }); + + var response = await session.SendAndWaitAsync(new MessageOptions + { + Prompt = "Use encrypt_string to encrypt this string: Hello" + }); + + Assert.NotNull(response); + + var invokeActivity = Assert.Single(activities, a => + a.GetTagItem("gen_ai.operation.name")?.ToString() == "invoke_agent"); + + // Sensitive attributes should be excluded from invoke_agent + Assert.Null(invokeActivity.GetTagItem("gen_ai.input.messages")); + Assert.Null(invokeActivity.GetTagItem("gen_ai.output.messages")); + Assert.Null(invokeActivity.GetTagItem("gen_ai.system_instructions")); + + // Sensitive attributes should be excluded from chat spans too + var chatActivities = activities.Where(a => + a.GetTagItem("gen_ai.operation.name")?.ToString() == "chat").ToList(); + Assert.All(chatActivities, chatActivity => + { + Assert.Null(chatActivity.GetTagItem("gen_ai.input.messages")); + Assert.Null(chatActivity.GetTagItem("gen_ai.output.messages")); + }); + + // Tool definitions should still be present (not sensitive-gated) + var toolDefs = invokeActivity.GetTagItem("gen_ai.tool.definitions")?.ToString(); + Assert.NotNull(toolDefs); + Assert.Contains("encrypt_string", toolDefs); + } + + [Fact] + public async Task Records_Subagent_Invoke_Agent_And_Chat_Spans() + { + // Use a separate snapshot for the subagent scenario. 
+ await Ctx.ConfigureForTestAsync("otel", "Records_Subagent_Invoke_Agent_And_Chat_Spans"); + + var activities = new List(); + using var activityListener = CreateActivityListener("github.copilot.sdk", activities); + + var tokenRecordings = new List<(int Value, KeyValuePair[] Tags)>(); + var durationRecordings = new List<(double Value, KeyValuePair[] Tags)>(); + using var meterListener = CreateMeterListener("github.copilot.sdk", tokenRecordings, durationRecordings); + + int topicCallCount = 0; + string GetTopic() => Interlocked.Increment(ref topicCallCount) == 1 ? "mountains" : "rivers"; + + await using var client = Ctx.CreateClient(new TelemetryConfig { EnableSensitiveData = true }); + var session = await client.CreateSessionAsync(new SessionConfig + { + Tools = [AIFunctionFactory.Create(GetTopic, "get_topic", "Returns a poem topic")], + OnPermissionRequest = PermissionHandler.ApproveAll, + }); + + var response = await session.SendAndWaitAsync(new MessageOptions + { + Prompt = "Call get_topic twice to get two different topics, then launch two background " + + "general-purpose subagents in parallel (one per topic) to each write a short " + + "four-line poem about their assigned topic. Use read_agent to wait for both " + + "results and show them to me." + }); + + Assert.NotNull(response); + + // ===================================================================== + // 1. 
Root invoke_agent span + // ===================================================================== + var allInvokeActivities = activities.Where(a => + a.GetTagItem("gen_ai.operation.name")?.ToString() == "invoke_agent").ToList(); + Assert.True(allInvokeActivities.Count >= 3, + $"Expected ≥3 invoke_agent spans (1 root + ≥2 subagents), got {allInvokeActivities.Count}"); + + // The root invoke_agent has no parent invoke_agent + var mainInvoke = allInvokeActivities.First(a => + !allInvokeActivities.Any(p => p.SpanId == a.ParentSpanId)); + Assert.Equal(ActivityKind.Client, mainInvoke.Kind); + Assert.Equal("invoke_agent", mainInvoke.GetTagItem("gen_ai.operation.name")?.ToString()); + Assert.Equal(session.SessionId, mainInvoke.GetTagItem("gen_ai.conversation.id")?.ToString()); + Assert.Equal(session.SessionId, mainInvoke.GetTagItem("gen_ai.agent.id")?.ToString()); + Assert.NotNull(mainInvoke.GetTagItem("gen_ai.provider.name")); + + // Root invoke_agent finish_reasons should be "stop" + var rootFinish = mainInvoke.GetTagItem("gen_ai.response.finish_reasons") as string[]; + Assert.NotNull(rootFinish); + Assert.Contains("stop", rootFinish); + + // Tool definitions should be present (not sensitive-gated) + var toolDefs = mainInvoke.GetTagItem("gen_ai.tool.definitions")?.ToString(); + Assert.NotNull(toolDefs); + Assert.Contains("get_topic", toolDefs); + + // Root invoke_agent should have accumulated input messages (sensitive data on) + var rootInputMessages = mainInvoke.GetTagItem("gen_ai.input.messages")?.ToString(); + Assert.NotNull(rootInputMessages); + Assert.Contains("\"role\":\"user\"", rootInputMessages); + + // Root invoke_agent should have accumulated output messages + var rootOutputMessages = mainInvoke.GetTagItem("gen_ai.output.messages")?.ToString(); + Assert.NotNull(rootOutputMessages); + Assert.Contains("\"role\":\"assistant\"", rootOutputMessages); + + // ===================================================================== + // 2. 
Subagent invoke_agent spans (≥2, children of root) + // ===================================================================== + var subagentInvokeActivities = allInvokeActivities.Where(a => + a.ParentSpanId == mainInvoke.SpanId).ToList(); + Assert.True(subagentInvokeActivities.Count >= 2, + $"Expected ≥2 subagent invoke_agent spans, got {subagentInvokeActivities.Count}"); + + Assert.All(subagentInvokeActivities, sub => + { + Assert.Equal(ActivityKind.Client, sub.Kind); + Assert.Equal(mainInvoke.TraceId, sub.TraceId); + Assert.Equal("invoke_agent", sub.GetTagItem("gen_ai.operation.name")?.ToString()); + Assert.NotNull(sub.GetTagItem("gen_ai.provider.name")); + + // Subagent invoke_agent should have finish_reasons + var subFinish = sub.GetTagItem("gen_ai.response.finish_reasons") as string[]; + Assert.NotNull(subFinish); + Assert.Contains("stop", subFinish); + + // Subagent invoke_agent should have output messages (the poem) + var subOutputMessages = sub.GetTagItem("gen_ai.output.messages")?.ToString(); + Assert.NotNull(subOutputMessages); + Assert.Contains("\"role\":\"assistant\"", subOutputMessages); + }); + + // ===================================================================== + // 3. 
Main agent chat spans (children of root invoke_agent) + // ===================================================================== + var mainChatActivities = activities.Where(a => + a.GetTagItem("gen_ai.operation.name")?.ToString() == "chat" && + a.ParentSpanId == mainInvoke.SpanId).ToList(); + Assert.NotEmpty(mainChatActivities); + + Assert.All(mainChatActivities, chat => + { + Assert.Equal(ActivityKind.Client, chat.Kind); + Assert.Equal(mainInvoke.TraceId, chat.TraceId); + Assert.Equal("chat", chat.GetTagItem("gen_ai.operation.name")?.ToString()); + Assert.NotNull(chat.GetTagItem("gen_ai.provider.name")); + }); + + // At least one main chat span should have response model/id + var mainChatWithModel = mainChatActivities.FirstOrDefault(a => + a.GetTagItem("gen_ai.response.model") is not null); + if (mainChatWithModel is not null) + { + Assert.IsType(mainChatWithModel.GetTagItem("gen_ai.response.model")); + Assert.IsType(mainChatWithModel.GetTagItem("gen_ai.response.id")); + } + + // Main chat output messages should contain tool_call for "task" + var mainChatOutputs = string.Join(" ", mainChatActivities + .Select(a => a.GetTagItem("gen_ai.output.messages")?.ToString() ?? "")); + Assert.Contains("\"type\":\"tool_call\"", mainChatOutputs); + Assert.Contains("get_topic", mainChatOutputs); + + // Main chat input messages should contain user prompt and tool responses + var mainChatInputs = string.Join(" ", mainChatActivities + .Select(a => a.GetTagItem("gen_ai.input.messages")?.ToString() ?? "")); + Assert.Contains("\"role\":\"user\"", mainChatInputs); + Assert.Contains("\"role\":\"tool\"", mainChatInputs); + Assert.Contains("\"type\":\"tool_call_response\"", mainChatInputs); + + // ===================================================================== + // 4. 
Subagent chat spans (each parented to its subagent invoke_agent) + // ===================================================================== + foreach (var subInvoke in subagentInvokeActivities) + { + var subChatActivities = activities.Where(a => + a.GetTagItem("gen_ai.operation.name")?.ToString() == "chat" && + a.ParentSpanId == subInvoke.SpanId).ToList(); + Assert.NotEmpty(subChatActivities); + + Assert.All(subChatActivities, chat => + { + Assert.Equal(ActivityKind.Client, chat.Kind); + Assert.Equal(subInvoke.TraceId, chat.TraceId); + Assert.Equal("chat", chat.GetTagItem("gen_ai.operation.name")?.ToString()); + Assert.NotNull(chat.GetTagItem("gen_ai.provider.name")); + }); + + // Subagent chat should have response model and response id + var subChat = subChatActivities[0]; + if (subChat.GetTagItem("gen_ai.response.model") is not null) + { + Assert.IsType(subChat.GetTagItem("gen_ai.response.model")); + } + if (subChat.GetTagItem("gen_ai.response.id") is not null) + { + Assert.IsType(subChat.GetTagItem("gen_ai.response.id")); + } + + // Subagent chat should have token counts + if (subChat.GetTagItem("gen_ai.usage.input_tokens") is int subInputTokens) + { + Assert.True(subInputTokens > 0, "Subagent input tokens should be > 0"); + } + if (subChat.GetTagItem("gen_ai.usage.output_tokens") is int subOutputTokens) + { + Assert.True(subOutputTokens > 0, "Subagent output tokens should be > 0"); + } + + // Subagent chat output messages should contain the poem + var subChatOutput = subChat.GetTagItem("gen_ai.output.messages")?.ToString(); + Assert.NotNull(subChatOutput); + Assert.Contains("\"role\":\"assistant\"", subChatOutput); + } + + // ===================================================================== + // 5. 
execute_tool spans for get_topic + // ===================================================================== + var getTopicTools = activities.Where(a => + a.GetTagItem("gen_ai.operation.name")?.ToString() == "execute_tool" && + a.GetTagItem("gen_ai.tool.name")?.ToString() == "get_topic").ToList(); + Assert.True(getTopicTools.Count >= 2, + $"Expected ≥2 get_topic execute_tool spans, got {getTopicTools.Count}"); + + Assert.All(getTopicTools, tool => + { + Assert.Equal(ActivityKind.Internal, tool.Kind); + Assert.Equal(mainInvoke.TraceId, tool.TraceId); + Assert.Equal(mainInvoke.SpanId, tool.ParentSpanId); + Assert.Equal("function", tool.GetTagItem("gen_ai.tool.type")?.ToString()); + Assert.NotNull(tool.GetTagItem("gen_ai.tool.call.id")); + + // Sensitive data enabled — tool result should be present + var result = tool.GetTagItem("gen_ai.tool.call.result")?.ToString(); + Assert.NotNull(result); + Assert.True(result!.Contains("mountains") || result.Contains("rivers"), + $"Expected tool result containing 'mountains' or 'rivers', got '{result}'"); + }); + + // ===================================================================== + // 6. 
Token isolation: main agent tokens are not inflated by subagent tokens + // ===================================================================== + var allChatActivities = activities.Where(a => + a.GetTagItem("gen_ai.operation.name")?.ToString() == "chat").ToList(); + var mainTokenSum = mainChatActivities + .Select(a => a.GetTagItem("gen_ai.usage.input_tokens")).OfType().Sum(); + var subTokenSum = subagentInvokeActivities + .SelectMany(sub => activities.Where(a => + a.GetTagItem("gen_ai.operation.name")?.ToString() == "chat" && + a.ParentSpanId == sub.SpanId)) + .Select(a => a.GetTagItem("gen_ai.usage.input_tokens")).OfType().Sum(); + var totalTokenSum = allChatActivities + .Select(a => a.GetTagItem("gen_ai.usage.input_tokens")).OfType().Sum(); + + if (mainTokenSum > 0 && subTokenSum > 0) + { + // All tokens should be accounted for without duplication + Assert.Equal(mainTokenSum + subTokenSum, totalTokenSum); + } + + // ===================================================================== + // 7. 
Duration metrics — with model tags + // ===================================================================== + Assert.NotEmpty(durationRecordings); + + // All duration metrics should have provider.name + Assert.All(durationRecordings, r => + { + Assert.Contains(r.Tags, t => t.Key == "gen_ai.provider.name" && t.Value is not null); + }); + + // invoke_agent durations + var invokeAgentDurations = durationRecordings.Where(r => + r.Tags.Any(t => t.Key == "gen_ai.operation.name" && t.Value?.ToString() == "invoke_agent")).ToList(); + Assert.True(invokeAgentDurations.Count >= 3, + $"Expected ≥3 invoke_agent duration recordings (1 root + ≥2 subagents), got {invokeAgentDurations.Count}"); + Assert.All(invokeAgentDurations, r => Assert.True(r.Value > 0)); + + // chat durations + var chatDurations = durationRecordings.Where(r => + r.Tags.Any(t => t.Key == "gen_ai.operation.name" && t.Value?.ToString() == "chat")).ToList(); + Assert.NotEmpty(chatDurations); + Assert.All(chatDurations, r => Assert.True(r.Value > 0)); + + // execute_tool durations + Assert.Contains(durationRecordings, r => + r.Tags.Any(t => t.Key == "gen_ai.operation.name" && t.Value?.ToString() == "execute_tool") && + r.Value > 0); + + // Duration metrics with response.model should carry the model used + var durationsWithResponseModel = durationRecordings.Where(r => + r.Tags.Any(t => t.Key == "gen_ai.response.model" && t.Value is not null)).ToList(); + if (durationsWithResponseModel.Count > 0) + { + Assert.All(durationsWithResponseModel, r => + { + var model = r.Tags.First(t => t.Key == "gen_ai.response.model").Value?.ToString(); + Assert.False(string.IsNullOrWhiteSpace(model), "gen_ai.response.model should not be blank"); + }); + } + + // ===================================================================== + // 8. 
Token usage metrics — with model tags + // ===================================================================== + if (tokenRecordings.Count > 0) + { + Assert.All(tokenRecordings, r => + { + // Token type must be input or output + var tokenType = r.Tags.FirstOrDefault(t => t.Key == "gen_ai.token.type").Value?.ToString(); + Assert.True(tokenType is "input" or "output", + $"Expected token type 'input' or 'output', got '{tokenType}'"); + + // Must be tagged with operation.name = chat + Assert.Contains(r.Tags, t => t.Key == "gen_ai.operation.name" && t.Value?.ToString() == "chat"); + + // Must carry provider.name + Assert.Contains(r.Tags, t => t.Key == "gen_ai.provider.name" && t.Value is not null); + + // Must carry gen_ai.response.model (the model that produced the tokens) + var responseModel = r.Tags.FirstOrDefault(t => t.Key == "gen_ai.response.model").Value?.ToString(); + Assert.False(string.IsNullOrWhiteSpace(responseModel), + "Token metric should carry gen_ai.response.model"); + }); + + // Token recordings from subagent chat spans should exist and have their + // own model attribution (not the main agent's model attribution) + var subagentTokenRecordings = tokenRecordings.Where(r => + r.Tags.Any(t => t.Key == "gen_ai.response.model" && t.Value is not null)).ToList(); + Assert.NotEmpty(subagentTokenRecordings); + } + } + + // -- Helpers -- + + [Description("Encrypts a string")] + static string EncryptString([Description("String to encrypt")] string input) + => input.ToUpperInvariant(); + + private static ActivityListener CreateActivityListener(string sourceName, List activities) + { + var listener = new ActivityListener + { + ShouldListenTo = source => source.Name == sourceName, + Sample = (ref ActivityCreationOptions _) => ActivitySamplingResult.AllDataAndRecorded, + ActivityStopped = activity => activities.Add(activity), + }; + ActivitySource.AddActivityListener(listener); + return listener; + } + + private static MeterListener CreateMeterListener( + string 
meterName, + List<(int, KeyValuePair[])> tokenRecordings, + List<(double, KeyValuePair[])> durationRecordings) + { + var listener = new MeterListener(); + listener.InstrumentPublished = (instrument, mListener) => + { + if (instrument.Meter.Name == meterName) + { + mListener.EnableMeasurementEvents(instrument); + } + }; + listener.SetMeasurementEventCallback((instrument, measurement, tags, _) => + { + if (instrument.Name == "gen_ai.client.token.usage") + { + tokenRecordings.Add((measurement, tags.ToArray())); + } + }); + listener.SetMeasurementEventCallback((instrument, measurement, tags, _) => + { + if (instrument.Name == "gen_ai.client.operation.duration") + { + durationRecordings.Add((measurement, tags.ToArray())); + } + }); + listener.Start(); + return listener; + } +} diff --git a/go/README.md b/go/README.md index b010fc211..90fed1aac 100644 --- a/go/README.md +++ b/go/README.md @@ -141,6 +141,7 @@ Event types: `SessionLifecycleCreated`, `SessionLifecycleDeleted`, `SessionLifec - `Env` ([]string): Environment variables for CLI process (default: inherits from current process) - `GitHubToken` (string): GitHub token for authentication. When provided, takes priority over other auth methods. - `UseLoggedInUser` (\*bool): Whether to use logged-in user for authentication (default: true, but false when `GitHubToken` is provided). Cannot be used with `CLIUrl`. +- `Telemetry` (\*TelemetryConfig): OpenTelemetry instrumentation configuration. When provided, enables automatic tracing and metrics following [GenAI semantic conventions](../docs/opentelemetry-instrumentation.md). **SessionConfig:** @@ -152,6 +153,8 @@ Event types: `SessionLifecycleCreated`, `SessionLifecycleDeleted`, `SessionLifec - `Provider` (\*ProviderConfig): Custom API provider configuration (BYOK). See [Custom Providers](#custom-providers) section. 
- `Streaming` (bool): Enable streaming delta events - `InfiniteSessions` (\*InfiniteSessionConfig): Automatic context compaction configuration +- `AgentName` (string): Agent name for telemetry attribution. When set, the `invoke_agent` span includes a `gen_ai.agent.name` attribute. +- `AgentDescription` (string): Agent description for telemetry attribution. When set, the `invoke_agent` span includes a `gen_ai.agent.description` attribute. - `OnUserInputRequest` (UserInputHandler): Handler for user input requests from the agent (enables ask_user tool). See [User Input Requests](#user-input-requests) section. - `Hooks` (\*SessionHooks): Hook handlers for session lifecycle events. See [Session Hooks](#session-hooks) section. diff --git a/go/client.go b/go/client.go index c88a68ac3..09832a6bd 100644 --- a/go/client.go +++ b/go/client.go @@ -44,6 +44,8 @@ import ( "sync/atomic" "time" + "go.opentelemetry.io/otel/trace" + "github.com/github/copilot-sdk/go/internal/embeddedcli" "github.com/github/copilot-sdk/go/internal/jsonrpc2" "github.com/github/copilot-sdk/go/rpc" @@ -91,6 +93,7 @@ type Client struct { processDone chan struct{} processErrorPtr *error osProcess atomic.Pointer[os.Process] + telemetry *copilotTelemetry // RPC provides typed server-scoped RPC methods. // This field is nil until the client is connected via Start(). 
@@ -187,6 +190,9 @@ func NewClient(options *ClientOptions) *Client { if options.UseLoggedInUser != nil { opts.UseLoggedInUser = options.UseLoggedInUser } + if options.Telemetry != nil { + client.telemetry = newCopilotTelemetry(options.Telemetry) + } } // Default Env to current environment if not set @@ -528,6 +534,8 @@ func (c *Client) CreateSession(ctx context.Context, config *SessionConfig) (*Ses } session := newSession(response.SessionID, c.client, response.WorkspacePath) + session.telemetry = c.telemetry + session.configureTelemetryContext(config.Model, config.Provider, config.SystemMessage, config.Tools, config.Streaming, config.AgentName, config.AgentDescription) session.registerTools(config.Tools) session.registerPermissionHandler(config.OnPermissionRequest) @@ -627,6 +635,9 @@ func (c *Client) ResumeSessionWithOptions(ctx context.Context, sessionID string, } session := newSession(response.SessionID, c.client, response.WorkspacePath) + session.telemetry = c.telemetry + session.configureTelemetryContext(config.Model, config.Provider, config.SystemMessage, config.Tools, config.Streaming, config.AgentName, config.AgentDescription) + session.registerTools(config.Tools) session.registerPermissionHandler(config.OnPermissionRequest) if config.OnUserInputRequest != nil { @@ -1290,26 +1301,78 @@ func (c *Client) handleToolCallRequest(req toolCallRequest) (*toolCallResponse, return &toolCallResponse{Result: buildUnsupportedToolResult(req.ToolName)}, nil } - result := c.executeToolCall(req.SessionID, req.ToolCallID, req.ToolName, req.Arguments, handler) + toolDescription := session.getToolDescription(req.ToolName) + + result := c.executeToolCall(session, req.ToolCallID, req.ToolName, toolDescription, req.Arguments, handler) return &toolCallResponse{Result: result}, nil } // executeToolCall executes a tool handler and returns the result. 
func (c *Client) executeToolCall( - sessionID, toolCallID, toolName string, + session *Session, + toolCallID, toolName, toolDescription string, arguments any, handler ToolHandler, ) (result ToolResult) { invocation := ToolInvocation{ - SessionID: sessionID, + SessionID: session.SessionID, ToolCallID: toolCallID, ToolName: toolName, Arguments: arguments, } + var span trace.Span + var spanCtx context.Context + var startTime time.Time + var operationError error + + if c.telemetry != nil { + toolSpanCtx := context.Background() + if session.turnTracker != nil { + if tCtx := session.getToolCallParentContext(toolCallID); tCtx != nil { + toolSpanCtx = tCtx + } + } + spanCtx, span = c.telemetry.startExecuteToolSpan( + toolSpanCtx, toolName, toolCallID, toolDescription, arguments) + startTime = time.Now() + } + defer func() { if r := recover(); r != nil { result = buildFailedToolResult(fmt.Sprintf("tool panic: %v", r)) + if span != nil { + panicErr := &toolPanicError{Value: r} + recordSpanError(span, panicErr) + operationError = panicErr + } + } + if span != nil { + span.End() + } + if c.telemetry != nil && !startTime.IsZero() { + providerName := otelDefaultProviderName + var serverAddress string + var serverPort int + if session.turnTracker != nil { + providerName = session.turnTracker.getProviderName() + serverAddress = session.turnTracker.getServerAddress() + serverPort = session.turnTracker.getServerPort() + } + ctx := spanCtx + if ctx == nil { + ctx = context.Background() + } + c.telemetry.recordOperationDuration( + ctx, + time.Since(startTime).Seconds(), + "", "", + providerName, + serverAddress, + serverPort, + operationError, + otelExecuteTool, + ) } }() @@ -1317,7 +1380,13 @@ func (c *Client) executeToolCall( var err error result, err = handler(invocation) if err != nil { + operationError = err + if span != nil { + recordSpanError(span, err) + } result = buildFailedToolResult(err.Error()) + } else if c.telemetry != nil && span != nil { + 
c.telemetry.setExecuteToolResult(span, result) } } diff --git a/go/copilot_telemetry.go b/go/copilot_telemetry.go new file mode 100644 index 000000000..96c282b3e --- /dev/null +++ b/go/copilot_telemetry.go @@ -0,0 +1,1840 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. + *--------------------------------------------------------------------------------------------*/ + +package copilot + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/url" + "os" + "runtime/debug" + "strconv" + "strings" + "sync" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" +) + +// sdkVersion returns the module version from build info, or empty string. +func sdkVersion() string { + bi, ok := debug.ReadBuildInfo() + if !ok { + return "" + } + return bi.Main.Version +} + +// TelemetryConfig configures OpenTelemetry instrumentation for the Copilot SDK. +type TelemetryConfig struct { + // EnableSensitiveData controls whether potentially sensitive data + // (message content, tool arguments/results) is included in telemetry. + // When nil, reads OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT + // env var. Defaults to false. + EnableSensitiveData *bool + + // SourceName is the name used for the Tracer and Meter. + // Defaults to "github.copilot.sdk". + SourceName string +} + +// copilotTelemetry provides OpenTelemetry instrumentation for the Copilot SDK, +// implementing the Semantic Conventions for Generative AI systems. 
+type copilotTelemetry struct { + tracer trace.Tracer + meter metric.Meter + + operationDurationHistogram metric.Float64Histogram + tokenUsageHistogram metric.Int64Histogram + timeToFirstChunkHistogram metric.Float64Histogram + timePerOutputChunkHistogram metric.Float64Histogram + + enableSensitiveData bool +} + +func newCopilotTelemetry(config *TelemetryConfig) *copilotTelemetry { + sourceName := otelDefaultSourceName + if config != nil && config.SourceName != "" { + sourceName = config.SourceName + } + + enableSensitive := false + if config != nil && config.EnableSensitiveData != nil { + enableSensitive = *config.EnableSensitiveData + } else { + enableSensitive = strings.EqualFold(os.Getenv(otelCaptureMessageContentEnvVar), "true") + } + + version := sdkVersion() + var tracerOpts []trace.TracerOption + var meterOpts []metric.MeterOption + if version != "" { + tracerOpts = append(tracerOpts, trace.WithInstrumentationVersion(version)) + meterOpts = append(meterOpts, metric.WithInstrumentationVersion(version)) + } + + tracer := otel.GetTracerProvider().Tracer(sourceName, tracerOpts...) + m := otel.GetMeterProvider().Meter(sourceName, meterOpts...) 
+ + opDur, _ := m.Float64Histogram( + otelMetricOperationDuration, + metric.WithUnit(otelSecondsUnit), + metric.WithDescription(otelDescOperationDuration), + metric.WithExplicitBucketBoundaries(otelDurationBucketBoundaries...), + ) + + tokUsage, _ := m.Int64Histogram( + otelMetricTokenUsage, + metric.WithUnit(otelTokensUnit), + metric.WithDescription(otelDescTokenUsage), + metric.WithExplicitBucketBoundaries(otelTokenBucketBoundaries...), + ) + + ttfc, _ := m.Float64Histogram( + otelMetricTimeToFirstChunk, + metric.WithUnit(otelSecondsUnit), + metric.WithDescription(otelDescTimeToFirstChunk), + metric.WithExplicitBucketBoundaries(otelDurationBucketBoundaries...), + ) + + tpoc, _ := m.Float64Histogram( + otelMetricTimePerOutputChunk, + metric.WithUnit(otelSecondsUnit), + metric.WithDescription(otelDescTimePerOutputChunk), + metric.WithExplicitBucketBoundaries(otelDurationBucketBoundaries...), + ) + + return &copilotTelemetry{ + tracer: tracer, + meter: m, + operationDurationHistogram: opDur, + tokenUsageHistogram: tokUsage, + timeToFirstChunkHistogram: ttfc, + timePerOutputChunkHistogram: tpoc, + enableSensitiveData: enableSensitive, + } +} + +// startInvokeAgentSpan starts an invoke_agent span for a session turn. 
+func (ct *copilotTelemetry) startInvokeAgentSpan( + ctx context.Context, + sessionID string, + model string, + providerName string, + serverAddress string, + serverPort int, + agentName string, + agentDescription string, +) (context.Context, trace.Span) { + displayName := otelInvokeAgent + if agentName != "" { + displayName = otelInvokeAgent + " " + agentName + } + + attrs := []attribute.KeyValue{ + attribute.String(otelGenAIOperationName, otelInvokeAgent), + attribute.String(otelGenAIProviderName, providerName), + attribute.String(otelGenAIAgentID, sessionID), + attribute.String(otelGenAIConversationID, sessionID), + } + + if model != "" { + attrs = append(attrs, attribute.String(otelGenAIRequestModel, model)) + } + + if agentName != "" { + attrs = append(attrs, attribute.String(otelGenAIAgentName, agentName)) + } + if agentDescription != "" { + attrs = append(attrs, attribute.String(otelGenAIAgentDescription, agentDescription)) + } + + if serverAddress != "" { + attrs = append(attrs, attribute.String(otelServerAddress, serverAddress)) + if serverPort > 0 { + attrs = append(attrs, attribute.Int(otelServerPort, serverPort)) + } + } + + ctx, span := ct.tracer.Start(ctx, displayName, + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(attrs...), + ) + return ctx, span +} + +// startChatSpan starts a chat child span for an individual LLM turn. 
+func (ct *copilotTelemetry) startChatSpan( + ctx context.Context, + model string, + providerName string, + serverAddress string, + serverPort int, + conversationID string, +) (context.Context, trace.Span) { + displayName := otelChat + if model != "" { + displayName = otelChat + " " + model + } + + attrs := []attribute.KeyValue{ + attribute.String(otelGenAIOperationName, otelChat), + attribute.String(otelGenAIProviderName, providerName), + } + + if model != "" { + attrs = append(attrs, attribute.String(otelGenAIRequestModel, model)) + } + if conversationID != "" { + attrs = append(attrs, attribute.String(otelGenAIConversationID, conversationID)) + } + if serverAddress != "" { + attrs = append(attrs, attribute.String(otelServerAddress, serverAddress)) + if serverPort > 0 { + attrs = append(attrs, attribute.Int(otelServerPort, serverPort)) + } + } + + ctx, span := ct.tracer.Start(ctx, displayName, + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(attrs...), + ) + return ctx, span +} + +// startExecuteToolSpan starts an execute_tool span for a tool call. 
+func (ct *copilotTelemetry) startExecuteToolSpan( + ctx context.Context, + toolName string, + toolCallID string, + description string, + arguments any, +) (context.Context, trace.Span) { + displayName := otelExecuteTool + " " + toolName + + attrs := []attribute.KeyValue{ + attribute.String(otelGenAIOperationName, otelExecuteTool), + attribute.String(otelGenAIToolName, toolName), + attribute.String(otelGenAIToolCallID, toolCallID), + attribute.String(otelGenAIToolType, "function"), + } + + if description != "" { + attrs = append(attrs, attribute.String(otelGenAIToolDescription, description)) + } + + if ct.enableSensitiveData && arguments != nil { + attrs = append(attrs, attribute.String(otelGenAIToolCallArguments, serializeTagValue(arguments))) + } + + ctx, span := ct.tracer.Start(ctx, displayName, + trace.WithSpanKind(trace.SpanKindInternal), + trace.WithAttributes(attrs...), + ) + return ctx, span +} + +// setExecuteToolResult sets the tool result on the span if sensitive data is enabled. +func (ct *copilotTelemetry) setExecuteToolResult(span trace.Span, result any) { + if !ct.enableSensitiveData || span == nil || result == nil { + return + } + span.SetAttributes(attribute.String(otelGenAIToolCallResult, serializeTagValue(result))) +} + +// recordTokenUsageMetrics records token usage metrics at turn completion. 
+func (ct *copilotTelemetry) recordTokenUsageMetrics( + ctx context.Context, + inputTokens *int, + outputTokens *int, + requestModel string, + responseModel string, + providerName string, + serverAddress string, + serverPort int, + err error, + operationName string, +) { + baseTags := createMetricAttributes(operationName, requestModel, responseModel, providerName, serverAddress, serverPort) + if err != nil { + baseTags = append(baseTags, attribute.String(otelErrorType, errorTypeName(err))) + } + + if inputTokens != nil { + tags := make([]attribute.KeyValue, len(baseTags)+1) + copy(tags, baseTags) + tags[len(baseTags)] = attribute.String(otelGenAITokenType, otelTokenTypeInput) + ct.tokenUsageHistogram.Record(ctx, int64(*inputTokens), metric.WithAttributes(tags...)) + } + if outputTokens != nil { + tags := make([]attribute.KeyValue, len(baseTags)+1) + copy(tags, baseTags) + tags[len(baseTags)] = attribute.String(otelGenAITokenType, otelTokenTypeOutput) + ct.tokenUsageHistogram.Record(ctx, int64(*outputTokens), metric.WithAttributes(tags...)) + } +} + +// recordOperationDuration records operation duration metric. +func (ct *copilotTelemetry) recordOperationDuration( + ctx context.Context, + durationSeconds float64, + requestModel string, + responseModel string, + providerName string, + serverAddress string, + serverPort int, + err error, + operationName string, +) { + attrs := createMetricAttributes(operationName, requestModel, responseModel, providerName, serverAddress, serverPort) + if err != nil { + attrs = append(attrs, attribute.String(otelErrorType, errorTypeName(err))) + } + ct.operationDurationHistogram.Record(ctx, durationSeconds, metric.WithAttributes(attrs...)) +} + +// recordTimeToFirstChunk records time-to-first-chunk metric. 
+func (ct *copilotTelemetry) recordTimeToFirstChunk( + ctx context.Context, + durationSeconds float64, + requestModel string, + responseModel string, + providerName string, + serverAddress string, + serverPort int, +) { + attrs := createMetricAttributes(otelChat, requestModel, responseModel, providerName, serverAddress, serverPort) + ct.timeToFirstChunkHistogram.Record(ctx, durationSeconds, metric.WithAttributes(attrs...)) +} + +// recordTimePerOutputChunk records time-per-output-chunk metric. +func (ct *copilotTelemetry) recordTimePerOutputChunk( + ctx context.Context, + durationSeconds float64, + requestModel string, + responseModel string, + providerName string, + serverAddress string, + serverPort int, +) { + attrs := createMetricAttributes(otelChat, requestModel, responseModel, providerName, serverAddress, serverPort) + ct.timePerOutputChunkHistogram.Record(ctx, durationSeconds, metric.WithAttributes(attrs...)) +} + +// sessionTurnError represents an error from the session or subagent. +type sessionTurnError struct { + Message string +} + +func (e *sessionTurnError) Error() string { + return e.Message +} + +// sessionDestroyedError represents a session destroyed while a turn is in progress. +type sessionDestroyedError struct { + Message string +} + +func (e *sessionDestroyedError) Error() string { + return e.Message +} + +// toolPanicError represents a panic during tool execution. +type toolPanicError struct { + Value any +} + +func (e *toolPanicError) Error() string { + return fmt.Sprintf("tool panic: %v", e.Value) +} + +// errorTypeName returns the error type name for telemetry. +func errorTypeName(err error) string { + switch err.(type) { + case *sessionTurnError: + return "SessionTurnError" + case *sessionDestroyedError: + return "SessionDestroyedError" + case *toolPanicError: + return "ToolPanicError" + default: + return fmt.Sprintf("%T", err) + } +} + +// recordSpanError records an error on a span. 
+func recordSpanError(span trace.Span, err error) { + if span == nil || err == nil { + return + } + span.SetAttributes(attribute.String(otelErrorType, errorTypeName(err))) + span.SetStatus(codes.Error, err.Error()) +} + +// normalizeProviderName normalizes a provider type string to its OpenTelemetry semantic convention name. +func normalizeProviderName(providerType string) string { + if providerType == "" { + return otelDefaultProviderName + } + switch strings.ToLower(strings.TrimSpace(providerType)) { + case "openai": + return "openai" + case "azure": + return "azure.ai.openai" + case "anthropic": + return "anthropic" + default: + return otelDefaultProviderName + } +} + +// parseServerAddress extracts host and port from a URL. +func parseServerAddress(baseURL string) (address string, port int) { + if baseURL == "" { + return "", 0 + } + u, err := url.Parse(baseURL) + if err != nil || u.Host == "" { + return "", 0 + } + host := u.Hostname() + if host == "" { + return "", 0 + } + portStr := u.Port() + if portStr != "" { + if p, err := strconv.Atoi(portStr); err == nil && p > 0 { + return host, p + } + } + switch strings.ToLower(u.Scheme) { + case "https": + return host, 443 + case "http": + return host, 80 + } + return host, 0 +} + +// createMetricAttributes builds the common metric attribute set. 
// createMetricAttributes builds the common GenAI metric attribute set
// (operation name, provider, models, and server address/port).
// Optional attributes are omitted when their values are empty/zero.
func createMetricAttributes(
	operationName string,
	requestModel string,
	responseModel string,
	providerName string,
	serverAddress string,
	serverPort int,
) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		attribute.String(otelGenAIOperationName, operationName),
		attribute.String(otelGenAIProviderName, providerName),
	}
	if requestModel != "" {
		attrs = append(attrs, attribute.String(otelGenAIRequestModel, requestModel))
	}
	if responseModel != "" {
		attrs = append(attrs, attribute.String(otelGenAIResponseModel, responseModel))
	}
	if serverAddress != "" {
		attrs = append(attrs, attribute.String(otelServerAddress, serverAddress))
		// Port is only meaningful alongside an address.
		if serverPort > 0 {
			attrs = append(attrs, attribute.Int(otelServerPort, serverPort))
		}
	}
	return attrs
}

// serializeTagValue renders an arbitrary value as a string for use in a
// telemetry attribute: strings pass through, raw JSON is used verbatim,
// and everything else is JSON-marshaled (falling back to fmt formatting
// if marshaling fails).
func serializeTagValue(value any) string {
	switch v := value.(type) {
	case string:
		return v
	case json.RawMessage:
		return string(v)
	default:
		b, err := json.Marshal(v)
		if err != nil {
			return fmt.Sprintf("%v", v)
		}
		return string(b)
	}
}

// ============================================================================
// OTel message types for parts-based JSON
// ============================================================================

// otelPart is one part of a GenAI message (text, reasoning, tool call,
// or tool response) in the parts-based JSON representation.
type otelPart struct {
	Type          string `json:"type"`
	Content       string `json:"content,omitempty"`
	ID            string `json:"id,omitempty"`
	Name          string `json:"name,omitempty"`
	Arguments     any    `json:"arguments,omitempty"`
	Response      any    `json:"response,omitempty"`
	MCPServerName string `json:"-"` // not serialized directly; used for server_tool_call nesting
}

// otelMsg is one GenAI message; serialization of Role/Parts/FinishReason
// is handled by custom JSON building rather than struct tags.
type otelMsg struct {
	Role         string     `json:"-"`
	Parts        []otelPart `json:"-"`
	FinishReason string     `json:"-"`
}

// ============================================================================
// Subagent state
// ============================================================================

// subagentState holds the spans and accumulated usage for one in-flight
// subagent invocation, keyed by its parent tool-call ID.
type subagentState struct {
	invokeAgentSpan     trace.Span
	spanCtx             context.Context
	startTime           time.Time
	chatSpan            trace.Span
	agentName           string
	responseModel       string
	responseID          string
	inputTokens         int
	outputTokens        int
	cacheReadTokens     int
	cacheCreationTokens int
	inputMessages       []otelMsg
	outputMessages      []otelMsg
}

// ============================================================================
// agentTurnTracker — two-level span hierarchy: invoke_agent → chat
// ============================================================================

// agentTurnTracker maintains the telemetry state for one session: a root
// invoke_agent span per Send, chat child spans per LLM turn, and nested
// span pairs per subagent. All mutable state is guarded by mu.
type agentTurnTracker struct {
	telemetry *copilotTelemetry
	sessionID string
	mu        sync.Mutex

	// Context fields set once at construction
	requestModel           string
	agentName              string
	agentDescription       string
	providerName           string
	serverAddress          string
	serverPort             int
	systemInstructionsJSON string
	toolDefinitionsJSON    string
	isStreaming            bool

	// Per-invoke_agent mutable state
	agentSpan       trace.Span
	agentSpanCtx    context.Context
	agentStartTime  time.Time
	agentInputMsgs  []otelMsg
	agentOutputMsgs []otelMsg
	activeSubagents map[string]*subagentState

	// Pending tool call parent contexts
	pendingToolParents map[string]context.Context

	// MCP server tool tracking: toolCallID → mcpServerName
	serverToolCallIDs map[string]string

	// Agent-level accumulated usage across all chat turns
	agentResponseModel            string
	agentResponseID               string
	agentTotalInputTokens         int
	agentTotalOutputTokens        int
	agentTotalCacheReadTokens     int
	agentTotalCacheCreationTokens int
	agentTotalCost                float64
	agentTotalAIU                 float64

	// Per-chat-turn mutable state (reset on each assistant.turn_start)
	turnSpan                 trace.Span
	turnStartTime            time.Time
	firstOutputChunkRecorded bool
	lastOutputChunkTime      time.Time
	turnResponseModel        string
	turnResponseID           string
	turnInputTokens          int
	turnOutputTokens         int
	turnCacheReadTokens      int
	turnCacheCreationTokens  int
	turnInputMsgs            []otelMsg
	turnOutputMsgs           []otelMsg

	// Copilot-specific per-turn attributes
	turnCost           *float64
	turnServerDuration *float64
	turnInitiator      *string
	turnAIU            *float64
	turnID             *string
	turnInteractionID  *string

	// Stashed user prompt for the first chat turn
	pendingUserPrompt string
}

// newAgentTurnTracker creates a tracker for one session. Provider type and
// base URL (when supplied) are normalized once here into the OTel provider
// name and server address/port used on every span and metric.
func newAgentTurnTracker(telemetry *copilotTelemetry, sessionID string, model string, provider *ProviderConfig, systemMessage *SystemMessageConfig, tools []Tool, streaming bool, agentName string, agentDescription string) *agentTurnTracker {
	providerName := otelDefaultProviderName
	var serverAddress string
	var serverPort int
	if provider != nil {
		providerName = normalizeProviderName(provider.Type)
		serverAddress, serverPort = parseServerAddress(provider.BaseURL)
	}
	return &agentTurnTracker{
		telemetry:              telemetry,
		sessionID:              sessionID,
		requestModel:           model,
		providerName:           providerName,
		serverAddress:          serverAddress,
		serverPort:             serverPort,
		agentName:              agentName,
		agentDescription:       agentDescription,
		systemInstructionsJSON: buildSystemInstructionsJSON(systemMessage),
		toolDefinitionsJSON:    buildToolDefinitionsJSON(tools),
		isStreaming:            streaming,
	}
}

// getProviderName returns the provider name.
func (t *agentTurnTracker) getProviderName() string {
	return t.providerName
}

// getServerAddress returns the server address.
func (t *agentTurnTracker) getServerAddress() string {
	return t.serverAddress
}

// getServerPort returns the server port.
func (t *agentTurnTracker) getServerPort() int {
	return t.serverPort
}

// getToolCallParentContext gets the parent context for a tool call span.
// For subagent tool calls this returns the subagent's invoke_agent context;
// for main agent tool calls this returns the root invoke_agent context.
func (t *agentTurnTracker) getToolCallParentContext(toolCallID string) context.Context {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.pendingToolParents != nil {
		// Each pending parent is consumed exactly once.
		if ctx, ok := t.pendingToolParents[toolCallID]; ok {
			delete(t.pendingToolParents, toolCallID)
			return ctx
		}
	}
	return t.agentSpanCtx
}

// completeOnDispose closes any active spans with an error status.
func (t *agentTurnTracker) completeOnDispose() {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.agentSpan != nil {
		disposeErr := &sessionDestroyedError{Message: "Session destroyed while agent turn was in progress"}
		// Close the inner chat span before the outer invoke_agent span.
		t.completeChatTurnLocked(disposeErr)
		t.completeAgentTurnLocked(disposeErr)
	}
}

// beginSend is called at the start of Send() to start a span and record the user message.
// The invoke_agent span is started lazily on the first Send and reused for
// follow-up prompts while the agent turn is still open.
func (t *agentTurnTracker) beginSend(ctx context.Context, prompt string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.agentSpan == nil {
		spanCtx, span := t.telemetry.startInvokeAgentSpan(
			ctx,
			t.sessionID,
			t.requestModel,
			t.providerName,
			t.serverAddress,
			t.serverPort,
			t.agentName,
			t.agentDescription,
		)
		t.agentSpan = span
		t.agentSpanCtx = spanCtx
		t.agentStartTime = time.Now()
		t.agentInputMsgs = nil
		t.agentOutputMsgs = nil
	}

	// Agent-level input = what the caller sent (all user prompts).
	if prompt != "" {
		t.agentInputMsgs = append(t.agentInputMsgs, otelMsg{
			Role:  "user",
			Parts: []otelPart{{Type: "text", Content: prompt}},
		})
	}

	// Stash user prompt for the first chat turn's input messages.
	t.pendingUserPrompt = prompt
}

// processEvent handles telemetry enrichment for dispatched events.
// Routing order: subagent-owned events first, then subagent lifecycle,
// then main-agent per-turn enrichment, span events, and finally the
// turn/agent lifecycle transitions.
func (t *agentTurnTracker) processEvent(event SessionEvent) {
	t.mu.Lock()
	defer t.mu.Unlock()

	// Route subagent events by parentToolCallId.
	parentToolCallID := getParentToolCallID(event)
	if parentToolCallID != "" {
		if t.activeSubagents != nil {
			if sub, ok := t.activeSubagents[parentToolCallID]; ok {
				t.processSubagentEventLocked(sub, event)
			}
		}
		// Events for unknown subagents are dropped.
		return
	}

	// Handle subagent lifecycle events.
	switch event.Type {
	case SubagentStarted:
		t.beginSubagentLocked(event)
		return
	case SubagentCompleted:
		if event.Data.ToolCallID != nil {
			t.completeSubagentLocked(*event.Data.ToolCallID, nil)
		}
		return
	case SubagentFailed:
		if event.Data.ToolCallID != nil {
			agentName := ""
			if event.Data.AgentName != nil {
				agentName = *event.Data.AgentName
			}
			errMsg := ""
			if event.Data.Error != nil {
				if event.Data.Error.String != nil {
					errMsg = *event.Data.Error.String
				} else if event.Data.Error.ErrorClass != nil {
					errMsg = event.Data.Error.ErrorClass.Message
				}
			}
			t.completeSubagentLocked(*event.Data.ToolCallID,
				&sessionTurnError{Message: fmt.Sprintf("Subagent '%s' failed: %s", agentName, errMsg)})
		}
		return
	}

	// Record chunk timing for main agent events during a turn.
	t.recordOutputChunkMetricLocked()

	// Per-turn event processing (writes to the chat child span).
	if t.turnSpan != nil {
		switch event.Type {
		case AssistantMessage:
			parts := []otelPart{}
			if event.Data.ReasoningText != nil && *event.Data.ReasoningText != "" {
				parts = append(parts, otelPart{Type: "reasoning", Content: *event.Data.ReasoningText})
			}
			if event.Data.Content != nil && *event.Data.Content != "" {
				parts = append(parts, otelPart{Type: "text", Content: *event.Data.Content})
			}
			if len(parts) > 0 {
				t.turnOutputMsgs = append(t.turnOutputMsgs, otelMsg{Role: "assistant", Parts: parts})
			}

		case AssistantUsage:
			if event.Data.Model != nil {
				t.turnResponseModel = *event.Data.Model
			}
			// APICallID is preferred over ProviderCallID as the response ID.
			if event.Data.APICallID != nil && *event.Data.APICallID != "" {
				t.turnResponseID = *event.Data.APICallID
			} else if event.Data.ProviderCallID != nil && *event.Data.ProviderCallID != "" {
				t.turnResponseID = *event.Data.ProviderCallID
			}
			// Token counts accumulate across multiple usage events in a turn.
			if event.Data.InputTokens != nil {
				t.turnInputTokens += int(*event.Data.InputTokens)
			}
			if event.Data.OutputTokens != nil {
				t.turnOutputTokens += int(*event.Data.OutputTokens)
			}
			if event.Data.CacheReadTokens != nil {
				t.turnCacheReadTokens += int(*event.Data.CacheReadTokens)
			}
			if event.Data.CacheWriteTokens != nil {
				t.turnCacheCreationTokens += int(*event.Data.CacheWriteTokens)
			}

			// Copilot-specific vendor attributes
			if event.Data.Cost != nil {
				v := ptrFloat64OrZero(t.turnCost) + *event.Data.Cost
				t.turnCost = &v
			}
			if event.Data.Duration != nil {
				v := ptrFloat64OrZero(t.turnServerDuration) + *event.Data.Duration
				t.turnServerDuration = &v
			}
			if event.Data.Initiator != nil {
				t.turnInitiator = event.Data.Initiator
			}
			if event.Data.CopilotUsage != nil {
				v := ptrFloat64OrZero(t.turnAIU) + event.Data.CopilotUsage.TotalNanoAiu
				t.turnAIU = &v
			}

		case SessionModelChange:
			if event.Data.NewModel != nil {
				t.turnResponseModel = *event.Data.NewModel
			}

		case ToolExecutionStart:
			isServerTool := event.Data.MCPServerName != nil && *event.Data.MCPServerName != ""
			if isServerTool && event.Data.ToolCallID != nil {
				if t.serverToolCallIDs == nil {
					t.serverToolCallIDs = make(map[string]string)
				}
				// Remember the MCP server so the matching completion event
				// can be classified as a server tool response.
				t.serverToolCallIDs[*event.Data.ToolCallID] = *event.Data.MCPServerName
			}

			partType := "tool_call"
			if isServerTool {
				partType = "server_tool_call"
			}
			p := otelPart{Type: partType}
			if event.Data.ToolCallID != nil {
				p.ID = *event.Data.ToolCallID
			}
			if event.Data.ToolName != nil {
				p.Name = *event.Data.ToolName
			}
			p.Arguments = event.Data.Arguments
			if isServerTool && event.Data.MCPServerName != nil {
				p.MCPServerName = *event.Data.MCPServerName
			}
			t.turnOutputMsgs = append(t.turnOutputMsgs, otelMsg{Role: "assistant", Parts: []otelPart{p}})

			// Store parent context for tool call span parenting.
			if t.agentSpan != nil && event.Data.ToolCallID != nil {
				if t.pendingToolParents == nil {
					t.pendingToolParents = make(map[string]context.Context)
				}
				t.pendingToolParents[*event.Data.ToolCallID] = t.agentSpanCtx
			}

		case ToolExecutionComplete:
			// The tool's result (or, failing that, its error text) becomes
			// the response content on the tool message.
			resultContent := ""
			if event.Data.Result != nil {
				resultContent = event.Data.Result.Content
			} else if event.Data.Error != nil {
				if event.Data.Error.String != nil {
					resultContent = *event.Data.Error.String
				} else if event.Data.Error.ErrorClass != nil {
					resultContent = event.Data.Error.ErrorClass.Message
				}
			}

			var serverName string
			isServerTool := false
			if event.Data.ToolCallID != nil && t.serverToolCallIDs != nil {
				if name, ok := t.serverToolCallIDs[*event.Data.ToolCallID]; ok {
					isServerTool = true
					serverName = name
					delete(t.serverToolCallIDs, *event.Data.ToolCallID)
				}
			}
			partType := "tool_call_response"
			if isServerTool {
				partType = "server_tool_call_response"
			}
			p := otelPart{Type: partType, Response: resultContent, MCPServerName: serverName}
			if event.Data.ToolCallID != nil {
				p.ID = *event.Data.ToolCallID
			}
			t.turnInputMsgs = append(t.turnInputMsgs, otelMsg{Role: "tool", Parts: []otelPart{p}})
		}
	}

	// Copilot-specific lifecycle events emitted as span events.
	// Attach to the chat span when a turn is active, else the invoke_agent span.
	target := t.turnSpan
	if target == nil {
		target = t.agentSpan
	}
	if target != nil {
		switch event.Type {
		case SessionTruncation:
			attrs := []attribute.KeyValue{}
			if event.Data.TokenLimit != nil {
				attrs = append(attrs, attribute.Int64(otelCopilotTokenLimit, int64(*event.Data.TokenLimit)))
			}
			if event.Data.PreTruncationTokensInMessages != nil {
				attrs = append(attrs, attribute.Int64(otelCopilotPreTokens, int64(*event.Data.PreTruncationTokensInMessages)))
			}
			if event.Data.PostTruncationTokensInMessages != nil {
				attrs = append(attrs, attribute.Int64(otelCopilotPostTokens, int64(*event.Data.PostTruncationTokensInMessages)))
			}
			if event.Data.PreTruncationMessagesLength != nil {
				attrs = append(attrs, attribute.Int64(otelCopilotPreMessages, int64(*event.Data.PreTruncationMessagesLength)))
			}
			if event.Data.PostTruncationMessagesLength != nil {
				attrs = append(attrs, attribute.Int64(otelCopilotPostMessages, int64(*event.Data.PostTruncationMessagesLength)))
			}
			if event.Data.TokensRemovedDuringTruncation != nil {
				attrs = append(attrs, attribute.Int64(otelCopilotTokensRemoved, int64(*event.Data.TokensRemovedDuringTruncation)))
			}
			if event.Data.MessagesRemovedDuringTruncation != nil {
				attrs = append(attrs, attribute.Int64(otelCopilotMessagesRemoved, int64(*event.Data.MessagesRemovedDuringTruncation)))
			}
			if event.Data.PerformedBy != nil {
				attrs = append(attrs, attribute.String(otelCopilotPerformedBy, *event.Data.PerformedBy))
			}
			target.AddEvent(otelCopilotSessionTruncation, trace.WithAttributes(attrs...))

		case SessionCompactionStart:
			target.AddEvent(otelCopilotSessionCompactionStart)

		case SessionCompactionComplete:
			attrs := []attribute.KeyValue{}
			if event.Data.Success != nil {
				attrs = append(attrs, attribute.Bool(otelCopilotSuccess, *event.Data.Success))
			}
			// Error text is content and is only emitted when sensitive data
			// capture is opted in.
			if t.telemetry.enableSensitiveData && event.Data.Error != nil {
				errStr := ""
				if event.Data.Error.String != nil {
					errStr = *event.Data.Error.String
				} else if event.Data.Error.ErrorClass != nil {
					errStr = event.Data.Error.ErrorClass.Message
				}
				if errStr != "" {
					attrs = append(attrs, attribute.String(otelCopilotMessage, errStr))
				}
			}
			if event.Data.PreCompactionTokens != nil {
				attrs = append(attrs, attribute.Int64(otelCopilotPreTokens, int64(*event.Data.PreCompactionTokens)))
			}
			if event.Data.PostCompactionTokens != nil {
				attrs = append(attrs, attribute.Int64(otelCopilotPostTokens, int64(*event.Data.PostCompactionTokens)))
			}
			if event.Data.TokensRemoved != nil {
				attrs = append(attrs, attribute.Int64(otelCopilotTokensRemoved, int64(*event.Data.TokensRemoved)))
			}
			if event.Data.MessagesRemoved != nil {
				attrs = append(attrs, attribute.Int64(otelCopilotMessagesRemoved, int64(*event.Data.MessagesRemoved)))
			}
			target.AddEvent(otelCopilotSessionCompactionComplete, trace.WithAttributes(attrs...))

		case SkillInvoked:
			attrs := []attribute.KeyValue{}
			if event.Data.Name != nil {
				attrs = append(attrs, attribute.String(otelCopilotSkillName, *event.Data.Name))
			}
			if event.Data.Path != nil {
				attrs = append(attrs, attribute.String(otelCopilotSkillPath, *event.Data.Path))
			}
			if event.Data.PluginName != nil {
				attrs = append(attrs, attribute.String(otelCopilotSkillPluginName, *event.Data.PluginName))
			}
			if event.Data.PluginVersion != nil {
				attrs = append(attrs, attribute.String(otelCopilotSkillPluginVersion, *event.Data.PluginVersion))
			}
			if t.telemetry.enableSensitiveData && event.Data.Content != nil {
				attrs = append(attrs, attribute.String(otelCopilotSkillContent, *event.Data.Content))
			}
			target.AddEvent(otelCopilotSkillInvoked, trace.WithAttributes(attrs...))
		}
	}

	// Lifecycle events that start/end turns or the overall agent operation.
	switch event.Type {
	case AssistantTurnStart:
		t.beginChatTurnLocked()
		if event.Data.TurnID != nil {
			t.turnID = event.Data.TurnID
		}
		if event.Data.InteractionID != nil {
			t.turnInteractionID = event.Data.InteractionID
		}

	case AssistantTurnEnd:
		t.completeChatTurnLocked(nil)

	case SessionIdle:
		// Idle ends both the current turn and the whole agent operation.
		t.completeChatTurnLocked(nil)
		t.completeAgentTurnLocked(nil)

	case SessionError:
		errMsg := "unknown error"
		if event.Data.Message != nil {
			errMsg = *event.Data.Message
		}
		ex := &sessionTurnError{Message: fmt.Sprintf("Session error: %s", errMsg)}
		t.completeChatTurnLocked(ex)
		t.completeAgentTurnLocked(ex)
	}
}

// completeTurnWithError completes the current turn with an error.
func (t *agentTurnTracker) completeTurnWithError(err error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.completeChatTurnLocked(err)
	t.completeAgentTurnLocked(err)
}

// ============================================================================
// Chat turn lifecycle
// ============================================================================

// beginChatTurnLocked starts a new chat child span for an LLM turn. Caller must hold mu.
func (t *agentTurnTracker) beginChatTurnLocked() {
	// If there's already an active turn, complete it first.
	t.completeChatTurnLocked(nil)

	// Reset all per-turn accumulators.
	t.turnResponseModel = ""
	t.turnResponseID = ""
	t.turnInputTokens = 0
	t.turnOutputTokens = 0
	t.turnCacheReadTokens = 0
	t.turnCacheCreationTokens = 0
	t.firstOutputChunkRecorded = false
	t.lastOutputChunkTime = time.Time{}
	t.turnInputMsgs = nil
	t.turnOutputMsgs = nil
	t.turnCost = nil
	t.turnServerDuration = nil
	t.turnInitiator = nil
	t.turnAIU = nil
	t.turnID = nil
	t.turnInteractionID = nil

	// Add stashed user prompt as input message for the first turn.
	if t.pendingUserPrompt != "" {
		t.turnInputMsgs = append(t.turnInputMsgs, otelMsg{
			Role:  "user",
			Parts: []otelPart{{Type: "text", Content: t.pendingUserPrompt}},
		})
		t.pendingUserPrompt = ""
	}

	parentCtx := t.agentSpanCtx
	if parentCtx == nil {
		parentCtx = context.Background()
	}
	_, span := t.telemetry.startChatSpan(
		parentCtx,
		t.requestModel,
		t.providerName,
		t.serverAddress,
		t.serverPort,
		t.sessionID,
	)
	t.turnSpan = span
	t.turnStartTime = time.Now()
}

// completeChatTurnLocked completes the current chat child span. Caller must hold mu.
// A non-nil err marks the span failed and sets finish reason "error".
func (t *agentTurnTracker) completeChatTurnLocked(err error) {
	span := t.turnSpan
	if span == nil {
		return
	}

	// Snapshot per-turn state before resetting it, so the span attributes
	// and metrics below read a consistent view.
	startTime := t.turnStartTime
	inputMessages := t.turnInputMsgs
	outputMessages := t.turnOutputMsgs
	responseModel := t.turnResponseModel
	responseID := t.turnResponseID
	inputTokens := t.turnInputTokens
	outputTokens := t.turnOutputTokens
	cacheReadTokens := t.turnCacheReadTokens
	cacheCreationTokens := t.turnCacheCreationTokens
	turnCost := t.turnCost
	turnServerDuration := t.turnServerDuration
	turnInitiator := t.turnInitiator
	turnAIU := t.turnAIU
	turnID := t.turnID
	turnInteractionID := t.turnInteractionID

	// Reset per-turn state
	t.turnSpan = nil
	t.turnStartTime = time.Time{}
	t.turnInputMsgs = nil
	t.turnOutputMsgs = nil
	t.turnResponseModel = ""
	t.turnResponseID = ""
	t.turnInputTokens = 0
	t.turnOutputTokens = 0
	t.turnCacheReadTokens = 0
	t.turnCacheCreationTokens = 0
	t.turnCost = nil
	t.turnServerDuration = nil
	t.turnInitiator = nil
	t.turnAIU = nil
	t.turnID = nil
	t.turnInteractionID = nil

	if err != nil {
		recordSpanError(span, err)
	}

	finishReason := "stop"
	if err != nil {
		finishReason = "error"
	}
	span.SetAttributes(attribute.StringSlice(otelGenAIResponseFinishReasons, []string{finishReason}))

	// Agent-level output = only the final turn's output.
	if len(outputMessages) > 0 {
		t.agentOutputMsgs = make([]otelMsg, len(outputMessages))
		for i, m := range outputMessages {
			t.agentOutputMsgs[i] = otelMsg{Role: m.Role, Parts: m.Parts, FinishReason: finishReason}
		}
	}

	// Accumulate agent-level usage across turns.
	if responseModel != "" {
		t.agentResponseModel = responseModel
	}
	if responseID != "" {
		t.agentResponseID = responseID
	}
	t.agentTotalInputTokens += inputTokens
	t.agentTotalOutputTokens += outputTokens
	t.agentTotalCacheReadTokens += cacheReadTokens
	t.agentTotalCacheCreationTokens += cacheCreationTokens
	if turnCost != nil {
		t.agentTotalCost += *turnCost
	}
	if turnAIU != nil {
		t.agentTotalAIU += *turnAIU
	}

	// Set usage-related span attributes for this LLM turn
	if responseModel != "" {
		span.SetAttributes(attribute.String(otelGenAIResponseModel, responseModel))
	}
	if responseID != "" {
		span.SetAttributes(attribute.String(otelGenAIResponseID, responseID))
	}
	if inputTokens > 0 {
		span.SetAttributes(attribute.Int(otelGenAIUsageInputTokens, inputTokens))
	}
	if outputTokens > 0 {
		span.SetAttributes(attribute.Int(otelGenAIUsageOutputTokens, outputTokens))
	}
	if cacheReadTokens > 0 {
		span.SetAttributes(attribute.Int(otelGenAIUsageCacheReadInputTokens, cacheReadTokens))
	}
	if cacheCreationTokens > 0 {
		span.SetAttributes(attribute.Int(otelGenAIUsageCacheCreationInputTokens, cacheCreationTokens))
	}

	// Copilot vendor-prefixed attributes on chat spans
	if turnCost != nil {
		span.SetAttributes(attribute.Float64(otelCopilotCost, *turnCost))
	}
	if turnServerDuration != nil {
		span.SetAttributes(attribute.Float64(otelCopilotServerDuration, *turnServerDuration))
	}
	if turnInitiator != nil {
		span.SetAttributes(attribute.String(otelCopilotInitiator, *turnInitiator))
	}
	if turnAIU != nil {
		span.SetAttributes(attribute.Float64(otelCopilotAIU, *turnAIU))
	}
	if turnID != nil {
		span.SetAttributes(attribute.String(otelCopilotTurnID, *turnID))
	}
	if turnInteractionID != nil {
		span.SetAttributes(attribute.String(otelCopilotInteractionID, *turnInteractionID))
	}

	// Set input/output message content as span attributes (sensitive)
	if t.telemetry.enableSensitiveData {
		inputMessagesJSON := buildPartsMessagesJSON(inputMessages, "")
		if inputMessagesJSON != "" {
			span.SetAttributes(attribute.String(otelGenAIInputMessages, inputMessagesJSON))
		}
		outputMessagesJSON := buildPartsMessagesJSON(outputMessages, finishReason)
		if outputMessagesJSON != "" {
			span.SetAttributes(attribute.String(otelGenAIOutputMessages, outputMessagesJSON))
		}
	}

	// Token usage metrics (per-turn)
	var inTok, outTok *int
	if inputTokens > 0 {
		inTok = &inputTokens
	}
	if outputTokens > 0 {
		outTok = &outputTokens
	}
	ctx := context.Background()
	if span != nil {
		ctx = trace.ContextWithSpan(ctx, span)
	}
	t.telemetry.recordTokenUsageMetrics(
		ctx,
		inTok,
		outTok,
		t.requestModel,
		responseModel,
		t.providerName,
		t.serverAddress,
		t.serverPort,
		err,
		otelChat,
	)

	// Per-turn operation duration
	if !startTime.IsZero() {
		durationSeconds := time.Since(startTime).Seconds()
		t.telemetry.recordOperationDuration(
			ctx,
			durationSeconds,
			t.requestModel,
			responseModel,
			t.providerName,
			t.serverAddress,
			t.serverPort,
			err,
			otelChat,
		)
	}

	t.firstOutputChunkRecorded = false
	t.lastOutputChunkTime = time.Time{}
	span.End()
}

// ============================================================================
// Agent turn lifecycle
// ============================================================================

// completeAgentTurnLocked completes the invoke_agent span and records overall operation duration.
// Caller must hold mu.
func (t *agentTurnTracker) completeAgentTurnLocked(err error) {
	span := t.agentSpan
	if span == nil {
		return
	}

	// Snapshot agent-level state before resetting it.
	startTime := t.agentStartTime
	agentInputMsgs := t.agentInputMsgs
	agentOutputMsgs := t.agentOutputMsgs

	// Reset agent state
	t.agentSpan = nil
	t.agentSpanCtx = nil
	t.agentStartTime = time.Time{}
	t.pendingUserPrompt = ""
	t.agentInputMsgs = nil
	t.agentOutputMsgs = nil

	// Complete any remaining subagents before closing the parent.
	// (Deleting the current key inside completeSubagentLocked during this
	// range is safe in Go.)
	if len(t.activeSubagents) > 0 {
		for key := range t.activeSubagents {
			t.completeSubagentLocked(key, err)
		}
	}
	t.activeSubagents = nil
	t.pendingToolParents = nil
	t.serverToolCallIDs = nil

	if err != nil {
		recordSpanError(span, err)
	}

	finishReason := "stop"
	if err != nil {
		finishReason = "error"
	}
	span.SetAttributes(attribute.StringSlice(otelGenAIResponseFinishReasons, []string{finishReason}))

	// Set accumulated usage across all chat turns on the invoke_agent span.
	agentResponseModel := t.agentResponseModel
	agentResponseID := t.agentResponseID
	agentTotalInputTokens := t.agentTotalInputTokens
	agentTotalOutputTokens := t.agentTotalOutputTokens
	agentTotalCacheReadTokens := t.agentTotalCacheReadTokens
	agentTotalCacheCreationTokens := t.agentTotalCacheCreationTokens
	agentTotalCost := t.agentTotalCost
	agentTotalAIU := t.agentTotalAIU

	t.agentResponseModel = ""
	t.agentResponseID = ""
	t.agentTotalInputTokens = 0
	t.agentTotalOutputTokens = 0
	t.agentTotalCacheReadTokens = 0
	t.agentTotalCacheCreationTokens = 0
	t.agentTotalCost = 0
	t.agentTotalAIU = 0

	if agentResponseModel != "" {
		span.SetAttributes(attribute.String(otelGenAIResponseModel, agentResponseModel))
	}
	if agentResponseID != "" {
		span.SetAttributes(attribute.String(otelGenAIResponseID, agentResponseID))
	}
	if agentTotalInputTokens > 0 {
		span.SetAttributes(attribute.Int(otelGenAIUsageInputTokens, agentTotalInputTokens))
	}
	if agentTotalOutputTokens > 0 {
		span.SetAttributes(attribute.Int(otelGenAIUsageOutputTokens, agentTotalOutputTokens))
	}
	if agentTotalCacheReadTokens > 0 {
		span.SetAttributes(attribute.Int(otelGenAIUsageCacheReadInputTokens, agentTotalCacheReadTokens))
	}
	if agentTotalCacheCreationTokens > 0 {
		span.SetAttributes(attribute.Int(otelGenAIUsageCacheCreationInputTokens, agentTotalCacheCreationTokens))
	}

	// Copilot vendor-prefixed attributes on invoke_agent span
	if agentTotalCost > 0 {
		span.SetAttributes(attribute.Float64(otelCopilotCost, agentTotalCost))
	}
	if agentTotalAIU > 0 {
		span.SetAttributes(attribute.Float64(otelCopilotAIU, agentTotalAIU))
	}

	// Agent-level input = caller's message; output = agent's final response.
	if t.telemetry.enableSensitiveData {
		inputJSON := buildPartsMessagesJSON(agentInputMsgs, "")
		if inputJSON != "" {
			span.SetAttributes(attribute.String(otelGenAIInputMessages, inputJSON))
		}
		outputJSON := buildPartsMessagesJSON(agentOutputMsgs, "")
		if outputJSON != "" {
			span.SetAttributes(attribute.String(otelGenAIOutputMessages, outputJSON))
		}
		if t.systemInstructionsJSON != "" {
			span.SetAttributes(attribute.String(otelGenAISystemInstructions, t.systemInstructionsJSON))
		}
	}
	if t.toolDefinitionsJSON != "" {
		span.SetAttributes(attribute.String(otelGenAIToolDefinitions, t.toolDefinitionsJSON))
	}

	agentCtx := context.Background()
	if span != nil { // span is always non-nil here (guarded above); kept for safety
		agentCtx = trace.ContextWithSpan(agentCtx, span)
	}
	if !startTime.IsZero() {
		durationSeconds := time.Since(startTime).Seconds()
		t.telemetry.recordOperationDuration(
			agentCtx,
			durationSeconds,
			t.requestModel,
			agentResponseModel,
			t.providerName,
			t.serverAddress,
			t.serverPort,
			err,
			otelInvokeAgent,
		)
	}

	span.End()
}

// ============================================================================
// Streaming chunk timing
// ============================================================================

// recordOutputChunkMetricLocked records streaming chunk timing metrics. Caller must hold mu.
// The first chunk of a turn feeds the time-to-first-chunk histogram; every
// subsequent chunk feeds time-per-output-chunk with the inter-chunk delta.
func (t *agentTurnTracker) recordOutputChunkMetricLocked() {
	// Only meaningful for streaming sessions with an active turn.
	if !t.isStreaming || t.turnStartTime.IsZero() {
		return
	}

	now := time.Now()
	elapsed := now.Sub(t.turnStartTime)

	if !t.firstOutputChunkRecorded {
		t.firstOutputChunkRecorded = true
		t.lastOutputChunkTime = now
		t.telemetry.recordTimeToFirstChunk(
			context.Background(),
			elapsed.Seconds(),
			t.requestModel,
			"", // response model not yet known during streaming
			t.providerName,
			t.serverAddress,
			t.serverPort,
		)
		return
	}

	delta := now.Sub(t.lastOutputChunkTime)
	t.lastOutputChunkTime = now
	t.telemetry.recordTimePerOutputChunk(
		context.Background(),
		delta.Seconds(),
		t.requestModel,
		"", // response model not yet known during streaming
		t.providerName,
		t.serverAddress,
		t.serverPort,
	)
}

// ============================================================================
// Subagent support
// ============================================================================

// beginSubagentLocked creates nested invoke_agent + chat spans for a subagent. Caller must hold mu.
+func (t *agentTurnTracker) beginSubagentLocked(event SessionEvent) { + parentCtx := t.agentSpanCtx + if parentCtx == nil { + parentCtx = context.Background() + } + + agentName := "" + if event.Data.AgentName != nil { + agentName = *event.Data.AgentName + } + agentDesc := "" + if event.Data.AgentDescription != nil { + agentDesc = *event.Data.AgentDescription + } + + invokeCtx, invokeSpan := t.telemetry.startInvokeAgentSpan( + parentCtx, + t.sessionID, + t.requestModel, + t.providerName, + t.serverAddress, + t.serverPort, + agentName, + agentDesc, + ) + + _, chatSpan := t.telemetry.startChatSpan( + invokeCtx, + t.requestModel, + t.providerName, + t.serverAddress, + t.serverPort, + t.sessionID, + ) + + state := &subagentState{ + invokeAgentSpan: invokeSpan, + spanCtx: invokeCtx, + startTime: time.Now(), + chatSpan: chatSpan, + agentName: agentName, + } + + if t.activeSubagents == nil { + t.activeSubagents = make(map[string]*subagentState) + } + if event.Data.ToolCallID != nil { + t.activeSubagents[*event.Data.ToolCallID] = state + } +} + +// processSubagentEventLocked routes an event to its owning subagent's spans. Caller must hold mu. 
func (t *agentTurnTracker) processSubagentEventLocked(sub *subagentState, event SessionEvent) {
	switch event.Type {
	case AssistantUsage:
		if event.Data.Model != nil {
			sub.responseModel = *event.Data.Model
			// The response model is set eagerly on both subagent spans.
			if sub.chatSpan != nil {
				sub.chatSpan.SetAttributes(attribute.String(otelGenAIResponseModel, *event.Data.Model))
			}
			sub.invokeAgentSpan.SetAttributes(attribute.String(otelGenAIResponseModel, *event.Data.Model))
		}
		// APICallID is preferred over ProviderCallID as the response ID.
		if event.Data.APICallID != nil && *event.Data.APICallID != "" {
			sub.responseID = *event.Data.APICallID
		} else if event.Data.ProviderCallID != nil && *event.Data.ProviderCallID != "" {
			sub.responseID = *event.Data.ProviderCallID
		}
		// Token counts accumulate across the subagent's usage events.
		if event.Data.InputTokens != nil {
			sub.inputTokens += int(*event.Data.InputTokens)
		}
		if event.Data.OutputTokens != nil {
			sub.outputTokens += int(*event.Data.OutputTokens)
		}
		if event.Data.CacheReadTokens != nil {
			sub.cacheReadTokens += int(*event.Data.CacheReadTokens)
		}
		if event.Data.CacheWriteTokens != nil {
			sub.cacheCreationTokens += int(*event.Data.CacheWriteTokens)
		}

	case AssistantMessage:
		parts := []otelPart{}
		if event.Data.ReasoningText != nil && *event.Data.ReasoningText != "" {
			parts = append(parts, otelPart{Type: "reasoning", Content: *event.Data.ReasoningText})
		}
		if event.Data.Content != nil && *event.Data.Content != "" {
			parts = append(parts, otelPart{Type: "text", Content: *event.Data.Content})
		}
		if len(parts) > 0 {
			sub.outputMessages = append(sub.outputMessages, otelMsg{Role: "assistant", Parts: parts})
		}

	case ToolExecutionStart:
		isServerTool := event.Data.MCPServerName != nil && *event.Data.MCPServerName != ""
		if isServerTool && event.Data.ToolCallID != nil {
			// Server-tool IDs are tracked on the tracker (shared with the
			// main agent path) so the completion event can be classified.
			if t.serverToolCallIDs == nil {
				t.serverToolCallIDs = make(map[string]string)
			}
			t.serverToolCallIDs[*event.Data.ToolCallID] = *event.Data.MCPServerName
		}

		partType := "tool_call"
		if isServerTool {
			partType = "server_tool_call"
		}
		p := otelPart{Type: partType}
		if event.Data.ToolCallID != nil {
			p.ID = *event.Data.ToolCallID
		}
		if event.Data.ToolName != nil {
			p.Name = *event.Data.ToolName
		}
		p.Arguments = event.Data.Arguments
		if isServerTool && event.Data.MCPServerName != nil {
			p.MCPServerName = *event.Data.MCPServerName
		}
		sub.outputMessages = append(sub.outputMessages, otelMsg{Role: "assistant", Parts: []otelPart{p}})

		// For subagent tool calls, parent is the subagent's invoke_agent.
		if event.Data.ToolCallID != nil {
			if t.pendingToolParents == nil {
				t.pendingToolParents = make(map[string]context.Context)
			}
			t.pendingToolParents[*event.Data.ToolCallID] = sub.spanCtx
		}

	case ToolExecutionComplete:
		// The tool's result (or, failing that, its error text) becomes the
		// response content on the tool message.
		resultContent := ""
		if event.Data.Result != nil {
			resultContent = event.Data.Result.Content
		} else if event.Data.Error != nil {
			if event.Data.Error.String != nil {
				resultContent = *event.Data.Error.String
			} else if event.Data.Error.ErrorClass != nil {
				resultContent = event.Data.Error.ErrorClass.Message
			}
		}

		var serverName string
		isServerTool := false
		if event.Data.ToolCallID != nil && t.serverToolCallIDs != nil {
			if name, ok := t.serverToolCallIDs[*event.Data.ToolCallID]; ok {
				isServerTool = true
				serverName = name
				delete(t.serverToolCallIDs, *event.Data.ToolCallID)
			}
		}
		partType := "tool_call_response"
		if isServerTool {
			partType = "server_tool_call_response"
		}
		p := otelPart{Type: partType, Response: resultContent, MCPServerName: serverName}
		if event.Data.ToolCallID != nil {
			p.ID = *event.Data.ToolCallID
		}
		sub.inputMessages = append(sub.inputMessages, otelMsg{Role: "tool", Parts: []otelPart{p}})
	}
}

// completeSubagentLocked completes a subagent's chat and invoke_agent spans. Caller must hold mu.
+func (t *agentTurnTracker) completeSubagentLocked(toolCallID string, err error) { + if t.activeSubagents == nil { + return + } + sub, ok := t.activeSubagents[toolCallID] + if !ok { + return + } + delete(t.activeSubagents, toolCallID) + + finishReason := "stop" + if err != nil { + finishReason = "error" + } + + // -- Complete the chat child span -- + if sub.chatSpan != nil { + if err != nil { + recordSpanError(sub.chatSpan, err) + } + sub.chatSpan.SetAttributes(attribute.StringSlice(otelGenAIResponseFinishReasons, []string{finishReason})) + + if sub.responseModel != "" { + sub.chatSpan.SetAttributes(attribute.String(otelGenAIResponseModel, sub.responseModel)) + } + if sub.responseID != "" { + sub.chatSpan.SetAttributes(attribute.String(otelGenAIResponseID, sub.responseID)) + } + if sub.inputTokens > 0 { + sub.chatSpan.SetAttributes(attribute.Int(otelGenAIUsageInputTokens, sub.inputTokens)) + } + if sub.outputTokens > 0 { + sub.chatSpan.SetAttributes(attribute.Int(otelGenAIUsageOutputTokens, sub.outputTokens)) + } + if sub.cacheReadTokens > 0 { + sub.chatSpan.SetAttributes(attribute.Int(otelGenAIUsageCacheReadInputTokens, sub.cacheReadTokens)) + } + if sub.cacheCreationTokens > 0 { + sub.chatSpan.SetAttributes(attribute.Int(otelGenAIUsageCacheCreationInputTokens, sub.cacheCreationTokens)) + } + + if t.telemetry.enableSensitiveData { + if len(sub.inputMessages) > 0 { + inputJSON := buildPartsMessagesJSON(sub.inputMessages, "") + if inputJSON != "" { + sub.chatSpan.SetAttributes(attribute.String(otelGenAIInputMessages, inputJSON)) + } + } + if len(sub.outputMessages) > 0 { + outputJSON := buildPartsMessagesJSON(sub.outputMessages, finishReason) + if outputJSON != "" { + sub.chatSpan.SetAttributes(attribute.String(otelGenAIOutputMessages, outputJSON)) + } + } + } + + var inTok, outTok *int + if sub.inputTokens > 0 { + inTok = &sub.inputTokens + } + if sub.outputTokens > 0 { + outTok = &sub.outputTokens + } + reqModel := sub.responseModel + if reqModel == "" { + 
reqModel = t.requestModel + } + subChatCtx := context.Background() + if sub.chatSpan != nil { + subChatCtx = trace.ContextWithSpan(subChatCtx, sub.chatSpan) + } + t.telemetry.recordTokenUsageMetrics( + subChatCtx, + inTok, outTok, + reqModel, sub.responseModel, + t.providerName, t.serverAddress, t.serverPort, + err, otelChat, + ) + + sub.chatSpan.End() + } + + // -- Complete the invoke_agent span -- + if err != nil { + recordSpanError(sub.invokeAgentSpan, err) + } + sub.invokeAgentSpan.SetAttributes(attribute.StringSlice(otelGenAIResponseFinishReasons, []string{finishReason})) + + if sub.responseModel != "" { + sub.invokeAgentSpan.SetAttributes(attribute.String(otelGenAIResponseModel, sub.responseModel)) + } + if sub.responseID != "" { + sub.invokeAgentSpan.SetAttributes(attribute.String(otelGenAIResponseID, sub.responseID)) + } + if sub.inputTokens > 0 { + sub.invokeAgentSpan.SetAttributes(attribute.Int(otelGenAIUsageInputTokens, sub.inputTokens)) + } + if sub.outputTokens > 0 { + sub.invokeAgentSpan.SetAttributes(attribute.Int(otelGenAIUsageOutputTokens, sub.outputTokens)) + } + if sub.cacheReadTokens > 0 { + sub.invokeAgentSpan.SetAttributes(attribute.Int(otelGenAIUsageCacheReadInputTokens, sub.cacheReadTokens)) + } + if sub.cacheCreationTokens > 0 { + sub.invokeAgentSpan.SetAttributes(attribute.Int(otelGenAIUsageCacheCreationInputTokens, sub.cacheCreationTokens)) + } + + if t.telemetry.enableSensitiveData && len(sub.outputMessages) > 0 { + msgsWithFinishReason := make([]otelMsg, len(sub.outputMessages)) + for i, m := range sub.outputMessages { + msgsWithFinishReason[i] = otelMsg{Role: m.Role, Parts: m.Parts, FinishReason: finishReason} + } + outputJSON := buildPartsMessagesJSON(msgsWithFinishReason, "") + if outputJSON != "" { + sub.invokeAgentSpan.SetAttributes(attribute.String(otelGenAIOutputMessages, outputJSON)) + } + } + + reqModel := sub.responseModel + if reqModel == "" { + reqModel = t.requestModel + } + subAgentCtx := context.Background() + if 
sub.invokeAgentSpan != nil { + subAgentCtx = trace.ContextWithSpan(subAgentCtx, sub.invokeAgentSpan) + } + durationSeconds := time.Since(sub.startTime).Seconds() + t.telemetry.recordOperationDuration( + subAgentCtx, + durationSeconds, + reqModel, sub.responseModel, + t.providerName, t.serverAddress, t.serverPort, + err, otelInvokeAgent, + ) + + sub.invokeAgentSpan.End() +} + +// ============================================================================ +// Helper functions +// ============================================================================ + +// getParentToolCallID extracts parentToolCallId from events that can route to subagents. +func getParentToolCallID(event SessionEvent) string { + switch event.Type { + case AssistantUsage, AssistantMessage, AssistantMessageDelta, + ToolExecutionStart, ToolExecutionComplete: + if event.Data.ParentToolCallID != nil { + return *event.Data.ParentToolCallID + } + } + return "" +} + +func ptrFloat64OrZero(p *float64) float64 { + if p == nil { + return 0 + } + return *p +} + +// buildPartsMessagesJSON builds the JSON array of messages for span attributes (parts-based format). 
+func buildPartsMessagesJSON(messages []otelMsg, defaultFinishReason string) string { + if len(messages) == 0 { + return "" + } + + var result []any + for _, message := range messages { + if len(message.Parts) == 0 { + continue + } + + var parts []any + for _, part := range message.Parts { + obj := map[string]any{"type": part.Type} + + switch part.Type { + case "server_tool_call": + if part.ID != "" { + obj["id"] = part.ID + } + if part.Name != "" { + obj["name"] = part.Name + } + stc := map[string]any{"type": "mcp"} + if part.MCPServerName != "" { + stc["server_name"] = part.MCPServerName + } + if part.Arguments != nil { + stc["arguments"] = part.Arguments + } + obj["server_tool_call"] = stc + + case "server_tool_call_response": + if part.ID != "" { + obj["id"] = part.ID + } + stcr := map[string]any{"type": "mcp"} + if part.MCPServerName != "" { + stcr["server_name"] = part.MCPServerName + } + if part.Response != nil { + stcr["response"] = part.Response + } + obj["server_tool_call_response"] = stcr + + default: + if part.Content != "" { + obj["content"] = part.Content + } + if part.ID != "" { + obj["id"] = part.ID + } + if part.Name != "" { + obj["name"] = part.Name + } + if part.Arguments != nil { + obj["arguments"] = part.Arguments + } + if part.Response != nil { + obj["response"] = part.Response + } + } + + parts = append(parts, obj) + } + + msg := map[string]any{"role": message.Role, "parts": parts} + effectiveFinishReason := message.FinishReason + if effectiveFinishReason == "" { + effectiveFinishReason = defaultFinishReason + } + if effectiveFinishReason != "" { + msg["finish_reason"] = effectiveFinishReason + } + result = append(result, msg) + } + + if len(result) == 0 { + return "" + } + b, _ := jsonMarshalNoHTMLEscape(result) + return string(b) +} + +// buildSystemInstructionsJSON builds JSON for system instructions. 
+func buildSystemInstructionsJSON(systemMessage *SystemMessageConfig) string { + if systemMessage == nil || systemMessage.Content == "" { + return "" + } + type item struct { + Type string `json:"type"` + Content string `json:"content"` + } + b, _ := jsonMarshalNoHTMLEscape([]item{{Type: "text", Content: systemMessage.Content}}) + return string(b) +} + +// buildToolDefinitionsJSON builds JSON for tool definitions. +func buildToolDefinitionsJSON(tools []Tool) string { + if len(tools) == 0 { + return "" + } + type toolDef struct { + Type string `json:"type"` + Name string `json:"name"` + Description string `json:"description,omitempty"` + Parameters map[string]any `json:"parameters,omitempty"` + } + var defs []toolDef + for _, t := range tools { + defs = append(defs, toolDef{ + Type: "function", + Name: t.Name, + Description: t.Description, + Parameters: t.Parameters, + }) + } + b, _ := jsonMarshalNoHTMLEscape(defs) + return string(b) +} + +// jsonMarshalNoHTMLEscape marshals v to JSON without HTML-escaping (<, >, &), +// matching the behavior of JSON.stringify (Node), json.dumps (Python), and +// JavaScriptEncoder.UnsafeRelaxedJsonEscaping (.NET). +func jsonMarshalNoHTMLEscape(v any) ([]byte, error) { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + enc.SetEscapeHTML(false) + if err := enc.Encode(v); err != nil { + return nil, err + } + b := buf.Bytes() + // Encode appends a trailing newline; trim it. + if len(b) > 0 && b[len(b)-1] == '\n' { + b = b[:len(b)-1] + } + return b, nil +} diff --git a/go/copilot_telemetry_consts.go b/go/copilot_telemetry_consts.go new file mode 100644 index 000000000..0f0704746 --- /dev/null +++ b/go/copilot_telemetry_consts.go @@ -0,0 +1,122 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. 
 *--------------------------------------------------------------------------------------------*/

package copilot

// String constants for OpenTelemetry Semantic Conventions for Generative AI systems.
// Based on the Semantic Conventions for Generative AI systems v1.40,
// defined at https://opentelemetry.io/docs/specs/semconv/gen-ai/
// The specification is still experimental and subject to change.

const (
	// Instrumentation defaults and configuration.
	otelDefaultSourceName           = "github.copilot.sdk"
	otelDefaultProviderName         = "github"
	otelCaptureMessageContentEnvVar = "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"
	otelSecondsUnit                 = "s"
	otelTokensUnit                  = "{token}"

	// Values for the gen_ai.token.type metric attribute.
	otelTokenTypeInput  = "input"
	otelTokenTypeOutput = "output"

	// Error attributes
	otelErrorType = "error.type"

	// Server attributes
	otelServerAddress = "server.address"
	otelServerPort    = "server.port"

	// GenAI operation names
	otelInvokeAgent = "invoke_agent"
	otelChat        = "chat"
	otelExecuteTool = "execute_tool"

	// GenAI attributes
	otelGenAIOperationName = "gen_ai.operation.name"
	otelGenAIProviderName  = "gen_ai.provider.name"

	otelGenAIAgentID = "gen_ai.agent.id"

	otelGenAIAgentName        = "gen_ai.agent.name"
	otelGenAIAgentDescription = "gen_ai.agent.description"

	otelGenAIConversationID = "gen_ai.conversation.id"

	otelGenAIRequestModel = "gen_ai.request.model"

	otelGenAIResponseID            = "gen_ai.response.id"
	otelGenAIResponseModel         = "gen_ai.response.model"
	otelGenAIResponseFinishReasons = "gen_ai.response.finish_reasons"

	otelGenAIUsageInputTokens              = "gen_ai.usage.input_tokens"
	otelGenAIUsageOutputTokens             = "gen_ai.usage.output_tokens"
	otelGenAIUsageCacheReadInputTokens     = "gen_ai.usage.cache_read.input_tokens"
	otelGenAIUsageCacheCreationInputTokens = "gen_ai.usage.cache_creation.input_tokens"

	otelGenAITokenType = "gen_ai.token.type"

	// Message-content attributes (only emitted when sensitive-data capture
	// is enabled).
	otelGenAIInputMessages      = "gen_ai.input.messages"
	otelGenAIOutputMessages     = "gen_ai.output.messages"
	otelGenAISystemInstructions = "gen_ai.system_instructions"

	otelGenAIToolDefinitions   = "gen_ai.tool.definitions"
	otelGenAIToolCallID        = "gen_ai.tool.call.id"
	otelGenAIToolCallArguments = "gen_ai.tool.call.arguments"
	otelGenAIToolCallResult    = "gen_ai.tool.call.result"
	otelGenAIToolName          = "gen_ai.tool.name"
	otelGenAIToolDescription   = "gen_ai.tool.description"
	otelGenAIToolType          = "gen_ai.tool.type"

	// Metric names
	otelMetricOperationDuration  = "gen_ai.client.operation.duration"
	otelMetricTokenUsage         = "gen_ai.client.token.usage"
	otelMetricTimeToFirstChunk   = "gen_ai.client.operation.time_to_first_chunk"
	otelMetricTimePerOutputChunk = "gen_ai.client.operation.time_per_output_chunk"

	// Metric descriptions
	otelDescOperationDuration  = "GenAI operation duration."
	otelDescTokenUsage         = "Number of input and output tokens used."
	otelDescTimeToFirstChunk   = "Time to receive the first chunk from a streaming response."
	otelDescTimePerOutputChunk = "Time elapsed between streamed output chunks after the first chunk."

	// Vendor-prefixed span attributes
	otelCopilotCost           = "github.copilot.cost"
	otelCopilotServerDuration = "github.copilot.server_duration"
	otelCopilotInitiator      = "github.copilot.initiator"
	otelCopilotAIU            = "github.copilot.aiu"
	otelCopilotTurnID         = "github.copilot.turn_id"
	otelCopilotInteractionID  = "github.copilot.interaction_id"

	// Vendor-prefixed span event names
	otelCopilotSessionTruncation         = "github.copilot.session.truncation"
	otelCopilotSessionCompactionStart    = "github.copilot.session.compaction_start"
	otelCopilotSessionCompactionComplete = "github.copilot.session.compaction_complete"
	otelCopilotSkillInvoked              = "github.copilot.skill.invoked"

	// Event attribute keys
	otelCopilotMessage            = "github.copilot.message"
	otelCopilotTokenLimit         = "github.copilot.token_limit"
	otelCopilotPreTokens          = "github.copilot.pre_tokens"
	otelCopilotPostTokens         = "github.copilot.post_tokens"
	otelCopilotPreMessages        = "github.copilot.pre_messages"
	otelCopilotPostMessages       = "github.copilot.post_messages"
	otelCopilotTokensRemoved      = "github.copilot.tokens_removed"
	otelCopilotMessagesRemoved    = "github.copilot.messages_removed"
	otelCopilotPerformedBy        = "github.copilot.performed_by"
	otelCopilotSuccess            = "github.copilot.success"
	otelCopilotSkillName          = "github.copilot.skill.name"
	otelCopilotSkillPath          = "github.copilot.skill.path"
	otelCopilotSkillContent       = "github.copilot.skill.content"
	otelCopilotSkillPluginName    = "github.copilot.skill.plugin_name"
	otelCopilotSkillPluginVersion = "github.copilot.skill.plugin_version"
)

// Explicit bucket boundaries for histograms, matching .NET implementation.
var (
	// Exponential (power-of-two) buckets for duration histograms, in seconds.
	otelDurationBucketBoundaries = []float64{
		0.01, 0.02, 0.04, 0.08, 0.16, 0.32, 0.64, 1.28, 2.56, 5.12, 10.24, 20.48, 40.96, 81.92,
	}
	// Power-of-four buckets for token-count histograms.
	otelTokenBucketBoundaries = []float64{
		1, 4, 16, 64, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864,
	}
)
+func setupTestTelemetry(t *testing.T, config *TelemetryConfig) ( + ct *copilotTelemetry, + spanExporter *tracetest.InMemoryExporter, + metricReader *sdkmetric.ManualReader, +) { + t.Helper() + + spanExporter = tracetest.NewInMemoryExporter() + tp := sdktrace.NewTracerProvider(sdktrace.WithSyncer(spanExporter)) + + metricReader = sdkmetric.NewManualReader() + mp := sdkmetric.NewMeterProvider(sdkmetric.WithReader(metricReader)) + + otel.SetTracerProvider(tp) + otel.SetMeterProvider(mp) + + t.Cleanup(func() { + _ = tp.Shutdown(context.Background()) + _ = mp.Shutdown(context.Background()) + }) + + ct = newCopilotTelemetry(config) + return ct, spanExporter, metricReader +} + +func collectMetrics(t *testing.T, reader *sdkmetric.ManualReader) metricdata.ResourceMetrics { + t.Helper() + var rm metricdata.ResourceMetrics + if err := reader.Collect(context.Background(), &rm); err != nil { + t.Fatalf("Failed to collect metrics: %v", err) + } + return rm +} + +func findSpanAttr(span tracetest.SpanStub, key string) *attribute.KeyValue { + for _, a := range span.Attributes { + if string(a.Key) == key { + return &a + } + } + return nil +} + +func findMetricByName(rm metricdata.ResourceMetrics, name string) *metricdata.Metrics { + for _, sm := range rm.ScopeMetrics { + for i, m := range sm.Metrics { + if m.Name == name { + return &sm.Metrics[i] + } + } + } + return nil +} + +func ptrString(s string) *string { return &s } +func ptrFloat(f float64) *float64 { return &f } +func ptrBool(b bool) *bool { return &b } + +func findEventByName(span tracetest.SpanStub, name string) *sdktrace.Event { + for i, e := range span.Events { + if e.Name == name { + return &span.Events[i] + } + } + return nil +} + +func findEventAttr(event *sdktrace.Event, key string) *attribute.KeyValue { + for _, a := range event.Attributes { + if string(a.Key) == key { + return &a + } + } + return nil +} + +// --- Tests --- + +func TestOpenTelemetry_NormalizeProviderName(t *testing.T) { + tests := []struct { + 
input string + expected string + }{ + {"openai", "openai"}, + {"OpenAI", "openai"}, + {"azure", "azure.ai.openai"}, + {"Azure", "azure.ai.openai"}, + {"anthropic", "anthropic"}, + {"Anthropic", "anthropic"}, + {"unknown", "github"}, + {"", "github"}, + {" openai ", "openai"}, + } + for _, tc := range tests { + t.Run(tc.input, func(t *testing.T) { + result := normalizeProviderName(tc.input) + if result != tc.expected { + t.Errorf("normalizeProviderName(%q) = %q, want %q", tc.input, result, tc.expected) + } + }) + } +} + +func TestOpenTelemetry_ParseServerAddress(t *testing.T) { + tests := []struct { + input string + expectedHost string + expectedPort int + }{ + {"https://api.example.com:8080/v1", "api.example.com", 8080}, + {"http://localhost:3000", "localhost", 3000}, + {"https://api.openai.com/v1", "api.openai.com", 443}, + {"http://example.com/path", "example.com", 80}, + {"", "", 0}, + {"not-a-url", "", 0}, + } + for _, tc := range tests { + t.Run(tc.input, func(t *testing.T) { + host, port := parseServerAddress(tc.input) + if host != tc.expectedHost { + t.Errorf("parseServerAddress(%q) host = %q, want %q", tc.input, host, tc.expectedHost) + } + if port != tc.expectedPort { + t.Errorf("parseServerAddress(%q) port = %d, want %d", tc.input, port, tc.expectedPort) + } + }) + } +} + +func TestOpenTelemetry_InvokeAgentSpan(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + ctx, span := ct.startInvokeAgentSpan( + context.Background(), + "session-123", + "gpt-4", + "openai", + "api.openai.com", + 443, + "", + "", + ) + _ = ctx + span.End() + + spans := exporter.GetSpans() + if len(spans) != 1 { + t.Fatalf("Expected 1 span, got %d", len(spans)) + } + s := spans[0] + if s.Name != otelInvokeAgent { + t.Errorf("Span name = %q, want %q", s.Name, otelInvokeAgent) + } + if s.SpanKind != 3 { // trace.SpanKindClient = 3 + t.Errorf("SpanKind = %d, want Client(3)", s.SpanKind) + } + + assertAttr := func(key, expected string) { + for _, a := range 
s.Attributes { + if string(a.Key) == key { + if a.Value.AsString() != expected { + t.Errorf("Attribute %s = %q, want %q", key, a.Value.AsString(), expected) + } + return + } + } + t.Errorf("Attribute %s not found", key) + } + + assertAttr(otelGenAIOperationName, otelInvokeAgent) + assertAttr(otelGenAIProviderName, "openai") + assertAttr(otelGenAIAgentID, "session-123") + assertAttr(otelGenAIConversationID, "session-123") + assertAttr(otelGenAIRequestModel, "gpt-4") + assertAttr(otelServerAddress, "api.openai.com") +} + +func TestOpenTelemetry_ChatSpan(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + ctx, _ := ct.startInvokeAgentSpan( + context.Background(), "s1", "gpt-4", "openai", "api.openai.com", 443, "", "", + ) + _, chatSpan := ct.startChatSpan(ctx, "gpt-4", "openai", "api.openai.com", 443, "s1") + chatSpan.End() + + spans := exporter.GetSpans() + if len(spans) < 1 { + t.Fatalf("Expected at least 1 span, got %d", len(spans)) + } + s := spans[0] // chat span ends first + if s.Name != "chat gpt-4" { + t.Errorf("Span name = %q, want %q", s.Name, "chat gpt-4") + } + + assertAttr := func(key, expected string) { + a := findSpanAttr(s, key) + if a == nil { + t.Errorf("Attribute %s not found", key) + return + } + if a.Value.AsString() != expected { + t.Errorf("Attribute %s = %q, want %q", key, a.Value.AsString(), expected) + } + } + + assertAttr(otelGenAIOperationName, otelChat) + assertAttr(otelGenAIProviderName, "openai") + assertAttr(otelGenAIConversationID, "s1") +} + +func TestOpenTelemetry_ExecuteToolSpan(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + _, span := ct.startExecuteToolSpan( + context.Background(), + "get_weather", + "call-456", + "Gets current weather", + map[string]any{"location": "Seattle"}, + ) + span.End() + + spans := exporter.GetSpans() + if len(spans) != 1 { + t.Fatalf("Expected 1 span, got %d", len(spans)) + } + s := spans[0] + if s.Name != "execute_tool get_weather" { + 
t.Errorf("Span name = %q, want %q", s.Name, "execute_tool get_weather") + } + if s.SpanKind != 1 { // trace.SpanKindInternal = 1 + t.Errorf("SpanKind = %d, want Internal(1)", s.SpanKind) + } + + assertAttr := func(key, expected string) { + for _, a := range s.Attributes { + if string(a.Key) == key { + if a.Value.AsString() != expected { + t.Errorf("Attribute %s = %q, want %q", key, a.Value.AsString(), expected) + } + return + } + } + t.Errorf("Attribute %s not found", key) + } + + assertAttr(otelGenAIOperationName, otelExecuteTool) + assertAttr(otelGenAIToolName, "get_weather") + assertAttr(otelGenAIToolCallID, "call-456") + assertAttr(otelGenAIToolType, "function") + assertAttr(otelGenAIToolDescription, "Gets current weather") +} + +func TestOpenTelemetry_ExecuteToolSpanSensitiveData(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{ + EnableSensitiveData: Bool(true), + }) + + args := map[string]any{"location": "Seattle"} + _, span := ct.startExecuteToolSpan( + context.Background(), + "get_weather", + "call-456", + "Gets weather", + args, + ) + ct.setExecuteToolResult(span, "Sunny, 72°F") + span.End() + + spans := exporter.GetSpans() + if len(spans) != 1 { + t.Fatalf("Expected 1 span, got %d", len(spans)) + } + s := spans[0] + + if findSpanAttr(s, otelGenAIToolCallArguments) == nil { + t.Error("Expected tool call arguments attribute when sensitive data enabled") + } + if findSpanAttr(s, otelGenAIToolCallResult) == nil { + t.Error("Expected tool call result attribute when sensitive data enabled") + } +} + +func TestOpenTelemetry_SensitiveDataGating(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{ + EnableSensitiveData: Bool(false), + }) + + args := map[string]any{"secret": "password123"} + _, span := ct.startExecuteToolSpan( + context.Background(), + "tool", + "call-1", + "", + args, + ) + ct.setExecuteToolResult(span, "result") + span.End() + + spans := exporter.GetSpans() + s := spans[0] + + for _, a := range 
s.Attributes { + if string(a.Key) == otelGenAIToolCallArguments { + t.Error("Tool call arguments should NOT be present when sensitive data disabled") + } + if string(a.Key) == otelGenAIToolCallResult { + t.Error("Tool call result should NOT be present when sensitive data disabled") + } + } +} + +func TestOpenTelemetry_MetricRecording(t *testing.T) { + ct, _, reader := setupTestTelemetry(t, &TelemetryConfig{}) + + ctx := context.Background() + + ct.recordOperationDuration(ctx, 1.5, "gpt-4", "gpt-4", "openai", "api.openai.com", 443, nil, otelInvokeAgent) + ct.tokenUsageHistogram.Record(ctx, 100) + ct.recordTimeToFirstChunk(ctx, 0.5, "gpt-4", "gpt-4", "openai", "", 0) + ct.recordTimePerOutputChunk(ctx, 0.1, "gpt-4", "gpt-4", "openai", "", 0) + + rm := collectMetrics(t, reader) + + if m := findMetricByName(rm, otelMetricOperationDuration); m == nil { + t.Error("Expected operation duration metric") + } + if m := findMetricByName(rm, otelMetricTokenUsage); m == nil { + t.Error("Expected token usage metric") + } + if m := findMetricByName(rm, otelMetricTimeToFirstChunk); m == nil { + t.Error("Expected time to first chunk metric") + } + if m := findMetricByName(rm, otelMetricTimePerOutputChunk); m == nil { + t.Error("Expected time per output chunk metric") + } +} + +func TestOpenTelemetry_NoTelemetryWhenNilConfig(t *testing.T) { + session := &Session{ + SessionID: "test-session", + telemetry: nil, + toolHandlers: make(map[string]ToolHandler), + } + // This should not panic + session.dispatchEvent(SessionEvent{Type: SessionIdle}) +} + +func TestOpenTelemetry_CustomSourceName(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{ + SourceName: "my.custom.source", + }) + + _, span := ct.startInvokeAgentSpan(context.Background(), "s1", "", "github", "", 0, "", "") + span.End() + + spans := exporter.GetSpans() + if len(spans) != 1 { + t.Fatalf("Expected 1 span, got %d", len(spans)) + } + + s := spans[0] + if s.InstrumentationScope.Name != 
"my.custom.source" { + t.Errorf("InstrumentationScope.Name = %q, want %q", s.InstrumentationScope.Name, "my.custom.source") + } +} + +func TestOpenTelemetry_DefaultSourceName(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + _, span := ct.startInvokeAgentSpan(context.Background(), "s1", "", "github", "", 0, "", "") + span.End() + + spans := exporter.GetSpans() + s := spans[0] + if s.InstrumentationScope.Name != otelDefaultSourceName { + t.Errorf("Default source name = %q, want %q", s.InstrumentationScope.Name, otelDefaultSourceName) + } +} + +func TestOpenTelemetry_AgentMetadata(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + _, span := ct.startInvokeAgentSpan( + context.Background(), "s1", "gpt-4", "github", "", 0, "my-agent", "A helpful agent", + ) + span.End() + + spans := exporter.GetSpans() + s := spans[0] + + if a := findSpanAttr(s, otelGenAIAgentName); a == nil || a.Value.AsString() != "my-agent" { + t.Error("Expected gen_ai.agent.name = 'my-agent'") + } + if a := findSpanAttr(s, otelGenAIAgentDescription); a == nil || a.Value.AsString() != "A helpful agent" { + t.Error("Expected gen_ai.agent.description = 'A helpful agent'") + } +} + +func TestOpenTelemetry_InvokeAgentSpan_ServerPort(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + _, span := ct.startInvokeAgentSpan( + context.Background(), "session-123", "gpt-4", "openai", "api.openai.com", 443, "", "", + ) + span.End() + + spans := exporter.GetSpans() + s := spans[0] + + a := findSpanAttr(s, otelServerPort) + if a == nil { + t.Error("Expected server.port attribute") + return + } + if a.Value.AsInt64() != 443 { + t.Errorf("server.port = %d, want 443", a.Value.AsInt64()) + } +} + +func TestOpenTelemetry_EnvVarOverride(t *testing.T) { + t.Setenv(otelCaptureMessageContentEnvVar, "true") + + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + args := map[string]any{"secret": "password123"} + _, 
span := ct.startExecuteToolSpan( + context.Background(), "sensitive_tool", "call-1", "Does sensitive thing", args, + ) + span.End() + + spans := exporter.GetSpans() + s := spans[0] + + if findSpanAttr(s, otelGenAIToolCallArguments) == nil { + t.Error("Expected tool call arguments attribute when OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true") + } +} + +// ============================================================================ +// Two-level span hierarchy tests +// ============================================================================ + +func newTestSession(ct *copilotTelemetry, streaming bool) *Session { + session := &Session{ + SessionID: "session-test", + telemetry: ct, + toolHandlers: make(map[string]ToolHandler), + toolDescriptions: make(map[string]string), + } + session.configureTelemetryContext("gpt-4", &ProviderConfig{ + Type: "openai", + BaseURL: "https://api.openai.com:443/v1", + }, &SystemMessageConfig{Content: "Be helpful"}, nil, streaming, "", "") + return session +} + +func TestOpenTelemetry_TwoLevelSpanHierarchy(t *testing.T) { + ct, exporter, reader := setupTestTelemetry(t, &TelemetryConfig{EnableSensitiveData: Bool(true)}) + + session := newTestSession(ct, false) + + // Simulate send + session.turnTracker.beginSend(context.Background(), "Hello") + + // AssistantTurnStart creates the chat child span + session.dispatchEvent(SessionEvent{ + Type: AssistantTurnStart, + Data: Data{TurnID: ptrString("turn-1"), InteractionID: ptrString("int-1")}, + }) + + // Assistant responds + session.dispatchEvent(SessionEvent{ + Type: AssistantMessage, + Data: Data{Content: ptrString("Hi there!")}, + }) + + // Usage + session.dispatchEvent(SessionEvent{ + Type: AssistantUsage, + Data: Data{ + Model: ptrString("gpt-4-0613"), + APICallID: ptrString("api-1"), + InputTokens: ptrFloat(10), + OutputTokens: ptrFloat(20), + }, + }) + + // Turn end + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + + // Session idle completes the invoke_agent 
span + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + spans := exporter.GetSpans() + if len(spans) != 2 { + t.Fatalf("Expected 2 spans (chat + invoke_agent), got %d", len(spans)) + } + + // First ended span is the chat span + chatSpan := spans[0] + if a := findSpanAttr(chatSpan, otelGenAIOperationName); a == nil || a.Value.AsString() != otelChat { + t.Error("Expected chat span operation name") + } + if a := findSpanAttr(chatSpan, otelGenAIUsageInputTokens); a == nil || a.Value.AsInt64() != 10 { + t.Error("Expected chat span input tokens = 10") + } + if a := findSpanAttr(chatSpan, otelGenAIUsageOutputTokens); a == nil || a.Value.AsInt64() != 20 { + t.Error("Expected chat span output tokens = 20") + } + if a := findSpanAttr(chatSpan, otelCopilotTurnID); a == nil || a.Value.AsString() != "turn-1" { + t.Error("Expected chat span turn_id = turn-1") + } + if a := findSpanAttr(chatSpan, otelCopilotInteractionID); a == nil || a.Value.AsString() != "int-1" { + t.Error("Expected chat span interaction_id = int-1") + } + + // Second ended span is the invoke_agent span + agentSpan := spans[1] + if a := findSpanAttr(agentSpan, otelGenAIOperationName); a == nil || a.Value.AsString() != otelInvokeAgent { + t.Error("Expected invoke_agent span operation name") + } + // Accumulated usage on invoke_agent + if a := findSpanAttr(agentSpan, otelGenAIUsageInputTokens); a == nil || a.Value.AsInt64() != 10 { + t.Error("Expected invoke_agent accumulated input tokens = 10") + } + + // Verify metrics recorded + rm := collectMetrics(t, reader) + if m := findMetricByName(rm, otelMetricOperationDuration); m == nil { + t.Error("Expected operation duration metric") + } + if m := findMetricByName(rm, otelMetricTokenUsage); m == nil { + t.Error("Expected token usage metric") + } +} + +func TestOpenTelemetry_MultiTurnAccumulatedUsage(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + session := newTestSession(ct, false) + 
session.turnTracker.beginSend(context.Background(), "Hello") + + // Turn 1 + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + session.dispatchEvent(SessionEvent{Type: AssistantUsage, Data: Data{ + Model: ptrString("gpt-4"), InputTokens: ptrFloat(100), OutputTokens: ptrFloat(50), + }}) + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + + // Turn 2 + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + session.dispatchEvent(SessionEvent{Type: AssistantUsage, Data: Data{ + Model: ptrString("gpt-4"), InputTokens: ptrFloat(200), OutputTokens: ptrFloat(100), + }}) + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + + // Complete + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + spans := exporter.GetSpans() + if len(spans) != 3 { + t.Fatalf("Expected 3 spans (2 chat + 1 invoke_agent), got %d", len(spans)) + } + + // The invoke_agent span (last) should have accumulated tokens + agentSpan := spans[2] + if a := findSpanAttr(agentSpan, otelGenAIUsageInputTokens); a == nil || a.Value.AsInt64() != 300 { + t.Errorf("Expected invoke_agent accumulated input tokens = 300, got %v", a) + } + if a := findSpanAttr(agentSpan, otelGenAIUsageOutputTokens); a == nil || a.Value.AsInt64() != 150 { + t.Errorf("Expected invoke_agent accumulated output tokens = 150, got %v", a) + } +} + +func TestOpenTelemetry_VendorPrefixedAttributes(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + session := newTestSession(ct, false) + session.turnTracker.beginSend(context.Background(), "Hello") + + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + session.dispatchEvent(SessionEvent{Type: AssistantUsage, Data: Data{ + Model: ptrString("gpt-4"), + InputTokens: ptrFloat(10), + OutputTokens: ptrFloat(20), + Cost: ptrFloat(0.005), + Duration: ptrFloat(1.5), + Initiator: ptrString("user"), + CopilotUsage: &CopilotUsage{TotalNanoAiu: 42.0}, + }}) + 
session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + spans := exporter.GetSpans() + chatSpan := spans[0] + + if a := findSpanAttr(chatSpan, otelCopilotCost); a == nil || a.Value.AsFloat64() != 0.005 { + t.Errorf("Expected copilot.cost = 0.005, got %v", a) + } + if a := findSpanAttr(chatSpan, otelCopilotServerDuration); a == nil || a.Value.AsFloat64() != 1.5 { + t.Errorf("Expected copilot.server_duration = 1.5, got %v", a) + } + if a := findSpanAttr(chatSpan, otelCopilotInitiator); a == nil || a.Value.AsString() != "user" { + t.Errorf("Expected copilot.initiator = 'user'") + } + if a := findSpanAttr(chatSpan, otelCopilotAIU); a == nil || a.Value.AsFloat64() != 42.0 { + t.Errorf("Expected copilot.aiu = 42.0, got %v", a) + } + + // Check accumulated cost/aiu on invoke_agent + agentSpan := spans[1] + if a := findSpanAttr(agentSpan, otelCopilotCost); a == nil || a.Value.AsFloat64() != 0.005 { + t.Errorf("Expected invoke_agent copilot.cost = 0.005") + } + if a := findSpanAttr(agentSpan, otelCopilotAIU); a == nil || a.Value.AsFloat64() != 42.0 { + t.Errorf("Expected invoke_agent copilot.aiu = 42.0") + } +} + +func TestOpenTelemetry_RichMessageContent(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{EnableSensitiveData: Bool(true)}) + + session := newTestSession(ct, false) + session.turnTracker.beginSend(context.Background(), "Hello") + + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + + // Message with reasoning + session.dispatchEvent(SessionEvent{ + Type: AssistantMessage, + Data: Data{ + ReasoningText: ptrString("Let me think..."), + Content: ptrString("The answer is 42"), + }, + }) + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + spans := exporter.GetSpans() + chatSpan := spans[0] + + a := findSpanAttr(chatSpan, otelGenAIOutputMessages) + if a == nil { + t.Fatal("Expected 
gen_ai.output.messages attribute") + } + + // Verify it contains reasoning part + var msgs []map[string]any + if err := json.Unmarshal([]byte(a.Value.AsString()), &msgs); err != nil { + t.Fatalf("Failed to parse output messages: %v", err) + } + if len(msgs) != 1 { + t.Fatalf("Expected 1 message, got %d", len(msgs)) + } + parts, ok := msgs[0]["parts"].([]any) + if !ok || len(parts) != 2 { + t.Fatalf("Expected 2 parts (reasoning + text), got %v", msgs[0]["parts"]) + } + part0 := parts[0].(map[string]any) + if part0["type"] != "reasoning" || part0["content"] != "Let me think..." { + t.Errorf("First part should be reasoning, got %v", part0) + } + part1 := parts[1].(map[string]any) + if part1["type"] != "text" || part1["content"] != "The answer is 42" { + t.Errorf("Second part should be text, got %v", part1) + } +} + +func TestOpenTelemetry_ToolCallParts(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{EnableSensitiveData: Bool(true)}) + + session := newTestSession(ct, false) + session.turnTracker.beginSend(context.Background(), "Hello") + + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + + // Tool execution start + session.dispatchEvent(SessionEvent{ + Type: ToolExecutionStart, + Data: Data{ + ToolCallID: ptrString("tc-1"), + ToolName: ptrString("read_file"), + Arguments: map[string]any{"path": "/foo.txt"}, + }, + }) + + // Tool execution complete + session.dispatchEvent(SessionEvent{ + Type: ToolExecutionComplete, + Data: Data{ + ToolCallID: ptrString("tc-1"), + Result: &Result{Content: "file contents"}, + }, + }) + + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + spans := exporter.GetSpans() + chatSpan := spans[0] + + // Check output messages contain tool_call + outAttr := findSpanAttr(chatSpan, otelGenAIOutputMessages) + if outAttr == nil { + t.Fatal("Expected gen_ai.output.messages") + } + var outMsgs []map[string]any + 
json.Unmarshal([]byte(outAttr.Value.AsString()), &outMsgs) + if len(outMsgs) < 1 { + t.Fatal("Expected at least 1 output message") + } + parts := outMsgs[0]["parts"].([]any) + p0 := parts[0].(map[string]any) + if p0["type"] != "tool_call" { + t.Errorf("Expected tool_call part type, got %v", p0["type"]) + } + + // Check input messages contain tool_call_response + inAttr := findSpanAttr(chatSpan, otelGenAIInputMessages) + if inAttr == nil { + t.Fatal("Expected gen_ai.input.messages") + } + var inMsgs []map[string]any + json.Unmarshal([]byte(inAttr.Value.AsString()), &inMsgs) + // First input msg = user "Hello", second = tool response + foundToolResponse := false + for _, m := range inMsgs { + if m["role"] == "tool" { + foundToolResponse = true + break + } + } + if !foundToolResponse { + t.Error("Expected tool role in input messages") + } +} + +func TestOpenTelemetry_MCPServerToolTracking(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{EnableSensitiveData: Bool(true)}) + + session := newTestSession(ct, false) + session.turnTracker.beginSend(context.Background(), "Hello") + + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + + // Server tool execution start + session.dispatchEvent(SessionEvent{ + Type: ToolExecutionStart, + Data: Data{ + ToolCallID: ptrString("tc-mcp-1"), + ToolName: ptrString("mcp_search"), + MCPServerName: ptrString("github-server"), + Arguments: map[string]any{"query": "test"}, + }, + }) + + // Server tool execution complete + session.dispatchEvent(SessionEvent{ + Type: ToolExecutionComplete, + Data: Data{ + ToolCallID: ptrString("tc-mcp-1"), + Result: &Result{Content: "results"}, + }, + }) + + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + spans := exporter.GetSpans() + chatSpan := spans[0] + + // Check output messages contain server_tool_call with nested mcp + outAttr := findSpanAttr(chatSpan, otelGenAIOutputMessages) + 
if outAttr == nil { + t.Fatal("Expected gen_ai.output.messages") + } + var outMsgs []map[string]any + json.Unmarshal([]byte(outAttr.Value.AsString()), &outMsgs) + if len(outMsgs) < 1 { + t.Fatal("Expected at least 1 output message") + } + parts := outMsgs[0]["parts"].([]any) + p0 := parts[0].(map[string]any) + if p0["type"] != "server_tool_call" { + t.Errorf("Expected server_tool_call type, got %v", p0["type"]) + } + stc, ok := p0["server_tool_call"].(map[string]any) + if !ok { + t.Fatal("Expected server_tool_call nested object") + } + if stc["type"] != "mcp" { + t.Errorf("Expected server_tool_call.type = 'mcp', got %v", stc["type"]) + } + if stc["server_name"] != "github-server" { + t.Errorf("Expected server_name = 'github-server', got %v", stc["server_name"]) + } +} + +func TestOpenTelemetry_SessionTruncationEvent(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + session := newTestSession(ct, false) + session.turnTracker.beginSend(context.Background(), "Hello") + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + + session.dispatchEvent(SessionEvent{ + Type: SessionTruncation, + Data: Data{ + TokenLimit: ptrFloat(8000), + PreTruncationTokensInMessages: ptrFloat(10000), + PostTruncationTokensInMessages: ptrFloat(7500), + PreTruncationMessagesLength: ptrFloat(50), + PostTruncationMessagesLength: ptrFloat(40), + TokensRemovedDuringTruncation: ptrFloat(2500), + MessagesRemovedDuringTruncation: ptrFloat(10), + PerformedBy: ptrString("system"), + }, + }) + + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + spans := exporter.GetSpans() + chatSpan := spans[0] + + evt := findEventByName(chatSpan, otelCopilotSessionTruncation) + if evt == nil { + t.Fatal("Expected session.truncation span event") + } + if a := findEventAttr(evt, otelCopilotTokenLimit); a == nil || a.Value.AsInt64() != 8000 { + t.Errorf("Expected token_limit = 8000") + } + if a 
:= findEventAttr(evt, otelCopilotPerformedBy); a == nil || a.Value.AsString() != "system" { + t.Errorf("Expected performed_by = 'system'") + } +} + +func TestOpenTelemetry_CompactionEvents(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + session := newTestSession(ct, false) + session.turnTracker.beginSend(context.Background(), "Hello") + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + + session.dispatchEvent(SessionEvent{Type: SessionCompactionStart}) + session.dispatchEvent(SessionEvent{ + Type: SessionCompactionComplete, + Data: Data{ + Success: ptrBool(true), + PreCompactionTokens: ptrFloat(10000), + PostCompactionTokens: ptrFloat(5000), + TokensRemoved: ptrFloat(5000), + MessagesRemoved: ptrFloat(20), + }, + }) + + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + spans := exporter.GetSpans() + chatSpan := spans[0] + + startEvt := findEventByName(chatSpan, otelCopilotSessionCompactionStart) + if startEvt == nil { + t.Error("Expected compaction_start span event") + } + + completeEvt := findEventByName(chatSpan, otelCopilotSessionCompactionComplete) + if completeEvt == nil { + t.Fatal("Expected compaction_complete span event") + } + if a := findEventAttr(completeEvt, otelCopilotSuccess); a == nil || !a.Value.AsBool() { + t.Error("Expected success = true") + } +} + +func TestOpenTelemetry_SkillInvokedEvent(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{EnableSensitiveData: Bool(true)}) + + session := newTestSession(ct, false) + session.turnTracker.beginSend(context.Background(), "Hello") + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + + session.dispatchEvent(SessionEvent{ + Type: SkillInvoked, + Data: Data{ + Name: ptrString("code-review"), + Path: ptrString("/skills/code-review"), + PluginName: ptrString("my-plugin"), + PluginVersion: ptrString("1.0.0"), + Content: 
ptrString("skill content"), + }, + }) + + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + spans := exporter.GetSpans() + chatSpan := spans[0] + + evt := findEventByName(chatSpan, otelCopilotSkillInvoked) + if evt == nil { + t.Fatal("Expected skill.invoked span event") + } + if a := findEventAttr(evt, otelCopilotSkillName); a == nil || a.Value.AsString() != "code-review" { + t.Error("Expected skill name = 'code-review'") + } + if a := findEventAttr(evt, otelCopilotSkillPath); a == nil || a.Value.AsString() != "/skills/code-review" { + t.Error("Expected skill path") + } + if a := findEventAttr(evt, otelCopilotSkillPluginName); a == nil || a.Value.AsString() != "my-plugin" { + t.Error("Expected plugin name") + } + if a := findEventAttr(evt, otelCopilotSkillContent); a == nil || a.Value.AsString() != "skill content" { + t.Error("Expected skill content (sensitive)") + } +} + +func TestOpenTelemetry_SessionErrorCompletesTurn(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + session := newTestSession(ct, false) + session.turnTracker.beginSend(context.Background(), "") + + errMsg := "model rate limited" + session.dispatchEvent(SessionEvent{ + Type: SessionError, + Data: Data{Message: &errMsg}, + }) + + spans := exporter.GetSpans() + if len(spans) != 1 { + t.Fatalf("Expected 1 span, got %d", len(spans)) + } + + s := spans[0] + if a := findSpanAttr(s, otelErrorType); a == nil { + t.Error("Expected error.type attribute on error span") + } else if a.Value.AsString() != "SessionTurnError" { + t.Errorf("Expected error.type = SessionTurnError, got %s", a.Value.AsString()) + } +} + +func TestOpenTelemetry_CompleteOnDispose(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + session := newTestSession(ct, false) + session.turnTracker.beginSend(context.Background(), "Hello") + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: 
Data{}}) + + // Simulate dispose while turn is active + session.turnTracker.completeOnDispose() + + spans := exporter.GetSpans() + // Should have chat span + invoke_agent span, both with error + if len(spans) < 1 { + t.Fatalf("Expected at least 1 span after dispose, got %d", len(spans)) + } + + // Check that spans have error status with correct error type + for _, s := range spans { + if a := findSpanAttr(s, otelErrorType); a == nil { + t.Errorf("Span %q should have error.type after dispose", s.Name) + } else if a.Value.AsString() != "SessionDestroyedError" { + t.Errorf("Span %q expected error.type = SessionDestroyedError, got %s", s.Name, a.Value.AsString()) + } + } +} + +func TestOpenTelemetry_StreamingChunkMetrics(t *testing.T) { + ct, _, reader := setupTestTelemetry(t, &TelemetryConfig{}) + + session := newTestSession(ct, true) // streaming=true + session.turnTracker.beginSend(context.Background(), "") + + // Start a chat turn + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + + // Simulate streaming deltas + delta1 := "Hello" + session.dispatchEvent(SessionEvent{ + Type: AssistantMessageDelta, + Data: Data{DeltaContent: &delta1}, + }) + + time.Sleep(10 * time.Millisecond) + + delta2 := " world" + session.dispatchEvent(SessionEvent{ + Type: AssistantMessageDelta, + Data: Data{DeltaContent: &delta2}, + }) + + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + rm := collectMetrics(t, reader) + + if m := findMetricByName(rm, otelMetricTimeToFirstChunk); m == nil { + t.Error("Expected time_to_first_chunk metric for streaming") + } + if m := findMetricByName(rm, otelMetricTimePerOutputChunk); m == nil { + t.Error("Expected time_per_output_chunk metric for streaming") + } +} + +func TestOpenTelemetry_SubagentSpans(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + session := newTestSession(ct, false) + 
session.turnTracker.beginSend(context.Background(), "Hello") + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + + // Subagent started + session.dispatchEvent(SessionEvent{ + Type: SubagentStarted, + Data: Data{ + ToolCallID: ptrString("sub-tc-1"), + AgentName: ptrString("code-reviewer"), + AgentDescription: ptrString("Reviews code"), + }, + }) + + // Subagent receives usage (routed by parentToolCallId) + session.dispatchEvent(SessionEvent{ + Type: AssistantUsage, + Data: Data{ + ParentToolCallID: ptrString("sub-tc-1"), + Model: ptrString("gpt-4"), + InputTokens: ptrFloat(50), + OutputTokens: ptrFloat(30), + }, + }) + + // Subagent sends message + session.dispatchEvent(SessionEvent{ + Type: AssistantMessage, + Data: Data{ + ParentToolCallID: ptrString("sub-tc-1"), + Content: ptrString("LGTM!"), + }, + }) + + // Subagent completed + session.dispatchEvent(SessionEvent{ + Type: SubagentCompleted, + Data: Data{ToolCallID: ptrString("sub-tc-1")}, + }) + + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + spans := exporter.GetSpans() + // Should have: subagent chat, subagent invoke_agent, main chat, main invoke_agent + if len(spans) < 4 { + t.Fatalf("Expected at least 4 spans, got %d", len(spans)) + } + + // Find the subagent invoke_agent span (has agent name) + var subInvokeSpan *tracetest.SpanStub + for i, s := range spans { + if a := findSpanAttr(s, otelGenAIAgentName); a != nil && a.Value.AsString() == "code-reviewer" { + subInvokeSpan = &spans[i] + break + } + } + if subInvokeSpan == nil { + t.Fatal("Expected subagent invoke_agent span with agent name 'code-reviewer'") + } + if a := findSpanAttr(*subInvokeSpan, otelGenAIUsageInputTokens); a == nil || a.Value.AsInt64() != 50 { + t.Error("Expected subagent invoke_agent input tokens = 50") + } +} + +func TestOpenTelemetry_ToolCallParentContext(t *testing.T) { + ct, _, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + 
session := newTestSession(ct, false) + session.turnTracker.beginSend(context.Background(), "Hello") + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + + // Simulate tool execution start to register pending parent + session.dispatchEvent(SessionEvent{ + Type: ToolExecutionStart, + Data: Data{ + ToolCallID: ptrString("tc-1"), + ToolName: ptrString("read_file"), + }, + }) + + // getToolCallParentContext should return a non-nil context + ctx := session.getToolCallParentContext("tc-1") + if ctx == nil { + t.Error("Expected non-nil tool call parent context") + } + + // Second call should still return the agent context (one-time consumption) + ctx2 := session.getToolCallParentContext("tc-1") + if ctx2 == nil { + t.Error("Expected fallback to agent span context") + } + + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + session.dispatchEvent(SessionEvent{Type: SessionIdle}) +} + +func TestOpenTelemetry_ModelChangeUpdatesResponseModel(t *testing.T) { + ct, _, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + session := newTestSession(ct, false) + session.turnTracker.beginSend(context.Background(), "") + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + + newModel := "gpt-4-turbo" + session.dispatchEvent(SessionEvent{ + Type: SessionModelChange, + Data: Data{NewModel: &newModel}, + }) + + session.turnTracker.mu.Lock() + if session.turnTracker.turnResponseModel != "gpt-4-turbo" { + t.Errorf("Expected response model to be updated to gpt-4-turbo, got %q", session.turnTracker.turnResponseModel) + } + session.turnTracker.mu.Unlock() + + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + session.dispatchEvent(SessionEvent{Type: SessionIdle}) +} + +func TestOpenTelemetry_SpanReuseWithinTurn(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + session := newTestSession(ct, false) + + // First send + session.turnTracker.beginSend(context.Background(), "First question") + + 
// Second send within the same turn (no SessionIdle yet) + session.turnTracker.beginSend(context.Background(), "Another question") + + // Complete + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + // Verify only 1 invoke_agent span was created (reused) + spans := exporter.GetSpans() + agentSpanCount := 0 + for _, s := range spans { + if a := findSpanAttr(s, otelGenAIOperationName); a != nil && a.Value.AsString() == otelInvokeAgent { + agentSpanCount++ + } + } + if agentSpanCount != 1 { + t.Errorf("Expected 1 invoke_agent span (reused within turn), got %d", agentSpanCount) + } +} + +func TestOpenTelemetry_FinishReasons(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + session := newTestSession(ct, false) + session.turnTracker.beginSend(context.Background(), "Hello") + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + spans := exporter.GetSpans() + for _, s := range spans { + a := findSpanAttr(s, otelGenAIResponseFinishReasons) + if a == nil { + t.Errorf("Span %q missing finish_reasons", s.Name) + continue + } + reasons := a.Value.AsStringSlice() + if len(reasons) != 1 || reasons[0] != "stop" { + t.Errorf("Span %q finish_reasons = %v, want [\"stop\"]", s.Name, reasons) + } + } +} + +func TestOpenTelemetry_MessageContentExcludedWhenSensitiveDisabled(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{EnableSensitiveData: Bool(false)}) + + session := &Session{ + SessionID: "session-no-sensitive", + telemetry: ct, + toolHandlers: make(map[string]ToolHandler), + toolDescriptions: make(map[string]string), + } + sysMsg := &SystemMessageConfig{Content: "You are helpful."} + tools := []Tool{{Name: "myTool", Description: "A tool", Parameters: map[string]any{"type": "object"}}} + session.configureTelemetryContext("gpt-4", nil, sysMsg, tools, false, "", "") + + 
session.turnTracker.beginSend(context.Background(), "Hello") + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + session.dispatchEvent(SessionEvent{ + Type: AssistantMessage, + Data: Data{Content: ptrString("Hi there!")}, + }) + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + spans := exporter.GetSpans() + for _, span := range spans { + if findSpanAttr(span, otelGenAIInputMessages) != nil { + t.Errorf("Span %q: gen_ai.input.messages should be excluded when sensitive disabled", span.Name) + } + if findSpanAttr(span, otelGenAIOutputMessages) != nil { + t.Errorf("Span %q: gen_ai.output.messages should be excluded when sensitive disabled", span.Name) + } + if findSpanAttr(span, otelGenAISystemInstructions) != nil { + t.Errorf("Span %q: gen_ai.system_instructions should be excluded when sensitive disabled", span.Name) + } + } + + // Tool definitions NOT sensitive-gated — should be on invoke_agent span + agentSpan := spans[len(spans)-1] + if findSpanAttr(agentSpan, otelGenAIToolDefinitions) == nil { + t.Error("gen_ai.tool.definitions should always be included") + } +} + +func TestOpenTelemetry_CacheTokenAttributes(t *testing.T) { + ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) + + session := newTestSession(ct, false) + session.turnTracker.beginSend(context.Background(), "Hello") + + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + session.dispatchEvent(SessionEvent{ + Type: AssistantUsage, + Data: Data{ + InputTokens: ptrFloat(100), + OutputTokens: ptrFloat(50), + CacheReadTokens: ptrFloat(50), + CacheWriteTokens: ptrFloat(25), + }, + }) + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + spans := exporter.GetSpans() + chatSpan := spans[0] + + if a := findSpanAttr(chatSpan, otelGenAIUsageCacheReadInputTokens); a == nil || a.Value.AsInt64() != 50 { + 
t.Error("Expected gen_ai.usage.cache_read.input_tokens = 50") + } + if a := findSpanAttr(chatSpan, otelGenAIUsageCacheCreationInputTokens); a == nil || a.Value.AsInt64() != 25 { + t.Error("Expected gen_ai.usage.cache_creation.input_tokens = 25") + } +} + +func TestOpenTelemetry_ChunkTimingUsesChat(t *testing.T) { + ct, _, reader := setupTestTelemetry(t, &TelemetryConfig{}) + + session := newTestSession(ct, true) // streaming=true + session.turnTracker.beginSend(context.Background(), "") + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + + session.dispatchEvent(SessionEvent{ + Type: AssistantMessageDelta, + Data: Data{DeltaContent: ptrString("Hello")}, + }) + + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + rm := collectMetrics(t, reader) + + // Verify chunk timing metrics use "chat" operation name + m := findMetricByName(rm, otelMetricTimeToFirstChunk) + if m == nil { + t.Fatal("Expected time_to_first_chunk metric") + } + histo := m.Data.(metricdata.Histogram[float64]) + for _, dp := range histo.DataPoints { + for _, a := range dp.Attributes.ToSlice() { + if string(a.Key) == otelGenAIOperationName { + if a.Value.AsString() != otelChat { + t.Errorf("chunk timing operation name = %q, want %q", a.Value.AsString(), otelChat) + } + } + } + } +} + +func TestOpenTelemetry_TokenUsageMetricsUseChat(t *testing.T) { + ct, _, reader := setupTestTelemetry(t, &TelemetryConfig{}) + + session := newTestSession(ct, false) + session.turnTracker.beginSend(context.Background(), "Hello") + session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) + session.dispatchEvent(SessionEvent{ + Type: AssistantUsage, + Data: Data{ + Model: ptrString("gpt-4"), + InputTokens: ptrFloat(100), + OutputTokens: ptrFloat(50), + }, + }) + session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) + session.dispatchEvent(SessionEvent{Type: SessionIdle}) + + rm := collectMetrics(t, 
reader) + + m := findMetricByName(rm, otelMetricTokenUsage) + if m == nil { + t.Fatal("Expected token usage metric") + } + histo := m.Data.(metricdata.Histogram[int64]) + for _, dp := range histo.DataPoints { + for _, a := range dp.Attributes.ToSlice() { + if string(a.Key) == otelGenAIOperationName { + if a.Value.AsString() != otelChat { + t.Errorf("token usage operation name = %q, want %q", a.Value.AsString(), otelChat) + } + } + } + } +} diff --git a/go/go.mod b/go/go.mod index c835cc889..aa724b87e 100644 --- a/go/go.mod +++ b/go/go.mod @@ -1,8 +1,24 @@ module github.com/github/copilot-sdk/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require ( github.com/google/jsonschema-go v0.4.2 github.com/klauspost/compress v1.18.3 + go.opentelemetry.io/otel v1.40.0 + go.opentelemetry.io/otel/metric v1.40.0 + go.opentelemetry.io/otel/sdk v1.40.0 + go.opentelemetry.io/otel/sdk/metric v1.40.0 + go.opentelemetry.io/otel/trace v1.40.0 +) + +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/uuid v1.6.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + golang.org/x/sys v0.40.0 // indirect ) diff --git a/go/go.sum b/go/go.sum index 0cc670e8f..a960c169c 100644 --- a/go/go.sum +++ b/go/go.sum @@ -1,6 +1,39 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 
h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw= github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= 
+go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/go/session.go b/go/session.go index 12d1b1afa..6e253f2f5 100644 --- a/go/session.go +++ b/go/session.go @@ -57,6 +57,7 @@ type Session struct { nextHandlerID uint64 handlerMutex sync.RWMutex toolHandlers map[string]ToolHandler + toolDescriptions map[string]string toolHandlersM sync.RWMutex permissionHandler PermissionHandlerFunc permissionMux sync.RWMutex @@ -64,6 +65,8 @@ type Session struct { userInputMux sync.RWMutex hooks *SessionHooks hooksMux sync.RWMutex + telemetry *copilotTelemetry + turnTracker *agentTurnTracker // RPC provides typed session-scoped RPC methods. RPC *rpc.SessionRpc @@ -79,12 +82,13 @@ func (s *Session) WorkspacePath() string { // newSession creates a new session wrapper with the given session ID and client. 
func newSession(sessionID string, client *jsonrpc2.Client, workspacePath string) *Session { return &Session{ - SessionID: sessionID, - workspacePath: workspacePath, - client: client, - handlers: make([]sessionHandler, 0), - toolHandlers: make(map[string]ToolHandler), - RPC: rpc.NewSessionRpc(client, sessionID), + SessionID: sessionID, + workspacePath: workspacePath, + client: client, + handlers: make([]sessionHandler, 0), + toolHandlers: make(map[string]ToolHandler), + toolDescriptions: make(map[string]string), + RPC: rpc.NewSessionRpc(client, sessionID), } } @@ -111,6 +115,10 @@ func newSession(sessionID string, client *jsonrpc2.Client, workspacePath string) // log.Printf("Failed to send message: %v", err) // } func (s *Session) Send(ctx context.Context, options MessageOptions) (string, error) { + if s.turnTracker != nil { + s.turnTracker.beginSend(ctx, options.Prompt) + } + req := sessionSendRequest{ SessionID: s.SessionID, Prompt: options.Prompt, @@ -120,6 +128,9 @@ func (s *Session) Send(ctx context.Context, options MessageOptions) (string, err result, err := s.client.Request("session.send", req) if err != nil { + if s.turnTracker != nil { + s.turnTracker.completeTurnWithError(err) + } return "", fmt.Errorf("failed to send message: %w", err) } @@ -206,9 +217,18 @@ func (s *Session) SendAndWait(ctx context.Context, options MessageOptions) (*Ses mu.Unlock() return result, nil case err := <-errCh: + // Complete telemetry spans on session error (idempotent if already completed). + if s.turnTracker != nil { + s.turnTracker.completeTurnWithError(err) + } return nil, err - case <-ctx.Done(): // TODO: remove once session.Send honors the context - return nil, fmt.Errorf("waiting for session.idle: %w", ctx.Err()) + case <-ctx.Done(): + ctxErr := fmt.Errorf("waiting for session.idle: %w", ctx.Err()) + // Complete telemetry spans on timeout/cancellation (idempotent if already completed). 
+ if s.turnTracker != nil { + s.turnTracker.completeTurnWithError(ctxErr) + } + return nil, ctxErr } } @@ -267,11 +287,13 @@ func (s *Session) registerTools(tools []Tool) { defer s.toolHandlersM.Unlock() s.toolHandlers = make(map[string]ToolHandler) + s.toolDescriptions = make(map[string]string) for _, tool := range tools { if tool.Name == "" || tool.Handler == nil { continue } s.toolHandlers[tool.Name] = tool.Handler + s.toolDescriptions[tool.Name] = tool.Description } } @@ -284,6 +306,14 @@ func (s *Session) getToolHandler(name string) (ToolHandler, bool) { return handler, ok } +// getToolDescription retrieves a registered tool's description by name. +func (s *Session) getToolDescription(name string) string { + s.toolHandlersM.RLock() + desc := s.toolDescriptions[name] + s.toolHandlersM.RUnlock() + return desc +} + // registerPermissionHandler registers a permission handler for this session. // // When the assistant needs permission to perform certain actions (e.g., file @@ -457,6 +487,10 @@ func (s *Session) handleHooksInvoke(hookType string, rawInput json.RawMessage) ( // This is an internal method; handlers are called synchronously and any panics // are recovered to prevent crashing the event dispatcher. func (s *Session) dispatchEvent(event SessionEvent) { + if s.turnTracker != nil { + s.turnTracker.processEvent(event) + } + s.handlerMutex.RLock() handlers := make([]SessionEventHandler, 0, len(s.handlers)) for _, h := range s.handlers { @@ -526,6 +560,11 @@ func (s *Session) GetMessages(ctx context.Context) ([]SessionEvent, error) { // log.Printf("Failed to destroy session: %v", err) // } func (s *Session) Destroy() error { + // Close any open telemetry spans before destroying. 
+ if s.turnTracker != nil { + s.turnTracker.completeOnDispose() + } + _, err := s.client.Request("session.destroy", sessionDestroyRequest{SessionID: s.SessionID}) if err != nil { return fmt.Errorf("failed to destroy session: %w", err) @@ -576,3 +615,29 @@ func (s *Session) Abort(ctx context.Context) error { return nil } + +// configureTelemetryContext sets telemetry context from session configuration. +func (s *Session) configureTelemetryContext( + model string, + provider *ProviderConfig, + systemMessage *SystemMessageConfig, + tools []Tool, + streaming bool, + agentName string, + agentDescription string, +) { + if s.telemetry == nil { + return + } + s.turnTracker = newAgentTurnTracker(s.telemetry, s.SessionID, model, provider, systemMessage, tools, streaming, agentName, agentDescription) +} + +// getToolCallParentContext returns the parent context for a tool call span. +// For subagent tool calls this returns the subagent's invoke_agent context; +// for main agent tool calls this returns the root invoke_agent context. +func (s *Session) getToolCallParentContext(toolCallID string) context.Context { + if s.turnTracker == nil { + return nil + } + return s.turnTracker.getToolCallParentContext(toolCallID) +} diff --git a/go/types.go b/go/types.go index 225cc1266..9d13ec799 100644 --- a/go/types.go +++ b/go/types.go @@ -54,6 +54,9 @@ type ClientOptions struct { // Default: true (but defaults to false when GitHubToken is provided). // Use Bool(false) to explicitly disable. UseLoggedInUser *bool + // Telemetry configures OpenTelemetry instrumentation. + // When nil, no telemetry is emitted (opt-in). + Telemetry *TelemetryConfig } // Bool returns a pointer to the given bool value. @@ -406,6 +409,12 @@ type SessionConfig struct { // InfiniteSessions configures infinite sessions for persistent workspaces and automatic compaction. // When enabled (default), sessions automatically manage context limits and persist state. 
InfiniteSessions *InfiniteSessionConfig + // AgentName identifies the agent for telemetry attribution. + // When set, the invoke_agent span includes a gen_ai.agent.name attribute. + AgentName string + // AgentDescription describes the agent for telemetry attribution. + // When set, the invoke_agent span includes a gen_ai.agent.description attribute. + AgentDescription string } // Tool describes a caller-implemented tool that can be invoked by Copilot @@ -490,6 +499,10 @@ type ResumeSessionConfig struct { // DisableResume, when true, skips emitting the session.resume event. // Useful for reconnecting to a session without triggering resume-related side effects. DisableResume bool + // AgentName identifies the agent for telemetry attribution. + AgentName string + // AgentDescription describes the agent for telemetry attribution. + AgentDescription string } // ProviderConfig configures a custom model provider diff --git a/nodejs/README.md b/nodejs/README.md index 31558b8ab..43c0cbaca 100644 --- a/nodejs/README.md +++ b/nodejs/README.md @@ -78,6 +78,7 @@ new CopilotClient(options?: CopilotClientOptions) - `autoRestart?: boolean` - Auto-restart on crash (default: true) - `githubToken?: string` - GitHub token for authentication. When provided, takes priority over other auth methods. - `useLoggedInUser?: boolean` - Whether to use logged-in user for authentication (default: true, but false when `githubToken` is provided). Cannot be used with `cliUrl`. +- `telemetry?: TelemetryConfig` - OpenTelemetry instrumentation configuration. When provided, enables automatic tracing and metrics following [GenAI semantic conventions](../docs/opentelemetry-instrumentation.md). #### Methods @@ -105,6 +106,8 @@ Create a new conversation session. 
- `tools?: Tool[]` - Custom tools exposed to the CLI - `systemMessage?: SystemMessageConfig` - System message customization (see below) - `infiniteSessions?: InfiniteSessionConfig` - Configure automatic context compaction (see below) +- `agentName?: string` - Agent name for telemetry attribution. When set, the `invoke_agent` span includes a `gen_ai.agent.name` attribute. +- `agentDescription?: string` - Agent description for telemetry attribution. When set, the `invoke_agent` span includes a `gen_ai.agent.description` attribute. - `provider?: ProviderConfig` - Custom API provider configuration (BYOK - Bring Your Own Key). See [Custom Providers](#custom-providers) section. - `onUserInputRequest?: UserInputHandler` - Handler for user input requests from the agent. Enables the `ask_user` tool. See [User Input Requests](#user-input-requests) section. - `hooks?: SessionHooks` - Hook handlers for session lifecycle events. See [Session Hooks](#session-hooks) section. diff --git a/nodejs/package-lock.json b/nodejs/package-lock.json index 9b79d3137..b3a742867 100644 --- a/nodejs/package-lock.json +++ b/nodejs/package-lock.json @@ -10,10 +10,14 @@ "license": "MIT", "dependencies": { "@github/copilot": "^0.0.420", + "@opentelemetry/api": "^1.9.0", "vscode-jsonrpc": "^8.2.1", "zod": "^4.3.6" }, "devDependencies": { + "@opentelemetry/resources": "^2.5.1", + "@opentelemetry/sdk-metrics": "^2.5.1", + "@opentelemetry/sdk-trace-base": "^2.5.1", "@types/node": "^25.2.0", "@typescript-eslint/eslint-plugin": "^8.54.0", "@typescript-eslint/parser": "^8.54.0", @@ -32,6 +36,22 @@ }, "engines": { "node": ">=20.0.0" + }, + "peerDependencies": { + "@opentelemetry/resources": "^2.5.1", + "@opentelemetry/sdk-metrics": "^2.5.1", + "@opentelemetry/sdk-trace-base": "^2.5.1" + }, + "peerDependenciesMeta": { + "@opentelemetry/resources": { + "optional": true + }, + "@opentelemetry/sdk-metrics": { + "optional": true + }, + "@opentelemetry/sdk-trace-base": { + "optional": true + } } }, 
"node_modules/@apidevtools/json-schema-ref-parser": { @@ -847,6 +867,93 @@ "dev": true, "license": "MIT" }, + "node_modules/@opentelemetry/api": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz", + "integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==", + "license": "Apache-2.0", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@opentelemetry/core": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.5.1.tgz", + "integrity": "sha512-Dwlc+3HAZqpgTYq0MUyZABjFkcrKTePwuiFVLjahGD8cx3enqihmpAmdgNFO1R4m/sIe5afjJrA25Prqy4NXlA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/resources": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.5.1.tgz", + "integrity": "sha512-BViBCdE/GuXRlp9k7nS1w6wJvY5fnFX5XvuEtWsTAOQFIO89Eru7lGW3WbfbxtCuZ/GbrJfAziXG0w0dpxL7eQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "2.5.1", + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-metrics": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-2.5.1.tgz", + "integrity": "sha512-RKMn3QKi8nE71ULUo0g/MBvq1N4icEBo7cQSKnL3URZT16/YH3nSVgWegOjwx7FRBTrjOIkMJkCUn/ZFIEfn4A==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "2.5.1", + "@opentelemetry/resources": "2.5.1" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + 
"@opentelemetry/api": ">=1.9.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-trace-base": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-base/-/sdk-trace-base-2.5.1.tgz", + "integrity": "sha512-iZH3Gw8cxQn0gjpOjJMmKLd9GIaNh/E3v3ST67vyzLSxHBs14HsG4dy7jMYyC5WXGdBVEcM7U/XTF5hCQxjDMw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "2.5.1", + "@opentelemetry/resources": "2.5.1", + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/semantic-conventions": { + "version": "1.40.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.40.0.tgz", + "integrity": "sha512-cifvXDhcqMwwTlTK04GBNeIe7yyo28Mfby85QXFe1Yk8nmi36Ab/5UQwptOx84SsoGNRg+EVSjwzfSZMy6pmlw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=14" + } + }, "node_modules/@rollup/rollup-android-arm-eabi": { "version": "4.57.1", "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.57.1.tgz", diff --git a/nodejs/package.json b/nodejs/package.json index dddda66a3..094ea4578 100644 --- a/nodejs/package.json +++ b/nodejs/package.json @@ -41,10 +41,24 @@ "license": "MIT", "dependencies": { "@github/copilot": "^0.0.420", + "@opentelemetry/api": "^1.9.0", "vscode-jsonrpc": "^8.2.1", "zod": "^4.3.6" }, + "peerDependencies": { + "@opentelemetry/resources": "^2.5.1", + "@opentelemetry/sdk-metrics": "^2.5.1", + "@opentelemetry/sdk-trace-base": "^2.5.1" + }, + "peerDependenciesMeta": { + "@opentelemetry/resources": { "optional": true }, + "@opentelemetry/sdk-metrics": { "optional": true }, + "@opentelemetry/sdk-trace-base": { "optional": true } + }, "devDependencies": { + "@opentelemetry/resources": "^2.5.1", + "@opentelemetry/sdk-metrics": "^2.5.1", + 
"@opentelemetry/sdk-trace-base": "^2.5.1", "@types/node": "^25.2.0", "@typescript-eslint/eslint-plugin": "^8.54.0", "@typescript-eslint/parser": "^8.54.0", diff --git a/nodejs/src/client.ts b/nodejs/src/client.ts index 6d841c7cc..686be69fe 100644 --- a/nodejs/src/client.ts +++ b/nodejs/src/client.ts @@ -23,6 +23,7 @@ import { StreamMessageWriter, } from "vscode-jsonrpc/node.js"; import { createServerRpc } from "./generated/rpc.js"; +import { CopilotTelemetry } from "./copilot-telemetry.js"; import { getSdkProtocolVersion } from "./sdkProtocolVersion.js"; import { CopilotSession } from "./session.js"; import type { @@ -137,7 +138,7 @@ export class CopilotClient { private sessions: Map = new Map(); private stderrBuffer: string = ""; // Captures CLI stderr for error messages private options: Required< - Omit + Omit > & { cliUrl?: string; githubToken?: string; @@ -154,6 +155,7 @@ export class CopilotClient { > = new Map(); private _rpc: ReturnType | null = null; private processExitPromise: Promise | null = null; // Rejects when CLI process exits + private readonly _telemetry: CopilotTelemetry | undefined; /** * Typed server-scoped RPC methods. @@ -226,6 +228,9 @@ export class CopilotClient { // Default useLoggedInUser to false when githubToken is provided, otherwise true useLoggedInUser: options.useLoggedInUser ?? (options.githubToken ? false : true), }; + + // Initialize telemetry if configured (opt-in) + this._telemetry = options.telemetry ? 
new CopilotTelemetry(options.telemetry) : undefined; } /** @@ -556,7 +561,19 @@ export class CopilotClient { sessionId: string; workspacePath?: string; }; - const session = new CopilotSession(sessionId, this.connection!, workspacePath); + const session = new CopilotSession( + sessionId, + this.connection!, + workspacePath, + this._telemetry, + config.model, + config.provider, + config.systemMessage, + config.tools, + config.streaming, + config.agentName, + config.agentDescription + ); session.registerTools(config.tools); session.registerPermissionHandler(config.onPermissionRequest); if (config.onUserInputRequest) { @@ -642,7 +659,19 @@ export class CopilotClient { sessionId: string; workspacePath?: string; }; - const session = new CopilotSession(resumedSessionId, this.connection!, workspacePath); + const session = new CopilotSession( + resumedSessionId, + this.connection!, + workspacePath, + this._telemetry, + config.model, + config.provider, + config.systemMessage, + config.tools, + config.streaming, + config.agentName, + config.agentDescription + ); session.registerTools(config.tools); session.registerPermissionHandler(config.onPermissionRequest); if (config.onUserInputRequest) { @@ -1396,13 +1425,25 @@ export class CopilotClient { return { result: this.buildUnsupportedToolResult(params.toolName) }; } - return await this.executeToolCall(handler, params); + return await this.executeToolCall(handler, params, session); } private async executeToolCall( handler: ToolHandler, - request: ToolCallRequestPayload + request: ToolCallRequestPayload, + session: CopilotSession ): Promise { + const telemetry = session.telemetry; + const span = telemetry?.startExecuteToolSpan( + request.toolName, + request.toolCallId, + session.getToolDescription(request.toolName), + request.arguments, + session.getTelemetryToolCallParentContext(request.toolCallId) + ); + const startTime = telemetry ? 
performance.now() : 0; + let operationError: Error | undefined; + try { const invocation = { sessionId: request.sessionId, @@ -1412,8 +1453,16 @@ export class CopilotClient { }; const result = await handler(request.arguments, invocation); + if (span && telemetry) { + telemetry.setExecuteToolResult(span, result); + } + return { result: this.normalizeToolResult(result) }; } catch (error) { + operationError = error instanceof Error ? error : new Error(String(error)); + if (span) { + CopilotTelemetry.recordError(span, operationError); + } const message = error instanceof Error ? error.message : String(error); return { result: { @@ -1425,6 +1474,21 @@ export class CopilotClient { toolTelemetry: {}, }, }; + } finally { + if (span && telemetry) { + const durationSeconds = (performance.now() - startTime) / 1000; + telemetry.recordOperationDuration( + durationSeconds, + undefined, + undefined, + session.telemetryProviderName, + session.telemetryServerAddress, + session.telemetryServerPort, + operationError, + "execute_tool" + ); + span.end(); + } } } diff --git a/nodejs/src/copilot-telemetry.ts b/nodejs/src/copilot-telemetry.ts new file mode 100644 index 000000000..997223e39 --- /dev/null +++ b/nodejs/src/copilot-telemetry.ts @@ -0,0 +1,1808 @@ +/*--------------------------------------------------------------------------------------------- + * Copyright (c) Microsoft Corporation. All rights reserved. + *--------------------------------------------------------------------------------------------*/ + +/** + * OpenTelemetry instrumentation for the Copilot SDK, implementing + * the Semantic Conventions for Generative AI systems. + * + * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/ + * @internal This module is NOT exported from the public API. 
+ */ + +import { + type Span, + type Context, + type Tracer, + type Meter, + type Histogram, + SpanKind, + SpanStatusCode, + trace, + metrics, + context, +} from "@opentelemetry/api"; +import type { + TelemetryConfig, + ProviderConfig, + SystemMessageConfig, + Tool, + SessionEvent, +} from "./types.js"; + +// Read version from package.json at module load time. +import { createRequire } from "node:module"; +const _require = createRequire(import.meta.url); +const SDK_VERSION: string | undefined = (_require("../package.json") as { version?: string }) + .version; + +// ============================================================================ +// Error subclasses +// ============================================================================ + +class SessionDestroyedError extends Error { + constructor(message: string) { + super(message); + this.name = "SessionDestroyedError"; + } +} + +class SessionTurnError extends Error { + constructor(message: string) { + super(message); + this.name = "SessionTurnError"; + } +} + +// ============================================================================ +// Constants (matching .NET OpenTelemetryConsts) +// ============================================================================ + +const DEFAULT_SOURCE_NAME = "github.copilot.sdk"; +const DEFAULT_PROVIDER_NAME = "github"; +const CAPTURE_MESSAGE_CONTENT_ENV_VAR = "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"; + +const SECONDS_UNIT = "s"; +const TOKENS_UNIT = "{token}"; + +// Attribute keys +const ATTR = { + GEN_AI_OPERATION_NAME: "gen_ai.operation.name", + GEN_AI_PROVIDER_NAME: "gen_ai.provider.name", + GEN_AI_AGENT_ID: "gen_ai.agent.id", + GEN_AI_AGENT_NAME: "gen_ai.agent.name", + GEN_AI_AGENT_DESCRIPTION: "gen_ai.agent.description", + GEN_AI_CONVERSATION_ID: "gen_ai.conversation.id", + GEN_AI_REQUEST_MODEL: "gen_ai.request.model", + GEN_AI_RESPONSE_MODEL: "gen_ai.response.model", + GEN_AI_RESPONSE_ID: "gen_ai.response.id", + GEN_AI_RESPONSE_FINISH_REASONS: 
"gen_ai.response.finish_reasons", + GEN_AI_USAGE_INPUT_TOKENS: "gen_ai.usage.input_tokens", + GEN_AI_USAGE_OUTPUT_TOKENS: "gen_ai.usage.output_tokens", + GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS: "gen_ai.usage.cache_read.input_tokens", + GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS: "gen_ai.usage.cache_creation.input_tokens", + GEN_AI_TOKEN_TYPE: "gen_ai.token.type", + GEN_AI_TOOL_NAME: "gen_ai.tool.name", + GEN_AI_TOOL_CALL_ID: "gen_ai.tool.call.id", + GEN_AI_TOOL_TYPE: "gen_ai.tool.type", + GEN_AI_TOOL_DESCRIPTION: "gen_ai.tool.description", + GEN_AI_TOOL_CALL_ARGUMENTS: "gen_ai.tool.call.arguments", + GEN_AI_TOOL_CALL_RESULT: "gen_ai.tool.call.result", + GEN_AI_TOOL_DEFINITIONS: "gen_ai.tool.definitions", + GEN_AI_INPUT_MESSAGES: "gen_ai.input.messages", + GEN_AI_OUTPUT_MESSAGES: "gen_ai.output.messages", + GEN_AI_SYSTEM_INSTRUCTIONS: "gen_ai.system_instructions", + SERVER_ADDRESS: "server.address", + SERVER_PORT: "server.port", + ERROR_TYPE: "error.type", + + // Vendor-prefixed span attributes for Copilot-specific data + COPILOT_COST: "github.copilot.cost", + COPILOT_SERVER_DURATION: "github.copilot.server_duration", + COPILOT_INITIATOR: "github.copilot.initiator", + COPILOT_AIU: "github.copilot.aiu", + COPILOT_TURN_ID: "github.copilot.turn_id", + COPILOT_INTERACTION_ID: "github.copilot.interaction_id", +} as const; + +// Vendor-prefixed span event names and attribute keys +const COPILOT_EVENT = { + SESSION_TRUNCATION: "github.copilot.session.truncation", + SESSION_COMPACTION_START: "github.copilot.session.compaction_start", + SESSION_COMPACTION_COMPLETE: "github.copilot.session.compaction_complete", + SKILL_INVOKED: "github.copilot.skill.invoked", + + // Attribute keys for custom events + MESSAGE: "github.copilot.message", + TOKEN_LIMIT: "github.copilot.token_limit", + PRE_TOKENS: "github.copilot.pre_tokens", + POST_TOKENS: "github.copilot.post_tokens", + PRE_MESSAGES: "github.copilot.pre_messages", + POST_MESSAGES: "github.copilot.post_messages", + TOKENS_REMOVED: 
"github.copilot.tokens_removed", + MESSAGES_REMOVED: "github.copilot.messages_removed", + PERFORMED_BY: "github.copilot.performed_by", + SUCCESS: "github.copilot.success", + SKILL_NAME: "github.copilot.skill.name", + SKILL_PATH: "github.copilot.skill.path", + SKILL_CONTENT: "github.copilot.skill.content", + SKILL_PLUGIN_NAME: "github.copilot.skill.plugin_name", + SKILL_PLUGIN_VERSION: "github.copilot.skill.plugin_version", +} as const; + +// Operation names +const INVOKE_AGENT = "invoke_agent"; +const CHAT = "chat"; +const EXECUTE_TOOL = "execute_tool"; + +// Histogram metric names +const METRIC_OPERATION_DURATION = "gen_ai.client.operation.duration"; +const METRIC_TOKEN_USAGE = "gen_ai.client.token.usage"; +const METRIC_TIME_TO_FIRST_CHUNK = "gen_ai.client.operation.time_to_first_chunk"; +const METRIC_TIME_PER_OUTPUT_CHUNK = "gen_ai.client.operation.time_per_output_chunk"; + +// Bucket boundaries matching .NET +const DURATION_BOUNDARIES = [ + 0.01, 0.02, 0.04, 0.08, 0.16, 0.32, 0.64, 1.28, 2.56, 5.12, 10.24, 20.48, 40.96, 81.92, +]; +const TOKEN_BOUNDARIES = [ + 1, 4, 16, 64, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, +]; + +// ============================================================================ +// OTel message types for parts-based JSON +// ============================================================================ + +interface OtelPart { + type: string; + content?: string; + id?: string; + name?: string; + arguments?: unknown; + response?: unknown; + mcpServerName?: string; +} + +interface OtelMsg { + role: string; + parts: OtelPart[]; + finishReason?: string; +} + +// ============================================================================ +// Subagent state +// ============================================================================ + +interface SubagentState { + invokeAgentSpan: Span; + invokeAgentContext: Context; + invokeAgentStartTime: number; + chatSpan: Span | undefined; + agentName: string | undefined; 
+ responseModel: string | undefined; + responseId: string | undefined; + inputTokens: number; + outputTokens: number; + cacheReadTokens: number; + cacheCreationTokens: number; + inputMessages: OtelMsg[]; + outputMessages: OtelMsg[]; +} + +// ============================================================================ +// CopilotTelemetry +// ============================================================================ + +/** + * Provides OpenTelemetry instrumentation for the Copilot SDK. + * @internal + */ +export class CopilotTelemetry { + readonly tracer: Tracer; + private readonly meter: Meter; + + readonly operationDurationHistogram: Histogram; + readonly tokenUsageHistogram: Histogram; + readonly timeToFirstChunkHistogram: Histogram; + readonly timePerOutputChunkHistogram: Histogram; + + readonly enableSensitiveData: boolean; + + constructor(config: TelemetryConfig) { + const sourceName = config.sourceName ?? DEFAULT_SOURCE_NAME; + + this.enableSensitiveData = + config.enableSensitiveData ?? 
+ process.env[CAPTURE_MESSAGE_CONTENT_ENV_VAR]?.toLowerCase() === "true"; + + this.tracer = trace.getTracer(sourceName, SDK_VERSION); + this.meter = metrics.getMeter(sourceName, SDK_VERSION); + + this.operationDurationHistogram = this.meter.createHistogram(METRIC_OPERATION_DURATION, { + unit: SECONDS_UNIT, + description: "GenAI operation duration.", + advice: { + explicitBucketBoundaries: DURATION_BOUNDARIES, + }, + }); + + this.tokenUsageHistogram = this.meter.createHistogram(METRIC_TOKEN_USAGE, { + unit: TOKENS_UNIT, + description: "Number of input and output tokens used.", + advice: { + explicitBucketBoundaries: TOKEN_BOUNDARIES, + }, + }); + + this.timeToFirstChunkHistogram = this.meter.createHistogram(METRIC_TIME_TO_FIRST_CHUNK, { + unit: SECONDS_UNIT, + description: "Time to receive the first chunk from a streaming response.", + advice: { + explicitBucketBoundaries: DURATION_BOUNDARIES, + }, + }); + + this.timePerOutputChunkHistogram = this.meter.createHistogram( + METRIC_TIME_PER_OUTPUT_CHUNK, + { + unit: SECONDS_UNIT, + description: "Time elapsed between streamed output chunks after the first chunk.", + advice: { + explicitBucketBoundaries: DURATION_BOUNDARIES, + }, + } + ); + } + + // ======================================================================== + // Span creation + // ======================================================================== + + /** Starts an invoke_agent span for a session turn. */ + startInvokeAgentSpan( + sessionId: string, + model: string | undefined, + providerName: string, + serverAddress: string | undefined, + serverPort: number | undefined, + agentName?: string, + agentDescription?: string, + parentContext?: Context + ): Span { + const displayName = agentName ? 
`${INVOKE_AGENT} ${agentName}` : INVOKE_AGENT; + + const attributes: Record = { + [ATTR.GEN_AI_OPERATION_NAME]: INVOKE_AGENT, + [ATTR.GEN_AI_PROVIDER_NAME]: providerName, + [ATTR.GEN_AI_AGENT_ID]: sessionId, + [ATTR.GEN_AI_CONVERSATION_ID]: sessionId, + }; + + if (model) { + attributes[ATTR.GEN_AI_REQUEST_MODEL] = model; + } + if (agentName) { + attributes[ATTR.GEN_AI_AGENT_NAME] = agentName; + } + if (agentDescription) { + attributes[ATTR.GEN_AI_AGENT_DESCRIPTION] = agentDescription; + } + if (serverAddress) { + attributes[ATTR.SERVER_ADDRESS] = serverAddress; + if (serverPort != null) { + attributes[ATTR.SERVER_PORT] = serverPort; + } + } + + const span = this.tracer.startSpan( + displayName, + { kind: SpanKind.CLIENT, attributes }, + parentContext + ); + + return span; + } + + /** Starts a chat child span for an individual LLM turn. */ + startChatSpan( + model: string | undefined, + providerName: string, + serverAddress: string | undefined, + serverPort: number | undefined, + parentContext: Context, + conversationId?: string + ): Span { + const displayName = model ? `${CHAT} ${model}` : CHAT; + + const attributes: Record = { + [ATTR.GEN_AI_OPERATION_NAME]: CHAT, + [ATTR.GEN_AI_PROVIDER_NAME]: providerName, + }; + + if (model) { + attributes[ATTR.GEN_AI_REQUEST_MODEL] = model; + } + if (conversationId) { + attributes[ATTR.GEN_AI_CONVERSATION_ID] = conversationId; + } + if (serverAddress) { + attributes[ATTR.SERVER_ADDRESS] = serverAddress; + if (serverPort != null) { + attributes[ATTR.SERVER_PORT] = serverPort; + } + } + + return this.tracer.startSpan( + displayName, + { kind: SpanKind.CLIENT, attributes }, + parentContext + ); + } + + /** Starts an execute_tool span for a tool call. 
*/ + startExecuteToolSpan( + toolName: string, + toolCallId: string, + description: string | undefined, + args: unknown, + parentContext?: Context + ): Span { + const displayName = `${EXECUTE_TOOL} ${toolName}`; + + const attributes: Record = { + [ATTR.GEN_AI_OPERATION_NAME]: EXECUTE_TOOL, + [ATTR.GEN_AI_TOOL_NAME]: toolName, + [ATTR.GEN_AI_TOOL_CALL_ID]: toolCallId, + [ATTR.GEN_AI_TOOL_TYPE]: "function", + }; + + if (description) { + attributes[ATTR.GEN_AI_TOOL_DESCRIPTION] = description; + } + + if (this.enableSensitiveData && args != null) { + attributes[ATTR.GEN_AI_TOOL_CALL_ARGUMENTS] = serializeValue(args); + } + + const span = this.tracer.startSpan( + displayName, + { kind: SpanKind.INTERNAL, attributes }, + parentContext ?? context.active() + ); + + return span; + } + + /** Sets the execute_tool result attribute (sensitive-gated). */ + setExecuteToolResult(span: Span, result: unknown): void { + if (!this.enableSensitiveData || result == null) { + return; + } + span.setAttribute(ATTR.GEN_AI_TOOL_CALL_RESULT, serializeValue(result)); + } + + // ======================================================================== + // Metric recording + // ======================================================================== + + /** Records token usage metrics at turn completion (so error.type can be included). 
*/ + recordTokenUsageMetrics( + inputTokens: number | undefined, + outputTokens: number | undefined, + requestModel: string | undefined, + responseModel: string | undefined, + providerName: string, + serverAddress: string | undefined, + serverPort: number | undefined, + error: Error | undefined, + operationName: string = INVOKE_AGENT + ): void { + const baseTags = createMetricTags( + operationName, + requestModel, + responseModel, + providerName, + serverAddress, + serverPort + ); + if (error) { + baseTags[ATTR.ERROR_TYPE] = error.name || error.constructor.name; + } + + if (inputTokens != null) { + this.tokenUsageHistogram.record(inputTokens, { + ...baseTags, + [ATTR.GEN_AI_TOKEN_TYPE]: "input", + }); + } + if (outputTokens != null) { + this.tokenUsageHistogram.record(outputTokens, { + ...baseTags, + [ATTR.GEN_AI_TOKEN_TYPE]: "output", + }); + } + } + + /** Records operation duration metric. */ + recordOperationDuration( + durationSeconds: number, + requestModel: string | undefined, + responseModel: string | undefined, + providerName: string, + serverAddress: string | undefined, + serverPort: number | undefined, + error: Error | undefined, + operationName: string = INVOKE_AGENT + ): void { + const tags = createMetricTags( + operationName, + requestModel, + responseModel, + providerName, + serverAddress, + serverPort + ); + if (error) { + tags[ATTR.ERROR_TYPE] = error.name || error.constructor.name; + } + this.operationDurationHistogram.record(durationSeconds, tags); + } + + /** Records time-to-first-chunk metric. */ + recordTimeToFirstChunk( + durationSeconds: number, + requestModel: string | undefined, + responseModel: string | undefined, + providerName: string, + serverAddress: string | undefined, + serverPort: number | undefined + ): void { + this.timeToFirstChunkHistogram.record( + durationSeconds, + createMetricTags( + CHAT, + requestModel, + responseModel, + providerName, + serverAddress, + serverPort + ) + ); + } + + /** Records time-per-output-chunk metric. 
*/ + recordTimePerOutputChunk( + durationSeconds: number, + requestModel: string | undefined, + responseModel: string | undefined, + providerName: string, + serverAddress: string | undefined, + serverPort: number | undefined + ): void { + this.timePerOutputChunkHistogram.record( + durationSeconds, + createMetricTags( + CHAT, + requestModel, + responseModel, + providerName, + serverAddress, + serverPort + ) + ); + } + + /** Records an error on a span. */ + static recordError(span: Span, error: Error): void { + span.setAttribute(ATTR.ERROR_TYPE, error.name || error.constructor.name); + span.setStatus({ code: SpanStatusCode.ERROR, message: error.message }); + } + + // ======================================================================== + // Utility + // ======================================================================== + + /** Normalizes a provider type string to its OTel semantic convention name. */ + static normalizeProviderName(providerType?: string): string { + if (!providerType) { + return DEFAULT_PROVIDER_NAME; + } + switch (providerType.trim().toLowerCase()) { + case "anthropic": + return "anthropic"; + case "azure": + return "azure.ai.openai"; + case "openai": + return "openai"; + default: + return DEFAULT_PROVIDER_NAME; + } + } + + /** Parses a URL into host and port components. 
*/ + static parseServerAddress(baseUrl?: string): { + address: string | undefined; + port: number | undefined; + } { + if (!baseUrl) { + return { address: undefined, port: undefined }; + } + try { + const url = new URL(baseUrl); + if (!url.hostname) { + return { address: undefined, port: undefined }; + } + let port: number | undefined; + if (url.port) { + port = parseInt(url.port, 10); + } else if (url.protocol === "https:") { + port = 443; + } else if (url.protocol === "http:") { + port = 80; + } + return { address: url.hostname, port }; + } catch { + return { address: undefined, port: undefined }; + } + } +} + +// ============================================================================ +// Helper functions +// ============================================================================ + +function createMetricTags( + operationName: string, + requestModel: string | undefined, + responseModel: string | undefined, + providerName: string, + serverAddress: string | undefined, + serverPort: number | undefined +): Record { + const tags: Record = { + [ATTR.GEN_AI_OPERATION_NAME]: operationName, + [ATTR.GEN_AI_PROVIDER_NAME]: providerName, + }; + + if (requestModel) { + tags[ATTR.GEN_AI_REQUEST_MODEL] = requestModel; + } + if (responseModel) { + tags[ATTR.GEN_AI_RESPONSE_MODEL] = responseModel; + } + if (serverAddress) { + tags[ATTR.SERVER_ADDRESS] = serverAddress; + if (serverPort != null) { + tags[ATTR.SERVER_PORT] = serverPort; + } + } + + return tags; +} + +function serializeValue(value: unknown): string { + if (typeof value === "string") return value; + try { + return JSON.stringify(value); + } catch { + return String(value); + } +} + +// ============================================================================ +// Parts-based message JSON builder (matching .NET BuildMessagesJson) +// ============================================================================ + +function buildMessagesJson( + messages: OtelMsg[] | undefined, + finishReason?: string +): string 
| undefined { + if (!messages || messages.length === 0) { + return undefined; + } + + const result: unknown[] = []; + for (const message of messages) { + if (message.parts.length === 0) { + continue; + } + + const parts: unknown[] = []; + for (const part of message.parts) { + const obj: Record = { type: part.type }; + + switch (part.type) { + case "server_tool_call": + if (part.id != null) obj.id = part.id; + if (part.name != null) obj.name = part.name; + obj.server_tool_call = { + type: "mcp", + ...(part.mcpServerName != null && { server_name: part.mcpServerName }), + ...(part.arguments != null && { arguments: part.arguments }), + }; + break; + + case "server_tool_call_response": + if (part.id != null) obj.id = part.id; + obj.server_tool_call_response = { + type: "mcp", + ...(part.mcpServerName != null && { server_name: part.mcpServerName }), + ...(part.response != null && { response: part.response }), + }; + break; + + default: + if (part.content != null) obj.content = part.content; + if (part.id != null) obj.id = part.id; + if (part.name != null) obj.name = part.name; + if (part.arguments != null) obj.arguments = part.arguments; + if (part.response != null) obj.response = part.response; + break; + } + + parts.push(obj); + } + + const msg: Record = { role: message.role, parts }; + const effectiveFinishReason = message.finishReason ?? finishReason; + if (effectiveFinishReason) { + msg.finish_reason = effectiveFinishReason; + } + result.push(msg); + } + + return result.length > 0 ? 
JSON.stringify(result) : undefined; +} + +function buildSystemInstructionsJson( + systemMessage: SystemMessageConfig | undefined +): string | undefined { + const content = systemMessage?.content; + if (!content) { + return undefined; + } + return JSON.stringify([{ type: "text", content }]); +} + +function buildToolDefinitionsJson(tools: Tool[] | undefined): string | undefined { + if (!tools || tools.length === 0) { + return undefined; + } + + const defs = tools.map((tool) => { + const def: Record = { + type: "function", + name: tool.name, + }; + if (tool.description) { + def.description = tool.description; + } + if (tool.parameters) { + def.parameters = + typeof tool.parameters === "object" && + "toJSONSchema" in tool.parameters && + typeof (tool.parameters as { toJSONSchema?: unknown }).toJSONSchema === "function" + ? ( + tool.parameters as { toJSONSchema(): Record } + ).toJSONSchema() + : tool.parameters; + } + return def; + }); + + return JSON.stringify(defs); +} + +// ============================================================================ +// Helper to extract parentToolCallId from events +// ============================================================================ + +function getParentToolCallId(event: SessionEvent): string | undefined { + switch (event.type) { + case "assistant.usage": + case "assistant.message": + case "assistant.message_delta": + case "tool.execution_start": + case "tool.execution_complete": + return (event.data as { parentToolCallId?: string }).parentToolCallId ?? undefined; + default: + return undefined; + } +} + +// ============================================================================ +// AgentTurnTracker +// ============================================================================ + +/** + * Encapsulates per-turn telemetry state and logic for a Copilot session. + * Implements a two-level span hierarchy: invoke_agent → chat, matching the + * .NET AgentTurnTracker. Supports subagent spans and tool call parenting. 
+ * @internal + */ +export class AgentTurnTracker { + private readonly _telemetry: CopilotTelemetry; + private readonly _sessionId: string; + + // Context fields set once at construction + private readonly _requestModel: string | undefined; + private readonly _agentName: string | undefined; + private readonly _agentDescription: string | undefined; + private readonly _systemInstructionsJson: string | undefined; + private readonly _toolDefinitionsJson: string | undefined; + private readonly _isStreaming: boolean; + readonly providerName: string; + readonly serverAddress: string | undefined; + readonly serverPort: number | undefined; + + // Per-invoke_agent mutable state + private _agentSpan: Span | undefined; + private _agentSpanContext: Context | undefined; + private _agentStartTime: number | undefined; + private _agentInputMessages: OtelMsg[] | undefined; + private _agentOutputMessages: OtelMsg[] | undefined; + private _activeSubagents: Map | undefined; + + // Pending tool call parent contexts + private _pendingToolParents: Map | undefined; + + // MCP server tool tracking: toolCallId → mcpServerName + private _serverToolCallIds: Map | undefined; + + // Agent-level accumulated usage across all chat turns + private _agentResponseModel: string | undefined; + private _agentResponseId: string | undefined; + private _agentTotalInputTokens = 0; + private _agentTotalOutputTokens = 0; + private _agentTotalCacheReadTokens = 0; + private _agentTotalCacheCreationTokens = 0; + private _agentTotalCost = 0; + private _agentTotalAiu = 0; + + // Per-chat-turn mutable state (reset on each assistant.turn_start) + private _turnSpan: Span | undefined; + private _turnStartTime: number | undefined; + private _firstOutputChunkRecorded = false; + private _lastOutputChunkTime = 0; + private _responseModel: string | undefined; + private _responseId: string | undefined; + private _inputTokens = 0; + private _outputTokens = 0; + private _cacheReadTokens = 0; + private _cacheCreationTokens = 0; 
+ private _inputMessages: OtelMsg[] | undefined; + private _outputMessages: OtelMsg[] | undefined; + + // Copilot-specific per-turn attributes + private _turnCost: number | undefined; + private _turnServerDuration: number | undefined; + private _turnInitiator: string | undefined; + private _turnAiu: number | undefined; + private _turnId: string | undefined; + private _turnInteractionId: string | undefined; + + // Stashed user prompt for the first chat turn + private _pendingUserPrompt: string | undefined; + + constructor( + telemetry: CopilotTelemetry, + sessionId: string, + model: string | undefined, + provider: ProviderConfig | undefined, + systemMessage: SystemMessageConfig | undefined, + tools: Tool[] | undefined, + streaming: boolean | undefined, + agentName?: string, + agentDescription?: string + ) { + this._telemetry = telemetry; + this._sessionId = sessionId; + this._requestModel = model; + this._agentName = agentName; + this._agentDescription = agentDescription; + this.providerName = CopilotTelemetry.normalizeProviderName(provider?.type); + const parsed = CopilotTelemetry.parseServerAddress(provider?.baseUrl); + this.serverAddress = parsed.address; + this.serverPort = parsed.port; + this._systemInstructionsJson = buildSystemInstructionsJson(systemMessage); + this._toolDefinitionsJson = buildToolDefinitionsJson(tools); + this._isStreaming = streaming ?? false; + } + + /** The CopilotTelemetry instance (for client.ts tool instrumentation). */ + get telemetry(): CopilotTelemetry { + return this._telemetry; + } + + /** Returns the current invoke_agent span context (for parenting tool spans). */ + getSpanContext(): Context | undefined { + return this._agentSpanContext; + } + + /** + * Gets the parent context for a tool call span. Consumes the stored context (one-time use). + * For subagent tool calls this returns the subagent's invoke_agent context; + * for main agent tool calls this returns the root invoke_agent context. 
+ */ + getToolCallParentContext(toolCallId: string): Context | undefined { + if (this._pendingToolParents) { + const ctx = this._pendingToolParents.get(toolCallId); + if (ctx) { + this._pendingToolParents.delete(toolCallId); + return ctx; + } + } + return this._agentSpanContext; + } + + /** + * Closes any active spans with an error status. Called when the session is + * destroyed while a turn may still be in progress, ensuring spans are not orphaned. + */ + completeOnDispose(): void { + if (this._agentSpan) { + const disposeError = new SessionDestroyedError( + "Session destroyed while agent turn was in progress" + ); + this._completeChatTurn(disposeError); + this._completeAgentTurn(disposeError); + } + } + + /** Called at start of send(); starts the invoke_agent span if needed. */ + beginSend(prompt: string | undefined): void { + if (!this._agentSpan) { + this._agentSpan = this._telemetry.startInvokeAgentSpan( + this._sessionId, + this._requestModel, + this.providerName, + this.serverAddress, + this.serverPort, + this._agentName, + this._agentDescription + ); + this._agentSpanContext = trace.setSpan(context.active(), this._agentSpan); + this._agentStartTime = performance.now(); + this._agentInputMessages = []; + } + + // Agent-level input = what the caller sent (all user prompts). + if (this._agentInputMessages && prompt) { + this._agentInputMessages.push({ + role: "user", + parts: [{ type: "text", content: prompt }], + }); + } + + // Stash user prompt for the first chat turn's input messages. + this._pendingUserPrompt = prompt; + } + + /** Called from _dispatchEvent; handles telemetry enrichment and turn completion. */ + processEvent(event: SessionEvent): void { + // Route subagent events by parentToolCallId. 
+ const parentToolCallId = getParentToolCallId(event); + if (parentToolCallId) { + const subagent = this._activeSubagents?.get(parentToolCallId); + if (subagent) { + this._processSubagentEvent(subagent, event); + } + return; + } + + // Handle subagent lifecycle events. + switch (event.type) { + case "subagent.started": + this._beginSubagent(event as Extract); + return; + case "subagent.completed": { + const data = (event as Extract).data; + this._completeSubagent(data.toolCallId, undefined); + return; + } + case "subagent.failed": { + const data = (event as Extract).data; + const subagentError = new SessionTurnError( + `Subagent '${data.agentName}' failed: ${data.error}` + ); + this._completeSubagent(data.toolCallId, subagentError); + return; + } + } + + // Record chunk timing for main agent events during a turn. + this._recordOutputChunkMetric(); + + // Per-turn event processing (writes to the chat child span). + if (this._turnSpan) { + switch (event.type) { + case "assistant.message": { + const data = (event as Extract) + .data; + const parts: OtelPart[] = []; + if (data.reasoningText) { + parts.push({ type: "reasoning", content: data.reasoningText }); + } + if (data.content) { + parts.push({ type: "text", content: data.content }); + } + if (parts.length > 0) { + this._outputMessages?.push({ role: "assistant", parts }); + } + break; + } + + case "assistant.usage": { + const data = (event as Extract).data; + this._responseModel = data.model; + if (data.apiCallId) { + this._responseId = data.apiCallId; + } else if (data.providerCallId) { + this._responseId = data.providerCallId; + } + if (data.inputTokens != null) { + this._inputTokens += data.inputTokens; + } + if (data.outputTokens != null) { + this._outputTokens += data.outputTokens; + } + if (data.cacheReadTokens != null) { + this._cacheReadTokens += data.cacheReadTokens; + } + if (data.cacheWriteTokens != null) { + this._cacheCreationTokens += data.cacheWriteTokens; + } + + // Copilot-specific vendor 
attributes + if (data.cost != null) { + this._turnCost = (this._turnCost ?? 0) + data.cost; + } + if (data.duration != null) { + this._turnServerDuration = (this._turnServerDuration ?? 0) + data.duration; + } + if (data.initiator) { + this._turnInitiator = data.initiator; + } + if (data.copilotUsage) { + this._turnAiu = (this._turnAiu ?? 0) + data.copilotUsage.totalNanoAiu; + } + break; + } + + case "session.model_change": { + const data = (event as Extract) + .data; + this._responseModel = data.newModel; + break; + } + + case "tool.execution_start": { + const data = (event as Extract) + .data; + const isServerTool = !!data.mcpServerName; + if (isServerTool && data.toolCallId) { + this._serverToolCallIds ??= new Map(); + this._serverToolCallIds.set(data.toolCallId, data.mcpServerName!); + } + + this._outputMessages?.push({ + role: "assistant", + parts: [ + { + type: isServerTool ? "server_tool_call" : "tool_call", + id: data.toolCallId, + name: data.toolName, + arguments: data.arguments, + mcpServerName: data.mcpServerName, + }, + ], + }); + + // Store parent context for tool call span parenting. + if (this._agentSpan && data.toolCallId) { + this._pendingToolParents ??= new Map(); + this._pendingToolParents.set(data.toolCallId, this._agentSpanContext!); + } + break; + } + + case "tool.execution_complete": { + const data = ( + event as Extract + ).data; + const resultContent = data.result?.content ?? data.error?.message; + let serverName: string | undefined; + const isServerTool = this._serverToolCallIds?.has(data.toolCallId) ?? false; + if (isServerTool) { + serverName = this._serverToolCallIds!.get(data.toolCallId); + this._serverToolCallIds!.delete(data.toolCallId); + } + + this._inputMessages?.push({ + role: "tool", + parts: [ + { + type: isServerTool + ? 
"server_tool_call_response" + : "tool_call_response", + id: data.toolCallId, + response: resultContent, + mcpServerName: serverName, + }, + ], + }); + break; + } + } + } + + // Copilot-specific lifecycle events emitted as span events on the + // current activity (chat turn if active, otherwise invoke_agent). + { + const target = this._turnSpan ?? this._agentSpan; + if (target) { + switch (event.type) { + case "session.truncation": { + const data = ( + event as Extract + ).data; + const truncAttrs: Record = {}; + if (data.tokenLimit != 0) { + truncAttrs[COPILOT_EVENT.TOKEN_LIMIT] = data.tokenLimit; + } + if (data.preTruncationTokensInMessages != 0) { + truncAttrs[COPILOT_EVENT.PRE_TOKENS] = + data.preTruncationTokensInMessages; + } + if (data.postTruncationTokensInMessages != 0) { + truncAttrs[COPILOT_EVENT.POST_TOKENS] = + data.postTruncationTokensInMessages; + } + if (data.preTruncationMessagesLength != 0) { + truncAttrs[COPILOT_EVENT.PRE_MESSAGES] = + data.preTruncationMessagesLength; + } + if (data.postTruncationMessagesLength != 0) { + truncAttrs[COPILOT_EVENT.POST_MESSAGES] = + data.postTruncationMessagesLength; + } + if (data.tokensRemovedDuringTruncation != 0) { + truncAttrs[COPILOT_EVENT.TOKENS_REMOVED] = + data.tokensRemovedDuringTruncation; + } + if (data.messagesRemovedDuringTruncation != 0) { + truncAttrs[COPILOT_EVENT.MESSAGES_REMOVED] = + data.messagesRemovedDuringTruncation; + } + if (data.performedBy != null) { + truncAttrs[COPILOT_EVENT.PERFORMED_BY] = data.performedBy; + } + target.addEvent(COPILOT_EVENT.SESSION_TRUNCATION, truncAttrs); + break; + } + + case "session.compaction_start": { + target.addEvent(COPILOT_EVENT.SESSION_COMPACTION_START); + break; + } + + case "session.compaction_complete": { + const data = ( + event as Extract + ).data; + const attrs: Record = { + [COPILOT_EVENT.SUCCESS]: data.success, + }; + if (this._telemetry.enableSensitiveData && data.error != null) { + attrs[COPILOT_EVENT.MESSAGE] = data.error; + } + if 
(data.preCompactionTokens != null) { + attrs[COPILOT_EVENT.PRE_TOKENS] = data.preCompactionTokens; + } + if (data.postCompactionTokens != null) { + attrs[COPILOT_EVENT.POST_TOKENS] = data.postCompactionTokens; + } + if (data.tokensRemoved != null) { + attrs[COPILOT_EVENT.TOKENS_REMOVED] = data.tokensRemoved; + } + if (data.messagesRemoved != null) { + attrs[COPILOT_EVENT.MESSAGES_REMOVED] = data.messagesRemoved; + } + target.addEvent(COPILOT_EVENT.SESSION_COMPACTION_COMPLETE, attrs); + break; + } + + case "skill.invoked": { + const data = (event as Extract) + .data; + const attrs: Record = { + [COPILOT_EVENT.SKILL_NAME]: data.name, + [COPILOT_EVENT.SKILL_PATH]: data.path, + }; + if (data.pluginName != null) { + attrs[COPILOT_EVENT.SKILL_PLUGIN_NAME] = data.pluginName; + } + if (data.pluginVersion != null) { + attrs[COPILOT_EVENT.SKILL_PLUGIN_VERSION] = data.pluginVersion; + } + if (this._telemetry.enableSensitiveData) { + attrs[COPILOT_EVENT.SKILL_CONTENT] = data.content; + } + target.addEvent(COPILOT_EVENT.SKILL_INVOKED, attrs); + break; + } + } + } + } + + // Lifecycle events that start/end turns or the overall agent operation. + switch (event.type) { + case "assistant.turn_start": { + this._beginChatTurn(); + const data = (event as Extract) + .data; + this._turnId = data.turnId; + this._turnInteractionId = data.interactionId; + break; + } + + case "assistant.turn_end": + this._completeChatTurn(undefined); + break; + + case "session.idle": + this._completeChatTurn(undefined); + this._completeAgentTurn(undefined); + break; + + case "session.error": { + const data = (event as Extract).data; + const ex = new SessionTurnError( + `Session error: ${data.message ?? "unknown error"}` + ); + this._completeChatTurn(ex); + this._completeAgentTurn(ex); + break; + } + } + } + + /** Called from send() error path; completes turn with error. 
*/ + completeTurnWithError(error: Error): void { + this._completeChatTurn(error); + this._completeAgentTurn(error); + } + + // ======================================================================== + // Chat turn lifecycle + // ======================================================================== + + /** Starts a new chat child span for an LLM turn. */ + private _beginChatTurn(): void { + // If there's already an active turn, complete it first. + this._completeChatTurn(undefined); + + this._responseModel = undefined; + this._responseId = undefined; + this._inputTokens = 0; + this._outputTokens = 0; + this._cacheReadTokens = 0; + this._cacheCreationTokens = 0; + this._firstOutputChunkRecorded = false; + this._lastOutputChunkTime = 0; + this._inputMessages = []; + this._outputMessages = []; + this._turnCost = undefined; + this._turnServerDuration = undefined; + this._turnInitiator = undefined; + this._turnAiu = undefined; + this._turnId = undefined; + this._turnInteractionId = undefined; + + // Add stashed user prompt as input message for the first turn. + if (this._pendingUserPrompt) { + this._inputMessages.push({ + role: "user", + parts: [{ type: "text", content: this._pendingUserPrompt }], + }); + this._pendingUserPrompt = undefined; + } + + const parentContext = this._agentSpanContext ?? context.active(); + this._turnSpan = this._telemetry.startChatSpan( + this._requestModel, + this.providerName, + this.serverAddress, + this.serverPort, + parentContext, + this._sessionId + ); + this._turnStartTime = performance.now(); + } + + /** Completes the current chat child span with per-turn attributes and metrics. 
*/ + private _completeChatTurn(error: Error | undefined): void { + const span = this._turnSpan; + if (!span) { + return; + } + + const startTime = this._turnStartTime; + const inputMessages = this._inputMessages; + const outputMessages = this._outputMessages; + const responseModel = this._responseModel; + const responseId = this._responseId; + const inputTokens = this._inputTokens; + const outputTokens = this._outputTokens; + const cacheReadTokens = this._cacheReadTokens; + const cacheCreationTokens = this._cacheCreationTokens; + const turnCost = this._turnCost; + const turnServerDuration = this._turnServerDuration; + const turnInitiator = this._turnInitiator; + const turnAiu = this._turnAiu; + const turnId = this._turnId; + const turnInteractionId = this._turnInteractionId; + + // Reset per-turn state + this._turnSpan = undefined; + this._turnStartTime = undefined; + this._inputMessages = undefined; + this._outputMessages = undefined; + this._responseModel = undefined; + this._responseId = undefined; + this._inputTokens = 0; + this._outputTokens = 0; + this._cacheReadTokens = 0; + this._cacheCreationTokens = 0; + this._turnCost = undefined; + this._turnServerDuration = undefined; + this._turnInitiator = undefined; + this._turnAiu = undefined; + this._turnId = undefined; + this._turnInteractionId = undefined; + + if (error) { + CopilotTelemetry.recordError(span, error); + } + + const finishReason = error ? "error" : "stop"; + span.setAttribute(ATTR.GEN_AI_RESPONSE_FINISH_REASONS, [finishReason]); + + // Agent-level output = only the final turn's output. + if (outputMessages && outputMessages.length > 0) { + this._agentOutputMessages = outputMessages.map((m) => ({ + ...m, + finishReason, + })); + } + + // Accumulate agent-level usage across turns. 
+ if (responseModel) this._agentResponseModel = responseModel; + if (responseId) this._agentResponseId = responseId; + this._agentTotalInputTokens += inputTokens; + this._agentTotalOutputTokens += outputTokens; + this._agentTotalCacheReadTokens += cacheReadTokens; + this._agentTotalCacheCreationTokens += cacheCreationTokens; + if (turnCost != null) this._agentTotalCost += turnCost; + if (turnAiu != null) this._agentTotalAiu += turnAiu; + + // Set usage-related span attributes for this LLM turn + if (responseModel) { + span.setAttribute(ATTR.GEN_AI_RESPONSE_MODEL, responseModel); + } + if (responseId) { + span.setAttribute(ATTR.GEN_AI_RESPONSE_ID, responseId); + } + if (inputTokens > 0) { + span.setAttribute(ATTR.GEN_AI_USAGE_INPUT_TOKENS, inputTokens); + } + if (outputTokens > 0) { + span.setAttribute(ATTR.GEN_AI_USAGE_OUTPUT_TOKENS, outputTokens); + } + if (cacheReadTokens > 0) { + span.setAttribute(ATTR.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, cacheReadTokens); + } + if (cacheCreationTokens > 0) { + span.setAttribute(ATTR.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS, cacheCreationTokens); + } + + // Copilot vendor-prefixed attributes on chat spans + if (turnCost != null) { + span.setAttribute(ATTR.COPILOT_COST, turnCost); + } + if (turnServerDuration != null) { + span.setAttribute(ATTR.COPILOT_SERVER_DURATION, turnServerDuration); + } + if (turnInitiator != null) { + span.setAttribute(ATTR.COPILOT_INITIATOR, turnInitiator); + } + if (turnAiu != null) { + span.setAttribute(ATTR.COPILOT_AIU, turnAiu); + } + if (turnId != null) { + span.setAttribute(ATTR.COPILOT_TURN_ID, turnId); + } + if (turnInteractionId != null) { + span.setAttribute(ATTR.COPILOT_INTERACTION_ID, turnInteractionId); + } + + // Set input/output message content as span attributes (sensitive) + if (this._telemetry.enableSensitiveData) { + const inputMessagesJson = buildMessagesJson(inputMessages); + if (inputMessagesJson) { + span.setAttribute(ATTR.GEN_AI_INPUT_MESSAGES, inputMessagesJson); + } + const 
outputMessagesJson = buildMessagesJson(outputMessages, finishReason); + if (outputMessagesJson) { + span.setAttribute(ATTR.GEN_AI_OUTPUT_MESSAGES, outputMessagesJson); + } + } + + // Token usage metrics (per-turn) + this._telemetry.recordTokenUsageMetrics( + inputTokens > 0 ? inputTokens : undefined, + outputTokens > 0 ? outputTokens : undefined, + this._requestModel, + responseModel, + this.providerName, + this.serverAddress, + this.serverPort, + error, + CHAT + ); + + // Per-turn operation duration + if (startTime != null) { + const durationSeconds = (performance.now() - startTime) / 1000; + this._telemetry.recordOperationDuration( + durationSeconds, + this._requestModel, + responseModel, + this.providerName, + this.serverAddress, + this.serverPort, + error, + CHAT + ); + } + + this._firstOutputChunkRecorded = false; + this._lastOutputChunkTime = 0; + span.end(); + } + + // ======================================================================== + // Agent turn lifecycle + // ======================================================================== + + /** Completes the invoke_agent span and records overall operation duration. */ + private _completeAgentTurn(error: Error | undefined): void { + const span = this._agentSpan; + if (!span) { + return; + } + + const startTime = this._agentStartTime; + const agentInputMessages = this._agentInputMessages; + const agentOutputMessages = this._agentOutputMessages; + + // Reset agent state + this._agentSpan = undefined; + this._agentSpanContext = undefined; + this._agentStartTime = undefined; + this._pendingUserPrompt = undefined; + this._agentInputMessages = undefined; + this._agentOutputMessages = undefined; + + // Complete any remaining subagents before closing the parent. 
+ if (this._activeSubagents && this._activeSubagents.size > 0) { + for (const key of [...this._activeSubagents.keys()]) { + this._completeSubagent(key, error); + } + } + this._activeSubagents = undefined; + this._pendingToolParents = undefined; + this._serverToolCallIds = undefined; + + if (error) { + CopilotTelemetry.recordError(span, error); + } + + const finishReason = error ? "error" : "stop"; + span.setAttribute(ATTR.GEN_AI_RESPONSE_FINISH_REASONS, [finishReason]); + + // Set accumulated usage across all chat turns on the invoke_agent span. + const agentResponseModel = this._agentResponseModel; + const agentResponseId = this._agentResponseId; + const agentTotalInputTokens = this._agentTotalInputTokens; + const agentTotalOutputTokens = this._agentTotalOutputTokens; + const agentTotalCacheReadTokens = this._agentTotalCacheReadTokens; + const agentTotalCacheCreationTokens = this._agentTotalCacheCreationTokens; + const agentTotalCost = this._agentTotalCost; + const agentTotalAiu = this._agentTotalAiu; + + this._agentResponseModel = undefined; + this._agentResponseId = undefined; + this._agentTotalInputTokens = 0; + this._agentTotalOutputTokens = 0; + this._agentTotalCacheReadTokens = 0; + this._agentTotalCacheCreationTokens = 0; + this._agentTotalCost = 0; + this._agentTotalAiu = 0; + + if (agentResponseModel) { + span.setAttribute(ATTR.GEN_AI_RESPONSE_MODEL, agentResponseModel); + } + if (agentResponseId) { + span.setAttribute(ATTR.GEN_AI_RESPONSE_ID, agentResponseId); + } + if (agentTotalInputTokens > 0) { + span.setAttribute(ATTR.GEN_AI_USAGE_INPUT_TOKENS, agentTotalInputTokens); + } + if (agentTotalOutputTokens > 0) { + span.setAttribute(ATTR.GEN_AI_USAGE_OUTPUT_TOKENS, agentTotalOutputTokens); + } + if (agentTotalCacheReadTokens > 0) { + span.setAttribute(ATTR.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, agentTotalCacheReadTokens); + } + if (agentTotalCacheCreationTokens > 0) { + span.setAttribute( + ATTR.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS, + 
agentTotalCacheCreationTokens + ); + } + + // Copilot vendor-prefixed attributes on invoke_agent span + if (agentTotalCost > 0) { + span.setAttribute(ATTR.COPILOT_COST, agentTotalCost); + } + if (agentTotalAiu > 0) { + span.setAttribute(ATTR.COPILOT_AIU, agentTotalAiu); + } + + // Agent-level input = caller's message; output = agent's final response. + if (this._telemetry.enableSensitiveData) { + const inputJson = buildMessagesJson(agentInputMessages); + if (inputJson) { + span.setAttribute(ATTR.GEN_AI_INPUT_MESSAGES, inputJson); + } + const outputJson = buildMessagesJson(agentOutputMessages); + if (outputJson) { + span.setAttribute(ATTR.GEN_AI_OUTPUT_MESSAGES, outputJson); + } + if (this._systemInstructionsJson) { + span.setAttribute(ATTR.GEN_AI_SYSTEM_INSTRUCTIONS, this._systemInstructionsJson); + } + } + if (this._toolDefinitionsJson) { + span.setAttribute(ATTR.GEN_AI_TOOL_DEFINITIONS, this._toolDefinitionsJson); + } + + if (startTime != null) { + const durationSeconds = (performance.now() - startTime) / 1000; + this._telemetry.recordOperationDuration( + durationSeconds, + this._requestModel, + agentResponseModel, + this.providerName, + this.serverAddress, + this.serverPort, + error, + INVOKE_AGENT + ); + } + + span.end(); + } + + // ======================================================================== + // Streaming chunk timing + // ======================================================================== + + /** Records streaming chunk timing metrics. Uses the chat turn stopwatch. 
*/ + private _recordOutputChunkMetric(): void { + if (!this._isStreaming || this._turnStartTime == null) { + return; + } + + const now = performance.now(); + const elapsed = now - this._turnStartTime; + + if (!this._firstOutputChunkRecorded) { + this._firstOutputChunkRecorded = true; + this._lastOutputChunkTime = now; + this._telemetry.recordTimeToFirstChunk( + elapsed / 1000, + this._requestModel, + undefined, // response model not yet known during streaming + this.providerName, + this.serverAddress, + this.serverPort + ); + return; + } + + const deltaSeconds = (now - this._lastOutputChunkTime) / 1000; + this._lastOutputChunkTime = now; + this._telemetry.recordTimePerOutputChunk( + deltaSeconds, + this._requestModel, + undefined, // response model not yet known during streaming + this.providerName, + this.serverAddress, + this.serverPort + ); + } + + // ======================================================================== + // Subagent support + // ======================================================================== + + /** Creates nested invoke_agent + chat spans for a subagent. */ + private _beginSubagent(event: Extract): void { + const data = event.data; + if (!data) return; + + const parentContext = this._agentSpanContext ?? 
context.active(); + const invokeSpan = this._telemetry.startInvokeAgentSpan( + this._sessionId, + this._requestModel, + this.providerName, + this.serverAddress, + this.serverPort, + data.agentName, + data.agentDescription, + parentContext + ); + + const invokeCtx = trace.setSpan(context.active(), invokeSpan); + const chatSpan = this._telemetry.startChatSpan( + this._requestModel, + this.providerName, + this.serverAddress, + this.serverPort, + invokeCtx, + this._sessionId + ); + + const state: SubagentState = { + invokeAgentSpan: invokeSpan, + invokeAgentContext: invokeCtx, + invokeAgentStartTime: performance.now(), + chatSpan, + agentName: data.agentName, + responseModel: undefined, + responseId: undefined, + inputTokens: 0, + outputTokens: 0, + cacheReadTokens: 0, + cacheCreationTokens: 0, + inputMessages: [], + outputMessages: [], + }; + + this._activeSubagents ??= new Map(); + this._activeSubagents.set(data.toolCallId, state); + } + + /** Routes an event to its owning subagent's spans. 
*/ + private _processSubagentEvent(subagent: SubagentState, event: SessionEvent): void { + switch (event.type) { + case "assistant.usage": { + const data = (event as Extract).data; + subagent.responseModel = data.model; + + if (data.model) { + subagent.chatSpan?.setAttribute(ATTR.GEN_AI_RESPONSE_MODEL, data.model); + subagent.invokeAgentSpan.setAttribute(ATTR.GEN_AI_RESPONSE_MODEL, data.model); + } + + if (data.apiCallId) { + subagent.responseId = data.apiCallId; + } else if (data.providerCallId) { + subagent.responseId = data.providerCallId; + } + if (data.inputTokens != null) subagent.inputTokens += data.inputTokens; + if (data.outputTokens != null) subagent.outputTokens += data.outputTokens; + if (data.cacheReadTokens != null) subagent.cacheReadTokens += data.cacheReadTokens; + if (data.cacheWriteTokens != null) + subagent.cacheCreationTokens += data.cacheWriteTokens; + break; + } + + case "assistant.message": { + const data = (event as Extract).data; + const parts: OtelPart[] = []; + if (data.reasoningText) { + parts.push({ type: "reasoning", content: data.reasoningText }); + } + if (data.content) { + parts.push({ type: "text", content: data.content }); + } + if (parts.length > 0) { + subagent.outputMessages.push({ role: "assistant", parts }); + } + break; + } + + case "tool.execution_start": { + const data = (event as Extract) + .data; + const isServerTool = !!data.mcpServerName; + if (isServerTool && data.toolCallId) { + this._serverToolCallIds ??= new Map(); + this._serverToolCallIds.set(data.toolCallId, data.mcpServerName!); + } + + subagent.outputMessages.push({ + role: "assistant", + parts: [ + { + type: isServerTool ? "server_tool_call" : "tool_call", + id: data.toolCallId, + name: data.toolName, + arguments: data.arguments, + mcpServerName: data.mcpServerName, + }, + ], + }); + + // For subagent tool calls, parent is the subagent's invoke_agent. 
+ if (data.toolCallId) { + this._pendingToolParents ??= new Map(); + this._pendingToolParents.set(data.toolCallId, subagent.invokeAgentContext); + } + break; + } + + case "tool.execution_complete": { + const data = (event as Extract) + .data; + const resultContent = data.result?.content ?? data.error?.message; + let serverName: string | undefined; + const isServerTool = this._serverToolCallIds?.has(data.toolCallId) ?? false; + if (isServerTool) { + serverName = this._serverToolCallIds!.get(data.toolCallId); + this._serverToolCallIds!.delete(data.toolCallId); + } + + subagent.inputMessages.push({ + role: "tool", + parts: [ + { + type: isServerTool ? "server_tool_call_response" : "tool_call_response", + id: data.toolCallId, + response: resultContent, + mcpServerName: serverName, + }, + ], + }); + break; + } + } + } + + /** Completes a subagent's chat and invoke_agent spans. */ + private _completeSubagent(toolCallId: string, error: Error | undefined): void { + if (!this._activeSubagents) return; + const subagent = this._activeSubagents.get(toolCallId); + if (!subagent) return; + this._activeSubagents.delete(toolCallId); + + const finishReason = error ? 
"error" : "stop"; + + // -- Complete the chat child span -- + const chatSpan = subagent.chatSpan; + if (chatSpan) { + if (error) CopilotTelemetry.recordError(chatSpan, error); + chatSpan.setAttribute(ATTR.GEN_AI_RESPONSE_FINISH_REASONS, [finishReason]); + + if (subagent.responseModel) { + chatSpan.setAttribute(ATTR.GEN_AI_RESPONSE_MODEL, subagent.responseModel); + } + if (subagent.responseId) { + chatSpan.setAttribute(ATTR.GEN_AI_RESPONSE_ID, subagent.responseId); + } + if (subagent.inputTokens > 0) { + chatSpan.setAttribute(ATTR.GEN_AI_USAGE_INPUT_TOKENS, subagent.inputTokens); + } + if (subagent.outputTokens > 0) { + chatSpan.setAttribute(ATTR.GEN_AI_USAGE_OUTPUT_TOKENS, subagent.outputTokens); + } + if (subagent.cacheReadTokens > 0) { + chatSpan.setAttribute( + ATTR.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, + subagent.cacheReadTokens + ); + } + if (subagent.cacheCreationTokens > 0) { + chatSpan.setAttribute( + ATTR.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS, + subagent.cacheCreationTokens + ); + } + + if (this._telemetry.enableSensitiveData) { + if (subagent.inputMessages.length > 0) { + const inputJson = buildMessagesJson(subagent.inputMessages); + if (inputJson) { + chatSpan.setAttribute(ATTR.GEN_AI_INPUT_MESSAGES, inputJson); + } + } + if (subagent.outputMessages.length > 0) { + const outputJson = buildMessagesJson(subagent.outputMessages, finishReason); + if (outputJson) { + chatSpan.setAttribute(ATTR.GEN_AI_OUTPUT_MESSAGES, outputJson); + } + } + } + + this._telemetry.recordTokenUsageMetrics( + subagent.inputTokens > 0 ? subagent.inputTokens : undefined, + subagent.outputTokens > 0 ? subagent.outputTokens : undefined, + subagent.responseModel ?? 
this._requestModel, + subagent.responseModel, + this.providerName, + this.serverAddress, + this.serverPort, + error, + CHAT + ); + + chatSpan.end(); + } + + // -- Complete the invoke_agent span -- + const invokeSpan = subagent.invokeAgentSpan; + if (error) CopilotTelemetry.recordError(invokeSpan, error); + invokeSpan.setAttribute(ATTR.GEN_AI_RESPONSE_FINISH_REASONS, [finishReason]); + + if (subagent.responseModel) { + invokeSpan.setAttribute(ATTR.GEN_AI_RESPONSE_MODEL, subagent.responseModel); + } + if (subagent.responseId) { + invokeSpan.setAttribute(ATTR.GEN_AI_RESPONSE_ID, subagent.responseId); + } + if (subagent.inputTokens > 0) { + invokeSpan.setAttribute(ATTR.GEN_AI_USAGE_INPUT_TOKENS, subagent.inputTokens); + } + if (subagent.outputTokens > 0) { + invokeSpan.setAttribute(ATTR.GEN_AI_USAGE_OUTPUT_TOKENS, subagent.outputTokens); + } + if (subagent.cacheReadTokens > 0) { + invokeSpan.setAttribute( + ATTR.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, + subagent.cacheReadTokens + ); + } + if (subagent.cacheCreationTokens > 0) { + invokeSpan.setAttribute( + ATTR.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS, + subagent.cacheCreationTokens + ); + } + + if (this._telemetry.enableSensitiveData && subagent.outputMessages.length > 0) { + const outputJson = buildMessagesJson( + subagent.outputMessages.map((m) => ({ ...m, finishReason })) + ); + if (outputJson) { + invokeSpan.setAttribute(ATTR.GEN_AI_OUTPUT_MESSAGES, outputJson); + } + } + + const durationSeconds = (performance.now() - subagent.invokeAgentStartTime) / 1000; + this._telemetry.recordOperationDuration( + durationSeconds, + subagent.responseModel ?? 
this._requestModel, + subagent.responseModel, + this.providerName, + this.serverAddress, + this.serverPort, + error, + INVOKE_AGENT + ); + + invokeSpan.end(); + } +} diff --git a/nodejs/src/index.ts b/nodejs/src/index.ts index f2655f2fc..78adb6dbe 100644 --- a/nodejs/src/index.ts +++ b/nodejs/src/index.ts @@ -45,6 +45,7 @@ export type { SystemMessageAppendConfig, SystemMessageConfig, SystemMessageReplaceConfig, + TelemetryConfig, Tool, ToolHandler, ToolInvocation, diff --git a/nodejs/src/session.ts b/nodejs/src/session.ts index 04525d2bb..6514f74f0 100644 --- a/nodejs/src/session.ts +++ b/nodejs/src/session.ts @@ -8,17 +8,21 @@ */ import type { MessageConnection } from "vscode-jsonrpc/node"; +import type { Context } from "@opentelemetry/api"; import { createSessionRpc } from "./generated/rpc.js"; +import { CopilotTelemetry, AgentTurnTracker } from "./copilot-telemetry.js"; import type { MessageOptions, PermissionHandler, PermissionRequest, PermissionRequestResult, + ProviderConfig, SessionEvent, SessionEventHandler, SessionEventPayload, SessionEventType, SessionHooks, + SystemMessageConfig, Tool, ToolHandler, TypedSessionEventHandler, @@ -60,24 +64,58 @@ export class CopilotSession { private typedEventHandlers: Map void>> = new Map(); private toolHandlers: Map = new Map(); + private toolDescriptions: Map = new Map(); private permissionHandler?: PermissionHandler; private userInputHandler?: UserInputHandler; private hooks?: SessionHooks; private _rpc: ReturnType | null = null; + // Telemetry state + private readonly _telemetry: CopilotTelemetry | undefined; + private readonly _turnTracker: AgentTurnTracker | undefined; + /** * Creates a new CopilotSession instance. 
* * @param sessionId - The unique identifier for this session * @param connection - The JSON-RPC message connection to the Copilot CLI * @param workspacePath - Path to the session workspace directory (when infinite sessions enabled) + * @param telemetry - The telemetry instance, or undefined if telemetry is disabled + * @param model - The request model name + * @param provider - The provider configuration + * @param systemMessage - The system message configuration + * @param tools - The tool definitions + * @param streaming - Whether streaming is enabled * @internal This constructor is internal. Use {@link CopilotClient.createSession} to create sessions. */ constructor( public readonly sessionId: string, private connection: MessageConnection, - private readonly _workspacePath?: string - ) {} + private readonly _workspacePath?: string, + telemetry?: CopilotTelemetry, + model?: string, + provider?: ProviderConfig, + systemMessage?: SystemMessageConfig, + tools?: Tool[], + streaming?: boolean, + agentName?: string, + agentDescription?: string + ) { + this._telemetry = telemetry; + this._turnTracker = telemetry + ? new AgentTurnTracker( + telemetry, + sessionId, + model, + provider, + systemMessage, + tools, + streaming, + agentName, + agentDescription + ) + : undefined; + } /** * Typed session-scoped RPC methods. @@ -98,6 +136,27 @@ export class CopilotSession { return this._workspacePath; } + /** @internal Telemetry accessors for client-level tool instrumentation. */ + get telemetry(): CopilotTelemetry | undefined { + return this._telemetry; + } + get telemetrySpanContext(): Context | undefined { + return this._turnTracker?.getSpanContext(); + } + get telemetryProviderName(): string { + return this._turnTracker?.providerName ?? 
"github"; + } + get telemetryServerAddress(): string | undefined { + return this._turnTracker?.serverAddress; + } + get telemetryServerPort(): number | undefined { + return this._turnTracker?.serverPort; + } + /** @internal Gets the parent context for a tool call span (may be subagent context). */ + getTelemetryToolCallParentContext(toolCallId: string): Context | undefined { + return this._turnTracker?.getToolCallParentContext(toolCallId); + } + /** * Sends a message to this session and waits for the response. * @@ -117,14 +176,24 @@ export class CopilotSession { * ``` */ async send(options: MessageOptions): Promise { - const response = await this.connection.sendRequest("session.send", { - sessionId: this.sessionId, - prompt: options.prompt, - attachments: options.attachments, - mode: options.mode, - }); + // Start telemetry span on first send after idle + this._turnTracker?.beginSend(options.prompt); - return (response as { messageId: string }).messageId; + try { + const response = await this.connection.sendRequest("session.send", { + sessionId: this.sessionId, + prompt: options.prompt, + attachments: options.attachments, + mode: options.mode, + }); + + return (response as { messageId: string }).messageId; + } catch (error) { + this._turnTracker?.completeTurnWithError( + error instanceof Error ? error : new Error(String(error)) + ); + throw error; + } } /** @@ -197,6 +266,12 @@ export class CopilotSession { await Promise.race([idlePromise, timeoutPromise]); return lastAssistantMessage; + } catch (ex) { + // Complete telemetry spans on timeout/cancellation (idempotent if already completed). + if (ex instanceof Error) { + this._turnTracker?.completeTurnWithError(ex); + } + throw ex; } finally { if (timeoutId !== undefined) { clearTimeout(timeoutId); @@ -289,6 +364,9 @@ export class CopilotSession { * @internal This method is for internal use by the SDK. 
*/ _dispatchEvent(event: SessionEvent): void { + // Delegate telemetry enrichment and turn completion to the tracker + this._turnTracker?.processEvent(event); + // Dispatch to typed handlers for this specific event type const typedHandlers = this.typedEventHandlers.get(event.type); if (typedHandlers) { @@ -322,12 +400,16 @@ export class CopilotSession { */ registerTools(tools?: Tool[]): void { this.toolHandlers.clear(); + this.toolDescriptions.clear(); if (!tools) { return; } for (const tool of tools) { this.toolHandlers.set(tool.name, tool.handler); + if (tool.description) { + this.toolDescriptions.set(tool.name, tool.description); + } } } @@ -342,6 +424,17 @@ export class CopilotSession { return this.toolHandlers.get(name); } + /** + * Retrieves a registered tool description by name. + * + * @param name - The name of the tool + * @returns The tool description if available, or undefined + * @internal This method is for internal use by the SDK. + */ + getToolDescription(name: string): string | undefined { + return this.toolDescriptions.get(name); + } + /** * Registers a handler for permission requests. * @@ -521,7 +614,9 @@ export class CopilotSession { this.eventHandlers.clear(); this.typedEventHandlers.clear(); this.toolHandlers.clear(); + this.toolDescriptions.clear(); this.permissionHandler = undefined; + this._turnTracker?.completeOnDispose(); } /** diff --git a/nodejs/src/types.ts b/nodejs/src/types.ts index 3a0ccbce7..a39364ce9 100644 --- a/nodejs/src/types.ts +++ b/nodejs/src/types.ts @@ -10,6 +10,26 @@ import type { SessionEvent as GeneratedSessionEvent } from "./generated/session-events.js"; export type SessionEvent = GeneratedSessionEvent; +/** + * Configuration for OpenTelemetry instrumentation. + * When provided to CopilotClientOptions, enables telemetry emission + * following the Semantic Conventions for Generative AI systems. 
+ */ +export interface TelemetryConfig { + /** + * Whether to include potentially sensitive data (message content, tool arguments/results) in telemetry. + * When not set, falls back to the OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT environment variable. + * @default false + */ + enableSensitiveData?: boolean; + + /** + * Source name for the tracer and meter. + * @default "github.copilot.sdk" + */ + sourceName?: string; +} + /** * Options for creating a CopilotClient */ @@ -89,6 +109,13 @@ export interface CopilotClientOptions { * @default true (but defaults to false when githubToken is provided) */ useLoggedInUser?: boolean; + + /** + * OpenTelemetry instrumentation configuration. + * When provided, enables telemetry emission following GenAI semantic conventions. + * When undefined, no telemetry is emitted (opt-in pattern). + */ + telemetry?: TelemetryConfig; } /** @@ -731,6 +758,18 @@ export interface SessionConfig { * Set to `{ enabled: false }` to disable. */ infiniteSessions?: InfiniteSessionConfig; + + /** + * Name of the agent for telemetry attribution. + * When set, the invoke_agent span includes a `gen_ai.agent.name` attribute. + */ + agentName?: string; + + /** + * Description of the agent for telemetry attribution. + * When set, the invoke_agent span includes a `gen_ai.agent.description` attribute. + */ + agentDescription?: string; } /** @@ -757,6 +796,8 @@ export type ResumeSessionConfig = Pick< | "skillDirectories" | "disabledSkills" | "infiniteSessions" + | "agentName" + | "agentDescription" > & { /** * When true, skips emitting the session.resume event. 
diff --git a/nodejs/test/client.test.ts b/nodejs/test/client.test.ts index 6fa33e9ec..079a6ca57 100644 --- a/nodejs/test/client.test.ts +++ b/nodejs/test/client.test.ts @@ -68,7 +68,9 @@ describe("CopilotClient", () => { onTestFinished(() => client.forceStop()); const session = await client.createSession({ onPermissionRequest: approveAll }); - const spy = vi.spyOn((client as any).connection!, "sendRequest"); + const spy = vi + .spyOn((client as any).connection!, "sendRequest") + .mockResolvedValueOnce({ sessionId: session.sessionId }); await client.resumeSession(session.sessionId, { clientName: "my-app", onPermissionRequest: approveAll, diff --git a/nodejs/test/opentelemetry.test.ts b/nodejs/test/opentelemetry.test.ts new file mode 100644 index 000000000..18d41e369 --- /dev/null +++ b/nodejs/test/opentelemetry.test.ts @@ -0,0 +1,1555 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ +import { describe, expect, it, beforeEach, afterEach } from "vitest"; +import { trace, metrics, SpanKind, SpanStatusCode } from "@opentelemetry/api"; +import { + InMemorySpanExporter, + SimpleSpanProcessor, + BasicTracerProvider, +} from "@opentelemetry/sdk-trace-base"; +import { + MeterProvider, + InMemoryMetricExporter, + PeriodicExportingMetricReader, +} from "@opentelemetry/sdk-metrics"; +import { CopilotTelemetry } from "../src/copilot-telemetry.js"; +import { CopilotSession } from "../src/session.js"; +import type { SessionEvent } from "../src/types.js"; + +// ============================================================================ +// Test infrastructure +// ============================================================================ + +let spanExporter: InMemorySpanExporter; +let tracerProvider: BasicTracerProvider; +let metricExporter: InMemoryMetricExporter; +let metricReader: PeriodicExportingMetricReader; +let meterProvider: MeterProvider; + +function setupOtel(_sourceName = "github.copilot.sdk") { + spanExporter = new InMemorySpanExporter(); + tracerProvider 
= new BasicTracerProvider({ + spanProcessors: [new SimpleSpanProcessor(spanExporter)], + }); + trace.setGlobalTracerProvider(tracerProvider); + + metricExporter = new InMemoryMetricExporter(); + metricReader = new PeriodicExportingMetricReader({ + exporter: metricExporter, + exportIntervalMillis: 60_000, + }); + meterProvider = new MeterProvider({ readers: [metricReader] }); + metrics.setGlobalMeterProvider(meterProvider); +} + +async function teardownOtel() { + if (metricReader) await metricReader.shutdown(); + if (tracerProvider) await tracerProvider.shutdown(); + if (meterProvider) await meterProvider.shutdown(); + trace.disable(); + metrics.disable(); +} + +function getSpans() { + return spanExporter.getFinishedSpans(); +} + +async function getMetrics() { + await metricReader.forceFlush(); + const collected = metricExporter.getMetrics(); + if (collected.length === 0) return []; + // Get the most recent collection + const latest = collected[collected.length - 1]; + return latest.scopeMetrics; +} + +function findMetric(scopeMetrics: any[], name: string) { + for (const scope of scopeMetrics) { + for (const metric of scope.metrics) { + if (metric.descriptor.name === name) { + return metric; + } + } + } + return undefined; +} + +/** Create a mock MessageConnection for CopilotSession */ +function createMockConnection(): any { + return { + sendRequest: async () => ({ messageId: "msg-1" }), + onNotification: () => {}, + onRequest: () => {}, + listen: () => {}, + dispose: () => {}, + }; +} + +/** Create a session with telemetry for testing */ +function createTestSession( + telemetry: CopilotTelemetry, + opts: { + model?: string; + providerType?: string; + baseUrl?: string; + streaming?: boolean; + systemContent?: string; + tools?: any[]; + } = {} +): CopilotSession { + const conn = createMockConnection(); + const session = new CopilotSession( + "test-session-id", + conn, + undefined, + telemetry, + opts.model ?? "gpt-4o", + opts.providerType || opts.baseUrl + ? 
{ + type: opts.providerType as any, + baseUrl: opts.baseUrl ?? "https://api.openai.com/v1", + } + : undefined, + opts.systemContent ? { mode: "replace", content: opts.systemContent } : undefined, + opts.tools, + opts.streaming ?? false + ); + return session; +} + +/** Dispatch an event to the session */ +function dispatchEvent(session: CopilotSession, event: SessionEvent) { + session._dispatchEvent(event); +} + +function makeEvent(type: string, data: Record = {}): SessionEvent { + return { + id: "evt-1", + timestamp: new Date().toISOString(), + parentId: null, + type, + data, + } as unknown as SessionEvent; +} + +// ============================================================================ +// Tests +// ============================================================================ + +describe("CopilotTelemetry", () => { + beforeEach(() => { + setupOtel(); + }); + + afterEach(async () => { + await teardownOtel(); + }); + + // ======================================================================== + // Provider name normalization + // ======================================================================== + + describe("normalizeProviderName", () => { + it("maps openai to openai", () => { + expect(CopilotTelemetry.normalizeProviderName("openai")).toBe("openai"); + }); + + it("maps azure to azure.ai.openai", () => { + expect(CopilotTelemetry.normalizeProviderName("azure")).toBe("azure.ai.openai"); + }); + + it("maps anthropic to anthropic", () => { + expect(CopilotTelemetry.normalizeProviderName("anthropic")).toBe("anthropic"); + }); + + it("maps unknown to github", () => { + expect(CopilotTelemetry.normalizeProviderName("something")).toBe("github"); + }); + + it("maps undefined to github", () => { + expect(CopilotTelemetry.normalizeProviderName(undefined)).toBe("github"); + }); + + it("maps empty string to github", () => { + expect(CopilotTelemetry.normalizeProviderName("")).toBe("github"); + }); + + it("handles case-insensitive matching", () => { + 
expect(CopilotTelemetry.normalizeProviderName("OpenAI")).toBe("openai"); + expect(CopilotTelemetry.normalizeProviderName("AZURE")).toBe("azure.ai.openai"); + }); + }); + + // ======================================================================== + // Server address parsing + // ======================================================================== + + describe("parseServerAddress", () => { + it("parses standard http URL", () => { + const result = CopilotTelemetry.parseServerAddress("http://api.example.com:8080/v1"); + expect(result.address).toBe("api.example.com"); + expect(result.port).toBe(8080); + }); + + it("parses https URL without explicit port", () => { + const result = CopilotTelemetry.parseServerAddress("https://api.openai.com/v1"); + expect(result.address).toBe("api.openai.com"); + expect(result.port).toBe(443); + }); + + it("parses http URL without explicit port", () => { + const result = CopilotTelemetry.parseServerAddress("http://example.com/path"); + expect(result.address).toBe("example.com"); + expect(result.port).toBe(80); + }); + + it("returns undefined for empty/undefined input", () => { + expect(CopilotTelemetry.parseServerAddress(undefined)).toEqual({ + address: undefined, + port: undefined, + }); + expect(CopilotTelemetry.parseServerAddress("")).toEqual({ + address: undefined, + port: undefined, + }); + }); + + it("returns undefined for invalid URL", () => { + expect(CopilotTelemetry.parseServerAddress("not-a-url")).toEqual({ + address: undefined, + port: undefined, + }); + }); + }); + + // ======================================================================== + // invoke_agent span creation + // ======================================================================== + + describe("invoke_agent span", () => { + it("creates span with correct attributes on send", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry, { + model: "gpt-4o", + baseUrl: "https://api.openai.com:8080/v1", + }); + 
+ await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + expect(spans.length).toBe(1); + + const span = spans[0]; + expect(span.kind).toBe(SpanKind.CLIENT); + expect(span.attributes["gen_ai.operation.name"]).toBe("invoke_agent"); + expect(span.attributes["gen_ai.provider.name"]).toBe("github"); + expect(span.attributes["gen_ai.agent.id"]).toBe("test-session-id"); + expect(span.attributes["gen_ai.conversation.id"]).toBe("test-session-id"); + expect(span.attributes["gen_ai.request.model"]).toBe("gpt-4o"); + expect(span.attributes["server.address"]).toBe("api.openai.com"); + expect(span.attributes["server.port"]).toBe(8080); + }); + + it("subsequent sends within same turn do not create new span", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "First" }); + await session.send({ prompt: "Second" }); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + expect(spans.length).toBe(1); + }); + + it("new span after idle", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + // First turn + await session.send({ prompt: "Turn 1" }); + dispatchEvent(session, makeEvent("session.idle")); + + // Second turn + await session.send({ prompt: "Turn 2" }); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + expect(spans.length).toBe(2); + }); + }); + + // ======================================================================== + // execute_tool span creation + // ======================================================================== + + describe("execute_tool span", () => { + it("creates span with correct attributes", () => { + const telemetry = new CopilotTelemetry({}); + const span = telemetry.startExecuteToolSpan( + "get_weather", + "call-123", + "Gets weather info", + { location: "Seattle" } + ); + 
+ expect(span).toBeDefined(); + + span.end(); + + const spans = getSpans(); + expect(spans.length).toBe(1); + expect(spans[0].kind).toBe(SpanKind.INTERNAL); + expect(spans[0].attributes["gen_ai.operation.name"]).toBe("execute_tool"); + expect(spans[0].attributes["gen_ai.tool.name"]).toBe("get_weather"); + expect(spans[0].attributes["gen_ai.tool.call.id"]).toBe("call-123"); + expect(spans[0].attributes["gen_ai.tool.type"]).toBe("function"); + expect(spans[0].attributes["gen_ai.tool.description"]).toBe("Gets weather info"); + }); + + it("does not include arguments when sensitive data is disabled", () => { + const telemetry = new CopilotTelemetry({ enableSensitiveData: false }); + const span = telemetry.startExecuteToolSpan("get_weather", "call-123", undefined, { + location: "Seattle", + }); + span.end(); + + const spans = getSpans(); + expect(spans[0].attributes["gen_ai.tool.call.arguments"]).toBeUndefined(); + }); + + it("includes arguments when sensitive data is enabled", () => { + const telemetry = new CopilotTelemetry({ enableSensitiveData: true }); + const span = telemetry.startExecuteToolSpan("get_weather", "call-123", undefined, { + location: "Seattle", + }); + span.end(); + + const spans = getSpans(); + expect(spans[0].attributes["gen_ai.tool.call.arguments"]).toBe( + JSON.stringify({ location: "Seattle" }) + ); + }); + }); + + // ======================================================================== + // Metric recording + // ======================================================================== + + describe("metric recording", () => { + it("records operation duration metric on session.idle", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry, { model: "gpt-4o" }); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("session.idle")); + + const scopeMetrics = await getMetrics(); + const durationMetric = findMetric(scopeMetrics, "gen_ai.client.operation.duration"); + 
expect(durationMetric).toBeDefined(); + expect(durationMetric.dataPoints.length).toBeGreaterThan(0); + }); + + it("records token usage metrics from assistant.usage event", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry, { model: "gpt-4o" }); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent( + session, + makeEvent("assistant.usage", { + model: "gpt-4o-2024-05-13", + inputTokens: 100, + outputTokens: 50, + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + // Chat span has per-turn usage + const chatSpan = spans.find((s) => s.name.startsWith("chat")); + expect(chatSpan).toBeDefined(); + expect(chatSpan!.attributes["gen_ai.usage.input_tokens"]).toBe(100); + expect(chatSpan!.attributes["gen_ai.usage.output_tokens"]).toBe(50); + expect(chatSpan!.attributes["gen_ai.response.model"]).toBe("gpt-4o-2024-05-13"); + // invoke_agent span has accumulated totals + const agentSpan = spans.find((s) => s.name.startsWith("invoke_agent")); + expect(agentSpan).toBeDefined(); + expect(agentSpan!.attributes["gen_ai.usage.input_tokens"]).toBe(100); + expect(agentSpan!.attributes["gen_ai.usage.output_tokens"]).toBe(50); + + const scopeMetrics = await getMetrics(); + const tokenMetric = findMetric(scopeMetrics, "gen_ai.client.token.usage"); + expect(tokenMetric).toBeDefined(); + expect(tokenMetric.dataPoints.length).toBe(2); // input + output + }); + + it("records streaming chunk metrics", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry, { + model: "gpt-4o", + streaming: true, + }); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + + // Simulate streaming chunks + dispatchEvent( + session, + makeEvent("assistant.message_delta", { + messageId: "msg-1", + 
deltaContent: "First ", + }) + ); + dispatchEvent( + session, + makeEvent("assistant.message_delta", { + messageId: "msg-1", + deltaContent: "second ", + }) + ); + dispatchEvent( + session, + makeEvent("assistant.message", { + messageId: "msg-1", + content: "First second chunk", + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const scopeMetrics = await getMetrics(); + const ttfcMetric = findMetric( + scopeMetrics, + "gen_ai.client.operation.time_to_first_chunk" + ); + const tpocMetric = findMetric( + scopeMetrics, + "gen_ai.client.operation.time_per_output_chunk" + ); + expect(ttfcMetric).toBeDefined(); + expect(ttfcMetric.dataPoints.length).toBe(1); // one time-to-first-chunk + // Subsequent chunks record time-per-output-chunk + expect(tpocMetric).toBeDefined(); + expect(tpocMetric.dataPoints.length).toBeGreaterThanOrEqual(1); + }); + }); + + // ======================================================================== + // Sensitive data gating + // ======================================================================== + + describe("sensitive data gating", () => { + it("includes tool call result when sensitive enabled", () => { + const telemetry = new CopilotTelemetry({ enableSensitiveData: true }); + const span = telemetry.startExecuteToolSpan("tool", "call-1", undefined, {}); + telemetry.setExecuteToolResult(span, { result: "data" }); + span.end(); + + const spans = getSpans(); + expect(spans[0].attributes["gen_ai.tool.call.result"]).toBe( + JSON.stringify({ result: "data" }) + ); + }); + + it("excludes tool call result when sensitive disabled", () => { + const telemetry = new CopilotTelemetry({ enableSensitiveData: false }); + const span = telemetry.startExecuteToolSpan("tool", "call-1", undefined, {}); + telemetry.setExecuteToolResult(span, { result: "data" }); + span.end(); + + const spans = getSpans(); + expect(spans[0].attributes["gen_ai.tool.call.result"]).toBeUndefined(); + }); + + it("includes input/output messages when sensitive 
enabled", async () => { + const telemetry = new CopilotTelemetry({ enableSensitiveData: true }); + const session = createTestSession(telemetry, { + systemContent: "You are a helpful assistant.", + tools: [ + { + name: "myTool", + description: "A tool", + parameters: { type: "object" }, + handler: () => {}, + }, + ], + }); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent(session, makeEvent("assistant.message", { content: "Hi there!" })); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const invokeSpan = spans.find((s) => s.name.startsWith("invoke_agent")); + expect(invokeSpan).toBeDefined(); + + // Input messages on invoke_agent span (agent-level = user prompts) + const agentInputMessages = invokeSpan!.attributes["gen_ai.input.messages"] as string; + expect(agentInputMessages).toBeDefined(); + const parsedAgentInput = JSON.parse(agentInputMessages); + expect(parsedAgentInput[0].role).toBe("user"); + expect(parsedAgentInput[0].parts[0].content).toBe("Hello"); + + // Chat span has per-turn input/output messages + const chatSpan = spans.find((s) => s.name.startsWith("chat")); + expect(chatSpan).toBeDefined(); + + const chatInputMessages = chatSpan!.attributes["gen_ai.input.messages"] as string; + expect(chatInputMessages).toBeDefined(); + const parsedInput = JSON.parse(chatInputMessages); + expect(parsedInput[0].role).toBe("user"); + expect(parsedInput[0].parts[0].content).toBe("Hello"); + + // Output messages on chat span + const outputMessages = chatSpan!.attributes["gen_ai.output.messages"] as string; + expect(outputMessages).toBeDefined(); + const parsedOutput = JSON.parse(outputMessages); + expect(parsedOutput[0].role).toBe("assistant"); + expect(parsedOutput[0].parts[0].content).toBe("Hi there!"); + expect(parsedOutput[0].finish_reason).toBe("stop"); + + // System instructions on invoke_agent span + const sysInstructions = 
invokeSpan!.attributes["gen_ai.system_instructions"] as string; + expect(sysInstructions).toBeDefined(); + const parsedSys = JSON.parse(sysInstructions); + expect(parsedSys[0].content).toBe("You are a helpful assistant."); + + // Tool definitions (always included, not sensitive-gated) + const toolDefs = invokeSpan!.attributes["gen_ai.tool.definitions"] as string; + expect(toolDefs).toBeDefined(); + const parsedTools = JSON.parse(toolDefs); + expect(parsedTools[0].name).toBe("myTool"); + }); + + it("excludes input/output messages when sensitive disabled", async () => { + const telemetry = new CopilotTelemetry({ enableSensitiveData: false }); + const session = createTestSession(telemetry, { + systemContent: "You are a helpful assistant.", + tools: [ + { + name: "myTool", + description: "A tool", + parameters: { type: "object" }, + handler: () => {}, + }, + ], + }); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent(session, makeEvent("assistant.message", { content: "Hi there!" 
})); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const invokeSpan = spans.find((s) => s.name.startsWith("invoke_agent")); + expect(invokeSpan).toBeDefined(); + + // Sensitive attributes should be excluded + expect(invokeSpan!.attributes["gen_ai.input.messages"]).toBeUndefined(); + expect(invokeSpan!.attributes["gen_ai.output.messages"]).toBeUndefined(); + expect(invokeSpan!.attributes["gen_ai.system_instructions"]).toBeUndefined(); + + // Tool definitions are NOT sensitive-gated, should still be present + const toolDefs = invokeSpan!.attributes["gen_ai.tool.definitions"] as string; + expect(toolDefs).toBeDefined(); + const parsedTools = JSON.parse(toolDefs); + expect(parsedTools[0].name).toBe("myTool"); + }); + }); + + // ======================================================================== + // Opt-in behavior + // ======================================================================== + + describe("opt-in behavior", () => { + it("no telemetry when config not provided", async () => { + // Create session without telemetry + const conn = createMockConnection(); + const session = new CopilotSession("test-id", conn); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + expect(spans.length).toBe(0); + }); + }); + + // ======================================================================== + // Custom source name + // ======================================================================== + + describe("custom source name", () => { + it("uses custom source name for tracer and meter", async () => { + const telemetry = new CopilotTelemetry({ + sourceName: "my.custom.source", + }); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + expect(spans.length).toBe(1); + expect((spans[0] as 
any).instrumentationScope.name).toBe("my.custom.source"); + + const scopeMetrics = await getMetrics(); + // Check that metrics were recorded under custom source name + const customScope = scopeMetrics.find((sm) => sm.scope.name === "my.custom.source"); + expect(customScope).toBeDefined(); + }); + }); + + // ======================================================================== + // Error handling + // ======================================================================== + + describe("error handling", () => { + it("records error on session.error event", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + dispatchEvent( + session, + makeEvent("session.error", { + errorType: "internal", + message: "Something went wrong", + }) + ); + + const spans = getSpans(); + expect(spans.length).toBe(1); + expect(spans[0].status.code).toBe(SpanStatusCode.ERROR); + expect(spans[0].attributes["error.type"]).toBe("SessionTurnError"); + }); + + it("sets response.id from apiCallId", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent( + session, + makeEvent("assistant.usage", { + model: "gpt-4o", + inputTokens: 10, + outputTokens: 5, + apiCallId: "api-call-123", + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const chatSpan = spans.find((s) => s.name.startsWith("chat")); + expect(chatSpan).toBeDefined(); + expect(chatSpan!.attributes["gen_ai.response.id"]).toBe("api-call-123"); + }); + + it("sets response.id from providerCallId when apiCallId is missing", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, 
makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent( + session, + makeEvent("assistant.usage", { + model: "gpt-4o", + inputTokens: 10, + outputTokens: 5, + providerCallId: "provider-call-456", + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const chatSpan = spans.find((s) => s.name.startsWith("chat")); + expect(chatSpan).toBeDefined(); + expect(chatSpan!.attributes["gen_ai.response.id"]).toBe("provider-call-456"); + }); + }); + + // ======================================================================== + // Model change event + // ======================================================================== + + describe("session.model_change event", () => { + it("updates response model from model_change event", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry, { model: "gpt-4o" }); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent( + session, + makeEvent("session.model_change", { + previousModel: "gpt-4o", + newModel: "gpt-4o-mini", + }) + ); + dispatchEvent( + session, + makeEvent("assistant.usage", { + model: "gpt-4o-mini", + inputTokens: 10, + outputTokens: 5, + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const chatSpan = spans.find((s) => s.name.startsWith("chat")); + expect(chatSpan).toBeDefined(); + expect(chatSpan!.attributes["gen_ai.response.model"]).toBe("gpt-4o-mini"); + }); + }); + + // ======================================================================== + // Cache token attributes + // ======================================================================== + + describe("cache token attributes", () => { + it("records cache read and write tokens", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: 
"Hello" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent( + session, + makeEvent("assistant.usage", { + model: "gpt-4o", + inputTokens: 100, + outputTokens: 50, + cacheReadTokens: 80, + cacheWriteTokens: 20, + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const chatSpan = spans.find((s) => s.name.startsWith("chat")); + expect(chatSpan).toBeDefined(); + expect(chatSpan!.attributes["gen_ai.usage.cache_read.input_tokens"]).toBe(80); + expect(chatSpan!.attributes["gen_ai.usage.cache_creation.input_tokens"]).toBe(20); + }); + }); + + // ======================================================================== + // Static recordError + // ======================================================================== + + describe("static recordError", () => { + it("sets error attributes and status on span", () => { + const telemetry = new CopilotTelemetry({}); + const span = telemetry.startInvokeAgentSpan( + "sid", + "model", + "github", + undefined, + undefined + ); + CopilotTelemetry.recordError(span, new TypeError("test error")); + span.end(); + + const spans = getSpans(); + expect(spans[0].attributes["error.type"]).toBe("TypeError"); + expect(spans[0].status.code).toBe(SpanStatusCode.ERROR); + expect(spans[0].status.message).toBe("test error"); + }); + }); + + // ======================================================================== + // Finish reasons + // ======================================================================== + + describe("finish reasons", () => { + it("sets gen_ai.response.finish_reasons to ['stop'] on successful turn completion", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + expect(spans.length).toBe(1); + 
expect(spans[0].attributes["gen_ai.response.finish_reasons"]).toEqual(["stop"]); + }); + }); + + // ======================================================================== + // Agent metadata + // ======================================================================== + + describe("agent metadata", () => { + it("includes gen_ai.agent.name and gen_ai.agent.description on invoke_agent span", async () => { + const telemetry = new CopilotTelemetry({}); + const conn = createMockConnection(); + const session = new CopilotSession( + "test-agent-session", + conn, + undefined, // workspacePath + telemetry, // telemetry + "gpt-4o", // model + undefined, // provider + undefined, // systemMessage + undefined, // tools + false, // streaming + "my-agent", // agentName + "A helpful agent" // agentDescription + ); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + expect(spans.length).toBe(1); + expect(spans[0].attributes["gen_ai.agent.name"]).toBe("my-agent"); + expect(spans[0].attributes["gen_ai.agent.description"]).toBe("A helpful agent"); + }); + }); + + // ======================================================================== + // Environment variable override + // ======================================================================== + + describe("env var override", () => { + it("enables sensitive data when OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true", () => { + const originalEnv = process.env["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"]; + try { + process.env["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = "true"; + const telemetry = new CopilotTelemetry({}); + expect(telemetry.enableSensitiveData).toBe(true); + } finally { + if (originalEnv !== undefined) { + process.env["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = originalEnv; + } else { + delete process.env["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"]; + } + } + }); + }); + + // 
======================================================================== + // Two-level span hierarchy (invoke_agent → chat) + // ======================================================================== + + describe("two-level span hierarchy", () => { + it("assistant.turn_start creates a chat child span under invoke_agent", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const invokeSpan = spans.find((s) => s.name.startsWith("invoke_agent")); + const chatSpan = spans.find((s) => s.name.startsWith("chat")); + expect(invokeSpan).toBeDefined(); + expect(chatSpan).toBeDefined(); + expect(chatSpan!.attributes["gen_ai.operation.name"]).toBe("chat"); + expect(chatSpan!.attributes["gen_ai.conversation.id"]).toBe("test-session-id"); + // Chat span is in the same trace as invoke_agent + expect(chatSpan!.spanContext().traceId).toBe(invokeSpan!.spanContext().traceId); + }); + + it("multi-turn: two turn_start/turn_end cycles produce 1 invoke_agent + 2 chat spans", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + + // Turn 1 + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent( + session, + makeEvent("assistant.usage", { + model: "gpt-4o", + inputTokens: 50, + outputTokens: 20, + }) + ); + dispatchEvent(session, makeEvent("assistant.turn_end", { turnId: "turn-1" })); + + // Turn 2 + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-2" })); + dispatchEvent( + session, + makeEvent("assistant.usage", { + model: "gpt-4o", + inputTokens: 60, + outputTokens: 30, + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = 
getSpans(); + const invokeSpans = spans.filter((s) => s.name.startsWith("invoke_agent")); + const chatSpans = spans.filter((s) => s.name.startsWith("chat")); + expect(invokeSpans.length).toBe(1); + expect(chatSpans.length).toBe(2); + }); + + it("per-turn usage goes on chat span, accumulated usage goes on invoke_agent", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + + // Turn 1 + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent( + session, + makeEvent("assistant.usage", { + model: "gpt-4o", + inputTokens: 50, + outputTokens: 20, + }) + ); + dispatchEvent(session, makeEvent("assistant.turn_end", { turnId: "turn-1" })); + + // Turn 2 + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-2" })); + dispatchEvent( + session, + makeEvent("assistant.usage", { + model: "gpt-4o", + inputTokens: 60, + outputTokens: 30, + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const chatSpans = spans + .filter((s) => s.name.startsWith("chat")) + .sort((a, b) => a.startTime[1] - b.startTime[1]); + const invokeSpan = spans.find((s) => s.name.startsWith("invoke_agent"))!; + + // Per-turn usage on chat spans + expect(chatSpans[0].attributes["gen_ai.usage.input_tokens"]).toBe(50); + expect(chatSpans[0].attributes["gen_ai.usage.output_tokens"]).toBe(20); + expect(chatSpans[1].attributes["gen_ai.usage.input_tokens"]).toBe(60); + expect(chatSpans[1].attributes["gen_ai.usage.output_tokens"]).toBe(30); + + // Accumulated totals on invoke_agent + expect(invokeSpan.attributes["gen_ai.usage.input_tokens"]).toBe(110); + expect(invokeSpan.attributes["gen_ai.usage.output_tokens"]).toBe(50); + }); + }); + + // ======================================================================== + // Rich message content (parts-based) + // 
======================================================================== + + describe("rich message content", () => { + it("output messages include reasoning parts when reasoningText is present", async () => { + const telemetry = new CopilotTelemetry({ enableSensitiveData: true }); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Think step by step" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent( + session, + makeEvent("assistant.message", { + messageId: "msg-1", + content: "The answer is 42.", + reasoningText: "Let me think about this...", + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const chatSpan = spans.find((s) => s.name.startsWith("chat"))!; + const outputMessages = JSON.parse( + chatSpan.attributes["gen_ai.output.messages"] as string + ); + expect(outputMessages[0].role).toBe("assistant"); + // Should have reasoning part first, then text part + const parts = outputMessages[0].parts; + expect(parts[0].type).toBe("reasoning"); + expect(parts[0].content).toBe("Let me think about this..."); + expect(parts[1].type).toBe("text"); + expect(parts[1].content).toBe("The answer is 42."); + }); + + it("tool_call and tool_call_response parts appear for tool execution events", async () => { + const telemetry = new CopilotTelemetry({ enableSensitiveData: true }); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Get weather" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + + // Tool execution start -> output message with tool_call part + dispatchEvent( + session, + makeEvent("tool.execution_start", { + toolCallId: "call-1", + toolName: "get_weather", + arguments: { city: "Seattle" }, + }) + ); + + // Tool execution complete -> input message with tool_call_response part + dispatchEvent( + session, + makeEvent("tool.execution_complete", { + toolCallId: "call-1", + 
success: true, + result: { content: "Sunny, 72°F" }, + }) + ); + + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const chatSpan = spans.find((s) => s.name.startsWith("chat"))!; + + // Output messages should include tool_call + const outputMessages = JSON.parse( + chatSpan.attributes["gen_ai.output.messages"] as string + ); + const toolCallMsg = outputMessages.find((m: any) => m.parts[0]?.type === "tool_call"); + expect(toolCallMsg).toBeDefined(); + expect(toolCallMsg.parts[0].name).toBe("get_weather"); + expect(toolCallMsg.parts[0].id).toBe("call-1"); + + // Input messages should include tool_call_response + const inputMessages = JSON.parse( + chatSpan.attributes["gen_ai.input.messages"] as string + ); + const toolResponseMsg = inputMessages.find((m: any) => m.role === "tool"); + expect(toolResponseMsg).toBeDefined(); + expect(toolResponseMsg.parts[0].type).toBe("tool_call_response"); + expect(toolResponseMsg.parts[0].response).toBe("Sunny, 72°F"); + }); + + it("user prompt appears as input message with text type", async () => { + const telemetry = new CopilotTelemetry({ enableSensitiveData: true }); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello world" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const chatSpan = spans.find((s) => s.name.startsWith("chat"))!; + const inputMessages = JSON.parse( + chatSpan.attributes["gen_ai.input.messages"] as string + ); + expect(inputMessages[0].role).toBe("user"); + expect(inputMessages[0].parts[0].type).toBe("text"); + expect(inputMessages[0].parts[0].content).toBe("Hello world"); + }); + }); + + // ======================================================================== + // Subagent spans + // ======================================================================== + + describe("subagent spans", () => { + 
it("subagent.started creates a nested invoke_agent → chat hierarchy", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent( + session, + makeEvent("subagent.started", { + toolCallId: "sub-call-1", + agentName: "code-reviewer", + agentDisplayName: "Code Reviewer", + agentDescription: "Reviews code changes", + }) + ); + dispatchEvent( + session, + makeEvent("subagent.completed", { + toolCallId: "sub-call-1", + agentName: "code-reviewer", + agentDisplayName: "Code Reviewer", + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + // Main invoke_agent + main chat + subagent invoke_agent + subagent chat + const invokeSpans = spans.filter( + (s) => s.attributes["gen_ai.operation.name"] === "invoke_agent" + ); + const chatSpans = spans.filter((s) => s.attributes["gen_ai.operation.name"] === "chat"); + expect(invokeSpans.length).toBe(2); // main + subagent + expect(chatSpans.length).toBe(2); // main + subagent + }); + + it("subagent spans are parented under the main invoke_agent span", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent( + session, + makeEvent("subagent.started", { + toolCallId: "sub-call-1", + agentName: "helper", + agentDisplayName: "Helper", + agentDescription: "Helps with tasks", + }) + ); + dispatchEvent( + session, + makeEvent("subagent.completed", { + toolCallId: "sub-call-1", + agentName: "helper", + agentDisplayName: "Helper", + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const mainInvokeSpan = spans.find( + (s) => + s.attributes["gen_ai.operation.name"] === 
"invoke_agent" && + !s.attributes["gen_ai.agent.name"] + )!; + const subInvokeSpan = spans.find( + (s) => + s.attributes["gen_ai.operation.name"] === "invoke_agent" && + s.attributes["gen_ai.agent.name"] === "helper" + )!; + + // Subagent invoke_agent is in the same trace as main invoke_agent + expect(subInvokeSpan.spanContext().traceId).toBe(mainInvokeSpan.spanContext().traceId); + + // There should be 2 chat spans (main + subagent) in the same trace + const chatSpans = spans.filter((s) => s.attributes["gen_ai.operation.name"] === "chat"); + expect(chatSpans.length).toBe(2); + }); + + it("subagent attributes include agent name and description", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent( + session, + makeEvent("subagent.started", { + toolCallId: "sub-call-1", + agentName: "code-reviewer", + agentDisplayName: "Code Reviewer", + agentDescription: "Reviews code changes", + }) + ); + dispatchEvent( + session, + makeEvent("subagent.completed", { + toolCallId: "sub-call-1", + agentName: "code-reviewer", + agentDisplayName: "Code Reviewer", + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const subInvokeSpan = spans.find( + (s) => + s.attributes["gen_ai.operation.name"] === "invoke_agent" && + s.attributes["gen_ai.agent.name"] === "code-reviewer" + )!; + expect(subInvokeSpan).toBeDefined(); + expect(subInvokeSpan.attributes["gen_ai.agent.name"]).toBe("code-reviewer"); + expect(subInvokeSpan.attributes["gen_ai.agent.description"]).toBe( + "Reviews code changes" + ); + }); + + it("subagent.completed completes the subagent spans", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, 
makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent( + session, + makeEvent("subagent.started", { + toolCallId: "sub-call-1", + agentName: "helper", + agentDisplayName: "Helper", + agentDescription: "Helps", + }) + ); + + // Before completion, subagent spans should NOT be finished yet + const spansBefore = getSpans(); + const subSpansBefore = spansBefore.filter( + (s) => s.attributes["gen_ai.agent.name"] === "helper" + ); + expect(subSpansBefore.length).toBe(0); // not yet finished + + dispatchEvent( + session, + makeEvent("subagent.completed", { + toolCallId: "sub-call-1", + agentName: "helper", + agentDisplayName: "Helper", + }) + ); + + // After completion, subagent spans should be finished + const spansAfter = getSpans(); + const subSpansAfter = spansAfter.filter( + (s) => s.attributes["gen_ai.agent.name"] === "helper" + ); + expect(subSpansAfter.length).toBe(1); // invoke_agent span for subagent + + dispatchEvent(session, makeEvent("session.idle")); + }); + }); + + // ======================================================================== + // MCP server tool tracking + // ======================================================================== + + describe("MCP server tool tracking", () => { + it("tool execution with mcpServerName produces server_tool_call / server_tool_call_response parts", async () => { + const telemetry = new CopilotTelemetry({ enableSensitiveData: true }); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Use MCP tool" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + + dispatchEvent( + session, + makeEvent("tool.execution_start", { + toolCallId: "mcp-call-1", + toolName: "query_database", + arguments: { sql: "SELECT 1" }, + mcpServerName: "postgres-server", + }) + ); + + dispatchEvent( + session, + makeEvent("tool.execution_complete", { + toolCallId: "mcp-call-1", + success: true, + result: { content: "1 row returned" }, + }) + ); + + 
dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const chatSpan = spans.find((s) => s.name.startsWith("chat"))!; + const outputMessages = JSON.parse( + chatSpan.attributes["gen_ai.output.messages"] as string + ); + const inputMessages = JSON.parse( + chatSpan.attributes["gen_ai.input.messages"] as string + ); + + // Output should have server_tool_call type + const serverToolCallMsg = outputMessages.find( + (m: any) => m.parts[0]?.type === "server_tool_call" + ); + expect(serverToolCallMsg).toBeDefined(); + expect(serverToolCallMsg.parts[0].server_tool_call.type).toBe("mcp"); + expect(serverToolCallMsg.parts[0].server_tool_call.server_name).toBe("postgres-server"); + expect(serverToolCallMsg.parts[0].server_tool_call.arguments).toEqual({ + sql: "SELECT 1", + }); + + // Input should have server_tool_call_response type + const serverResponseMsg = inputMessages.find( + (m: any) => m.parts[0]?.type === "server_tool_call_response" + ); + expect(serverResponseMsg).toBeDefined(); + expect(serverResponseMsg.parts[0].server_tool_call_response.type).toBe("mcp"); + expect(serverResponseMsg.parts[0].server_tool_call_response.server_name).toBe( + "postgres-server" + ); + }); + }); + + // ======================================================================== + // Vendor-prefixed attributes + // ======================================================================== + + describe("vendor-prefixed attributes", () => { + it("chat spans get copilot vendor attributes from usage/turn_start events", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + dispatchEvent( + session, + makeEvent("assistant.turn_start", { + turnId: "turn-123", + interactionId: "interaction-456", + }) + ); + dispatchEvent( + session, + makeEvent("assistant.usage", { + model: "gpt-4o", + inputTokens: 100, + outputTokens: 50, + cost: 0.005, + duration: 1200, + initiator: "user", 
+ copilotUsage: { + tokenDetails: { + batchSize: 1, + costPerBatch: 1, + tokenCount: 100, + tokenType: "input", + }, + totalNanoAiu: 42000, + }, + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const chatSpan = spans.find((s) => s.name.startsWith("chat"))!; + expect(chatSpan.attributes["github.copilot.cost"]).toBe(0.005); + expect(chatSpan.attributes["github.copilot.server_duration"]).toBe(1200); + expect(chatSpan.attributes["github.copilot.initiator"]).toBe("user"); + expect(chatSpan.attributes["github.copilot.aiu"]).toBe(42000); + expect(chatSpan.attributes["github.copilot.turn_id"]).toBe("turn-123"); + expect(chatSpan.attributes["github.copilot.interaction_id"]).toBe("interaction-456"); + }); + + it("invoke_agent spans get accumulated copilot cost and aiu", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + + // Turn 1 with cost and AIU + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent( + session, + makeEvent("assistant.usage", { + model: "gpt-4o", + inputTokens: 50, + outputTokens: 20, + cost: 0.003, + copilotUsage: { + tokenDetails: { + batchSize: 1, + costPerBatch: 1, + tokenCount: 50, + tokenType: "input", + }, + totalNanoAiu: 10000, + }, + }) + ); + dispatchEvent(session, makeEvent("assistant.turn_end", { turnId: "turn-1" })); + + // Turn 2 with more cost and AIU + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-2" })); + dispatchEvent( + session, + makeEvent("assistant.usage", { + model: "gpt-4o", + inputTokens: 60, + outputTokens: 30, + cost: 0.004, + copilotUsage: { + tokenDetails: { + batchSize: 1, + costPerBatch: 1, + tokenCount: 60, + tokenType: "input", + }, + totalNanoAiu: 15000, + }, + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const invokeSpan = spans.find((s) => 
s.name.startsWith("invoke_agent"))!; + expect(invokeSpan.attributes["github.copilot.cost"]).toBeCloseTo(0.007, 6); + expect(invokeSpan.attributes["github.copilot.aiu"]).toBe(25000); + }); + }); + + // ======================================================================== + // Custom span events + // ======================================================================== + + describe("custom span events", () => { + it("session.truncation dispatches github.copilot.session.truncation span event", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent( + session, + makeEvent("session.truncation", { + tokenLimit: 8000, + preTruncationTokensInMessages: 10000, + preTruncationMessagesLength: 20, + postTruncationTokensInMessages: 7500, + postTruncationMessagesLength: 15, + tokensRemovedDuringTruncation: 2500, + messagesRemovedDuringTruncation: 5, + performedBy: "system", + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const chatSpan = spans.find((s) => s.name.startsWith("chat"))!; + const truncationEvent = chatSpan.events.find( + (e) => e.name === "github.copilot.session.truncation" + ); + expect(truncationEvent).toBeDefined(); + expect(truncationEvent!.attributes!["github.copilot.token_limit"]).toBe(8000); + expect(truncationEvent!.attributes!["github.copilot.pre_tokens"]).toBe(10000); + expect(truncationEvent!.attributes!["github.copilot.post_tokens"]).toBe(7500); + expect(truncationEvent!.attributes!["github.copilot.pre_messages"]).toBe(20); + expect(truncationEvent!.attributes!["github.copilot.post_messages"]).toBe(15); + expect(truncationEvent!.attributes!["github.copilot.tokens_removed"]).toBe(2500); + expect(truncationEvent!.attributes!["github.copilot.messages_removed"]).toBe(5); + 
expect(truncationEvent!.attributes!["github.copilot.performed_by"]).toBe("system"); + }); + + it("session.compaction_start produces the right span event", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent(session, makeEvent("session.compaction_start", {})); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const chatSpan = spans.find((s) => s.name.startsWith("chat"))!; + const compactionStartEvent = chatSpan.events.find( + (e) => e.name === "github.copilot.session.compaction_start" + ); + expect(compactionStartEvent).toBeDefined(); + }); + + it("session.compaction_complete produces span event with proper attributes", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent( + session, + makeEvent("session.compaction_complete", { + success: true, + preCompactionTokens: 5000, + postCompactionTokens: 2000, + tokensRemoved: 3000, + messagesRemoved: 8, + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const spans = getSpans(); + const chatSpan = spans.find((s) => s.name.startsWith("chat"))!; + const compactionEvent = chatSpan.events.find( + (e) => e.name === "github.copilot.session.compaction_complete" + ); + expect(compactionEvent).toBeDefined(); + expect(compactionEvent!.attributes!["github.copilot.success"]).toBe(true); + expect(compactionEvent!.attributes!["github.copilot.pre_tokens"]).toBe(5000); + expect(compactionEvent!.attributes!["github.copilot.post_tokens"]).toBe(2000); + expect(compactionEvent!.attributes!["github.copilot.tokens_removed"]).toBe(3000); + 
expect(compactionEvent!.attributes!["github.copilot.messages_removed"]).toBe(8); + }); + }); + + // ======================================================================== + // CompleteOnDispose + // ======================================================================== + + describe("completeOnDispose", () => { + it("completes open spans with error status when session is destroyed", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + + // Spans should not be finished yet + expect(getSpans().length).toBe(0); + + // Simulate session destroy - this calls completeOnDispose + try { + await session.destroy(); + } catch { + // sendRequest will fail since mock doesn't handle destroy, that's ok + } + + const spans = getSpans(); + // Both invoke_agent and chat spans should now be finished + const invokeSpan = spans.find((s) => s.name.startsWith("invoke_agent")); + const chatSpan = spans.find((s) => s.name.startsWith("chat")); + expect(invokeSpan).toBeDefined(); + expect(chatSpan).toBeDefined(); + expect(invokeSpan!.status.code).toBe(SpanStatusCode.ERROR); + expect(chatSpan!.status.code).toBe(SpanStatusCode.ERROR); + }); + }); + + // ======================================================================== + // Chunk timing with "chat" operation name + // ======================================================================== + + describe("chunk timing operation name", () => { + it("time_to_first_chunk metrics use chat as the operation name", async () => { + const telemetry = new CopilotTelemetry({}); + const session = createTestSession(telemetry, { + model: "gpt-4o", + streaming: true, + }); + + await session.send({ prompt: "Hello" }); + dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); + dispatchEvent( + session, + makeEvent("assistant.message_delta", { + 
messageId: "msg-1", + deltaContent: "Hi ", + }) + ); + dispatchEvent( + session, + makeEvent("assistant.message", { + messageId: "msg-1", + content: "Hi there", + }) + ); + dispatchEvent(session, makeEvent("session.idle")); + + const scopeMetrics = await getMetrics(); + const ttfcMetric = findMetric( + scopeMetrics, + "gen_ai.client.operation.time_to_first_chunk" + ); + expect(ttfcMetric).toBeDefined(); + + // The metric data point should have gen_ai.operation.name = "chat" + const dataPoint = ttfcMetric.dataPoints[0]; + expect(dataPoint.attributes["gen_ai.operation.name"]).toBe("chat"); + }); + }); +}); diff --git a/python/README.md b/python/README.md index 3a1c4c73c..a7092112b 100644 --- a/python/README.md +++ b/python/README.md @@ -106,6 +106,7 @@ await client.stop() - `auto_restart` (bool): Auto-restart on crash (default: True) - `github_token` (str): GitHub token for authentication. When provided, takes priority over other auth methods. - `use_logged_in_user` (bool): Whether to use logged-in user for authentication (default: True, but False when `github_token` is provided). Cannot be used with `cli_url`. +- `telemetry` (TelemetryConfig): OpenTelemetry instrumentation configuration. When provided, enables automatic tracing and metrics following [GenAI semantic conventions](../docs/opentelemetry-instrumentation.md). **SessionConfig Options (for `create_session`):** @@ -117,6 +118,8 @@ await client.stop() - `streaming` (bool): Enable streaming delta events - `provider` (dict): Custom API provider configuration (BYOK). See [Custom Providers](#custom-providers) section. - `infinite_sessions` (dict): Automatic context compaction configuration +- `agent_name` (str): Agent name for telemetry attribution. When set, the `invoke_agent` span includes a `gen_ai.agent.name` attribute. +- `agent_description` (str): Agent description for telemetry attribution. When set, the `invoke_agent` span includes a `gen_ai.agent.description` attribute. 
- `on_user_input_request` (callable): Handler for user input requests from the agent (enables ask_user tool). See [User Input Requests](#user-input-requests) section. - `hooks` (dict): Hook handlers for session lifecycle events. See [Session Hooks](#session-hooks) section. diff --git a/python/copilot/__init__.py b/python/copilot/__init__.py index f5f7ed0b1..1e277742a 100644 --- a/python/copilot/__init__.py +++ b/python/copilot/__init__.py @@ -33,6 +33,7 @@ SessionListFilter, SessionMetadata, StopError, + TelemetryConfig, Tool, ToolHandler, ToolInvocation, @@ -69,6 +70,7 @@ "SessionListFilter", "SessionMetadata", "StopError", + "TelemetryConfig", "Tool", "ToolHandler", "ToolInvocation", diff --git a/python/copilot/_copilot_telemetry.py b/python/copilot/_copilot_telemetry.py new file mode 100644 index 000000000..2ff3d4f17 --- /dev/null +++ b/python/copilot/_copilot_telemetry.py @@ -0,0 +1,1744 @@ +""" +Internal OpenTelemetry instrumentation for the Copilot SDK. + +Implements the Semantic Conventions for Generative AI systems. +See https://opentelemetry.io/docs/specs/semconv/gen-ai/ + +This module is private — do not import from outside the copilot package. +""" + +from __future__ import annotations + +import json +import os +import threading +import time +from collections.abc import Sequence +from dataclasses import dataclass, field +from typing import Any +from urllib.parse import urlparse + +from opentelemetry import context as otel_context +from opentelemetry import metrics, trace + +from .generated.session_events import SessionEvent, SessionEventType +from .types import ProviderConfig, SystemMessageConfig, TelemetryConfig, Tool + +# --------------------------------------------------------------------------- +# SDK version (read once at import time) +# --------------------------------------------------------------------------- + + +def _get_sdk_version() -> str | None: + """Get the SDK version, avoiding circular imports. 
+
+    This runs at module import time, while copilot.__init__ is still
+    executing, so copilot.__version__ may not be set yet; that is why
+    importlib.metadata is consulted first.
+    """
+    try:
+        import importlib.metadata
+
+        return importlib.metadata.version("github-copilot-sdk")
+    except Exception:
+        pass
+    try:
+        import copilot
+
+        return getattr(copilot, "__version__", None)
+    except Exception:
+        pass
+    return None
+
+
+_SDK_VERSION: str = _get_sdk_version() or ""
+
+# ---------------------------------------------------------------------------
+# Constants (mirrors .NET OpenTelemetryConsts)
+# ---------------------------------------------------------------------------
+
+DEFAULT_SOURCE_NAME = "github.copilot.sdk"
+DEFAULT_PROVIDER_NAME = "github"
+CAPTURE_MESSAGE_CONTENT_ENV_VAR = "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"
+
+# Semantic convention attribute keys
+ATTR_GEN_AI_OPERATION_NAME = "gen_ai.operation.name"
+ATTR_GEN_AI_PROVIDER_NAME = "gen_ai.provider.name"
+ATTR_GEN_AI_AGENT_ID = "gen_ai.agent.id"
+ATTR_GEN_AI_AGENT_NAME = "gen_ai.agent.name"
+ATTR_GEN_AI_AGENT_DESCRIPTION = "gen_ai.agent.description"
+ATTR_GEN_AI_CONVERSATION_ID = "gen_ai.conversation.id"
+ATTR_GEN_AI_REQUEST_MODEL = "gen_ai.request.model"
+ATTR_GEN_AI_RESPONSE_MODEL = "gen_ai.response.model"
+ATTR_GEN_AI_RESPONSE_ID = "gen_ai.response.id"
+ATTR_GEN_AI_RESPONSE_FINISH_REASONS = "gen_ai.response.finish_reasons"
+ATTR_GEN_AI_USAGE_INPUT_TOKENS = "gen_ai.usage.input_tokens"
+ATTR_GEN_AI_USAGE_OUTPUT_TOKENS = "gen_ai.usage.output_tokens"
+ATTR_GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS = "gen_ai.usage.cache_read.input_tokens"
+ATTR_GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS = "gen_ai.usage.cache_creation.input_tokens"
+ATTR_GEN_AI_TOKEN_TYPE = "gen_ai.token.type"
+ATTR_GEN_AI_INPUT_MESSAGES = "gen_ai.input.messages"
+ATTR_GEN_AI_OUTPUT_MESSAGES = "gen_ai.output.messages"
+ATTR_GEN_AI_SYSTEM_INSTRUCTIONS = "gen_ai.system_instructions"
+ATTR_GEN_AI_TOOL_DEFINITIONS = "gen_ai.tool.definitions"
+ATTR_GEN_AI_TOOL_CALL_ID = "gen_ai.tool.call.id"
+ATTR_GEN_AI_TOOL_CALL_ARGUMENTS = "gen_ai.tool.call.arguments"
+ATTR_GEN_AI_TOOL_CALL_RESULT = "gen_ai.tool.call.result"
+ATTR_GEN_AI_TOOL_NAME = "gen_ai.tool.name"
+ATTR_GEN_AI_TOOL_DESCRIPTION = "gen_ai.tool.description"
+ATTR_GEN_AI_TOOL_TYPE = "gen_ai.tool.type"
+ATTR_SERVER_ADDRESS = "server.address"
+ATTR_SERVER_PORT = "server.port"
+ATTR_ERROR_TYPE = "error.type"
+
+# Vendor-prefixed span attributes
+ATTR_COPILOT_COST = "github.copilot.cost"
+ATTR_COPILOT_SERVER_DURATION = "github.copilot.server_duration"
+ATTR_COPILOT_INITIATOR = "github.copilot.initiator"
+ATTR_COPILOT_AIU = "github.copilot.aiu"
+ATTR_COPILOT_TURN_ID = "github.copilot.turn_id"
+ATTR_COPILOT_INTERACTION_ID = "github.copilot.interaction_id"
+
+# Vendor-prefixed span event names and attribute keys
+COPILOT_EVENT_SESSION_TRUNCATION = "github.copilot.session.truncation"
+COPILOT_EVENT_SESSION_COMPACTION_START = "github.copilot.session.compaction_start"
+COPILOT_EVENT_SESSION_COMPACTION_COMPLETE = "github.copilot.session.compaction_complete"
+COPILOT_EVENT_SKILL_INVOKED = "github.copilot.skill.invoked"
+
+COPILOT_EVENT_ATTR_MESSAGE = "github.copilot.message"
+COPILOT_EVENT_ATTR_TOKEN_LIMIT = "github.copilot.token_limit"
+COPILOT_EVENT_ATTR_PRE_TOKENS = "github.copilot.pre_tokens"
+COPILOT_EVENT_ATTR_POST_TOKENS = "github.copilot.post_tokens"
+COPILOT_EVENT_ATTR_PRE_MESSAGES = "github.copilot.pre_messages"
+COPILOT_EVENT_ATTR_POST_MESSAGES = "github.copilot.post_messages"
+COPILOT_EVENT_ATTR_TOKENS_REMOVED = "github.copilot.tokens_removed"
+COPILOT_EVENT_ATTR_MESSAGES_REMOVED = "github.copilot.messages_removed"
+COPILOT_EVENT_ATTR_PERFORMED_BY = "github.copilot.performed_by"
+COPILOT_EVENT_ATTR_SUCCESS = "github.copilot.success"
+COPILOT_EVENT_ATTR_SKILL_NAME = "github.copilot.skill.name"
+COPILOT_EVENT_ATTR_SKILL_PATH = "github.copilot.skill.path"
+COPILOT_EVENT_ATTR_SKILL_CONTENT = "github.copilot.skill.content"
+COPILOT_EVENT_ATTR_SKILL_PLUGIN_NAME = "github.copilot.skill.plugin_name" +COPILOT_EVENT_ATTR_SKILL_PLUGIN_VERSION = "github.copilot.skill.plugin_version" + +# Operation names +OP_INVOKE_AGENT = "invoke_agent" +OP_CHAT = "chat" +OP_EXECUTE_TOOL = "execute_tool" + +# Token type values +TOKEN_TYPE_INPUT = "input" +TOKEN_TYPE_OUTPUT = "output" + +# Metric names +METRIC_OPERATION_DURATION = "gen_ai.client.operation.duration" +METRIC_TOKEN_USAGE = "gen_ai.client.token.usage" +METRIC_TIME_TO_FIRST_CHUNK = "gen_ai.client.operation.time_to_first_chunk" +METRIC_TIME_PER_OUTPUT_CHUNK = "gen_ai.client.operation.time_per_output_chunk" + +# Bucket boundaries (matching .NET) +DURATION_BUCKET_BOUNDARIES: Sequence[float] = ( + 0.01, + 0.02, + 0.04, + 0.08, + 0.16, + 0.32, + 0.64, + 1.28, + 2.56, + 5.12, + 10.24, + 20.48, + 40.96, + 81.92, +) +TOKEN_BUCKET_BOUNDARIES: Sequence[int] = ( + 1, + 4, + 16, + 64, + 256, + 1024, + 4096, + 16384, + 65536, + 262144, + 1048576, + 4194304, + 16777216, + 67108864, +) + + +# --------------------------------------------------------------------------- +# OTel message types for parts-based JSON +# --------------------------------------------------------------------------- + + +@dataclass +class OtelPart: + type: str + content: str | None = None + id: str | None = None + name: str | None = None + arguments: Any = None + response: Any = None + mcp_server_name: str | None = None + + +@dataclass +class OtelMsg: + role: str + parts: list[OtelPart] = field(default_factory=list) + finish_reason: str | None = None + + +# --------------------------------------------------------------------------- +# Subagent state +# --------------------------------------------------------------------------- + + +@dataclass +class _SubagentState: + invoke_agent_span: trace.Span + invoke_agent_context: Any # Context + invoke_agent_start_time: float + chat_span: trace.Span | None = None + agent_name: str | None = None + response_model: str | None = None + response_id: str | 
None = None + input_tokens: int = 0 + output_tokens: int = 0 + cache_read_tokens: int = 0 + cache_creation_tokens: int = 0 + input_messages: list[OtelMsg] = field(default_factory=list) + output_messages: list[OtelMsg] = field(default_factory=list) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def normalize_provider_name(provider_type: str | None) -> str: + """Normalize provider type string to OpenTelemetry semantic convention name.""" + if not provider_type or not provider_type.strip(): + return DEFAULT_PROVIDER_NAME + name = provider_type.strip().lower() + if name == "anthropic": + return "anthropic" + if name == "azure": + return "azure.ai.openai" + if name == "openai": + return "openai" + return DEFAULT_PROVIDER_NAME + + +def parse_server_address(base_url: str | None) -> tuple[str | None, int | None]: + """Extract host and port from a provider base URL.""" + if not base_url or not base_url.strip(): + return None, None + try: + parsed = urlparse(base_url) + except Exception: + return None, None + host = parsed.hostname + if not host: + return None, None + port = parsed.port + if port and port > 0: + return host, port + scheme = (parsed.scheme or "").lower() + if scheme == "https": + return host, 443 + if scheme == "http": + return host, 80 + return host, None + + +def _serialize_value(value: Any) -> str: + """Convert a value to a string suitable for a span attribute.""" + if isinstance(value, str): + return value + if isinstance(value, bool): + return "true" if value else "false" + if isinstance(value, (int, float)): + return str(value) + if isinstance(value, (dict, list)): + return json.dumps(value, default=str) + return str(value) + + +def _get_parent_tool_call_id(event: SessionEvent) -> str | None: + """Extract parentToolCallId from event data if present.""" + if event.type in ( + SessionEventType.ASSISTANT_USAGE, + 
SessionEventType.ASSISTANT_MESSAGE, + SessionEventType.ASSISTANT_MESSAGE_DELTA, + SessionEventType.TOOL_EXECUTION_START, + SessionEventType.TOOL_EXECUTION_COMPLETE, + ): + return getattr(event.data, "parent_tool_call_id", None) + return None + + +class SessionDestroyedError(Exception): + """Raised when a session is destroyed while a turn is in progress.""" + + ... + + +class SessionTurnError(Exception): + """Raised when the session encounters an error or a subagent fails.""" + + ... + + +# --------------------------------------------------------------------------- +# CopilotTelemetry +# --------------------------------------------------------------------------- + + +class CopilotTelemetry: + """ + Provides OpenTelemetry instrumentation for the Copilot SDK. + + Implements the Semantic Conventions for Generative AI systems. + The specification is experimental and subject to change. + """ + + def __init__( + self, + config: TelemetryConfig, + tracer_provider: Any | None = None, + meter_provider: Any | None = None, + ) -> None: + source_name = config.get("source_name") or DEFAULT_SOURCE_NAME + + enable_sensitive = config.get("enable_sensitive_data") + if enable_sensitive is None: + env_val = os.environ.get(CAPTURE_MESSAGE_CONTENT_ENV_VAR, "") + enable_sensitive = env_val.lower() == "true" + self.enable_sensitive_data: bool = enable_sensitive + + if tracer_provider is not None: + self._tracer = tracer_provider.get_tracer(source_name, _SDK_VERSION) + else: + self._tracer = trace.get_tracer(source_name, _SDK_VERSION) + + if meter_provider is not None: + self._meter = meter_provider.get_meter(source_name, _SDK_VERSION) + else: + self._meter = metrics.get_meter(source_name, _SDK_VERSION) + + self.operation_duration_histogram = self._meter.create_histogram( + name=METRIC_OPERATION_DURATION, + unit="s", + description="GenAI operation duration.", + explicit_bucket_boundaries_advisory=DURATION_BUCKET_BOUNDARIES, + ) + self.token_usage_histogram = self._meter.create_histogram( + 
name=METRIC_TOKEN_USAGE, + unit="{token}", + description="Number of input and output tokens used.", + explicit_bucket_boundaries_advisory=TOKEN_BUCKET_BOUNDARIES, + ) + self.time_to_first_chunk_histogram = self._meter.create_histogram( + name=METRIC_TIME_TO_FIRST_CHUNK, + unit="s", + description="Time to receive the first chunk from a streaming response.", + explicit_bucket_boundaries_advisory=DURATION_BUCKET_BOUNDARIES, + ) + self.time_per_output_chunk_histogram = self._meter.create_histogram( + name=METRIC_TIME_PER_OUTPUT_CHUNK, + unit="s", + description="Time elapsed between streamed output chunks after the first chunk.", + explicit_bucket_boundaries_advisory=DURATION_BUCKET_BOUNDARIES, + ) + + # -- Span creation ------------------------------------------------------- + + def start_invoke_agent_span( + self, + session_id: str, + model: str | None, + provider_name: str, + server_address: str | None, + server_port: int | None, + agent_name: str | None = None, + agent_description: str | None = None, + parent_context: Any | None = None, + ) -> tuple[trace.Span, Any]: + """Start an invoke_agent span (CLIENT kind) for a session turn. + + Returns: + A tuple of (span, context_with_span) where the context carries + the span for child span parenting. 
+ """ + display_name = f"{OP_INVOKE_AGENT} {agent_name}" if agent_name else OP_INVOKE_AGENT + + attributes: dict[str, Any] = { + ATTR_GEN_AI_OPERATION_NAME: OP_INVOKE_AGENT, + ATTR_GEN_AI_PROVIDER_NAME: provider_name, + ATTR_GEN_AI_AGENT_ID: session_id, + ATTR_GEN_AI_CONVERSATION_ID: session_id, + } + if model: + attributes[ATTR_GEN_AI_REQUEST_MODEL] = model + if agent_name: + attributes[ATTR_GEN_AI_AGENT_NAME] = agent_name + if agent_description: + attributes[ATTR_GEN_AI_AGENT_DESCRIPTION] = agent_description + if server_address: + attributes[ATTR_SERVER_ADDRESS] = server_address + if server_port is not None: + attributes[ATTR_SERVER_PORT] = server_port + + span = self._tracer.start_span( + name=display_name, + kind=trace.SpanKind.CLIENT, + attributes=attributes, + context=parent_context, + ) + + span_context = trace.set_span_in_context(span) + return span, span_context + + def start_chat_span( + self, + model: str | None, + provider_name: str, + server_address: str | None, + server_port: int | None, + parent_context: Any, + conversation_id: str | None = None, + ) -> trace.Span: + """Start a chat child span for an individual LLM turn.""" + display_name = f"{OP_CHAT} {model}" if model else OP_CHAT + + attributes: dict[str, Any] = { + ATTR_GEN_AI_OPERATION_NAME: OP_CHAT, + ATTR_GEN_AI_PROVIDER_NAME: provider_name, + } + if model: + attributes[ATTR_GEN_AI_REQUEST_MODEL] = model + if conversation_id: + attributes[ATTR_GEN_AI_CONVERSATION_ID] = conversation_id + if server_address: + attributes[ATTR_SERVER_ADDRESS] = server_address + if server_port is not None: + attributes[ATTR_SERVER_PORT] = server_port + + return self._tracer.start_span( + name=display_name, + kind=trace.SpanKind.CLIENT, + attributes=attributes, + context=parent_context, + ) + + def start_execute_tool_span( + self, + tool_name: str, + tool_call_id: str, + description: str | None, + arguments: Any, + parent_context: Any | None = None, + ) -> trace.Span: + """Start an execute_tool span (INTERNAL kind) 
for a tool call.""" + display_name = f"{OP_EXECUTE_TOOL} {tool_name}" + + attributes: dict[str, Any] = { + ATTR_GEN_AI_OPERATION_NAME: OP_EXECUTE_TOOL, + ATTR_GEN_AI_TOOL_NAME: tool_name, + ATTR_GEN_AI_TOOL_CALL_ID: tool_call_id, + ATTR_GEN_AI_TOOL_TYPE: "function", + } + if description: + attributes[ATTR_GEN_AI_TOOL_DESCRIPTION] = description + if self.enable_sensitive_data and arguments is not None: + attributes[ATTR_GEN_AI_TOOL_CALL_ARGUMENTS] = _serialize_value(arguments) + + span = self._tracer.start_span( + name=display_name, + kind=trace.SpanKind.INTERNAL, + context=parent_context or otel_context.get_current(), + attributes=attributes, + ) + + return span + + # -- Span enrichment ----------------------------------------------------- + + def record_token_usage_metrics( + self, + input_tokens: int | None, + output_tokens: int | None, + request_model: str | None, + response_model: str | None, + provider_name: str, + server_address: str | None, + server_port: int | None, + error: Exception | None = None, + operation_name: str = OP_INVOKE_AGENT, + ) -> None: + """Record token usage metrics at turn completion (so error.type can be included).""" + base_attrs = self._create_metric_attributes( + operation_name, + request_model, + response_model, + provider_name, + server_address, + server_port, + ) + if error is not None: + base_attrs[ATTR_ERROR_TYPE] = type(error).__name__ + if input_tokens is not None: + attrs = dict(base_attrs) + attrs[ATTR_GEN_AI_TOKEN_TYPE] = TOKEN_TYPE_INPUT + self.token_usage_histogram.record(int(input_tokens), attrs) + if output_tokens is not None: + attrs = dict(base_attrs) + attrs[ATTR_GEN_AI_TOKEN_TYPE] = TOKEN_TYPE_OUTPUT + self.token_usage_histogram.record(int(output_tokens), attrs) + + def set_execute_tool_result(self, span: trace.Span, result: Any) -> None: + """Set tool call result on span (sensitive only).""" + if not self.enable_sensitive_data or result is None: + return + span.set_attribute(ATTR_GEN_AI_TOOL_CALL_RESULT, 
_serialize_value(result)) + + @staticmethod + def record_error(span: trace.Span, error: Exception) -> None: + """Record an error on a span.""" + span.set_attribute(ATTR_ERROR_TYPE, type(error).__name__) + span.set_status(trace.StatusCode.ERROR, str(error)) + + # -- Metrics recording --------------------------------------------------- + + def record_operation_duration( + self, + duration_seconds: float, + request_model: str | None, + response_model: str | None, + provider_name: str, + server_address: str | None, + server_port: int | None, + error: Exception | None = None, + operation_name: str = OP_INVOKE_AGENT, + ) -> None: + """Record operation duration metric.""" + attrs = self._create_metric_attributes( + operation_name, + request_model, + response_model, + provider_name, + server_address, + server_port, + ) + if error is not None: + attrs[ATTR_ERROR_TYPE] = type(error).__name__ + self.operation_duration_histogram.record(duration_seconds, attrs) + + def record_time_to_first_chunk( + self, + duration_seconds: float, + request_model: str | None, + response_model: str | None, + provider_name: str, + server_address: str | None, + server_port: int | None, + ) -> None: + """Record time to first chunk metric.""" + attrs = self._create_metric_attributes( + OP_CHAT, + request_model, + response_model, + provider_name, + server_address, + server_port, + ) + self.time_to_first_chunk_histogram.record(duration_seconds, attrs) + + def record_time_per_output_chunk( + self, + duration_seconds: float, + request_model: str | None, + response_model: str | None, + provider_name: str, + server_address: str | None, + server_port: int | None, + ) -> None: + """Record time per output chunk metric.""" + attrs = self._create_metric_attributes( + OP_CHAT, + request_model, + response_model, + provider_name, + server_address, + server_port, + ) + self.time_per_output_chunk_histogram.record(duration_seconds, attrs) + + # -- Internal ------------------------------------------------------------ 
+ + @staticmethod + def _create_metric_attributes( + operation_name: str, + request_model: str | None, + response_model: str | None, + provider_name: str, + server_address: str | None, + server_port: int | None, + ) -> dict[str, Any]: + attrs: dict[str, Any] = { + ATTR_GEN_AI_OPERATION_NAME: operation_name, + ATTR_GEN_AI_PROVIDER_NAME: provider_name, + } + if request_model: + attrs[ATTR_GEN_AI_REQUEST_MODEL] = request_model + if response_model: + attrs[ATTR_GEN_AI_RESPONSE_MODEL] = response_model + if server_address: + attrs[ATTR_SERVER_ADDRESS] = server_address + if server_port is not None: + attrs[ATTR_SERVER_PORT] = server_port + return attrs + + +# --------------------------------------------------------------------------- +# Module-level helpers +# --------------------------------------------------------------------------- + + +def _build_messages_json( + messages: list[OtelMsg] | None, + finish_reason: str | None = None, +) -> str | None: + """Build JSON array of messages in the GenAI parts-based convention format.""" + if not messages: + return None + + result: list[dict[str, Any]] = [] + for message in messages: + if not message.parts: + continue + + parts: list[dict[str, Any]] = [] + for part in message.parts: + obj: dict[str, Any] = {"type": part.type} + + if part.type == "server_tool_call": + if part.id is not None: + obj["id"] = part.id + if part.name is not None: + obj["name"] = part.name + nested: dict[str, Any] = {"type": "mcp"} + if part.mcp_server_name is not None: + nested["server_name"] = part.mcp_server_name + if part.arguments is not None: + nested["arguments"] = part.arguments + obj["server_tool_call"] = nested + elif part.type == "server_tool_call_response": + if part.id is not None: + obj["id"] = part.id + nested = {"type": "mcp"} + if part.mcp_server_name is not None: + nested["server_name"] = part.mcp_server_name + if part.response is not None: + nested["response"] = part.response + obj["server_tool_call_response"] = nested + else: + if 
part.content is not None: + obj["content"] = part.content + if part.id is not None: + obj["id"] = part.id + if part.name is not None: + obj["name"] = part.name + if part.arguments is not None: + obj["arguments"] = part.arguments + if part.response is not None: + obj["response"] = part.response + + parts.append(obj) + + msg: dict[str, Any] = {"role": message.role, "parts": parts} + effective_finish_reason = message.finish_reason or finish_reason + if effective_finish_reason: + msg["finish_reason"] = effective_finish_reason + result.append(msg) + + return json.dumps(result) if result else None + + +def _build_system_instructions_json( + system_message: SystemMessageConfig | None, +) -> str | None: + """Build JSON for system instructions.""" + if not system_message: + return None + content = system_message.get("content") + if not content: + return None + return json.dumps([{"type": "text", "content": content}]) + + +def _build_tool_definitions_json( + tools: list[Tool] | None, +) -> str | None: + """Build JSON for tool definitions.""" + if not tools: + return None + result = [] + for tool in tools: + defn: dict[str, Any] = {"type": "function", "name": tool.name} + if tool.description: + defn["description"] = tool.description + if tool.parameters: + defn["parameters"] = tool.parameters + result.append(defn) + return json.dumps(result) if result else None + + +# --------------------------------------------------------------------------- +# AgentTurnTracker +# --------------------------------------------------------------------------- + + +class AgentTurnTracker: + """Encapsulates per-turn telemetry state and logic for a session. + + Implements a two-level span hierarchy: invoke_agent → chat, matching the + TypeScript/C# AgentTurnTracker. Supports subagent spans and tool call parenting. + + All public methods acquire ``_lock`` for thread safety. 
+ """ + + def __init__( + self, + telemetry: CopilotTelemetry, + session_id: str, + model: str | None = None, + provider: ProviderConfig | None = None, + system_message: SystemMessageConfig | None = None, + tools: list[Tool] | None = None, + streaming: bool = False, + agent_name: str | None = None, + agent_description: str | None = None, + ) -> None: + self._telemetry = telemetry + self._session_id = session_id + self._lock = threading.Lock() + + # Session-level context (effectively immutable after construction) + self._request_model = model + self._agent_name = agent_name + self._agent_description = agent_description + provider_type = provider.get("type") if provider else None + self._provider_name = normalize_provider_name(provider_type) + base_url = provider.get("base_url") if provider else None + self._server_address, self._server_port = parse_server_address(base_url) + self._system_instructions_json = _build_system_instructions_json(system_message) + self._tool_definitions_json = _build_tool_definitions_json(tools) + self._is_streaming = streaming + + # Per-invoke_agent mutable state + self._agent_span: trace.Span | None = None + self._agent_span_context: Any | None = None + self._agent_start_time: float | None = None + self._agent_input_messages: list[OtelMsg] | None = None + self._agent_output_messages: list[OtelMsg] | None = None + self._active_subagents: dict[str, _SubagentState] | None = None + + # Pending tool call parent contexts + self._pending_tool_parents: dict[str, Any] | None = None + + # MCP server tool tracking: toolCallId → mcpServerName + self._server_tool_call_ids: dict[str, str] | None = None + + # Agent-level accumulated usage across all chat turns + self._agent_response_model: str | None = None + self._agent_response_id: str | None = None + self._agent_total_input_tokens: int = 0 + self._agent_total_output_tokens: int = 0 + self._agent_total_cache_read_tokens: int = 0 + self._agent_total_cache_creation_tokens: int = 0 + 
self._agent_total_cost: float = 0 + self._agent_total_aiu: float = 0 + + # Per-chat-turn mutable state (reset on each assistant.turn_start) + self._turn_span: trace.Span | None = None + self._turn_start_time: float | None = None + self._first_output_chunk_recorded: bool = False + self._last_output_chunk_time: float = 0.0 + self._response_model: str | None = None + self._response_id: str | None = None + self._input_tokens: int = 0 + self._output_tokens: int = 0 + self._cache_read_tokens: int = 0 + self._cache_creation_tokens: int = 0 + self._input_messages: list[OtelMsg] | None = None + self._output_messages: list[OtelMsg] | None = None + + # Copilot-specific per-turn attributes + self._turn_cost: float | None = None + self._turn_server_duration: float | None = None + self._turn_initiator: str | None = None + self._turn_aiu: float | None = None + self._turn_id: str | None = None + self._turn_interaction_id: str | None = None + + # Stashed user prompt for the first chat turn + self._pending_user_prompt: str | None = None + + # -- Property accessors -------------------------------------------------- + + @property + def telemetry(self) -> CopilotTelemetry: + return self._telemetry + + @property + def provider_name(self) -> str: + return self._provider_name + + @property + def server_address(self) -> str | None: + return self._server_address + + @property + def server_port(self) -> int | None: + return self._server_port + + def get_span_context(self) -> Any | None: + """Return the current invoke_agent span context (for parenting tool spans).""" + with self._lock: + return self._agent_span_context + + def get_tool_call_parent_context(self, tool_call_id: str) -> Any | None: + """Gets the parent context for a tool call span. Consumes the stored context (one-time use). + + For subagent tool calls this returns the subagent's invoke_agent context; + for main agent tool calls this returns the root invoke_agent context. 
+ """ + with self._lock: + if self._pending_tool_parents: + ctx = self._pending_tool_parents.get(tool_call_id) + if ctx is not None: + del self._pending_tool_parents[tool_call_id] + return ctx + return self._agent_span_context + + # -- Public methods ------------------------------------------------------ + + def begin_send(self, prompt: str | None) -> None: + """Called at the start of send(); starts the invoke_agent span if needed.""" + with self._lock: + if self._agent_span is None: + self._agent_span, self._agent_span_context = ( + self._telemetry.start_invoke_agent_span( + self._session_id, + self._request_model, + self._provider_name, + self._server_address, + self._server_port, + self._agent_name, + self._agent_description, + ) + ) + self._agent_start_time = time.monotonic() + self._agent_input_messages = [] + + # Agent-level input = what the caller sent (all user prompts). + if self._agent_input_messages is not None and prompt: + self._agent_input_messages.append( + OtelMsg(role="user", parts=[OtelPart(type="text", content=prompt)]) + ) + + # Stash user prompt for the first chat turn's input messages. + self._pending_user_prompt = prompt + + def process_event(self, event: SessionEvent) -> None: + """Called from _dispatch_event; handles telemetry enrichment and turn completion.""" + with self._lock: + self._process_event_locked(event) + + def complete_turn_with_error(self, error: Exception) -> None: + """Called from send() error path; completes turn with error.""" + with self._lock: + self._complete_chat_turn(error) + self._complete_agent_turn(error) + + def complete_on_dispose(self) -> None: + """Closes any active spans with an error status. + + Called when the session is destroyed while a turn may still be in progress. 
+ """ + with self._lock: + if self._agent_span is not None: + dispose_error = SessionDestroyedError( + "Session destroyed while agent turn was in progress" + ) + self._complete_chat_turn(dispose_error) + self._complete_agent_turn(dispose_error) + + # -- Private methods (caller must hold _lock) ---------------------------- + + def _process_event_locked(self, event: SessionEvent) -> None: + """Process a single event under the lock.""" + # Route subagent events by parentToolCallId. + parent_tool_call_id = _get_parent_tool_call_id(event) + if parent_tool_call_id: + if self._active_subagents: + subagent = self._active_subagents.get(parent_tool_call_id) + if subagent: + self._process_subagent_event(subagent, event) + return + + # Handle subagent lifecycle events. + if event.type == SessionEventType.SUBAGENT_STARTED: + self._begin_subagent(event) + return + if event.type == SessionEventType.SUBAGENT_COMPLETED: + tool_call_id = getattr(event.data, "tool_call_id", None) + if tool_call_id: + self._complete_subagent(tool_call_id, None) + return + if event.type == SessionEventType.SUBAGENT_FAILED: + tool_call_id = getattr(event.data, "tool_call_id", None) + agent_name = getattr(event.data, "agent_name", None) or "unknown" + error_msg = getattr(event.data, "error", None) + if isinstance(error_msg, str): + err_str = error_msg + elif hasattr(error_msg, "message"): + err_str = error_msg.message + else: + err_str = str(error_msg) if error_msg else "unknown" + if tool_call_id: + self._complete_subagent( + tool_call_id, + SessionTurnError(f"Subagent '{agent_name}' failed: {err_str}"), + ) + return + + # Record chunk timing for main agent events during a turn. + self._record_output_chunk_metric() + + # Per-turn event processing (writes to the chat child span). 
+ if self._turn_span is not None: + if event.type == SessionEventType.ASSISTANT_MESSAGE: + parts: list[OtelPart] = [] + reasoning_text = getattr(event.data, "reasoning_text", None) + if reasoning_text: + parts.append(OtelPart(type="reasoning", content=reasoning_text)) + content = getattr(event.data, "content", None) + if content: + parts.append(OtelPart(type="text", content=content)) + if parts and self._output_messages is not None: + self._output_messages.append(OtelMsg(role="assistant", parts=parts)) + + elif event.type == SessionEventType.ASSISTANT_USAGE: + data = event.data + model = getattr(data, "model", None) + if model: + self._response_model = model + api_call_id = getattr(data, "api_call_id", None) + provider_call_id = getattr(data, "provider_call_id", None) + if api_call_id: + self._response_id = api_call_id + elif provider_call_id: + self._response_id = provider_call_id + input_tok = getattr(data, "input_tokens", None) + output_tok = getattr(data, "output_tokens", None) + cache_read = getattr(data, "cache_read_tokens", None) + cache_write = getattr(data, "cache_write_tokens", None) + if input_tok is not None: + self._input_tokens += int(input_tok) + if output_tok is not None: + self._output_tokens += int(output_tok) + if cache_read is not None: + self._cache_read_tokens += int(cache_read) + if cache_write is not None: + self._cache_creation_tokens += int(cache_write) + + # Copilot-specific vendor attributes + cost = getattr(data, "cost", None) + if cost is not None: + self._turn_cost = (self._turn_cost or 0) + cost + duration = getattr(data, "duration", None) + if duration is not None: + self._turn_server_duration = (self._turn_server_duration or 0) + duration + initiator = getattr(data, "initiator", None) + if initiator: + self._turn_initiator = initiator + copilot_usage = getattr(data, "copilot_usage", None) + if copilot_usage: + nano_aiu = getattr(copilot_usage, "total_nano_aiu", None) + if nano_aiu is not None: + self._turn_aiu = (self._turn_aiu or 
0) + nano_aiu + + elif event.type == SessionEventType.SESSION_MODEL_CHANGE: + new_model = getattr(event.data, "new_model", None) + if new_model: + self._response_model = new_model + + elif event.type == SessionEventType.TOOL_EXECUTION_START: + data = event.data + mcp_server_name = getattr(data, "mcp_server_name", None) + tool_call_id = getattr(data, "tool_call_id", None) + tool_name = getattr(data, "tool_name", None) + arguments = getattr(data, "arguments", None) + is_server_tool = bool(mcp_server_name) + + if is_server_tool and tool_call_id: + if self._server_tool_call_ids is None: + self._server_tool_call_ids = {} + self._server_tool_call_ids[tool_call_id] = str(mcp_server_name) + + if self._output_messages is not None: + self._output_messages.append( + OtelMsg( + role="assistant", + parts=[ + OtelPart( + type="server_tool_call" if is_server_tool else "tool_call", + id=tool_call_id, + name=tool_name, + arguments=arguments, + mcp_server_name=mcp_server_name, + ) + ], + ) + ) + + # Store parent context for tool call span parenting. 
+ if self._agent_span is not None and tool_call_id: + if self._pending_tool_parents is None: + self._pending_tool_parents = {} + self._pending_tool_parents[tool_call_id] = self._agent_span_context + + elif event.type == SessionEventType.TOOL_EXECUTION_COMPLETE: + data = event.data + tool_call_id = getattr(data, "tool_call_id", None) + result_obj = getattr(data, "result", None) + error_obj = getattr(data, "error", None) + result_content = None + if result_obj is not None: + result_content = getattr(result_obj, "content", None) + if result_content is None and error_obj is not None: + if isinstance(error_obj, str): + result_content = error_obj + else: + result_content = getattr(error_obj, "message", None) + + server_name: str | None = None + is_server_tool = False + if tool_call_id and self._server_tool_call_ids: + is_server_tool = tool_call_id in self._server_tool_call_ids + if is_server_tool: + server_name = self._server_tool_call_ids.pop(tool_call_id, None) + + if self._input_messages is not None: + self._input_messages.append( + OtelMsg( + role="tool", + parts=[ + OtelPart( + type="server_tool_call_response" + if is_server_tool + else "tool_call_response", + id=tool_call_id, + response=result_content, + mcp_server_name=server_name, + ) + ], + ) + ) + + # Copilot-specific lifecycle events emitted as span events on the + # current activity (chat turn if active, otherwise invoke_agent). 
+ target = self._turn_span or self._agent_span + if target is not None: + if event.type == SessionEventType.SESSION_TRUNCATION: + data = event.data + attrs: dict[str, Any] = {} + token_limit = getattr(data, "token_limit", None) + if token_limit is not None: + attrs[COPILOT_EVENT_ATTR_TOKEN_LIMIT] = int(token_limit) + pre_tokens = getattr(data, "pre_truncation_tokens_in_messages", None) + if pre_tokens is not None: + attrs[COPILOT_EVENT_ATTR_PRE_TOKENS] = int(pre_tokens) + post_tokens = getattr(data, "post_truncation_tokens_in_messages", None) + if post_tokens is not None: + attrs[COPILOT_EVENT_ATTR_POST_TOKENS] = int(post_tokens) + pre_messages = getattr(data, "pre_truncation_messages_length", None) + if pre_messages is not None: + attrs[COPILOT_EVENT_ATTR_PRE_MESSAGES] = int(pre_messages) + post_messages = getattr(data, "post_truncation_messages_length", None) + if post_messages is not None: + attrs[COPILOT_EVENT_ATTR_POST_MESSAGES] = int(post_messages) + tokens_removed = getattr(data, "tokens_removed_during_truncation", None) + if tokens_removed is not None: + attrs[COPILOT_EVENT_ATTR_TOKENS_REMOVED] = int(tokens_removed) + messages_removed = getattr(data, "messages_removed_during_truncation", None) + if messages_removed is not None: + attrs[COPILOT_EVENT_ATTR_MESSAGES_REMOVED] = int(messages_removed) + performed_by = getattr(data, "performed_by", None) + if performed_by is not None: + attrs[COPILOT_EVENT_ATTR_PERFORMED_BY] = performed_by + target.add_event(COPILOT_EVENT_SESSION_TRUNCATION, attrs) + + elif event.type == SessionEventType.SESSION_COMPACTION_START: + target.add_event(COPILOT_EVENT_SESSION_COMPACTION_START) + + elif event.type == SessionEventType.SESSION_COMPACTION_COMPLETE: + data = event.data + attrs: dict[str, Any] = { + COPILOT_EVENT_ATTR_SUCCESS: getattr(data, "success", False), + } + if self._telemetry.enable_sensitive_data: + error_val = getattr(data, "error", None) + if error_val is not None: + if isinstance(error_val, str): + 
attrs[COPILOT_EVENT_ATTR_MESSAGE] = error_val + elif hasattr(error_val, "message"): + attrs[COPILOT_EVENT_ATTR_MESSAGE] = error_val.message + pre_tokens = getattr(data, "pre_compaction_tokens", None) + if pre_tokens is not None: + attrs[COPILOT_EVENT_ATTR_PRE_TOKENS] = int(pre_tokens) + post_tokens = getattr(data, "post_compaction_tokens", None) + if post_tokens is not None: + attrs[COPILOT_EVENT_ATTR_POST_TOKENS] = int(post_tokens) + tokens_removed = getattr(data, "tokens_removed", None) + if tokens_removed is not None: + attrs[COPILOT_EVENT_ATTR_TOKENS_REMOVED] = int(tokens_removed) + messages_removed = getattr(data, "messages_removed", None) + if messages_removed is not None: + attrs[COPILOT_EVENT_ATTR_MESSAGES_REMOVED] = int(messages_removed) + target.add_event(COPILOT_EVENT_SESSION_COMPACTION_COMPLETE, attrs) + + elif event.type == SessionEventType.SKILL_INVOKED: + data = event.data + skill_attrs: dict[str, str] = { + COPILOT_EVENT_ATTR_SKILL_NAME: getattr(data, "name", "") or "", + COPILOT_EVENT_ATTR_SKILL_PATH: getattr(data, "path", "") or "", + } + plugin_name = getattr(data, "plugin_name", None) + if plugin_name is not None: + skill_attrs[COPILOT_EVENT_ATTR_SKILL_PLUGIN_NAME] = plugin_name + plugin_version = getattr(data, "plugin_version", None) + if plugin_version is not None: + skill_attrs[COPILOT_EVENT_ATTR_SKILL_PLUGIN_VERSION] = plugin_version + if self._telemetry.enable_sensitive_data: + skill_content = getattr(data, "content", None) + if skill_content is not None: + skill_attrs[COPILOT_EVENT_ATTR_SKILL_CONTENT] = skill_content + target.add_event(COPILOT_EVENT_SKILL_INVOKED, skill_attrs) + + # Lifecycle events that start/end turns or the overall agent operation. 
+ if event.type == SessionEventType.ASSISTANT_TURN_START: + self._begin_chat_turn() + self._turn_id = getattr(event.data, "turn_id", None) + self._turn_interaction_id = getattr(event.data, "interaction_id", None) + + elif event.type == SessionEventType.ASSISTANT_TURN_END: + self._complete_chat_turn(None) + + elif event.type == SessionEventType.SESSION_IDLE: + self._complete_chat_turn(None) + self._complete_agent_turn(None) + + elif event.type == SessionEventType.SESSION_ERROR: + msg = getattr(event.data, "message", None) or "unknown error" + ex = SessionTurnError(f"Session error: {msg}") + self._complete_chat_turn(ex) + self._complete_agent_turn(ex) + + # ======================================================================== + # Chat turn lifecycle + # ======================================================================== + + def _begin_chat_turn(self) -> None: + """Starts a new chat child span for an LLM turn.""" + # If there's already an active turn, complete it first. + self._complete_chat_turn(None) + + self._response_model = None + self._response_id = None + self._input_tokens = 0 + self._output_tokens = 0 + self._cache_read_tokens = 0 + self._cache_creation_tokens = 0 + self._first_output_chunk_recorded = False + self._last_output_chunk_time = 0.0 + self._input_messages = [] + self._output_messages = [] + self._turn_cost = None + self._turn_server_duration = None + self._turn_initiator = None + self._turn_aiu = None + self._turn_id = None + self._turn_interaction_id = None + + # Add stashed user prompt as input message for the first turn. 
+ if self._pending_user_prompt: + self._input_messages.append( + OtelMsg( + role="user", + parts=[OtelPart(type="text", content=self._pending_user_prompt)], + ) + ) + self._pending_user_prompt = None + + parent_context = self._agent_span_context or otel_context.get_current() + self._turn_span = self._telemetry.start_chat_span( + self._request_model, + self._provider_name, + self._server_address, + self._server_port, + parent_context, + self._session_id, + ) + self._turn_start_time = time.monotonic() + + def _complete_chat_turn(self, error: Exception | None) -> None: + """Completes the current chat child span with per-turn attributes and metrics.""" + span = self._turn_span + if span is None: + return + + start_time = self._turn_start_time + input_messages = self._input_messages + output_messages = self._output_messages + response_model = self._response_model + response_id = self._response_id + input_tokens = self._input_tokens + output_tokens = self._output_tokens + cache_read_tokens = self._cache_read_tokens + cache_creation_tokens = self._cache_creation_tokens + turn_cost = self._turn_cost + turn_server_duration = self._turn_server_duration + turn_initiator = self._turn_initiator + turn_aiu = self._turn_aiu + turn_id = self._turn_id + turn_interaction_id = self._turn_interaction_id + + # Reset per-turn state + self._turn_span = None + self._turn_start_time = None + self._input_messages = None + self._output_messages = None + self._response_model = None + self._response_id = None + self._input_tokens = 0 + self._output_tokens = 0 + self._cache_read_tokens = 0 + self._cache_creation_tokens = 0 + self._turn_cost = None + self._turn_server_duration = None + self._turn_initiator = None + self._turn_aiu = None + self._turn_id = None + self._turn_interaction_id = None + + if error is not None: + CopilotTelemetry.record_error(span, error) + + finish_reason = "error" if error is not None else "stop" + span.set_attribute(ATTR_GEN_AI_RESPONSE_FINISH_REASONS, 
[finish_reason]) + + # Agent-level output = only the final turn's output. + if output_messages: + self._agent_output_messages = [ + OtelMsg(role=m.role, parts=m.parts, finish_reason=finish_reason) + for m in output_messages + ] + + # Accumulate agent-level usage across turns. + if response_model: + self._agent_response_model = response_model + if response_id: + self._agent_response_id = response_id + self._agent_total_input_tokens += input_tokens + self._agent_total_output_tokens += output_tokens + self._agent_total_cache_read_tokens += cache_read_tokens + self._agent_total_cache_creation_tokens += cache_creation_tokens + if turn_cost is not None: + self._agent_total_cost += turn_cost + if turn_aiu is not None: + self._agent_total_aiu += turn_aiu + + # Set usage-related span attributes for this LLM turn + if response_model: + span.set_attribute(ATTR_GEN_AI_RESPONSE_MODEL, response_model) + if response_id: + span.set_attribute(ATTR_GEN_AI_RESPONSE_ID, response_id) + if input_tokens > 0: + span.set_attribute(ATTR_GEN_AI_USAGE_INPUT_TOKENS, input_tokens) + if output_tokens > 0: + span.set_attribute(ATTR_GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens) + if cache_read_tokens > 0: + span.set_attribute(ATTR_GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens) + if cache_creation_tokens > 0: + span.set_attribute(ATTR_GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS, cache_creation_tokens) + + # Copilot vendor-prefixed attributes on chat spans + if turn_cost is not None: + span.set_attribute(ATTR_COPILOT_COST, turn_cost) + if turn_server_duration is not None: + span.set_attribute(ATTR_COPILOT_SERVER_DURATION, turn_server_duration) + if turn_initiator is not None: + span.set_attribute(ATTR_COPILOT_INITIATOR, turn_initiator) + if turn_aiu is not None: + span.set_attribute(ATTR_COPILOT_AIU, turn_aiu) + if turn_id is not None: + span.set_attribute(ATTR_COPILOT_TURN_ID, turn_id) + if turn_interaction_id is not None: + span.set_attribute(ATTR_COPILOT_INTERACTION_ID, turn_interaction_id) + 
+ # Set input/output message content as span attributes (sensitive) + if self._telemetry.enable_sensitive_data: + input_json = _build_messages_json(input_messages) + if input_json: + span.set_attribute(ATTR_GEN_AI_INPUT_MESSAGES, input_json) + output_json = _build_messages_json(output_messages, finish_reason) + if output_json: + span.set_attribute(ATTR_GEN_AI_OUTPUT_MESSAGES, output_json) + + # Token usage metrics (per-turn) + self._telemetry.record_token_usage_metrics( + input_tokens if input_tokens > 0 else None, + output_tokens if output_tokens > 0 else None, + self._request_model, + response_model, + self._provider_name, + self._server_address, + self._server_port, + error, + OP_CHAT, + ) + + # Per-turn operation duration + if start_time is not None: + duration = time.monotonic() - start_time + self._telemetry.record_operation_duration( + duration, + self._request_model, + response_model, + self._provider_name, + self._server_address, + self._server_port, + error, + OP_CHAT, + ) + + self._first_output_chunk_recorded = False + self._last_output_chunk_time = 0.0 + span.end() + + # ======================================================================== + # Agent turn lifecycle + # ======================================================================== + + def _complete_agent_turn(self, error: Exception | None) -> None: + """Completes the invoke_agent span and records overall operation duration.""" + span = self._agent_span + if span is None: + return + + start_time = self._agent_start_time + agent_input_messages = self._agent_input_messages + agent_output_messages = self._agent_output_messages + + # Reset agent state + self._agent_span = None + self._agent_span_context = None + self._agent_start_time = None + self._pending_user_prompt = None + self._agent_input_messages = None + self._agent_output_messages = None + + # Complete any remaining subagents before closing the parent. 
+ if self._active_subagents: + for key in list(self._active_subagents.keys()): + self._complete_subagent(key, error) + self._active_subagents = None + self._pending_tool_parents = None + self._server_tool_call_ids = None + + if error is not None: + CopilotTelemetry.record_error(span, error) + + finish_reason = "error" if error is not None else "stop" + span.set_attribute(ATTR_GEN_AI_RESPONSE_FINISH_REASONS, [finish_reason]) + + # Set accumulated usage across all chat turns on the invoke_agent span. + agent_response_model = self._agent_response_model + agent_response_id = self._agent_response_id + agent_total_input_tokens = self._agent_total_input_tokens + agent_total_output_tokens = self._agent_total_output_tokens + agent_total_cache_read_tokens = self._agent_total_cache_read_tokens + agent_total_cache_creation_tokens = self._agent_total_cache_creation_tokens + agent_total_cost = self._agent_total_cost + agent_total_aiu = self._agent_total_aiu + + self._agent_response_model = None + self._agent_response_id = None + self._agent_total_input_tokens = 0 + self._agent_total_output_tokens = 0 + self._agent_total_cache_read_tokens = 0 + self._agent_total_cache_creation_tokens = 0 + self._agent_total_cost = 0 + self._agent_total_aiu = 0 + + if agent_response_model: + span.set_attribute(ATTR_GEN_AI_RESPONSE_MODEL, agent_response_model) + if agent_response_id: + span.set_attribute(ATTR_GEN_AI_RESPONSE_ID, agent_response_id) + if agent_total_input_tokens > 0: + span.set_attribute(ATTR_GEN_AI_USAGE_INPUT_TOKENS, agent_total_input_tokens) + if agent_total_output_tokens > 0: + span.set_attribute(ATTR_GEN_AI_USAGE_OUTPUT_TOKENS, agent_total_output_tokens) + if agent_total_cache_read_tokens > 0: + span.set_attribute( + ATTR_GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, agent_total_cache_read_tokens + ) + if agent_total_cache_creation_tokens > 0: + span.set_attribute( + ATTR_GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS, agent_total_cache_creation_tokens + ) + + # Copilot vendor-prefixed 
attributes on invoke_agent span + if agent_total_cost > 0: + span.set_attribute(ATTR_COPILOT_COST, agent_total_cost) + if agent_total_aiu > 0: + span.set_attribute(ATTR_COPILOT_AIU, agent_total_aiu) + + # Agent-level input = caller's message; output = agent's final response. + if self._telemetry.enable_sensitive_data: + input_json = _build_messages_json(agent_input_messages) + if input_json: + span.set_attribute(ATTR_GEN_AI_INPUT_MESSAGES, input_json) + output_json = _build_messages_json(agent_output_messages) + if output_json: + span.set_attribute(ATTR_GEN_AI_OUTPUT_MESSAGES, output_json) + if self._system_instructions_json: + span.set_attribute(ATTR_GEN_AI_SYSTEM_INSTRUCTIONS, self._system_instructions_json) + if self._tool_definitions_json: + span.set_attribute(ATTR_GEN_AI_TOOL_DEFINITIONS, self._tool_definitions_json) + + if start_time is not None: + duration = time.monotonic() - start_time + self._telemetry.record_operation_duration( + duration, + self._request_model, + agent_response_model, + self._provider_name, + self._server_address, + self._server_port, + error, + OP_INVOKE_AGENT, + ) + + span.end() + + # ======================================================================== + # Streaming chunk timing + # ======================================================================== + + def _record_output_chunk_metric(self) -> None: + """Records streaming chunk timing metrics. 
Uses the chat turn stopwatch.""" + if not self._is_streaming or self._turn_start_time is None: + return + + now = time.monotonic() + elapsed = now - self._turn_start_time + + if not self._first_output_chunk_recorded: + self._first_output_chunk_recorded = True + self._last_output_chunk_time = now + self._telemetry.record_time_to_first_chunk( + elapsed, + self._request_model, + None, # response model not yet known during streaming + self._provider_name, + self._server_address, + self._server_port, + ) + return + + delta = now - self._last_output_chunk_time + self._last_output_chunk_time = now + self._telemetry.record_time_per_output_chunk( + delta, + self._request_model, + None, # response model not yet known during streaming + self._provider_name, + self._server_address, + self._server_port, + ) + + # ======================================================================== + # Subagent support + # ======================================================================== + + def _begin_subagent(self, event: SessionEvent) -> None: + """Creates nested invoke_agent + chat spans for a subagent.""" + data = event.data + if data is None: + return + + tool_call_id = getattr(data, "tool_call_id", None) + if not tool_call_id: + return + + agent_name = getattr(data, "agent_name", None) + agent_description = getattr(data, "agent_description", None) + + parent_context = self._agent_span_context or otel_context.get_current() + invoke_span, invoke_ctx = self._telemetry.start_invoke_agent_span( + self._session_id, + self._request_model, + self._provider_name, + self._server_address, + self._server_port, + agent_name, + agent_description, + parent_context, + ) + + chat_span = self._telemetry.start_chat_span( + self._request_model, + self._provider_name, + self._server_address, + self._server_port, + invoke_ctx, + self._session_id, + ) + + state = _SubagentState( + invoke_agent_span=invoke_span, + invoke_agent_context=invoke_ctx, + invoke_agent_start_time=time.monotonic(), + 
chat_span=chat_span, + agent_name=agent_name, + ) + + if self._active_subagents is None: + self._active_subagents = {} + self._active_subagents[tool_call_id] = state + + def _process_subagent_event(self, subagent: _SubagentState, event: SessionEvent) -> None: + """Routes an event to its owning subagent's spans.""" + if event.type == SessionEventType.ASSISTANT_USAGE: + data = event.data + model = getattr(data, "model", None) + subagent.response_model = model + + if model: + if subagent.chat_span is not None: + subagent.chat_span.set_attribute(ATTR_GEN_AI_RESPONSE_MODEL, model) + subagent.invoke_agent_span.set_attribute(ATTR_GEN_AI_RESPONSE_MODEL, model) + + api_call_id = getattr(data, "api_call_id", None) + provider_call_id = getattr(data, "provider_call_id", None) + if api_call_id: + subagent.response_id = api_call_id + elif provider_call_id: + subagent.response_id = provider_call_id + input_tok = getattr(data, "input_tokens", None) + output_tok = getattr(data, "output_tokens", None) + cache_read = getattr(data, "cache_read_tokens", None) + cache_write = getattr(data, "cache_write_tokens", None) + if input_tok is not None: + subagent.input_tokens += int(input_tok) + if output_tok is not None: + subagent.output_tokens += int(output_tok) + if cache_read is not None: + subagent.cache_read_tokens += int(cache_read) + if cache_write is not None: + subagent.cache_creation_tokens += int(cache_write) + + elif event.type == SessionEventType.ASSISTANT_MESSAGE: + data = event.data + parts: list[OtelPart] = [] + reasoning_text = getattr(data, "reasoning_text", None) + if reasoning_text: + parts.append(OtelPart(type="reasoning", content=reasoning_text)) + content = getattr(data, "content", None) + if content: + parts.append(OtelPart(type="text", content=content)) + if parts: + subagent.output_messages.append(OtelMsg(role="assistant", parts=parts)) + + elif event.type == SessionEventType.TOOL_EXECUTION_START: + data = event.data + mcp_server_name = getattr(data, 
"mcp_server_name", None) + tool_call_id = getattr(data, "tool_call_id", None) + is_server_tool = bool(mcp_server_name) + if is_server_tool and tool_call_id: + if self._server_tool_call_ids is None: + self._server_tool_call_ids = {} + self._server_tool_call_ids[tool_call_id] = str(mcp_server_name) + + subagent.output_messages.append( + OtelMsg( + role="assistant", + parts=[ + OtelPart( + type="server_tool_call" if is_server_tool else "tool_call", + id=tool_call_id, + name=getattr(data, "tool_name", None), + arguments=getattr(data, "arguments", None), + mcp_server_name=mcp_server_name, + ) + ], + ) + ) + + # For subagent tool calls, parent is the subagent's invoke_agent. + if tool_call_id: + if self._pending_tool_parents is None: + self._pending_tool_parents = {} + self._pending_tool_parents[tool_call_id] = subagent.invoke_agent_context + + elif event.type == SessionEventType.TOOL_EXECUTION_COMPLETE: + data = event.data + tool_call_id = getattr(data, "tool_call_id", None) + result_obj = getattr(data, "result", None) + error_obj = getattr(data, "error", None) + result_content = None + if result_obj is not None: + result_content = getattr(result_obj, "content", None) + if result_content is None and error_obj is not None: + if isinstance(error_obj, str): + result_content = error_obj + else: + result_content = getattr(error_obj, "message", None) + + server_name: str | None = None + is_server_tool = False + if tool_call_id and self._server_tool_call_ids: + is_server_tool = tool_call_id in self._server_tool_call_ids + if is_server_tool: + server_name = self._server_tool_call_ids.pop(tool_call_id, None) + + subagent.input_messages.append( + OtelMsg( + role="tool", + parts=[ + OtelPart( + type="server_tool_call_response" + if is_server_tool + else "tool_call_response", + id=tool_call_id, + response=result_content, + mcp_server_name=server_name, + ) + ], + ) + ) + + def _complete_subagent(self, tool_call_id: str, error: Exception | None) -> None: + """Completes a subagent's 
chat and invoke_agent spans.""" + if not self._active_subagents: + return + subagent = self._active_subagents.pop(tool_call_id, None) + if subagent is None: + return + + finish_reason = "error" if error else "stop" + + # -- Complete the chat child span -- + chat_span = subagent.chat_span + if chat_span is not None: + if error: + CopilotTelemetry.record_error(chat_span, error) + chat_span.set_attribute(ATTR_GEN_AI_RESPONSE_FINISH_REASONS, [finish_reason]) + + if subagent.response_model: + chat_span.set_attribute(ATTR_GEN_AI_RESPONSE_MODEL, subagent.response_model) + if subagent.response_id: + chat_span.set_attribute(ATTR_GEN_AI_RESPONSE_ID, subagent.response_id) + if subagent.input_tokens > 0: + chat_span.set_attribute(ATTR_GEN_AI_USAGE_INPUT_TOKENS, subagent.input_tokens) + if subagent.output_tokens > 0: + chat_span.set_attribute(ATTR_GEN_AI_USAGE_OUTPUT_TOKENS, subagent.output_tokens) + if subagent.cache_read_tokens > 0: + chat_span.set_attribute( + ATTR_GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, subagent.cache_read_tokens + ) + if subagent.cache_creation_tokens > 0: + chat_span.set_attribute( + ATTR_GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS, subagent.cache_creation_tokens + ) + + if self._telemetry.enable_sensitive_data: + if subagent.input_messages: + input_json = _build_messages_json(subagent.input_messages) + if input_json: + chat_span.set_attribute(ATTR_GEN_AI_INPUT_MESSAGES, input_json) + if subagent.output_messages: + output_json = _build_messages_json(subagent.output_messages, finish_reason) + if output_json: + chat_span.set_attribute(ATTR_GEN_AI_OUTPUT_MESSAGES, output_json) + + self._telemetry.record_token_usage_metrics( + subagent.input_tokens if subagent.input_tokens > 0 else None, + subagent.output_tokens if subagent.output_tokens > 0 else None, + subagent.response_model or self._request_model, + subagent.response_model, + self._provider_name, + self._server_address, + self._server_port, + error, + OP_CHAT, + ) + + chat_span.end() + + # -- Complete the 
invoke_agent span -- + invoke_span = subagent.invoke_agent_span + if error: + CopilotTelemetry.record_error(invoke_span, error) + invoke_span.set_attribute(ATTR_GEN_AI_RESPONSE_FINISH_REASONS, [finish_reason]) + + if subagent.response_model: + invoke_span.set_attribute(ATTR_GEN_AI_RESPONSE_MODEL, subagent.response_model) + if subagent.response_id: + invoke_span.set_attribute(ATTR_GEN_AI_RESPONSE_ID, subagent.response_id) + if subagent.input_tokens > 0: + invoke_span.set_attribute(ATTR_GEN_AI_USAGE_INPUT_TOKENS, subagent.input_tokens) + if subagent.output_tokens > 0: + invoke_span.set_attribute(ATTR_GEN_AI_USAGE_OUTPUT_TOKENS, subagent.output_tokens) + if subagent.cache_read_tokens > 0: + invoke_span.set_attribute( + ATTR_GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, subagent.cache_read_tokens + ) + if subagent.cache_creation_tokens > 0: + invoke_span.set_attribute( + ATTR_GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS, subagent.cache_creation_tokens + ) + + if self._telemetry.enable_sensitive_data and subagent.output_messages: + output_json = _build_messages_json( + [ + OtelMsg(role=m.role, parts=m.parts, finish_reason=finish_reason) + for m in subagent.output_messages + ] + ) + if output_json: + invoke_span.set_attribute(ATTR_GEN_AI_OUTPUT_MESSAGES, output_json) + + duration = time.monotonic() - subagent.invoke_agent_start_time + self._telemetry.record_operation_duration( + duration, + subagent.response_model or self._request_model, + subagent.response_model, + self._provider_name, + self._server_address, + self._server_port, + error, + OP_INVOKE_AGENT, + ) + + invoke_span.end() diff --git a/python/copilot/client.py b/python/copilot/client.py index 60e0d0264..9fa60e4b4 100644 --- a/python/copilot/client.py +++ b/python/copilot/client.py @@ -19,11 +19,13 @@ import subprocess import sys import threading +import time from collections.abc import Callable from dataclasses import asdict, is_dataclass from pathlib import Path from typing import Any, cast +from ._copilot_telemetry 
import OP_EXECUTE_TOOL, CopilotTelemetry from .generated.rpc import ServerRpc from .generated.session_events import session_event_from_dict from .jsonrpc import JsonRpcClient, ProcessExitedError @@ -212,6 +214,13 @@ def __init__(self, options: CopilotClientOptions | None = None): self._lifecycle_handlers_lock = threading.Lock() self._rpc: ServerRpc | None = None + # Telemetry — only created when config is provided (opt-in) + telemetry_config = opts.get("telemetry") + if telemetry_config is not None: + self._telemetry: Any | None = CopilotTelemetry(telemetry_config) + else: + self._telemetry = None + @property def rpc(self) -> ServerRpc: """Typed server-scoped RPC methods.""" @@ -579,7 +588,19 @@ async def create_session(self, config: SessionConfig) -> CopilotSession: session_id = response["sessionId"] workspace_path = response.get("workspacePath") - session = CopilotSession(session_id, self._client, workspace_path) + session = CopilotSession( + session_id, + self._client, + workspace_path, + self._telemetry, + model=cfg.get("model"), + provider=cfg.get("provider"), + system_message=cfg.get("system_message"), + tools=tools, + streaming=bool(cfg.get("streaming")), + agent_name=cfg.get("agent_name"), + agent_description=cfg.get("agent_description"), + ) session._register_tools(tools) session._register_permission_handler(on_permission_request) if on_user_input_request: @@ -761,7 +782,19 @@ async def resume_session(self, session_id: str, config: ResumeSessionConfig) -> resumed_session_id = response["sessionId"] workspace_path = response.get("workspacePath") - session = CopilotSession(resumed_session_id, self._client, workspace_path) + session = CopilotSession( + resumed_session_id, + self._client, + workspace_path, + self._telemetry, + model=cfg.get("model"), + provider=cfg.get("provider"), + system_message=cfg.get("system_message"), + tools=cfg.get("tools"), + streaming=bool(cfg.get("streaming")), + agent_name=cfg.get("agent_name"), + 
agent_description=cfg.get("agent_description"), + ) session._register_tools(cfg.get("tools")) session._register_permission_handler(on_permission_request) if on_user_input_request: @@ -1560,6 +1593,28 @@ async def _execute_tool_call( Returns: A ToolResult containing the execution result or error. """ + # Telemetry: start execute_tool span + telemetry = self._telemetry + span = None + start_time = None + with self._sessions_lock: + session = self._sessions.get(session_id) + tool_description = session._get_tool_description(tool_name) if session else None + + if telemetry is not None: + parent_ctx = ( + session.get_telemetry_tool_call_parent_context(tool_call_id) if session else None + ) + span = telemetry.start_execute_tool_span( + tool_name, + tool_call_id, + tool_description, + arguments, + parent_context=parent_ctx, + ) + start_time = time.monotonic() + + operation_error: Exception | None = None invocation: ToolInvocation = { "session_id": session_id, "tool_call_id": tool_call_id, @@ -1572,6 +1627,9 @@ async def _execute_tool_call( if inspect.isawaitable(result): result = await result except Exception as exc: # pylint: disable=broad-except + operation_error = exc + if span is not None and telemetry is not None: + telemetry.record_error(span, exc) # Don't expose detailed error information to the LLM for security reasons. # The actual error is stored in the 'error' field for debugging. 
result = ToolResult( @@ -1590,6 +1648,23 @@ async def _execute_tool_call( toolTelemetry={}, ) + # Telemetry: record tool result and finish span + if span is not None and telemetry is not None: + if operation_error is None: + telemetry.set_execute_tool_result(span, result) + if start_time is not None and session is not None: + telemetry.record_operation_duration( + time.monotonic() - start_time, + request_model=None, + response_model=None, + provider_name=session.telemetry_provider_name, + server_address=session.telemetry_server_address, + server_port=session.telemetry_server_port, + error=operation_error, + operation_name=OP_EXECUTE_TOOL, + ) + span.end() + return self._normalize_tool_result(cast(ToolResult, result)) def _normalize_tool_result(self, result: ToolResult) -> ToolResult: diff --git a/python/copilot/session.py b/python/copilot/session.py index a02dcf1e9..fc72ac5a8 100644 --- a/python/copilot/session.py +++ b/python/copilot/session.py @@ -11,6 +11,7 @@ from collections.abc import Callable from typing import Any, cast +from ._copilot_telemetry import AgentTurnTracker from .generated.rpc import SessionRpc from .generated.session_events import SessionEvent, SessionEventType, session_event_from_dict from .types import ( @@ -56,7 +57,20 @@ class CopilotSession: ... unsubscribe() """ - def __init__(self, session_id: str, client: Any, workspace_path: str | None = None): + def __init__( + self, + session_id: str, + client: Any, + workspace_path: str | None = None, + telemetry: Any | None = None, + model: str | None = None, + provider: Any | None = None, + system_message: Any | None = None, + tools: list | None = None, + streaming: bool = False, + agent_name: str | None = None, + agent_description: str | None = None, + ): """ Initialize a new CopilotSession. @@ -69,6 +83,14 @@ def __init__(self, session_id: str, client: Any, workspace_path: str | None = No client: The internal client connection to the Copilot CLI. 
workspace_path: Path to the session workspace directory (when infinite sessions enabled). + telemetry: Optional CopilotTelemetry instance for instrumentation. + model: Model name for telemetry context. + provider: Provider config for telemetry context. + system_message: System message config for telemetry context. + tools: Tool definitions for telemetry context. + streaming: Whether streaming is enabled for telemetry context. + agent_name: Optional agent name for telemetry attribution. + agent_description: Optional agent description for telemetry attribution. """ self.session_id = session_id self._client = client @@ -77,6 +99,7 @@ def __init__(self, session_id: str, client: Any, workspace_path: str | None = No self._event_handlers_lock = threading.Lock() self._tool_handlers: dict[str, ToolHandler] = {} self._tool_handlers_lock = threading.Lock() + self._tool_descriptions: dict[str, str] = {} self._permission_handler: _PermissionHandlerFn | None = None self._permission_handler_lock = threading.Lock() self._user_input_handler: UserInputHandler | None = None @@ -85,6 +108,24 @@ def __init__(self, session_id: str, client: Any, workspace_path: str | None = No self._hooks_lock = threading.Lock() self._rpc: SessionRpc | None = None + # Telemetry — tracker encapsulates per-turn state and logic + self._telemetry = telemetry # CopilotTelemetry | None (kept for client.py access) + self._turn_tracker: AgentTurnTracker | None = ( + AgentTurnTracker( + telemetry, + session_id, + model=model, + provider=provider, + system_message=system_message, + tools=tools, + streaming=streaming, + agent_name=agent_name, + agent_description=agent_description, + ) + if telemetry + else None + ) + @property def rpc(self) -> SessionRpc: """Typed session-scoped RPC methods.""" @@ -102,6 +143,31 @@ def workspace_path(self) -> str | None: """ return self._workspace_path + @property + def telemetry_provider_name(self) -> str: + """Provider name for telemetry metrics (internal).""" + return 
self._turn_tracker.provider_name if self._turn_tracker else "github" + + @property + def telemetry_server_address(self) -> str | None: + """Server address for telemetry metrics (internal).""" + return self._turn_tracker.server_address if self._turn_tracker else None + + @property + def telemetry_server_port(self) -> int | None: + """Server port for telemetry metrics (internal).""" + return self._turn_tracker.server_port if self._turn_tracker else None + + def get_telemetry_tool_call_parent_context(self, tool_call_id: str) -> Any | None: + """Get the parent context for a tool call span (internal). + + Uses tool-call-specific context when available (e.g. subagent context), + otherwise falls back to the root invoke_agent context. + """ + if self._turn_tracker is not None: + return self._turn_tracker.get_tool_call_parent_context(tool_call_id) + return None + async def send(self, options: MessageOptions) -> str: """ Send a message to this session and wait for the response. @@ -126,16 +192,25 @@ async def send(self, options: MessageOptions) -> str: ... "attachments": [{"type": "file", "path": "./src/main.py"}] ... 
}) """ - response = await self._client.request( - "session.send", - { - "sessionId": self.session_id, - "prompt": options["prompt"], - "attachments": options.get("attachments"), - "mode": options.get("mode"), - }, - ) - return response["messageId"] + # Start or continue telemetry span for this turn + if self._turn_tracker is not None: + self._turn_tracker.begin_send(options["prompt"]) + + try: + response = await self._client.request( + "session.send", + { + "sessionId": self.session_id, + "prompt": options["prompt"], + "attachments": options.get("attachments"), + "mode": options.get("mode"), + }, + ) + return response["messageId"] + except Exception as exc: + if self._turn_tracker is not None: + self._turn_tracker.complete_turn_with_error(exc) + raise async def send_and_wait( self, options: MessageOptions, timeout: float | None = None @@ -192,7 +267,16 @@ def handler(event: SessionEventTypeAlias) -> None: raise error_event return last_assistant_message except TimeoutError: - raise TimeoutError(f"Timeout after {effective_timeout}s waiting for session.idle") + ex = TimeoutError(f"Timeout after {effective_timeout}s waiting for session.idle") + # Complete telemetry spans on timeout (idempotent if already completed). + if self._turn_tracker is not None: + self._turn_tracker.complete_turn_with_error(ex) + raise ex + except Exception as ex: + # Complete telemetry spans on error (idempotent if already completed). + if self._turn_tracker is not None: + self._turn_tracker.complete_turn_with_error(ex) + raise finally: unsubscribe() @@ -242,6 +326,10 @@ def _dispatch_event(self, event: SessionEvent) -> None: Args: event: The session event to dispatch to all handlers. 
""" + # Telemetry enrichment before user handlers + if self._turn_tracker is not None: + self._turn_tracker.process_event(event) + with self._event_handlers_lock: handlers = list(self._event_handlers) @@ -268,12 +356,15 @@ def _register_tools(self, tools: list[Tool] | None) -> None: """ with self._tool_handlers_lock: self._tool_handlers.clear() + self._tool_descriptions.clear() if not tools: return for tool in tools: if not tool.name or not tool.handler: continue self._tool_handlers[tool.name] = tool.handler + if tool.description: + self._tool_descriptions[tool.name] = tool.description def _get_tool_handler(self, name: str) -> ToolHandler | None: """ @@ -292,6 +383,11 @@ def _get_tool_handler(self, name: str) -> ToolHandler | None: with self._tool_handlers_lock: return self._tool_handlers.get(name) + def _get_tool_description(self, name: str) -> str | None: + """Retrieve a registered tool description by name (internal).""" + with self._tool_handlers_lock: + return self._tool_descriptions.get(name) + def _register_permission_handler(self, handler: _PermissionHandlerFn | None) -> None: """ Register a handler for permission requests. @@ -489,6 +585,10 @@ async def destroy(self) -> None: >>> # Clean up when done >>> await session.destroy() """ + # Close any open telemetry spans before destroying + if self._turn_tracker is not None: + self._turn_tracker.complete_on_dispose() + await self._client.request("session.destroy", {"sessionId": self.session_id}) with self._event_handlers_lock: self._event_handlers.clear() diff --git a/python/copilot/types.py b/python/copilot/types.py index b0e52cd3a..eacd1845e 100644 --- a/python/copilot/types.py +++ b/python/copilot/types.py @@ -66,6 +66,26 @@ class SelectionAttachment(TypedDict): Attachment = FileAttachment | DirectoryAttachment | SelectionAttachment +# Telemetry configuration +class TelemetryConfig(TypedDict, total=False): + """Configuration for OpenTelemetry instrumentation. 
+ + When provided in CopilotClientOptions, enables OpenTelemetry GenAI semantic + convention instrumentation for traces and metrics. + + Attributes: + enable_sensitive_data: Whether to include potentially sensitive data + (message content, tool arguments/results) in telemetry. When not + set, defaults to the OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT + env var, else False. + source_name: The name used for the tracer and meter. Defaults to + "github.copilot.sdk". + """ + + enable_sensitive_data: bool + source_name: str + + # Options for creating a CopilotClient class CopilotClientOptions(TypedDict, total=False): """Options for creating a CopilotClient""" @@ -95,6 +115,9 @@ class CopilotClientOptions(TypedDict, total=False): # When False, only explicit tokens (github_token or environment variables) are used. # Default: True (but defaults to False when github_token is provided) use_logged_in_user: bool + # OpenTelemetry instrumentation configuration. When set, enables GenAI semantic + # convention traces and metrics. When absent, no telemetry is emitted (opt-in). + telemetry: TelemetryConfig ToolResultType = Literal["success", "failure", "rejected", "denied"] @@ -504,6 +527,12 @@ class SessionConfig(TypedDict, total=False): # When enabled (default), sessions automatically manage context limits and persist state. # Set to {"enabled": False} to disable. infinite_sessions: InfiniteSessionConfig + # Name of the agent for telemetry attribution. + # When set, the invoke_agent span includes a gen_ai.agent.name attribute. + agent_name: str + # Description of the agent for telemetry attribution. + # When set, the invoke_agent span includes a gen_ai.agent.description attribute. + agent_description: str # Azure-specific provider options @@ -570,6 +599,10 @@ class ResumeSessionConfig(TypedDict, total=False): # When True, skips emitting the session.resume event. # Useful for reconnecting to a session without triggering resume-related side effects. 
disable_resume: bool + # Name of the agent for telemetry attribution. + agent_name: str + # Description of the agent for telemetry attribution. + agent_description: str # Options for sending a message to a session diff --git a/python/pyproject.toml b/python/pyproject.toml index 741232e8a..dee45509a 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -25,6 +25,8 @@ classifiers = [ dependencies = [ "python-dateutil>=2.9.0.post0", "pydantic>=2.0", + "typing-extensions>=4.0.0", + "opentelemetry-api>=1.20.0", ] [project.urls] @@ -39,6 +41,7 @@ dev = [ "pytest-asyncio>=0.21.0", "pytest-timeout>=2.0.0", "httpx>=0.24.0", + "opentelemetry-sdk>=1.20.0", ] # Use find with a glob so that the copilot.bin subpackage (created dynamically diff --git a/python/test_opentelemetry.py b/python/test_opentelemetry.py new file mode 100644 index 000000000..592f0ad8c --- /dev/null +++ b/python/test_opentelemetry.py @@ -0,0 +1,1759 @@ +"""Tests for OpenTelemetry GenAI semantic convention instrumentation.""" + +from __future__ import annotations + +import json +import threading +from datetime import UTC +from typing import Any +from unittest.mock import AsyncMock + +import pytest +from opentelemetry import trace +from opentelemetry.sdk.metrics import MeterProvider +from opentelemetry.sdk.metrics.export import InMemoryMetricReader +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter, SpanExportResult + +from copilot._copilot_telemetry import ( + ATTR_COPILOT_AIU, + ATTR_COPILOT_COST, + ATTR_COPILOT_INITIATOR, + ATTR_COPILOT_INTERACTION_ID, + ATTR_COPILOT_SERVER_DURATION, + ATTR_COPILOT_TURN_ID, + ATTR_ERROR_TYPE, + ATTR_GEN_AI_AGENT_DESCRIPTION, + ATTR_GEN_AI_AGENT_ID, + ATTR_GEN_AI_AGENT_NAME, + ATTR_GEN_AI_CONVERSATION_ID, + ATTR_GEN_AI_INPUT_MESSAGES, + ATTR_GEN_AI_OPERATION_NAME, + ATTR_GEN_AI_OUTPUT_MESSAGES, + ATTR_GEN_AI_PROVIDER_NAME, + ATTR_GEN_AI_REQUEST_MODEL, + 
ATTR_GEN_AI_RESPONSE_FINISH_REASONS, + ATTR_GEN_AI_RESPONSE_MODEL, + ATTR_GEN_AI_SYSTEM_INSTRUCTIONS, + ATTR_GEN_AI_TOOL_CALL_ARGUMENTS, + ATTR_GEN_AI_TOOL_CALL_ID, + ATTR_GEN_AI_TOOL_CALL_RESULT, + ATTR_GEN_AI_TOOL_DEFINITIONS, + ATTR_GEN_AI_TOOL_DESCRIPTION, + ATTR_GEN_AI_TOOL_NAME, + ATTR_GEN_AI_TOOL_TYPE, + ATTR_GEN_AI_USAGE_INPUT_TOKENS, + ATTR_GEN_AI_USAGE_OUTPUT_TOKENS, + ATTR_SERVER_ADDRESS, + ATTR_SERVER_PORT, + COPILOT_EVENT_ATTR_PERFORMED_BY, + COPILOT_EVENT_ATTR_PRE_TOKENS, + COPILOT_EVENT_ATTR_SKILL_CONTENT, + COPILOT_EVENT_ATTR_SKILL_NAME, + COPILOT_EVENT_ATTR_SKILL_PATH, + COPILOT_EVENT_ATTR_SKILL_PLUGIN_NAME, + COPILOT_EVENT_ATTR_SKILL_PLUGIN_VERSION, + COPILOT_EVENT_ATTR_SUCCESS, + COPILOT_EVENT_ATTR_TOKEN_LIMIT, + COPILOT_EVENT_ATTR_TOKENS_REMOVED, + COPILOT_EVENT_SESSION_COMPACTION_COMPLETE, + COPILOT_EVENT_SESSION_COMPACTION_START, + COPILOT_EVENT_SESSION_TRUNCATION, + COPILOT_EVENT_SKILL_INVOKED, + DEFAULT_PROVIDER_NAME, + DEFAULT_SOURCE_NAME, + METRIC_OPERATION_DURATION, + METRIC_TIME_PER_OUTPUT_CHUNK, + METRIC_TIME_TO_FIRST_CHUNK, + METRIC_TOKEN_USAGE, + OP_CHAT, + OP_EXECUTE_TOOL, + OP_INVOKE_AGENT, + CopilotTelemetry, + normalize_provider_name, + parse_server_address, +) +from copilot.generated.session_events import ( + CopilotUsage, + Data, + SessionEvent, + SessionEventType, +) +from copilot.session import CopilotSession +from copilot.types import TelemetryConfig, Tool + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + + +@pytest.fixture() +def _reset_otel_globals(): + """Create isolated OpenTelemetry providers for each test.""" + exporter = _CollectingSpanExporter() + tracer_provider = TracerProvider() + tracer_provider.add_span_processor(SimpleSpanProcessor(exporter)) + + reader = InMemoryMetricReader() + meter_provider = MeterProvider(metric_readers=[reader]) + + yield exporter, reader, tracer_provider, 
meter_provider + + tracer_provider.shutdown() + meter_provider.shutdown() + + +class _CollectingSpanExporter(SpanExporter): + """Collects finished spans in-memory for test assertions.""" + + def __init__(self): + self._spans: list[Any] = [] + self._lock = threading.Lock() + + def export(self, spans): + with self._lock: + self._spans.extend(spans) + return SpanExportResult.SUCCESS + + def shutdown(self): + pass + + def get_finished_spans(self) -> list[Any]: + with self._lock: + return list(self._spans) + + def clear(self): + with self._lock: + self._spans.clear() + + +def _get_exporter_and_reader(otel_fixture): + exporter, reader, tracer_provider, meter_provider = otel_fixture + return exporter, reader, tracer_provider, meter_provider + + +def _make_telemetry( + enable_sensitive_data: bool = False, + source_name: str | None = None, + tracer_provider: Any = None, + meter_provider: Any = None, +) -> CopilotTelemetry: + config: TelemetryConfig = {} + if enable_sensitive_data: + config["enable_sensitive_data"] = True + if source_name: + config["source_name"] = source_name + return CopilotTelemetry(config, tracer_provider=tracer_provider, meter_provider=meter_provider) + + +def _make_session_event( + event_type: SessionEventType, + **data_kwargs: Any, +) -> SessionEvent: + """Create a minimal SessionEvent for testing.""" + import uuid + from datetime import datetime + + data = Data(**data_kwargs) + return SessionEvent( + data=data, + id=uuid.uuid4(), + timestamp=datetime.now(UTC), + type=event_type, + ) + + +def _make_session_with_telemetry( + telemetry: CopilotTelemetry, + session_id: str = "test-session", + model: str = None, + provider=None, + system_message=None, + tools=None, + streaming: bool = False, + agent_name: str | None = None, + agent_description: str | None = None, +) -> CopilotSession: + """Create a CopilotSession with telemetry for testing.""" + mock_client = AsyncMock() + mock_client.request = AsyncMock(return_value={"messageId": "msg-1"}) + return 
CopilotSession( + session_id, + mock_client, + telemetry=telemetry, + model=model, + provider=provider, + system_message=system_message, + tools=tools, + streaming=streaming, + agent_name=agent_name, + agent_description=agent_description, + ) + + +def _get_metric_names(reader): + """Collect all metric names from the reader.""" + data = reader.get_metrics_data() + names = [] + for rm in data.resource_metrics: + for sm in rm.scope_metrics: + for m in sm.metrics: + names.append(m.name) + return names + + +def _get_metric_data_points(reader, metric_name): + """Get data points for a specific metric.""" + data = reader.get_metrics_data() + for rm in data.resource_metrics: + for sm in rm.scope_metrics: + for m in sm.metrics: + if m.name == metric_name: + return list(m.data.data_points) + return [] + + +def _do_single_turn(session, prompt="Hello"): + """Helper: send + turn_start + usage + message + turn_end + idle.""" + import asyncio + + asyncio.get_event_loop().run_until_complete(session.send({"prompt": prompt})) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event( + _make_session_event( + SessionEventType.ASSISTANT_USAGE, + model="gpt-4o", + api_call_id="call-1", + input_tokens=50.0, + output_tokens=100.0, + ) + ) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_MESSAGE, content="Hi there!") + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + +# --------------------------------------------------------------------------- +# Tests: Provider name normalization +# --------------------------------------------------------------------------- + + +class TestNormalizeProviderName: + def test_openai(self): + assert normalize_provider_name("openai") == "openai" + + def test_openai_case_insensitive(self): + assert normalize_provider_name("OpenAI") == "openai" + 
+ def test_azure(self): + assert normalize_provider_name("azure") == "azure.ai.openai" + + def test_anthropic(self): + assert normalize_provider_name("anthropic") == "anthropic" + + def test_unknown_defaults_to_github(self): + assert normalize_provider_name("some-other") == DEFAULT_PROVIDER_NAME + + def test_none_defaults_to_github(self): + assert normalize_provider_name(None) == DEFAULT_PROVIDER_NAME + + def test_empty_defaults_to_github(self): + assert normalize_provider_name("") == DEFAULT_PROVIDER_NAME + + def test_whitespace_defaults_to_github(self): + assert normalize_provider_name(" ") == DEFAULT_PROVIDER_NAME + + +# --------------------------------------------------------------------------- +# Tests: Server address parsing +# --------------------------------------------------------------------------- + + +class TestParseServerAddress: + def test_https_url(self): + host, port = parse_server_address("https://api.example.com:443/v1") + assert host == "api.example.com" + assert port == 443 + + def test_http_url_default_port(self): + host, port = parse_server_address("http://localhost/v1") + assert host == "localhost" + assert port == 80 + + def test_https_url_default_port(self): + host, port = parse_server_address("https://api.openai.com/v1") + assert host == "api.openai.com" + assert port == 443 + + def test_none_url(self): + host, port = parse_server_address(None) + assert host is None + assert port is None + + def test_empty_url(self): + host, port = parse_server_address("") + assert host is None + assert port is None + + def test_custom_port(self): + host, port = parse_server_address("https://api.openai.com:8080") + assert host == "api.openai.com" + assert port == 8080 + + +# --------------------------------------------------------------------------- +# Tests: invoke_agent span +# --------------------------------------------------------------------------- + + +class TestInvokeAgentSpan: + def test_creates_client_span_with_attributes(self, 
_reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + + span, _ctx = telemetry.start_invoke_agent_span( + session_id="s1", + model="gpt-4", + provider_name="openai", + server_address="api.openai.com", + server_port=443, + ) + span.end() + + spans = exporter.get_finished_spans() + assert len(spans) == 1 + s = spans[0] + assert s.name == OP_INVOKE_AGENT # no agent_name, so just operation name + assert s.kind == trace.SpanKind.CLIENT + assert s.attributes[ATTR_GEN_AI_OPERATION_NAME] == OP_INVOKE_AGENT + assert s.attributes[ATTR_GEN_AI_PROVIDER_NAME] == "openai" + assert s.attributes[ATTR_GEN_AI_AGENT_ID] == "s1" + assert s.attributes[ATTR_GEN_AI_CONVERSATION_ID] == "s1" + assert s.attributes[ATTR_GEN_AI_REQUEST_MODEL] == "gpt-4" + assert s.attributes[ATTR_SERVER_ADDRESS] == "api.openai.com" + assert s.attributes[ATTR_SERVER_PORT] == 443 + + def test_no_model_omits_model_attribute(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + + span, _ctx = telemetry.start_invoke_agent_span( + session_id="s1", + model=None, + provider_name="github", + server_address=None, + server_port=None, + ) + span.end() + + s = exporter.get_finished_spans()[0] + assert ATTR_GEN_AI_REQUEST_MODEL not in s.attributes + assert ATTR_SERVER_ADDRESS not in s.attributes + + +# --------------------------------------------------------------------------- +# Tests: execute_tool span +# --------------------------------------------------------------------------- + + +class TestExecuteToolSpan: + def test_creates_internal_span_with_attributes(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + + span = telemetry.start_execute_tool_span( + tool_name="my_tool", + 
tool_call_id="tc-1", + description="Does something", + arguments={"x": 1}, + ) + span.end() + + spans = exporter.get_finished_spans() + assert len(spans) == 1 + s = spans[0] + assert s.name == f"{OP_EXECUTE_TOOL} my_tool" + assert s.kind == trace.SpanKind.INTERNAL + assert s.attributes[ATTR_GEN_AI_OPERATION_NAME] == OP_EXECUTE_TOOL + assert s.attributes[ATTR_GEN_AI_TOOL_NAME] == "my_tool" + assert s.attributes[ATTR_GEN_AI_TOOL_CALL_ID] == "tc-1" + assert s.attributes[ATTR_GEN_AI_TOOL_TYPE] == "function" + assert s.attributes[ATTR_GEN_AI_TOOL_DESCRIPTION] == "Does something" + # Arguments should NOT be present without sensitive data + assert ATTR_GEN_AI_TOOL_CALL_ARGUMENTS not in s.attributes + + def test_sensitive_data_includes_arguments(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry( + enable_sensitive_data=True, tracer_provider=tp, meter_provider=mp + ) + + span = telemetry.start_execute_tool_span( + tool_name="my_tool", + tool_call_id="tc-1", + description=None, + arguments={"key": "value"}, + ) + span.end() + + s = exporter.get_finished_spans()[0] + assert ATTR_GEN_AI_TOOL_CALL_ARGUMENTS in s.attributes + assert "key" in s.attributes[ATTR_GEN_AI_TOOL_CALL_ARGUMENTS] + + +# --------------------------------------------------------------------------- +# Tests: Metrics recording +# --------------------------------------------------------------------------- + + +class TestMetrics: + def test_operation_duration_metric(self, _reset_otel_globals): + _, reader, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + + telemetry.record_operation_duration( + 1.5, + "gpt-4", + "gpt-4", + "openai", + "api.openai.com", + 443, + ) + + data = reader.get_metrics_data() + metric_names = [] + for resource_metric in data.resource_metrics: + for scope_metric in resource_metric.scope_metrics: + for metric in scope_metric.metrics: + 
metric_names.append(metric.name) + assert METRIC_OPERATION_DURATION in metric_names + + def test_token_usage_metric(self, _reset_otel_globals): + _, reader, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + + # Token usage metrics are now recorded via record_token_usage_metrics + telemetry.record_token_usage_metrics(100, 50, "gpt-4", "gpt-4", "openai", None, None) + + data = reader.get_metrics_data() + metric_names = [] + for resource_metric in data.resource_metrics: + for scope_metric in resource_metric.scope_metrics: + for metric in scope_metric.metrics: + metric_names.append(metric.name) + assert METRIC_TOKEN_USAGE in metric_names + + def test_time_to_first_chunk_metric(self, _reset_otel_globals): + _, reader, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + + telemetry.record_time_to_first_chunk(0.5, "gpt-4", "gpt-4", "openai", None, None) + + data = reader.get_metrics_data() + metric_names = [] + for resource_metric in data.resource_metrics: + for scope_metric in resource_metric.scope_metrics: + for metric in scope_metric.metrics: + metric_names.append(metric.name) + assert METRIC_TIME_TO_FIRST_CHUNK in metric_names + + def test_time_per_output_chunk_metric(self, _reset_otel_globals): + _, reader, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + + telemetry.record_time_per_output_chunk(0.1, "gpt-4", "gpt-4", "openai", None, None) + + data = reader.get_metrics_data() + metric_names = [] + for resource_metric in data.resource_metrics: + for scope_metric in resource_metric.scope_metrics: + for metric in scope_metric.metrics: + metric_names.append(metric.name) + assert METRIC_TIME_PER_OUTPUT_CHUNK in metric_names + + +# --------------------------------------------------------------------------- +# Tests: Error recording +# 
--------------------------------------------------------------------------- + + +class TestErrorRecording: + def test_records_error_on_span(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + + span, _ctx = telemetry.start_invoke_agent_span("s1", None, "github", None, None) + error = ValueError("test error") + telemetry.record_error(span, error) + span.end() + + s = exporter.get_finished_spans()[0] + assert s.attributes[ATTR_ERROR_TYPE] == "ValueError" + assert s.status.status_code == trace.StatusCode.ERROR + + +# --------------------------------------------------------------------------- +# Tests: Tool result recording +# --------------------------------------------------------------------------- + + +class TestToolResult: + def test_set_tool_result_with_sensitive(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry( + enable_sensitive_data=True, tracer_provider=tp, meter_provider=mp + ) + + span = telemetry.start_execute_tool_span("t1", "tc-1", None, None) + telemetry.set_execute_tool_result(span, {"output": "hello"}) + span.end() + + s = exporter.get_finished_spans()[0] + assert ATTR_GEN_AI_TOOL_CALL_RESULT in s.attributes + assert "hello" in s.attributes[ATTR_GEN_AI_TOOL_CALL_RESULT] + + def test_set_tool_result_without_sensitive(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry( + enable_sensitive_data=False, tracer_provider=tp, meter_provider=mp + ) + + span = telemetry.start_execute_tool_span("t1", "tc-1", None, None) + telemetry.set_execute_tool_result(span, {"output": "hello"}) + span.end() + + s = exporter.get_finished_spans()[0] + assert ATTR_GEN_AI_TOOL_CALL_RESULT not in s.attributes + + +# --------------------------------------------------------------------------- +# Tests: Sensitive 
data gating +# --------------------------------------------------------------------------- + + +class TestSensitiveDataGating: + def test_sensitive_false_by_default(self, _reset_otel_globals): + _, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + assert telemetry.enable_sensitive_data is False + + def test_sensitive_true_when_configured(self, _reset_otel_globals): + _, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry( + enable_sensitive_data=True, tracer_provider=tp, meter_provider=mp + ) + assert telemetry.enable_sensitive_data is True + + def test_env_var_override(self, monkeypatch, _reset_otel_globals): + _, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + monkeypatch.setenv("OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT", "true") + config: TelemetryConfig = {} + telemetry = CopilotTelemetry(config, tracer_provider=tp, meter_provider=mp) + assert telemetry.enable_sensitive_data is True + + @pytest.mark.asyncio + async def test_includes_messages_when_sensitive_enabled(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry( + enable_sensitive_data=True, tracer_provider=tp, meter_provider=mp + ) + tool = Tool( + name="myTool", + description="A tool", + handler=lambda inv: "ok", + parameters={"type": "object"}, + ) + session = _make_session_with_telemetry( + telemetry, + "s1", + system_message={"mode": "replace", "content": "You are helpful."}, + tools=[tool], + ) + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_MESSAGE, content="Hi there!") + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + 
session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + invoke_span = next(s for s in spans if OP_INVOKE_AGENT in s.name) + chat_span = next(s for s in spans if OP_CHAT in s.name) + invoke_attrs = dict(invoke_span.attributes) + chat_attrs = dict(chat_span.attributes) + + # Chat span has input messages (user prompt) and output messages + assert ATTR_GEN_AI_INPUT_MESSAGES in chat_attrs + parsed_input = json.loads(chat_attrs[ATTR_GEN_AI_INPUT_MESSAGES]) + assert parsed_input[0]["role"] == "user" + assert parsed_input[0]["parts"][0]["content"] == "Hello" + + assert ATTR_GEN_AI_OUTPUT_MESSAGES in chat_attrs + parsed_output = json.loads(chat_attrs[ATTR_GEN_AI_OUTPUT_MESSAGES]) + assert parsed_output[0]["role"] == "assistant" + assert parsed_output[0]["parts"][0]["content"] == "Hi there!" + + # invoke_agent span has input messages (user prompt) and output (last turn) + assert ATTR_GEN_AI_INPUT_MESSAGES in invoke_attrs + parsed_invoke_input = json.loads(invoke_attrs[ATTR_GEN_AI_INPUT_MESSAGES]) + assert parsed_invoke_input[0]["role"] == "user" + assert parsed_invoke_input[0]["parts"][0]["content"] == "Hello" + + assert ATTR_GEN_AI_OUTPUT_MESSAGES in invoke_attrs + + # System instructions on invoke_agent span + assert ATTR_GEN_AI_SYSTEM_INSTRUCTIONS in invoke_attrs + parsed_sys = json.loads(invoke_attrs[ATTR_GEN_AI_SYSTEM_INSTRUCTIONS]) + assert parsed_sys[0]["content"] == "You are helpful." 
+ + # Tool definitions (always included) on invoke_agent span + assert ATTR_GEN_AI_TOOL_DEFINITIONS in invoke_attrs + parsed_tools = json.loads(invoke_attrs[ATTR_GEN_AI_TOOL_DEFINITIONS]) + assert parsed_tools[0]["name"] == "myTool" + + @pytest.mark.asyncio + async def test_excludes_messages_when_sensitive_disabled(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry( + enable_sensitive_data=False, tracer_provider=tp, meter_provider=mp + ) + tool = Tool( + name="myTool", + description="A tool", + handler=lambda inv: "ok", + parameters={"type": "object"}, + ) + session = _make_session_with_telemetry( + telemetry, + "s1", + system_message={"mode": "replace", "content": "You are helpful."}, + tools=[tool], + ) + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_MESSAGE, content="Hi there!") + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + invoke_span = next(s for s in spans if OP_INVOKE_AGENT in s.name) + invoke_attrs = dict(invoke_span.attributes) + + # Sensitive attributes excluded + assert ATTR_GEN_AI_INPUT_MESSAGES not in invoke_attrs + assert ATTR_GEN_AI_OUTPUT_MESSAGES not in invoke_attrs + assert ATTR_GEN_AI_SYSTEM_INSTRUCTIONS not in invoke_attrs + + # Tool definitions NOT sensitive-gated + assert ATTR_GEN_AI_TOOL_DEFINITIONS in invoke_attrs + parsed_tools = json.loads(invoke_attrs[ATTR_GEN_AI_TOOL_DEFINITIONS]) + assert parsed_tools[0]["name"] == "myTool" + + +# --------------------------------------------------------------------------- +# Tests: Custom source name +# --------------------------------------------------------------------------- + + +class 
TestCustomSourceName: + def test_default_source_name(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + + span, _ctx = telemetry.start_invoke_agent_span("s1", None, "github", None, None) + span.end() + + s = exporter.get_finished_spans()[0] + assert s.instrumentation_scope.name == DEFAULT_SOURCE_NAME + + def test_custom_source_name(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry( + source_name="my.custom.source", tracer_provider=tp, meter_provider=mp + ) + + span, _ctx = telemetry.start_invoke_agent_span("s1", None, "github", None, None) + span.end() + + s = exporter.get_finished_spans()[0] + assert s.instrumentation_scope.name == "my.custom.source" + + +# --------------------------------------------------------------------------- +# Tests: Opt-in (no telemetry when config not provided) +# --------------------------------------------------------------------------- + + +class TestOptIn: + def test_no_telemetry_when_not_configured(self, _reset_otel_globals): + """Session with no telemetry should not produce spans.""" + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + + session = _make_session_with_telemetry(telemetry=None, session_id="s1") + # Dispatch events - should not crash + event = _make_session_event(SessionEventType.SESSION_IDLE) + session._dispatch_event(event) + + assert len(exporter.get_finished_spans()) == 0 + + def test_session_without_telemetry_has_no_span_state(self): + session = _make_session_with_telemetry(telemetry=None) + assert session._telemetry is None + assert session._turn_tracker is None + + +# --------------------------------------------------------------------------- +# Tests: Session telemetry integration +# --------------------------------------------------------------------------- + + +class 
TestSessionTelemetryIntegration: + @pytest.mark.asyncio + async def test_send_creates_invoke_agent_span(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + + # Span started but not ended yet (need idle event) + assert session._turn_tracker._agent_span is not None + + # Dispatch turn start + turn end + idle to close spans + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + # Should have both chat and invoke_agent spans + invoke_spans = [ + s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_INVOKE_AGENT + ] + chat_spans = [s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_CHAT] + assert len(invoke_spans) == 1 + assert len(chat_spans) == 1 + + @pytest.mark.asyncio + async def test_two_level_span_hierarchy(self, _reset_otel_globals): + """Chat span is a child of invoke_agent span.""" + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + invoke_span = next( + s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_INVOKE_AGENT + ) + chat_span = next( + s 
for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_CHAT + ) + + # Chat span's parent should be the invoke_agent span + assert chat_span.parent is not None + assert chat_span.parent.span_id == invoke_span.context.span_id + + @pytest.mark.asyncio + async def test_usage_event_sets_per_turn_and_accumulated(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event( + _make_session_event( + SessionEventType.ASSISTANT_USAGE, + model="gpt-4o", + api_call_id="call-1", + input_tokens=50.0, + output_tokens=100.0, + ) + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + chat_span = next( + s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_CHAT + ) + invoke_span = next( + s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_INVOKE_AGENT + ) + + # Per-turn usage on chat span + assert chat_span.attributes[ATTR_GEN_AI_RESPONSE_MODEL] == "gpt-4o" + assert chat_span.attributes[ATTR_GEN_AI_USAGE_INPUT_TOKENS] == 50 + assert chat_span.attributes[ATTR_GEN_AI_USAGE_OUTPUT_TOKENS] == 100 + + # Accumulated usage on invoke_agent span + assert invoke_span.attributes[ATTR_GEN_AI_RESPONSE_MODEL] == "gpt-4o" + assert invoke_span.attributes[ATTR_GEN_AI_USAGE_INPUT_TOKENS] == 50 + assert invoke_span.attributes[ATTR_GEN_AI_USAGE_OUTPUT_TOKENS] == 100 + + @pytest.mark.asyncio + async def test_multi_turn_accumulates_usage(self, _reset_otel_globals): + """Multiple chat turns accumulate tokens on the invoke_agent span.""" + exporter, _, tp, mp = 
_get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + + # Turn 1 + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event( + _make_session_event( + SessionEventType.ASSISTANT_USAGE, + model="gpt-4o", + api_call_id="call-1", + input_tokens=50.0, + output_tokens=100.0, + ) + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + + # Turn 2 + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t2") + ) + session._dispatch_event( + _make_session_event( + SessionEventType.ASSISTANT_USAGE, + model="gpt-4o", + api_call_id="call-2", + input_tokens=30.0, + output_tokens=60.0, + ) + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + invoke_span = next( + s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_INVOKE_AGENT + ) + chat_spans = [s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_CHAT] + + assert len(chat_spans) == 2 + + # Each chat span has per-turn tokens + assert chat_spans[0].attributes[ATTR_GEN_AI_USAGE_INPUT_TOKENS] == 50 + assert chat_spans[1].attributes[ATTR_GEN_AI_USAGE_INPUT_TOKENS] == 30 + + # invoke_agent span accumulates + assert invoke_span.attributes[ATTR_GEN_AI_USAGE_INPUT_TOKENS] == 80 + assert invoke_span.attributes[ATTR_GEN_AI_USAGE_OUTPUT_TOKENS] == 160 + + @pytest.mark.asyncio + async def test_model_change_event_updates_response_model(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = 
_make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event( + _make_session_event(SessionEventType.SESSION_MODEL_CHANGE, new_model="gpt-4o-mini") + ) + assert session._turn_tracker._response_model == "gpt-4o-mini" + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + @pytest.mark.asyncio + async def test_error_event_completes_span_with_error(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event( + _make_session_event(SessionEventType.SESSION_ERROR, message="Something went wrong") + ) + + spans = exporter.get_finished_spans() + # Both chat and invoke_agent spans should have error + for s in spans: + assert s.attributes[ATTR_ERROR_TYPE] == "SessionTurnError" + assert s.status.status_code == trace.StatusCode.ERROR + + @pytest.mark.asyncio + async def test_idle_event_records_duration_metric(self, _reset_otel_globals): + _, reader, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + assert METRIC_OPERATION_DURATION in 
_get_metric_names(reader) + + @pytest.mark.asyncio + async def test_subsequent_send_reuses_agent_span(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry( + enable_sensitive_data=True, tracer_provider=tp, meter_provider=mp + ) + session = _make_session_with_telemetry(telemetry, "s1") + + # First send creates the agent span + await session.send({"prompt": "First"}) + span_after_first = session._turn_tracker._agent_span + + # Second send reuses the same span + await session.send({"prompt": "Second"}) + assert session._turn_tracker._agent_span is span_after_first + + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + assert exporter.get_finished_spans()[0] is not None + + +# --------------------------------------------------------------------------- +# Tests: Streaming chunk metrics +# --------------------------------------------------------------------------- + + +class TestStreamingChunkMetrics: + @pytest.mark.asyncio + async def test_streaming_records_time_to_first_chunk(self, _reset_otel_globals): + _, reader, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1", streaming=True) + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + + # First chunk - any event during a turn triggers chunk timing + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_MESSAGE_DELTA, delta_content="Hi") + ) + + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + 
session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + assert METRIC_TIME_TO_FIRST_CHUNK in _get_metric_names(reader) + + # Verify chunk timing uses "chat" operation name + data_points = _get_metric_data_points(reader, METRIC_TIME_TO_FIRST_CHUNK) + assert len(data_points) > 0 + attrs = dict(data_points[0].attributes) + assert attrs[ATTR_GEN_AI_OPERATION_NAME] == OP_CHAT + + @pytest.mark.asyncio + async def test_streaming_records_time_per_output_chunk(self, _reset_otel_globals): + _, reader, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1", streaming=True) + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + + # First chunk + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_MESSAGE_DELTA, delta_content="a") + ) + # Second chunk + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_MESSAGE_DELTA, delta_content="b") + ) + + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + assert METRIC_TIME_PER_OUTPUT_CHUNK in _get_metric_names(reader) + + +# --------------------------------------------------------------------------- +# Tests: configure_telemetry_context +# --------------------------------------------------------------------------- + + +class TestConfigureTelemetryContext: + def test_sets_provider_and_server(self, _reset_otel_globals): + _, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry( + telemetry, + "s1", + model="gpt-4", + provider={"type": "azure", "base_url": "https://myendpoint.openai.azure.com:443/v1"}, + ) + + assert 
session._turn_tracker._request_model == "gpt-4" + assert session._turn_tracker._provider_name == "azure.ai.openai" + assert session._turn_tracker._server_address == "myendpoint.openai.azure.com" + assert session._turn_tracker._server_port == 443 + + def test_noop_without_telemetry(self): + session = _make_session_with_telemetry(telemetry=None, model="gpt-4") + assert session.telemetry_provider_name == "github" # unchanged default + + +# --------------------------------------------------------------------------- +# Tests: Finish reasons +# --------------------------------------------------------------------------- + + +class TestFinishReasons: + @pytest.mark.asyncio + async def test_finish_reasons_on_success(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + for s in spans: + assert s.attributes[ATTR_GEN_AI_RESPONSE_FINISH_REASONS] == ("stop",) + + +# --------------------------------------------------------------------------- +# Tests: Agent metadata +# --------------------------------------------------------------------------- + + +class TestAgentMetadata: + def test_agent_name_and_description_on_span(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + + span, _ctx = telemetry.start_invoke_agent_span( + session_id="s1", + model="gpt-4", + provider_name="openai", + server_address="api.openai.com", + server_port=443, + 
agent_name="my_agent", + agent_description="Does something useful", + ) + span.end() + + spans = exporter.get_finished_spans() + assert len(spans) == 1 + s = spans[0] + assert s.attributes[ATTR_GEN_AI_AGENT_NAME] == "my_agent" + assert s.attributes[ATTR_GEN_AI_AGENT_DESCRIPTION] == "Does something useful" + + +# --------------------------------------------------------------------------- +# Tests: Vendor-prefixed attributes (Copilot-specific) +# --------------------------------------------------------------------------- + + +class TestVendorPrefixedAttributes: + @pytest.mark.asyncio + async def test_chat_span_copilot_attrs(self, _reset_otel_globals): + """Copilot-specific attrs (cost, duration, initiator, aiu, turn_id, interaction_id) + are set on chat spans.""" + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event( + SessionEventType.ASSISTANT_TURN_START, + turn_id="tid-1", + interaction_id="iid-1", + ) + ) + session._dispatch_event( + _make_session_event( + SessionEventType.ASSISTANT_USAGE, + model="gpt-4o", + input_tokens=50.0, + output_tokens=100.0, + cost=0.01, + duration=1500.0, + initiator="user", + copilot_usage=CopilotUsage(token_details=None, total_nano_aiu=42000.0), + ) + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + chat_span = next( + s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_CHAT + ) + attrs = dict(chat_span.attributes) + + assert attrs[ATTR_COPILOT_COST] == 0.01 + assert attrs[ATTR_COPILOT_SERVER_DURATION] == 1500.0 + assert attrs[ATTR_COPILOT_INITIATOR] == "user" + assert attrs[ATTR_COPILOT_AIU] == 42000.0 + assert 
attrs[ATTR_COPILOT_TURN_ID] == "tid-1" + assert attrs[ATTR_COPILOT_INTERACTION_ID] == "iid-1" + + @pytest.mark.asyncio + async def test_invoke_agent_accumulated_cost_and_aiu(self, _reset_otel_globals): + """invoke_agent span accumulates cost and AIU across turns.""" + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + + # Turn 1 + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event( + _make_session_event( + SessionEventType.ASSISTANT_USAGE, + model="gpt-4o", + input_tokens=10.0, + output_tokens=20.0, + cost=0.01, + copilot_usage=CopilotUsage(token_details=None, total_nano_aiu=1000.0), + ) + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + + # Turn 2 + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t2") + ) + session._dispatch_event( + _make_session_event( + SessionEventType.ASSISTANT_USAGE, + model="gpt-4o", + input_tokens=20.0, + output_tokens=30.0, + cost=0.02, + copilot_usage=CopilotUsage(token_details=None, total_nano_aiu=2000.0), + ) + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + invoke_span = next( + s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_INVOKE_AGENT + ) + attrs = dict(invoke_span.attributes) + + assert attrs[ATTR_COPILOT_COST] == pytest.approx(0.03) + assert attrs[ATTR_COPILOT_AIU] == 3000.0 + + +# --------------------------------------------------------------------------- +# Tests: Custom span events +# --------------------------------------------------------------------------- + + +class TestCustomSpanEvents: + 
@pytest.mark.asyncio + async def test_truncation_span_event(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event( + _make_session_event( + SessionEventType.SESSION_TRUNCATION, + token_limit=4096.0, + pre_truncation_tokens_in_messages=5000.0, + post_truncation_tokens_in_messages=4000.0, + pre_truncation_messages_length=20.0, + post_truncation_messages_length=15.0, + tokens_removed_during_truncation=1000.0, + messages_removed_during_truncation=5.0, + performed_by="model", + ) + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + # Truncation event should be on the chat span (active during turn) + chat_span = next( + s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_CHAT + ) + events = chat_span.events + trunc_events = [e for e in events if e.name == COPILOT_EVENT_SESSION_TRUNCATION] + assert len(trunc_events) == 1 + attrs = dict(trunc_events[0].attributes) + assert attrs[COPILOT_EVENT_ATTR_TOKEN_LIMIT] == 4096.0 + assert attrs[COPILOT_EVENT_ATTR_PERFORMED_BY] == "model" + + @pytest.mark.asyncio + async def test_compaction_start_span_event(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + 
session._dispatch_event(_make_session_event(SessionEventType.SESSION_COMPACTION_START)) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + chat_span = next( + s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_CHAT + ) + events = chat_span.events + compaction_events = [e for e in events if e.name == COPILOT_EVENT_SESSION_COMPACTION_START] + assert len(compaction_events) == 1 + + @pytest.mark.asyncio + async def test_compaction_complete_span_event(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event( + _make_session_event( + SessionEventType.SESSION_COMPACTION_COMPLETE, + success=True, + pre_compaction_tokens=5000.0, + post_compaction_tokens=3000.0, + tokens_removed=2000.0, + messages_removed=10.0, + ) + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + chat_span = next( + s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_CHAT + ) + events = chat_span.events + compaction_events = [ + e for e in events if e.name == COPILOT_EVENT_SESSION_COMPACTION_COMPLETE + ] + assert len(compaction_events) == 1 + attrs = dict(compaction_events[0].attributes) + assert attrs[COPILOT_EVENT_ATTR_SUCCESS] is True + assert attrs[COPILOT_EVENT_ATTR_PRE_TOKENS] == 5000.0 + assert attrs[COPILOT_EVENT_ATTR_TOKENS_REMOVED] == 2000.0 + + @pytest.mark.asyncio + async def test_skill_invoked_span_event(self, 
_reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry( + enable_sensitive_data=True, tracer_provider=tp, meter_provider=mp + ) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event( + _make_session_event( + SessionEventType.SKILL_INVOKED, + name="codeSearch", + path="/skills/code-search", + content="skill content here", + plugin_name="copilot-extensions", + plugin_version="1.2.3", + ) + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + chat_span = next( + s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_CHAT + ) + events = chat_span.events + skill_events = [e for e in events if e.name == COPILOT_EVENT_SKILL_INVOKED] + assert len(skill_events) == 1 + attrs = dict(skill_events[0].attributes) + assert attrs[COPILOT_EVENT_ATTR_SKILL_NAME] == "codeSearch" + assert attrs[COPILOT_EVENT_ATTR_SKILL_PATH] == "/skills/code-search" + assert attrs[COPILOT_EVENT_ATTR_SKILL_CONTENT] == "skill content here" + assert attrs[COPILOT_EVENT_ATTR_SKILL_PLUGIN_NAME] == "copilot-extensions" + assert attrs[COPILOT_EVENT_ATTR_SKILL_PLUGIN_VERSION] == "1.2.3" + + +# --------------------------------------------------------------------------- +# Tests: Rich message content (parts-based) +# --------------------------------------------------------------------------- + + +class TestRichMessageContent: + @pytest.mark.asyncio + async def test_reasoning_parts_in_output(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry( + enable_sensitive_data=True, tracer_provider=tp, meter_provider=mp + ) + session = 
_make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Think about this"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event( + _make_session_event( + SessionEventType.ASSISTANT_MESSAGE, + content="The answer is 42", + reasoning_text="Let me think step by step...", + ) + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + chat_span = next( + s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_CHAT + ) + attrs = dict(chat_span.attributes) + + output_msgs = json.loads(attrs[ATTR_GEN_AI_OUTPUT_MESSAGES]) + assert len(output_msgs) == 1 + parts = output_msgs[0]["parts"] + # Should have reasoning and text parts + assert any(p["type"] == "reasoning" for p in parts) + assert any(p["type"] == "text" for p in parts) + reasoning_part = next(p for p in parts if p["type"] == "reasoning") + assert reasoning_part["content"] == "Let me think step by step..." 
+ + @pytest.mark.asyncio + async def test_tool_call_parts(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry( + enable_sensitive_data=True, tracer_provider=tp, meter_provider=mp + ) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Use a tool"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event( + _make_session_event( + SessionEventType.TOOL_EXECUTION_START, + tool_call_id="tc-1", + tool_name="search", + arguments={"query": "hello"}, + ) + ) + session._dispatch_event( + _make_session_event( + SessionEventType.TOOL_EXECUTION_COMPLETE, + tool_call_id="tc-1", + tool_name="search", + ) + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + chat_span = next( + s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_CHAT + ) + attrs = dict(chat_span.attributes) + + output_msgs = json.loads(attrs[ATTR_GEN_AI_OUTPUT_MESSAGES]) + tool_call_msg = next(m for m in output_msgs if m["parts"][0]["type"] == "tool_call") + assert tool_call_msg["parts"][0]["name"] == "search" + assert tool_call_msg["parts"][0]["id"] == "tc-1" + + input_msgs = json.loads(attrs[ATTR_GEN_AI_INPUT_MESSAGES]) + tool_resp_msg = next( + (m for m in input_msgs if m["parts"][0]["type"] == "tool_call_response"), None + ) + assert tool_resp_msg is not None + + +# --------------------------------------------------------------------------- +# Tests: MCP server tool tracking +# --------------------------------------------------------------------------- + + +class TestMcpServerToolTracking: + @pytest.mark.asyncio + async def test_server_tool_call_parts(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) 
+ telemetry = _make_telemetry( + enable_sensitive_data=True, tracer_provider=tp, meter_provider=mp + ) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Use MCP tool"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event( + _make_session_event( + SessionEventType.TOOL_EXECUTION_START, + tool_call_id="tc-mcp-1", + tool_name="mcp_search", + mcp_server_name="my-mcp-server", + arguments={"q": "test"}, + ) + ) + session._dispatch_event( + _make_session_event( + SessionEventType.TOOL_EXECUTION_COMPLETE, + tool_call_id="tc-mcp-1", + tool_name="mcp_search", + ) + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + chat_span = next( + s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_CHAT + ) + attrs = dict(chat_span.attributes) + + output_msgs = json.loads(attrs[ATTR_GEN_AI_OUTPUT_MESSAGES]) + stc_msg = next(m for m in output_msgs if m["parts"][0]["type"] == "server_tool_call") + stc_part = stc_msg["parts"][0] + assert "server_tool_call" in stc_part + assert stc_part["server_tool_call"]["type"] == "mcp" + assert stc_part["server_tool_call"]["server_name"] == "my-mcp-server" + + input_msgs = json.loads(attrs[ATTR_GEN_AI_INPUT_MESSAGES]) + stcr_msg = next( + (m for m in input_msgs if m["parts"][0]["type"] == "server_tool_call_response"), + None, + ) + assert stcr_msg is not None + stcr_part = stcr_msg["parts"][0] + assert "server_tool_call_response" in stcr_part + assert stcr_part["server_tool_call_response"]["type"] == "mcp" + assert stcr_part["server_tool_call_response"]["server_name"] == "my-mcp-server" + + +# --------------------------------------------------------------------------- +# Tests: Subagent spans +# 
--------------------------------------------------------------------------- + + +class TestSubagentSpans: + @pytest.mark.asyncio + async def test_subagent_creates_nested_hierarchy(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry( + enable_sensitive_data=True, tracer_provider=tp, meter_provider=mp + ) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + + # Subagent started + session._dispatch_event( + _make_session_event( + SessionEventType.SUBAGENT_STARTED, + tool_call_id="sa-tc-1", + agent_name="sub-agent", + agent_description="A helper agent", + ) + ) + + # Subagent gets a usage event (routed by parent_tool_call_id) + session._dispatch_event( + _make_session_event( + SessionEventType.ASSISTANT_USAGE, + parent_tool_call_id="sa-tc-1", + model="gpt-4o-mini", + input_tokens=25.0, + output_tokens=50.0, + ) + ) + + # Subagent gets a message + session._dispatch_event( + _make_session_event( + SessionEventType.ASSISTANT_MESSAGE, + parent_tool_call_id="sa-tc-1", + content="Subagent response", + ) + ) + + # Subagent completed + session._dispatch_event( + _make_session_event( + SessionEventType.SUBAGENT_COMPLETED, + tool_call_id="sa-tc-1", + ) + ) + + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + invoke_spans = [ + s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_INVOKE_AGENT + ] + chat_spans = [s for s in spans if s.attributes.get(ATTR_GEN_AI_OPERATION_NAME) == OP_CHAT] + + # Should have 2 invoke_agent (main + subagent) and 2 chat spans + assert len(invoke_spans) == 2 + assert len(chat_spans) == 2 + + # Subagent's invoke_agent span should have agent_name + 
sub_invoke = next( + (s for s in invoke_spans if s.attributes.get(ATTR_GEN_AI_AGENT_NAME) == "sub-agent"), + None, + ) + assert sub_invoke is not None + + # Subagent spans should be children of the root invoke_agent span + root_invoke = next( + s for s in invoke_spans if s.attributes.get(ATTR_GEN_AI_AGENT_NAME) != "sub-agent" + ) + assert sub_invoke.parent is not None + assert sub_invoke.parent.span_id == root_invoke.context.span_id + + # Subagent chat should have usage + sub_chat = next( + (s for s in chat_spans if s.parent and s.parent.span_id == sub_invoke.context.span_id), + None, + ) + assert sub_chat is not None + assert sub_chat.attributes.get(ATTR_GEN_AI_USAGE_INPUT_TOKENS) == 25 + assert sub_chat.attributes.get(ATTR_GEN_AI_USAGE_OUTPUT_TOKENS) == 50 + + @pytest.mark.asyncio + async def test_subagent_failed(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + session._dispatch_event( + _make_session_event( + SessionEventType.SUBAGENT_STARTED, + tool_call_id="sa-tc-fail", + agent_name="bad-agent", + ) + ) + session._dispatch_event( + _make_session_event( + SessionEventType.SUBAGENT_FAILED, + tool_call_id="sa-tc-fail", + agent_name="bad-agent", + error="Something broke", + ) + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + spans = exporter.get_finished_spans() + # Subagent spans should have error status + sub_invoke = next( + (s for s in spans if s.attributes.get(ATTR_GEN_AI_AGENT_NAME) == "bad-agent"), + None, + ) + assert sub_invoke is not None + assert sub_invoke.status.status_code == trace.StatusCode.ERROR + assert 
sub_invoke.attributes[ATTR_GEN_AI_RESPONSE_FINISH_REASONS] == ("error",) + + +# --------------------------------------------------------------------------- +# Tests: CompleteOnDispose +# --------------------------------------------------------------------------- + + +class TestCompleteOnDispose: + @pytest.mark.asyncio + async def test_complete_on_dispose_closes_spans_with_error(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + + # Dispose while turn is in progress + session._turn_tracker.complete_on_dispose() + + spans = exporter.get_finished_spans() + # Should have both chat and invoke_agent spans + assert len(spans) >= 2 + + for s in spans: + assert s.status.status_code == trace.StatusCode.ERROR + assert s.attributes[ATTR_ERROR_TYPE] == "SessionDestroyedError" + assert s.attributes[ATTR_GEN_AI_RESPONSE_FINISH_REASONS] == ("error",) + + @pytest.mark.asyncio + async def test_destroy_calls_complete_on_dispose(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + + # Destroy should close open spans + await session.destroy() + + spans = exporter.get_finished_spans() + assert len(spans) >= 2 + for s in spans: + assert s.status.status_code == trace.StatusCode.ERROR + + +# --------------------------------------------------------------------------- +# Tests: Tool call parent context routing +# 
--------------------------------------------------------------------------- + + +class TestToolCallParentContext: + @pytest.mark.asyncio + async def test_get_tool_call_parent_context_returns_agent_context(self, _reset_otel_globals): + _, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + + # Store a pending parent via tool_execution_start + session._dispatch_event( + _make_session_event( + SessionEventType.TOOL_EXECUTION_START, + tool_call_id="tc-1", + tool_name="my_tool", + ) + ) + + # get_telemetry_tool_call_parent_context should return a context + ctx = session.get_telemetry_tool_call_parent_context("tc-1") + assert ctx is not None + + # Consuming again should fallback to agent context + ctx2 = session.get_telemetry_tool_call_parent_context("tc-1") + assert ctx2 is not None # Falls back to agent span context + + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + +# --------------------------------------------------------------------------- +# Tests: Token usage metrics use "chat" operation name +# --------------------------------------------------------------------------- + + +class TestTokenUsageMetricsOperationName: + @pytest.mark.asyncio + async def test_per_turn_token_metrics_use_chat_op(self, _reset_otel_globals): + _, reader, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + session = _make_session_with_telemetry(telemetry, "s1") + + await session.send({"prompt": "Hello"}) + session._dispatch_event( + _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") + ) + 
session._dispatch_event( + _make_session_event( + SessionEventType.ASSISTANT_USAGE, + model="gpt-4o", + input_tokens=50.0, + output_tokens=100.0, + ) + ) + session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) + session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) + + data_points = _get_metric_data_points(reader, METRIC_TOKEN_USAGE) + assert len(data_points) > 0 + for dp in data_points: + attrs = dict(dp.attributes) + assert attrs[ATTR_GEN_AI_OPERATION_NAME] == OP_CHAT + + +# --------------------------------------------------------------------------- +# Tests: SDK version on tracer/meter +# --------------------------------------------------------------------------- + + +class TestSdkVersion: + def test_tracer_has_version(self, _reset_otel_globals): + exporter, _, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + + span, _ctx = telemetry.start_invoke_agent_span("s1", None, "github", None, None) + span.end() + + s = exporter.get_finished_spans()[0] + # The instrumentation scope should have a version from copilot.__version__ + from copilot._copilot_telemetry import _SDK_VERSION + + if _SDK_VERSION: + assert s.instrumentation_scope.version == _SDK_VERSION + else: + # Graceful fallback if version not found + assert s.instrumentation_scope.version is not None or True diff --git a/test/scenarios/auth/byok-anthropic/go/go.mod b/test/scenarios/auth/byok-anthropic/go/go.mod index 9a727c69c..2b6d2f8ca 100644 --- a/test/scenarios/auth/byok-anthropic/go/go.mod +++ b/test/scenarios/auth/byok-anthropic/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/auth/byok-anthropic/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + 
github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/auth/byok-anthropic/go/go.sum b/test/scenarios/auth/byok-anthropic/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/auth/byok-anthropic/go/go.sum +++ b/test/scenarios/auth/byok-anthropic/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/auth/byok-azure/go/go.mod b/test/scenarios/auth/byok-azure/go/go.mod index f0dd08661..ae0961138 100644 --- a/test/scenarios/auth/byok-azure/go/go.mod +++ b/test/scenarios/auth/byok-azure/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/auth/byok-azure/go -go 1.24 +go 1.24.0 + 
+toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/auth/byok-azure/go/go.sum b/test/scenarios/auth/byok-azure/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/auth/byok-azure/go/go.sum +++ b/test/scenarios/auth/byok-azure/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/auth/byok-ollama/go/go.mod b/test/scenarios/auth/byok-ollama/go/go.mod index 
806aaa5c2..4f06202ae 100644 --- a/test/scenarios/auth/byok-ollama/go/go.mod +++ b/test/scenarios/auth/byok-ollama/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/auth/byok-ollama/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/auth/byok-ollama/go/go.sum b/test/scenarios/auth/byok-ollama/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/auth/byok-ollama/go/go.sum +++ b/test/scenarios/auth/byok-ollama/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod 
h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= 
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/auth/byok-openai/go/go.mod b/test/scenarios/auth/byok-openai/go/go.mod index 2d5a75ecf..e7cee801b 100644 --- a/test/scenarios/auth/byok-openai/go/go.mod +++ b/test/scenarios/auth/byok-openai/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/auth/byok-openai/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/auth/byok-openai/go/go.sum b/test/scenarios/auth/byok-openai/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/auth/byok-openai/go/go.sum +++ b/test/scenarios/auth/byok-openai/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 
h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= 
+go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/auth/gh-app/go/go.mod b/test/scenarios/auth/gh-app/go/go.mod index a0d270c6e..f0a9c9cef 100644 --- a/test/scenarios/auth/gh-app/go/go.mod +++ b/test/scenarios/auth/gh-app/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/auth/gh-app/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/auth/gh-app/go/go.sum b/test/scenarios/auth/gh-app/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/auth/gh-app/go/go.sum +++ b/test/scenarios/auth/gh-app/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr 
v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= 
+go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/bundling/app-backend-to-server/go/go.mod b/test/scenarios/bundling/app-backend-to-server/go/go.mod index 6d01df73b..69de0828a 100644 --- a/test/scenarios/bundling/app-backend-to-server/go/go.mod +++ b/test/scenarios/bundling/app-backend-to-server/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/bundling/app-backend-to-server/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/bundling/app-backend-to-server/go/go.sum b/test/scenarios/bundling/app-backend-to-server/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/bundling/app-backend-to-server/go/go.sum +++ b/test/scenarios/bundling/app-backend-to-server/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= 
+go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/bundling/app-direct-server/go/go.mod b/test/scenarios/bundling/app-direct-server/go/go.mod index db24ae393..6b77c20db 100644 --- a/test/scenarios/bundling/app-direct-server/go/go.mod +++ b/test/scenarios/bundling/app-direct-server/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/bundling/app-direct-server/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git 
a/test/scenarios/bundling/app-direct-server/go/go.sum b/test/scenarios/bundling/app-direct-server/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/bundling/app-direct-server/go/go.sum +++ b/test/scenarios/bundling/app-direct-server/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= 
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/bundling/container-proxy/go/go.mod b/test/scenarios/bundling/container-proxy/go/go.mod index 086f43175..f1d2acdc9 100644 --- a/test/scenarios/bundling/container-proxy/go/go.mod +++ b/test/scenarios/bundling/container-proxy/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/bundling/container-proxy/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go 
v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/bundling/container-proxy/go/go.sum b/test/scenarios/bundling/container-proxy/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/bundling/container-proxy/go/go.sum +++ b/test/scenarios/bundling/container-proxy/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/bundling/fully-bundled/go/go.mod b/test/scenarios/bundling/fully-bundled/go/go.mod index 93af1915a..159b1c170 100644 --- a/test/scenarios/bundling/fully-bundled/go/go.mod +++ b/test/scenarios/bundling/fully-bundled/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/bundling/fully-bundled/go -go 1.24 +go 1.24.0 + +toolchain 
go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/bundling/fully-bundled/go/go.sum b/test/scenarios/bundling/fully-bundled/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/bundling/fully-bundled/go/go.sum +++ b/test/scenarios/bundling/fully-bundled/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= 
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/callbacks/hooks/go/go.mod 
b/test/scenarios/callbacks/hooks/go/go.mod index 51b27e491..995fc7883 100644 --- a/test/scenarios/callbacks/hooks/go/go.mod +++ b/test/scenarios/callbacks/hooks/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/callbacks/hooks/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/callbacks/hooks/go/go.sum b/test/scenarios/callbacks/hooks/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/callbacks/hooks/go/go.sum +++ b/test/scenarios/callbacks/hooks/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp 
v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod 
h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/callbacks/permissions/go/go.mod b/test/scenarios/callbacks/permissions/go/go.mod index 25eb7d22a..84fee8ccc 100644 --- a/test/scenarios/callbacks/permissions/go/go.mod +++ b/test/scenarios/callbacks/permissions/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/callbacks/permissions/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/callbacks/permissions/go/go.sum b/test/scenarios/callbacks/permissions/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/callbacks/permissions/go/go.sum +++ b/test/scenarios/callbacks/permissions/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= 
+go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/callbacks/user-input/go/go.mod b/test/scenarios/callbacks/user-input/go/go.mod index 11419b634..923dfa0d6 100644 --- a/test/scenarios/callbacks/user-input/go/go.mod +++ b/test/scenarios/callbacks/user-input/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/callbacks/user-input/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/callbacks/user-input/go/go.sum b/test/scenarios/callbacks/user-input/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/callbacks/user-input/go/go.sum +++ b/test/scenarios/callbacks/user-input/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew 
v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= 
+go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/modes/default/go/go.mod b/test/scenarios/modes/default/go/go.mod index 50b92181f..b909ea28f 100644 --- a/test/scenarios/modes/default/go/go.mod +++ b/test/scenarios/modes/default/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/modes/default/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/modes/default/go/go.sum b/test/scenarios/modes/default/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/modes/default/go/go.sum +++ b/test/scenarios/modes/default/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 
h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 
h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/modes/minimal/go/go.mod b/test/scenarios/modes/minimal/go/go.mod index 72fbe3540..f5dc24b54 100644 --- a/test/scenarios/modes/minimal/go/go.mod +++ b/test/scenarios/modes/minimal/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/modes/minimal/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/modes/minimal/go/go.sum 
b/test/scenarios/modes/minimal/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/modes/minimal/go/go.sum +++ b/test/scenarios/modes/minimal/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= 
+go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/prompts/attachments/go/go.mod b/test/scenarios/prompts/attachments/go/go.mod index 0a5dc6c1f..d0347fb34 100644 --- a/test/scenarios/prompts/attachments/go/go.mod +++ b/test/scenarios/prompts/attachments/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/prompts/attachments/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + 
go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/prompts/attachments/go/go.sum b/test/scenarios/prompts/attachments/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/prompts/attachments/go/go.sum +++ b/test/scenarios/prompts/attachments/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= 
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/prompts/reasoning-effort/go/go.mod b/test/scenarios/prompts/reasoning-effort/go/go.mod index f2aa4740c..c1ff1cc9d 100644 --- a/test/scenarios/prompts/reasoning-effort/go/go.mod +++ b/test/scenarios/prompts/reasoning-effort/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/prompts/reasoning-effort/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( 
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/prompts/reasoning-effort/go/go.sum b/test/scenarios/prompts/reasoning-effort/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/prompts/reasoning-effort/go/go.sum +++ b/test/scenarios/prompts/reasoning-effort/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/prompts/system-message/go/go.mod b/test/scenarios/prompts/system-message/go/go.mod index b8301c15a..30363d2f2 100644 --- a/test/scenarios/prompts/system-message/go/go.mod 
+++ b/test/scenarios/prompts/system-message/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/prompts/system-message/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/prompts/system-message/go/go.sum b/test/scenarios/prompts/system-message/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/prompts/system-message/go/go.sum +++ b/test/scenarios/prompts/system-message/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= 
github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/sessions/concurrent-sessions/go/go.mod b/test/scenarios/sessions/concurrent-sessions/go/go.mod index c01642320..184552b26 100644 --- a/test/scenarios/sessions/concurrent-sessions/go/go.mod +++ b/test/scenarios/sessions/concurrent-sessions/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/sessions/concurrent-sessions/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/sessions/concurrent-sessions/go/go.sum b/test/scenarios/sessions/concurrent-sessions/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/sessions/concurrent-sessions/go/go.sum +++ b/test/scenarios/sessions/concurrent-sessions/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= 
+go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/sessions/infinite-sessions/go/go.mod b/test/scenarios/sessions/infinite-sessions/go/go.mod index cb8d2713d..9c488fcc7 100644 --- a/test/scenarios/sessions/infinite-sessions/go/go.mod +++ b/test/scenarios/sessions/infinite-sessions/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/sessions/infinite-sessions/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/sessions/infinite-sessions/go/go.sum b/test/scenarios/sessions/infinite-sessions/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/sessions/infinite-sessions/go/go.sum +++ b/test/scenarios/sessions/infinite-sessions/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 
h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/sessions/session-resume/go/go.mod b/test/scenarios/sessions/session-resume/go/go.mod index 3722b78d2..dfafc90e9 100644 --- a/test/scenarios/sessions/session-resume/go/go.mod +++ b/test/scenarios/sessions/session-resume/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/sessions/session-resume/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/sessions/session-resume/go/go.sum b/test/scenarios/sessions/session-resume/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/sessions/session-resume/go/go.sum +++ 
b/test/scenarios/sessions/session-resume/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel 
v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/sessions/streaming/go/go.mod b/test/scenarios/sessions/streaming/go/go.mod index acb516379..cd639294a 100644 --- a/test/scenarios/sessions/streaming/go/go.mod +++ b/test/scenarios/sessions/streaming/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/sessions/streaming/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) 
replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/sessions/streaming/go/go.sum b/test/scenarios/sessions/streaming/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/sessions/streaming/go/go.sum +++ b/test/scenarios/sessions/streaming/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 
h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/tools/custom-agents/go/go.mod b/test/scenarios/tools/custom-agents/go/go.mod index 9acbccb06..a1672c1bb 100644 --- a/test/scenarios/tools/custom-agents/go/go.mod +++ b/test/scenarios/tools/custom-agents/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/tools/custom-agents/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + 
github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/tools/custom-agents/go/go.sum b/test/scenarios/tools/custom-agents/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/tools/custom-agents/go/go.sum +++ b/test/scenarios/tools/custom-agents/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/tools/mcp-servers/go/go.mod b/test/scenarios/tools/mcp-servers/go/go.mod index 4b93e09e7..8cbf69fd0 100644 --- a/test/scenarios/tools/mcp-servers/go/go.mod +++ b/test/scenarios/tools/mcp-servers/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/tools/mcp-servers/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require 
github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/tools/mcp-servers/go/go.sum b/test/scenarios/tools/mcp-servers/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/tools/mcp-servers/go/go.sum +++ b/test/scenarios/tools/mcp-servers/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/tools/no-tools/go/go.mod b/test/scenarios/tools/no-tools/go/go.mod index 
74131d3e6..c9d1363fe 100644 --- a/test/scenarios/tools/no-tools/go/go.mod +++ b/test/scenarios/tools/no-tools/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/tools/no-tools/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/tools/no-tools/go/go.sum b/test/scenarios/tools/no-tools/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/tools/no-tools/go/go.sum +++ b/test/scenarios/tools/no-tools/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= 
github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/tools/skills/go/go.mod b/test/scenarios/tools/skills/go/go.mod index 1467fd64f..287f749f5 100644 --- a/test/scenarios/tools/skills/go/go.mod +++ b/test/scenarios/tools/skills/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/tools/skills/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/tools/skills/go/go.sum b/test/scenarios/tools/skills/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/tools/skills/go/go.sum +++ b/test/scenarios/tools/skills/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod 
h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod 
h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/tools/tool-filtering/go/go.mod b/test/scenarios/tools/tool-filtering/go/go.mod index c3051c52b..432ebfb71 100644 --- a/test/scenarios/tools/tool-filtering/go/go.mod +++ b/test/scenarios/tools/tool-filtering/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/tools/tool-filtering/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/tools/tool-filtering/go/go.sum b/test/scenarios/tools/tool-filtering/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/tools/tool-filtering/go/go.sum +++ b/test/scenarios/tools/tool-filtering/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric 
v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/tools/virtual-filesystem/go/go.mod b/test/scenarios/tools/virtual-filesystem/go/go.mod index d6606bb7b..9d75447ae 100644 --- a/test/scenarios/tools/virtual-filesystem/go/go.mod +++ b/test/scenarios/tools/virtual-filesystem/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/tools/virtual-filesystem/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/tools/virtual-filesystem/go/go.sum b/test/scenarios/tools/virtual-filesystem/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/tools/virtual-filesystem/go/go.sum +++ b/test/scenarios/tools/virtual-filesystem/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= 
+go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/transport/reconnect/go/go.mod b/test/scenarios/transport/reconnect/go/go.mod index 7a1f80d6c..9cd49b4c5 100644 --- a/test/scenarios/transport/reconnect/go/go.mod +++ b/test/scenarios/transport/reconnect/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/transport/reconnect/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/transport/reconnect/go/go.sum 
b/test/scenarios/transport/reconnect/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/transport/reconnect/go/go.sum +++ b/test/scenarios/transport/reconnect/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod 
h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/transport/stdio/go/go.mod b/test/scenarios/transport/stdio/go/go.mod index 2dcc35310..9f9412e8e 100644 --- a/test/scenarios/transport/stdio/go/go.mod +++ b/test/scenarios/transport/stdio/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/transport/stdio/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + 
go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/transport/stdio/go/go.sum b/test/scenarios/transport/stdio/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/transport/stdio/go/go.sum +++ b/test/scenarios/transport/stdio/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 
h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/transport/tcp/go/go.mod b/test/scenarios/transport/tcp/go/go.mod index dc1a0b6f9..69a7e9ae4 100644 --- a/test/scenarios/transport/tcp/go/go.mod +++ b/test/scenarios/transport/tcp/go/go.mod @@ -1,9 +1,20 @@ module github.com/github/copilot-sdk/samples/transport/tcp/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + 
github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/transport/tcp/go/go.sum b/test/scenarios/transport/tcp/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/transport/tcp/go/go.sum +++ b/test/scenarios/transport/tcp/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/snapshots/otel/records_subagent_invoke_agent_and_chat_spans.yaml b/test/snapshots/otel/records_subagent_invoke_agent_and_chat_spans.yaml new file mode 100644 index 000000000..408db0a53 --- /dev/null +++ 
b/test/snapshots/otel/records_subagent_invoke_agent_and_chat_spans.yaml @@ -0,0 +1,144 @@ +models: + - claude-sonnet-4.5 +conversations: + - messages: + - role: system + content: ${system} + - role: user + content: Write a short four-line poem about mountains. + - role: assistant + content: |- + Peaks that pierce the endless sky, + Standing tall where eagles fly, + Silent giants, ancient stone, + Watching over earth alone. + - messages: + - role: system + content: ${system} + - role: user + content: Write a short four-line poem about rivers. + - role: assistant + content: |- + Rivers flow from mountain heights, + Carving paths through ancient stone, + Whispering tales of days and nights, + To the sea they journey home. + - messages: + - role: system + content: ${system} + - role: user + content: Call get_topic twice to get two different topics, then launch two background general-purpose subagents in + parallel (one per topic) to each write a short four-line poem about their assigned topic. Use read_agent to + wait for both results and show them to me. + - role: assistant + content: I'll get two topics, launch two poem-writing agents in parallel, and retrieve their results. + tool_calls: + - id: toolcall_0 + type: function + function: + name: report_intent + arguments: '{"intent":"Getting poem topics"}' + - id: toolcall_1 + type: function + function: + name: get_topic + arguments: "{}" + - id: toolcall_2 + type: function + function: + name: get_topic + arguments: "{}" + - role: tool + tool_call_id: toolcall_0 + content: Intent logged + - role: tool + tool_call_id: toolcall_1 + content: mountains + - role: tool + tool_call_id: toolcall_2 + content: rivers + - role: assistant + content: Great! I got "mountains" and "rivers" as topics. Now launching two background agents to write poems. 
+ tool_calls: + - id: toolcall_3 + type: function + function: + name: task + arguments: '{"agent_type":"general-purpose","description":"Write mountains poem","mode":"background","prompt":"Write a + short four-line poem about mountains."}' + - id: toolcall_4 + type: function + function: + name: task + arguments: '{"agent_type":"general-purpose","description":"Write rivers poem","mode":"background","prompt":"Write a + short four-line poem about rivers."}' + - role: tool + tool_call_id: toolcall_3 + content: "Agent started in background with agent_id: agent-0. You can use read_agent tool with this agent_id to check + status and retrieve results. Instruct the user to use /tasks to manage the background agent." + - role: tool + tool_call_id: toolcall_4 + content: "Agent started in background with agent_id: agent-1. You can use read_agent tool with this agent_id to check + status and retrieve results. Instruct the user to use /tasks to manage the background agent." + - role: assistant + content: Now waiting for both agents to complete and retrieving their poems. + tool_calls: + - id: toolcall_5 + type: function + function: + name: read_agent + arguments: '{"agent_id":"agent-0","timeout":60,"wait":true}' + - role: tool + tool_call_id: toolcall_5 + content: >- + Agent completed. agent_id: agent-0, agent_type: general-purpose, status: completed, description: Write + mountains poem, elapsed: 0s, duration: 0s + + + Result: + + Peaks that pierce the endless sky, + + Standing tall where eagles fly, + + Silent giants, ancient stone, + + Watching over earth alone. + - role: assistant + tool_calls: + - id: toolcall_6 + type: function + function: + name: read_agent + arguments: '{"agent_id":"agent-1","timeout":60,"wait":true}' + - role: tool + tool_call_id: toolcall_6 + content: >- + Agent completed. 
agent_id: agent-1, agent_type: general-purpose, status: completed, description: Write rivers + poem, elapsed: 0s, duration: 0s + + + Result: + + Rivers flow from mountain heights, + + Carving paths through ancient stone, + + Whispering tales of days and nights, + + To the sea they journey home. + - role: assistant + content: |- + Here are the two poems: + + **Mountains:** + Peaks that pierce the endless sky, + Standing tall where eagles fly, + Silent giants, ancient stone, + Watching over earth alone. + + **Rivers:** + Rivers flow from mountain heights, + Carving paths through ancient stone, + Whispering tales of days and nights, + To the sea they journey home. From 506427dd8e4822df04a3040a778e0038af54ff22 Mon Sep 17 00:00:00 2001 From: Stephen Toub Date: Mon, 2 Mar 2026 17:03:14 -0500 Subject: [PATCH 2/7] Address PR feedback Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- dotnet/src/Client.cs | 2 +- dotnet/src/CopilotTelemetry.cs | 35 ++++++++++++++-------------- dotnet/src/OpenTelemetryConsts.cs | 9 ++++--- go/copilot_telemetry.go | 1 - nodejs/src/copilot-telemetry.ts | 4 +--- python/copilot/_copilot_telemetry.py | 4 +--- 6 files changed, 26 insertions(+), 29 deletions(-) diff --git a/dotnet/src/Client.cs b/dotnet/src/Client.cs index f8856ae70..b3dbe70ce 100644 --- a/dotnet/src/Client.cs +++ b/dotnet/src/Client.cs @@ -1337,7 +1337,7 @@ public async Task OnToolCall(string sessionId, serverAddress: session.TelemetryServerAddress, serverPort: session.TelemetryServerPort, error: operationError, - operationName: OpenTelemetryConsts.GenAI.ExecuteTool); + operationName: OpenTelemetryConsts.GenAI.OperationNames.ExecuteTool); } } } diff --git a/dotnet/src/CopilotTelemetry.cs b/dotnet/src/CopilotTelemetry.cs index 6afc17855..e84b2671b 100644 --- a/dotnet/src/CopilotTelemetry.cs +++ b/dotnet/src/CopilotTelemetry.cs @@ -106,12 +106,12 @@ public CopilotTelemetry(TelemetryConfig? config) } string displayName = string.IsNullOrWhiteSpace(agentName) - ? 
OpenTelemetryConsts.GenAI.InvokeAgent - : $"{OpenTelemetryConsts.GenAI.InvokeAgent} {agentName}"; + ? OpenTelemetryConsts.GenAI.OperationNames.InvokeAgent + : $"{OpenTelemetryConsts.GenAI.OperationNames.InvokeAgent} {agentName}"; ActivityTagsCollection tags = new() { - { OpenTelemetryConsts.GenAI.Operation.Name, OpenTelemetryConsts.GenAI.InvokeAgent }, + { OpenTelemetryConsts.GenAI.Operation.Name, OpenTelemetryConsts.GenAI.OperationNames.InvokeAgent }, { OpenTelemetryConsts.GenAI.Provider.Name, providerName }, { OpenTelemetryConsts.GenAI.Agent.Id, sessionId }, { OpenTelemetryConsts.GenAI.Conversation.Id, sessionId }, @@ -159,12 +159,12 @@ public CopilotTelemetry(TelemetryConfig? config) } string displayName = string.IsNullOrWhiteSpace(model) - ? OpenTelemetryConsts.GenAI.Chat - : $"{OpenTelemetryConsts.GenAI.Chat} {model}"; + ? OpenTelemetryConsts.GenAI.OperationNames.Chat + : $"{OpenTelemetryConsts.GenAI.OperationNames.Chat} {model}"; ActivityTagsCollection tags = new() { - { OpenTelemetryConsts.GenAI.Operation.Name, OpenTelemetryConsts.GenAI.Chat }, + { OpenTelemetryConsts.GenAI.Operation.Name, OpenTelemetryConsts.GenAI.OperationNames.Chat }, { OpenTelemetryConsts.GenAI.Provider.Name, providerName }, }; @@ -198,11 +198,11 @@ public CopilotTelemetry(TelemetryConfig? 
config) return null; } - string displayName = $"{OpenTelemetryConsts.GenAI.ExecuteTool} {toolName}"; + string displayName = $"{OpenTelemetryConsts.GenAI.OperationNames.ExecuteTool} {toolName}"; ActivityTagsCollection tags = new() { - { OpenTelemetryConsts.GenAI.Operation.Name, OpenTelemetryConsts.GenAI.ExecuteTool }, + { OpenTelemetryConsts.GenAI.Operation.Name, OpenTelemetryConsts.GenAI.OperationNames.ExecuteTool }, { OpenTelemetryConsts.GenAI.Tool.Name, toolName }, { OpenTelemetryConsts.GenAI.Tool.CallId, toolCallId }, { OpenTelemetryConsts.GenAI.Tool.Type, "function" }, @@ -297,7 +297,7 @@ public void RecordTimeToFirstChunk( TimeToFirstChunkHistogram.Record( durationSeconds, CreateMetricTags( - OpenTelemetryConsts.GenAI.Chat, + OpenTelemetryConsts.GenAI.OperationNames.Chat, requestModel, responseModel, providerName, @@ -319,7 +319,7 @@ public void RecordTimePerOutputChunk( TimePerOutputChunkHistogram.Record( durationSeconds, CreateMetricTags( - OpenTelemetryConsts.GenAI.Chat, + OpenTelemetryConsts.GenAI.OperationNames.Chat, requestModel, responseModel, providerName, @@ -1152,7 +1152,7 @@ private void CompleteChatTurnLocked(Exception? error) ServerAddress, ServerPort, error, - OpenTelemetryConsts.GenAI.Chat); + OpenTelemetryConsts.GenAI.OperationNames.Chat); // Per-turn operation duration if (_telemetry.OperationDurationHistogram.Enabled) @@ -1165,7 +1165,7 @@ private void CompleteChatTurnLocked(Exception? error) ServerAddress, ServerPort, error: error, - operationName: OpenTelemetryConsts.GenAI.Chat); + operationName: OpenTelemetryConsts.GenAI.OperationNames.Chat); } _firstOutputChunkRecorded = false; @@ -1310,7 +1310,7 @@ private void CompleteAgentTurnLocked(Exception? 
error) ServerAddress, ServerPort, error: error, - operationName: OpenTelemetryConsts.GenAI.InvokeAgent); + operationName: OpenTelemetryConsts.GenAI.OperationNames.InvokeAgent); } activity.Dispose(); @@ -1324,7 +1324,7 @@ private void RecordOutputChunkMetricLocked() { Debug.Assert(Monitor.IsEntered(_lock)); - if (!_isStreaming) + if (!_isStreaming || _turnTimestamp == 0) { return; } @@ -1433,12 +1433,11 @@ private void ProcessSubagentEventLocked(SubagentState subagent, SessionEvent ses case AssistantUsageEvent usageEvent: subagent.ResponseModel = usageEvent.Data.Model; - // Update response model on both spans if the subagent is using + // Update response model on chat span if the subagent is using // a different model than what was set at span creation time. if (!string.IsNullOrWhiteSpace(usageEvent.Data.Model)) { subagent.ChatActivity?.SetTag(OpenTelemetryConsts.GenAI.Response.Model, usageEvent.Data.Model); - subagent.InvokeAgentActivity?.SetTag(OpenTelemetryConsts.GenAI.Response.Model, usageEvent.Data.Model); } if (!string.IsNullOrWhiteSpace(usageEvent.Data.ApiCallId)) @@ -1637,7 +1636,7 @@ private void CompleteSubagentLocked(string toolCallId, Exception? error) ServerAddress, ServerPort, error, - OpenTelemetryConsts.GenAI.Chat); + OpenTelemetryConsts.GenAI.OperationNames.Chat); chatActivity.Dispose(); } @@ -1706,7 +1705,7 @@ private void CompleteSubagentLocked(string toolCallId, Exception? 
error) ServerAddress, ServerPort, error: error, - operationName: OpenTelemetryConsts.GenAI.InvokeAgent); + operationName: OpenTelemetryConsts.GenAI.OperationNames.InvokeAgent); } invokeActivity.Dispose(); diff --git a/dotnet/src/OpenTelemetryConsts.cs b/dotnet/src/OpenTelemetryConsts.cs index 973aeea51..d2b684361 100644 --- a/dotnet/src/OpenTelemetryConsts.cs +++ b/dotnet/src/OpenTelemetryConsts.cs @@ -36,9 +36,12 @@ public static class Server public static class GenAI { - public const string Chat = "chat"; - public const string InvokeAgent = "invoke_agent"; - public const string ExecuteTool = "execute_tool"; + public static class OperationNames + { + public const string Chat = "chat"; + public const string InvokeAgent = "invoke_agent"; + public const string ExecuteTool = "execute_tool"; + } public static class Operation { diff --git a/go/copilot_telemetry.go b/go/copilot_telemetry.go index 96c282b3e..6b6d6c1f4 100644 --- a/go/copilot_telemetry.go +++ b/go/copilot_telemetry.go @@ -1438,7 +1438,6 @@ func (t *agentTurnTracker) processSubagentEventLocked(sub *subagentState, event if sub.chatSpan != nil { sub.chatSpan.SetAttributes(attribute.String(otelGenAIResponseModel, *event.Data.Model)) } - sub.invokeAgentSpan.SetAttributes(attribute.String(otelGenAIResponseModel, *event.Data.Model)) } if event.Data.APICallID != nil && *event.Data.APICallID != "" { sub.responseID = *event.Data.APICallID diff --git a/nodejs/src/copilot-telemetry.ts b/nodejs/src/copilot-telemetry.ts index 997223e39..8bf654d74 100644 --- a/nodejs/src/copilot-telemetry.ts +++ b/nodejs/src/copilot-telemetry.ts @@ -1590,11 +1590,9 @@ export class AgentTurnTracker { switch (event.type) { case "assistant.usage": { const data = (event as Extract).data; - subagent.responseModel = data.model; - if (data.model) { + subagent.responseModel = data.model; subagent.chatSpan?.setAttribute(ATTR.GEN_AI_RESPONSE_MODEL, data.model); - subagent.invokeAgentSpan.setAttribute(ATTR.GEN_AI_RESPONSE_MODEL, data.model); } if 
(data.apiCallId) { diff --git a/python/copilot/_copilot_telemetry.py b/python/copilot/_copilot_telemetry.py index 2ff3d4f17..718718a64 100644 --- a/python/copilot/_copilot_telemetry.py +++ b/python/copilot/_copilot_telemetry.py @@ -1532,12 +1532,10 @@ def _process_subagent_event(self, subagent: _SubagentState, event: SessionEvent) if event.type == SessionEventType.ASSISTANT_USAGE: data = event.data model = getattr(data, "model", None) - subagent.response_model = model - if model: + subagent.response_model = model if subagent.chat_span is not None: subagent.chat_span.set_attribute(ATTR_GEN_AI_RESPONSE_MODEL, model) - subagent.invoke_agent_span.set_attribute(ATTR_GEN_AI_RESPONSE_MODEL, model) api_call_id = getattr(data, "api_call_id", None) provider_call_id = getattr(data, "provider_call_id", None) From 92cca0cdbf44fe0dcb25e5f9ad525aa04ddbe87f Mon Sep 17 00:00:00 2001 From: Stephen Toub Date: Mon, 2 Mar 2026 20:53:48 -0500 Subject: [PATCH 3/7] Fix Python tool error handling --- python/copilot/tools.py | 49 +++++++++++---------------- python/e2e/test_tools_unit.py | 16 ++++----- python/test_opentelemetry.py | 64 +++++++++++++++++++++++++++++++++++ 3 files changed, 92 insertions(+), 37 deletions(-) diff --git a/python/copilot/tools.py b/python/copilot/tools.py index e3e600992..f06d79351 100644 --- a/python/copilot/tools.py +++ b/python/copilot/tools.py @@ -115,35 +115,26 @@ def decorator(fn: Callable[..., Any]) -> Tool: schema = ptype.model_json_schema() async def wrapped_handler(invocation: ToolInvocation) -> ToolResult: - try: - # Build args based on detected signature - call_args = [] - if takes_params: - args = invocation["arguments"] or {} - if ptype is not None and _is_pydantic_model(ptype): - call_args.append(ptype.model_validate(args)) - else: - call_args.append(args) - if takes_invocation: - call_args.append(invocation) - - result = fn(*call_args) - - if inspect.isawaitable(result): - result = await result - - return _normalize_result(result) - - except 
Exception as exc: - # Don't expose detailed error information to the LLM for security reasons. - # The actual error is stored in the 'error' field for debugging. - return ToolResult( - textResultForLlm="Invoking this tool produced an error. " - "Detailed information is not available.", - resultType="failure", - error=str(exc), - toolTelemetry={}, - ) + # Build args based on detected signature. + # Exceptions are NOT caught here — they propagate to the SDK's + # _execute_tool_call, which records errors on the execute_tool + # span and builds a safe ToolResult for the LLM. + call_args = [] + if takes_params: + args = invocation["arguments"] or {} + if ptype is not None and _is_pydantic_model(ptype): + call_args.append(ptype.model_validate(args)) + else: + call_args.append(args) + if takes_invocation: + call_args.append(invocation) + + result = fn(*call_args) + + if inspect.isawaitable(result): + result = await result + + return _normalize_result(result) return Tool( name=tool_name, diff --git a/python/e2e/test_tools_unit.py b/python/e2e/test_tools_unit.py index 7481c986f..95415d20a 100644 --- a/python/e2e/test_tools_unit.py +++ b/python/e2e/test_tools_unit.py @@ -169,7 +169,9 @@ def test_tool(params: Params) -> str: assert received_params is not None assert received_params.value == "hello" - async def test_handler_error_is_hidden_from_llm(self): + async def test_handler_error_propagates(self): + """Exceptions from tool handlers propagate (caught by _execute_tool_call in client.py).""" + class Params(BaseModel): pass @@ -184,13 +186,11 @@ def failing_tool(params: Params, invocation: ToolInvocation) -> str: "arguments": {}, } - result = await failing_tool.handler(invocation) - - assert result["resultType"] == "failure" - assert "secret error message" not in result["textResultForLlm"] - assert "error" in result["textResultForLlm"].lower() - # But the actual error is stored internally - assert result["error"] == "secret error message" + # Exceptions propagate from 
define_tool handlers — the SDK's + # _execute_tool_call catches them, records telemetry, and builds + # a safe ToolResult that hides error details from the LLM. + with pytest.raises(ValueError, match="secret error message"): + await failing_tool.handler(invocation) async def test_function_style_api(self): class Params(BaseModel): diff --git a/python/test_opentelemetry.py b/python/test_opentelemetry.py index 592f0ad8c..be9d9982a 100644 --- a/python/test_opentelemetry.py +++ b/python/test_opentelemetry.py @@ -495,6 +495,70 @@ def test_records_error_on_span(self, _reset_otel_globals): assert s.attributes[ATTR_ERROR_TYPE] == "ValueError" assert s.status.status_code == trace.StatusCode.ERROR + def test_execute_tool_error_from_define_tool_handler(self, _reset_otel_globals): + """Verify that errors from @define_tool handlers propagate and get recorded on spans. + + This validates the fix where @define_tool no longer catches exceptions internally, + allowing _execute_tool_call to record error.type and ERROR status on the + execute_tool span — consistent with Node.js, .NET, and Go SDKs. 
+ """ + from copilot import ToolInvocation, define_tool + + exporter, reader, tp, mp = _get_exporter_and_reader(_reset_otel_globals) + telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) + + # Use zero-param handler signature to avoid Pydantic + # + from __future__ import annotations issue + @define_tool(description="A tool that always fails") + def failing_tool() -> str: + raise RuntimeError("deliberate failure") + + # Start an execute_tool span (as _execute_tool_call would) + span = telemetry.start_execute_tool_span( + tool_name="failing_tool", + tool_call_id="tc-fail", + description="A tool that always fails", + arguments={}, + ) + + # Simulate _execute_tool_call: invoke the handler, catch the error, record it + invocation: ToolInvocation = { + "session_id": "s1", + "tool_call_id": "tc-fail", + "tool_name": "failing_tool", + "arguments": {}, + } + operation_error = None + try: + import asyncio + + loop = asyncio.new_event_loop() + loop.run_until_complete(failing_tool.handler(invocation)) + loop.close() + except Exception as exc: + operation_error = exc + telemetry.record_error(span, exc) + + span.end() + + # The exception MUST have propagated (not swallowed by @define_tool) + assert operation_error is not None, "@define_tool must not catch handler exceptions" + assert isinstance(operation_error, RuntimeError) + + # The span MUST have ERROR status and error.type + s = exporter.get_finished_spans()[0] + assert s.status.status_code == trace.StatusCode.ERROR + assert s.attributes[ATTR_ERROR_TYPE] == "RuntimeError" + + # Operation duration metric should include error.type + telemetry.record_operation_duration( + 0.1, None, None, "github", None, None, operation_error, OP_EXECUTE_TOOL + ) + dps = _get_metric_data_points(reader, METRIC_OPERATION_DURATION) + assert len(dps) > 0 + error_dp = [dp for dp in dps if dp.attributes.get(ATTR_ERROR_TYPE) == "RuntimeError"] + assert len(error_dp) > 0, "duration metric includes error.type for failed tool" + # 
--------------------------------------------------------------------------- # Tests: Tool result recording From f8b9c73644bd99a619c8a44d8b1e11aecafd47f3 Mon Sep 17 00:00:00 2001 From: Stephen Toub Date: Tue, 3 Mar 2026 14:23:37 -0500 Subject: [PATCH 4/7] Fix session.start event by registering sessions with generated ID before doing RPC --- dotnet/src/Client.cs | 139 +++++++++++++++++++-------------- dotnet/src/Session.cs | 2 +- go/client.go | 72 +++++++++++------ nodejs/src/client.ts | 161 ++++++++++++++++++++++----------------- nodejs/src/session.ts | 2 +- python/copilot/client.py | 40 +++++++--- 6 files changed, 250 insertions(+), 166 deletions(-) diff --git a/dotnet/src/Client.cs b/dotnet/src/Client.cs index 26eed43fb..b13a34086 100644 --- a/dotnet/src/Client.cs +++ b/dotnet/src/Client.cs @@ -385,33 +385,11 @@ public async Task CreateSessionAsync(SessionConfig config, Cance config.Hooks.OnSessionEnd != null || config.Hooks.OnErrorOccurred != null); - var request = new CreateSessionRequest( - config.Model, - config.SessionId, - config.ClientName, - config.ReasoningEffort, - config.Tools?.Select(ToolDefinition.FromAIFunction).ToList(), - config.SystemMessage, - config.AvailableTools, - config.ExcludedTools, - config.Provider, - (bool?)true, - config.OnUserInputRequest != null ? true : null, - hasHooks ? true : null, - config.WorkingDirectory, - config.Streaming is true ? true : null, - config.McpServers, - "direct", - config.CustomAgents, - config.ConfigDir, - config.SkillDirectories, - config.DisabledSkills, - config.InfiniteSessions); - - var response = await InvokeRpcAsync( - connection.Rpc, "session.create", [request], cancellationToken); - - var session = new CopilotSession(response.SessionId, connection.Rpc, _telemetry, response.WorkspacePath, + var sessionId = config.SessionId ?? Guid.NewGuid().ToString(); + + // Create and register the session before issuing the RPC so that + // events emitted by the CLI (e.g. session.start) are not dropped. 
+ var session = new CopilotSession(sessionId, connection.Rpc, _telemetry, workspacePath: null, config.Model, config.Provider, config.SystemMessage, config.Tools, config.Streaming, config.AgentName, config.AgentDescription); session.RegisterTools(config.Tools ?? []); @@ -424,10 +402,42 @@ public async Task CreateSessionAsync(SessionConfig config, Cance { session.RegisterHooks(config.Hooks); } + _sessions[sessionId] = session; - if (!_sessions.TryAdd(response.SessionId, session)) + try { - throw new InvalidOperationException($"Session {response.SessionId} already exists"); + var request = new CreateSessionRequest( + config.Model, + sessionId, + config.ClientName, + config.ReasoningEffort, + config.Tools?.Select(ToolDefinition.FromAIFunction).ToList(), + config.SystemMessage, + config.AvailableTools, + config.ExcludedTools, + config.Provider, + (bool?)true, + config.OnUserInputRequest != null ? true : null, + hasHooks ? true : null, + config.WorkingDirectory, + config.Streaming is true ? true : null, + config.McpServers, + "direct", + config.CustomAgents, + config.ConfigDir, + config.SkillDirectories, + config.DisabledSkills, + config.InfiniteSessions); + + var response = await InvokeRpcAsync( + connection.Rpc, "session.create", [request], cancellationToken); + + session.WorkspacePath = response.WorkspacePath; + } + catch + { + _sessions.TryRemove(sessionId, out _); + throw; } return session; @@ -478,34 +488,9 @@ public async Task ResumeSessionAsync(string sessionId, ResumeSes config.Hooks.OnSessionEnd != null || config.Hooks.OnErrorOccurred != null); - var request = new ResumeSessionRequest( - sessionId, - config.ClientName, - config.Model, - config.ReasoningEffort, - config.Tools?.Select(ToolDefinition.FromAIFunction).ToList(), - config.SystemMessage, - config.AvailableTools, - config.ExcludedTools, - config.Provider, - (bool?)true, - config.OnUserInputRequest != null ? true : null, - hasHooks ? 
true : null, - config.WorkingDirectory, - config.ConfigDir, - config.DisableResume is true ? true : null, - config.Streaming is true ? true : null, - config.McpServers, - "direct", - config.CustomAgents, - config.SkillDirectories, - config.DisabledSkills, - config.InfiniteSessions); - - var response = await InvokeRpcAsync( - connection.Rpc, "session.resume", [request], cancellationToken); - - var session = new CopilotSession(response.SessionId, connection.Rpc, _telemetry, response.WorkspacePath, + // Create and register the session before issuing the RPC so that + // events emitted by the CLI (e.g. session.start) are not dropped. + var session = new CopilotSession(sessionId, connection.Rpc, _telemetry, workspacePath: null, config.Model, config.Provider, config.SystemMessage, config.Tools, config.Streaming, config.AgentName, config.AgentDescription); session.RegisterTools(config.Tools ?? []); @@ -518,9 +503,45 @@ public async Task ResumeSessionAsync(string sessionId, ResumeSes { session.RegisterHooks(config.Hooks); } + _sessions[sessionId] = session; + + try + { + var request = new ResumeSessionRequest( + sessionId, + config.ClientName, + config.Model, + config.ReasoningEffort, + config.Tools?.Select(ToolDefinition.FromAIFunction).ToList(), + config.SystemMessage, + config.AvailableTools, + config.ExcludedTools, + config.Provider, + (bool?)true, + config.OnUserInputRequest != null ? true : null, + hasHooks ? true : null, + config.WorkingDirectory, + config.ConfigDir, + config.DisableResume is true ? true : null, + config.Streaming is true ? 
true : null, + config.McpServers, + "direct", + config.CustomAgents, + config.SkillDirectories, + config.DisabledSkills, + config.InfiniteSessions); + + var response = await InvokeRpcAsync( + connection.Rpc, "session.resume", [request], cancellationToken); + + session.WorkspacePath = response.WorkspacePath; + } + catch + { + _sessions.TryRemove(sessionId, out _); + throw; + } - // Replace any existing session entry to ensure new config (like permission handler) is used - _sessions[response.SessionId] = session; return session; } diff --git a/dotnet/src/Session.cs b/dotnet/src/Session.cs index a26d5d33b..acd6d9e31 100644 --- a/dotnet/src/Session.cs +++ b/dotnet/src/Session.cs @@ -80,7 +80,7 @@ public partial class CopilotSession : IAsyncDisposable /// The path to the workspace containing checkpoints/, plan.md, and files/ subdirectories, /// or null if infinite sessions are disabled. /// - public string? WorkspacePath { get; } + public string? WorkspacePath { get; internal set; } internal string TelemetryProviderName => _turnTracker?.ProviderName ?? OpenTelemetryConsts.DefaultProviderName; internal string? 
TelemetryServerAddress => _turnTracker?.ServerAddress; diff --git a/go/client.go b/go/client.go index 09832a6bd..9658ad5ac 100644 --- a/go/client.go +++ b/go/client.go @@ -44,6 +44,7 @@ import ( "sync/atomic" "time" + "github.com/google/uuid" "go.opentelemetry.io/otel/trace" "github.com/github/copilot-sdk/go/internal/embeddedcli" @@ -490,7 +491,6 @@ func (c *Client) CreateSession(ctx context.Context, config *SessionConfig) (*Ses req := createSessionRequest{} req.Model = config.Model - req.SessionID = config.SessionID req.ClientName = config.ClientName req.ReasoningEffort = config.ReasoningEffort req.ConfigDir = config.ConfigDir @@ -523,17 +523,15 @@ func (c *Client) CreateSession(ctx context.Context, config *SessionConfig) (*Ses } req.RequestPermission = Bool(true) - result, err := c.client.Request("session.create", req) - if err != nil { - return nil, fmt.Errorf("failed to create session: %w", err) - } - - var response createSessionResponse - if err := json.Unmarshal(result, &response); err != nil { - return nil, fmt.Errorf("failed to unmarshal response: %w", err) + sessionID := config.SessionID + if sessionID == "" { + sessionID = uuid.New().String() } + req.SessionID = sessionID - session := newSession(response.SessionID, c.client, response.WorkspacePath) + // Create and register the session before issuing the RPC so that + // events emitted by the CLI (e.g. session.start) are not dropped. 
+ session := newSession(sessionID, c.client, "") session.telemetry = c.telemetry session.configureTelemetryContext(config.Model, config.Provider, config.SystemMessage, config.Tools, config.Streaming, config.AgentName, config.AgentDescription) @@ -547,9 +545,27 @@ func (c *Client) CreateSession(ctx context.Context, config *SessionConfig) (*Ses } c.sessionsMux.Lock() - c.sessions[response.SessionID] = session + c.sessions[sessionID] = session c.sessionsMux.Unlock() + result, err := c.client.Request("session.create", req) + if err != nil { + c.sessionsMux.Lock() + delete(c.sessions, sessionID) + c.sessionsMux.Unlock() + return nil, fmt.Errorf("failed to create session: %w", err) + } + + var response createSessionResponse + if err := json.Unmarshal(result, &response); err != nil { + c.sessionsMux.Lock() + delete(c.sessions, sessionID) + c.sessionsMux.Unlock() + return nil, fmt.Errorf("failed to unmarshal response: %w", err) + } + + session.workspacePath = response.WorkspacePath + return session, nil } @@ -624,17 +640,9 @@ func (c *Client) ResumeSessionWithOptions(ctx context.Context, sessionID string, req.InfiniteSessions = config.InfiniteSessions req.RequestPermission = Bool(true) - result, err := c.client.Request("session.resume", req) - if err != nil { - return nil, fmt.Errorf("failed to resume session: %w", err) - } - - var response resumeSessionResponse - if err := json.Unmarshal(result, &response); err != nil { - return nil, fmt.Errorf("failed to unmarshal response: %w", err) - } - - session := newSession(response.SessionID, c.client, response.WorkspacePath) + // Create and register the session before issuing the RPC so that + // events emitted by the CLI (e.g. session.start) are not dropped. 
+ session := newSession(sessionID, c.client, "") session.telemetry = c.telemetry session.configureTelemetryContext(config.Model, config.Provider, config.SystemMessage, config.Tools, config.Streaming, config.AgentName, config.AgentDescription) @@ -648,9 +656,27 @@ func (c *Client) ResumeSessionWithOptions(ctx context.Context, sessionID string, } c.sessionsMux.Lock() - c.sessions[response.SessionID] = session + c.sessions[sessionID] = session c.sessionsMux.Unlock() + result, err := c.client.Request("session.resume", req) + if err != nil { + c.sessionsMux.Lock() + delete(c.sessions, sessionID) + c.sessionsMux.Unlock() + return nil, fmt.Errorf("failed to resume session: %w", err) + } + + var response resumeSessionResponse + if err := json.Unmarshal(result, &response); err != nil { + c.sessionsMux.Lock() + delete(c.sessions, sessionID) + c.sessionsMux.Unlock() + return nil, fmt.Errorf("failed to unmarshal response: %w", err) + } + + session.workspacePath = response.WorkspacePath + return session, nil } diff --git a/nodejs/src/client.ts b/nodejs/src/client.ts index f81a2b438..104714099 100644 --- a/nodejs/src/client.ts +++ b/nodejs/src/client.ts @@ -12,6 +12,7 @@ */ import { spawn, type ChildProcess } from "node:child_process"; +import { randomUUID } from "node:crypto"; import { existsSync } from "node:fs"; import { Socket } from "node:net"; import { dirname, join } from "node:path"; @@ -529,43 +530,14 @@ export class CopilotClient { } } - const response = await this.connection!.sendRequest("session.create", { - model: config.model, - sessionId: config.sessionId, - clientName: config.clientName, - reasoningEffort: config.reasoningEffort, - tools: config.tools?.map((tool) => ({ - name: tool.name, - description: tool.description, - parameters: toJsonSchema(tool.parameters), - overridesBuiltInTool: tool.overridesBuiltInTool, - })), - systemMessage: config.systemMessage, - availableTools: config.availableTools, - excludedTools: config.excludedTools, - provider: 
config.provider, - requestPermission: true, - requestUserInput: !!config.onUserInputRequest, - hooks: !!(config.hooks && Object.values(config.hooks).some(Boolean)), - workingDirectory: config.workingDirectory, - streaming: config.streaming, - mcpServers: config.mcpServers, - envValueMode: "direct", - customAgents: config.customAgents, - configDir: config.configDir, - skillDirectories: config.skillDirectories, - disabledSkills: config.disabledSkills, - infiniteSessions: config.infiniteSessions, - }); + const sessionId = config.sessionId ?? randomUUID(); - const { sessionId, workspacePath } = response as { - sessionId: string; - workspacePath?: string; - }; + // Create and register the session before issuing the RPC so that + // events emitted by the CLI (e.g. session.start) are not dropped. const session = new CopilotSession( sessionId, this.connection!, - workspacePath, + undefined, this._telemetry, config.model, config.provider, @@ -585,6 +557,46 @@ export class CopilotClient { } this.sessions.set(sessionId, session); + try { + const response = await this.connection!.sendRequest("session.create", { + model: config.model, + sessionId, + clientName: config.clientName, + reasoningEffort: config.reasoningEffort, + tools: config.tools?.map((tool) => ({ + name: tool.name, + description: tool.description, + parameters: toJsonSchema(tool.parameters), + overridesBuiltInTool: tool.overridesBuiltInTool, + })), + systemMessage: config.systemMessage, + availableTools: config.availableTools, + excludedTools: config.excludedTools, + provider: config.provider, + requestPermission: true, + requestUserInput: !!config.onUserInputRequest, + hooks: !!(config.hooks && Object.values(config.hooks).some(Boolean)), + workingDirectory: config.workingDirectory, + streaming: config.streaming, + mcpServers: config.mcpServers, + envValueMode: "direct", + customAgents: config.customAgents, + configDir: config.configDir, + skillDirectories: config.skillDirectories, + disabledSkills: 
config.disabledSkills, + infiniteSessions: config.infiniteSessions, + }); + + const { workspacePath } = response as { + sessionId: string; + workspacePath?: string; + }; + session["_workspacePath"] = workspacePath; + } catch (e) { + this.sessions.delete(sessionId); + throw e; + } + return session; } @@ -627,44 +639,12 @@ export class CopilotClient { } } - const response = await this.connection!.sendRequest("session.resume", { - sessionId, - clientName: config.clientName, - model: config.model, - reasoningEffort: config.reasoningEffort, - systemMessage: config.systemMessage, - availableTools: config.availableTools, - excludedTools: config.excludedTools, - tools: config.tools?.map((tool) => ({ - name: tool.name, - description: tool.description, - parameters: toJsonSchema(tool.parameters), - overridesBuiltInTool: tool.overridesBuiltInTool, - })), - provider: config.provider, - requestPermission: true, - requestUserInput: !!config.onUserInputRequest, - hooks: !!(config.hooks && Object.values(config.hooks).some(Boolean)), - workingDirectory: config.workingDirectory, - configDir: config.configDir, - streaming: config.streaming, - mcpServers: config.mcpServers, - envValueMode: "direct", - customAgents: config.customAgents, - skillDirectories: config.skillDirectories, - disabledSkills: config.disabledSkills, - infiniteSessions: config.infiniteSessions, - disableResume: config.disableResume, - }); - - const { sessionId: resumedSessionId, workspacePath } = response as { - sessionId: string; - workspacePath?: string; - }; + // Create and register the session before issuing the RPC so that + // events emitted by the CLI (e.g. session.start) are not dropped. 
const session = new CopilotSession( - resumedSessionId, + sessionId, this.connection!, - workspacePath, + undefined, this._telemetry, config.model, config.provider, @@ -682,7 +662,48 @@ export class CopilotClient { if (config.hooks) { session.registerHooks(config.hooks); } - this.sessions.set(resumedSessionId, session); + this.sessions.set(sessionId, session); + + try { + const response = await this.connection!.sendRequest("session.resume", { + sessionId, + clientName: config.clientName, + model: config.model, + reasoningEffort: config.reasoningEffort, + systemMessage: config.systemMessage, + availableTools: config.availableTools, + excludedTools: config.excludedTools, + tools: config.tools?.map((tool) => ({ + name: tool.name, + description: tool.description, + parameters: toJsonSchema(tool.parameters), + overridesBuiltInTool: tool.overridesBuiltInTool, + })), + provider: config.provider, + requestPermission: true, + requestUserInput: !!config.onUserInputRequest, + hooks: !!(config.hooks && Object.values(config.hooks).some(Boolean)), + workingDirectory: config.workingDirectory, + configDir: config.configDir, + streaming: config.streaming, + mcpServers: config.mcpServers, + envValueMode: "direct", + customAgents: config.customAgents, + skillDirectories: config.skillDirectories, + disabledSkills: config.disabledSkills, + infiniteSessions: config.infiniteSessions, + disableResume: config.disableResume, + }); + + const { workspacePath } = response as { + sessionId: string; + workspacePath?: string; + }; + session["_workspacePath"] = workspacePath; + } catch (e) { + this.sessions.delete(sessionId); + throw e; + } return session; } diff --git a/nodejs/src/session.ts b/nodejs/src/session.ts index cbb917e7c..9f1231f7e 100644 --- a/nodejs/src/session.ts +++ b/nodejs/src/session.ts @@ -91,7 +91,7 @@ export class CopilotSession { constructor( public readonly sessionId: string, private connection: MessageConnection, - private readonly _workspacePath?: string, + private 
_workspacePath?: string, telemetry?: CopilotTelemetry, model?: string, provider?: ProviderConfig, diff --git a/python/copilot/client.py b/python/copilot/client.py index a366dcb68..61b782fae 100644 --- a/python/copilot/client.py +++ b/python/copilot/client.py @@ -20,6 +20,7 @@ import sys import threading import time +import uuid from collections.abc import Callable from dataclasses import asdict, is_dataclass from pathlib import Path @@ -489,8 +490,6 @@ async def create_session(self, config: SessionConfig) -> CopilotSession: payload: dict[str, Any] = {} if cfg.get("model"): payload["model"] = cfg["model"] - if cfg.get("session_id"): - payload["sessionId"] = cfg["session_id"] if cfg.get("client_name"): payload["clientName"] = cfg["client_name"] if cfg.get("reasoning_effort"): @@ -586,14 +585,16 @@ async def create_session(self, config: SessionConfig) -> CopilotSession: if not self._client: raise RuntimeError("Client not connected") - response = await self._client.request("session.create", payload) - session_id = response["sessionId"] - workspace_path = response.get("workspacePath") + session_id = cfg.get("session_id") or str(uuid.uuid4()) + payload["sessionId"] = session_id + + # Create and register the session before issuing the RPC so that + # events emitted by the CLI (e.g. session.start) are not dropped. 
session = CopilotSession( session_id, self._client, - workspace_path, + None, self._telemetry, model=cfg.get("model"), provider=cfg.get("provider"), @@ -612,6 +613,14 @@ async def create_session(self, config: SessionConfig) -> CopilotSession: with self._sessions_lock: self._sessions[session_id] = session + try: + response = await self._client.request("session.create", payload) + session._workspace_path = response.get("workspacePath") + except BaseException: + with self._sessions_lock: + self._sessions.pop(session_id, None) + raise + return session async def resume_session(self, session_id: str, config: ResumeSessionConfig) -> CopilotSession: @@ -782,14 +791,13 @@ async def resume_session(self, session_id: str, config: ResumeSessionConfig) -> if not self._client: raise RuntimeError("Client not connected") - response = await self._client.request("session.resume", payload) - resumed_session_id = response["sessionId"] - workspace_path = response.get("workspacePath") + # Create and register the session before issuing the RPC so that + # events emitted by the CLI (e.g. session.start) are not dropped. 
session = CopilotSession( - resumed_session_id, + session_id, self._client, - workspace_path, + None, self._telemetry, model=cfg.get("model"), provider=cfg.get("provider"), @@ -806,7 +814,15 @@ async def resume_session(self, session_id: str, config: ResumeSessionConfig) -> if hooks: session._register_hooks(hooks) with self._sessions_lock: - self._sessions[resumed_session_id] = session + self._sessions[session_id] = session + + try: + response = await self._client.request("session.resume", payload) + session._workspace_path = response.get("workspacePath") + except BaseException: + with self._sessions_lock: + self._sessions.pop(session_id, None) + raise return session From 70859903db45dcd79af65e1d9127187c54d99b1a Mon Sep 17 00:00:00 2001 From: Stephen Toub Date: Tue, 3 Mar 2026 17:46:15 -0500 Subject: [PATCH 5/7] Refactor to be purely event-driven in order to support direct Rpc use --- dotnet/src/CopilotTelemetry.cs | 857 ++++++++++----------------- dotnet/src/Session.cs | 75 +-- go/client.go | 10 +- go/copilot_telemetry.go | 106 ++-- go/copilot_telemetry_test.go | 54 +- go/session.go | 34 +- nodejs/src/copilot-telemetry.ts | 92 ++- nodejs/src/session.ts | 48 +- nodejs/test/opentelemetry.test.ts | 81 +-- python/copilot/_copilot_telemetry.py | 91 ++- python/copilot/session.py | 60 +- python/test_opentelemetry.py | 91 +-- 12 files changed, 659 insertions(+), 940 deletions(-) diff --git a/dotnet/src/CopilotTelemetry.cs b/dotnet/src/CopilotTelemetry.cs index e84b2671b..46e9b0a84 100644 --- a/dotnet/src/CopilotTelemetry.cs +++ b/dotnet/src/CopilotTelemetry.cs @@ -105,10 +105,6 @@ public CopilotTelemetry(TelemetryConfig? config) return null; } - string displayName = string.IsNullOrWhiteSpace(agentName) - ? 
OpenTelemetryConsts.GenAI.OperationNames.InvokeAgent - : $"{OpenTelemetryConsts.GenAI.OperationNames.InvokeAgent} {agentName}"; - ActivityTagsCollection tags = new() { { OpenTelemetryConsts.GenAI.Operation.Name, OpenTelemetryConsts.GenAI.OperationNames.InvokeAgent }, @@ -117,30 +113,20 @@ public CopilotTelemetry(TelemetryConfig? config) { OpenTelemetryConsts.GenAI.Conversation.Id, sessionId }, }; - if (!string.IsNullOrWhiteSpace(model)) - { - tags.Add(OpenTelemetryConsts.GenAI.Request.Model, model); - } - - if (!string.IsNullOrWhiteSpace(agentName)) - { - tags.Add(OpenTelemetryConsts.GenAI.Agent.Name, agentName); - } - - if (!string.IsNullOrWhiteSpace(agentDescription)) - { - tags.Add(OpenTelemetryConsts.GenAI.Agent.Description, agentDescription); - } + AddIfNotEmpty(tags, OpenTelemetryConsts.GenAI.Request.Model, model); + AddIfNotEmpty(tags, OpenTelemetryConsts.GenAI.Agent.Name, agentName); + AddIfNotEmpty(tags, OpenTelemetryConsts.GenAI.Agent.Description, agentDescription); if (!string.IsNullOrWhiteSpace(serverAddress)) { tags.Add(OpenTelemetryConsts.Server.Address, serverAddress); - if (serverPort is int port) - { - tags.Add(OpenTelemetryConsts.Server.Port, port); - } + AddIfNotNull(tags, OpenTelemetryConsts.Server.Port, serverPort); } + string displayName = string.IsNullOrWhiteSpace(agentName) + ? OpenTelemetryConsts.GenAI.OperationNames.InvokeAgent + : $"{OpenTelemetryConsts.GenAI.OperationNames.InvokeAgent} {agentName}"; + return ActivitySource.StartActivity(displayName, ActivityKind.Client, parentContext, tags); } @@ -158,35 +144,25 @@ public CopilotTelemetry(TelemetryConfig? config) return null; } - string displayName = string.IsNullOrWhiteSpace(model) - ? 
OpenTelemetryConsts.GenAI.OperationNames.Chat - : $"{OpenTelemetryConsts.GenAI.OperationNames.Chat} {model}"; - ActivityTagsCollection tags = new() { { OpenTelemetryConsts.GenAI.Operation.Name, OpenTelemetryConsts.GenAI.OperationNames.Chat }, { OpenTelemetryConsts.GenAI.Provider.Name, providerName }, }; - if (!string.IsNullOrWhiteSpace(model)) - { - tags.Add(OpenTelemetryConsts.GenAI.Request.Model, model); - } - - if (!string.IsNullOrWhiteSpace(conversationId)) - { - tags.Add(OpenTelemetryConsts.GenAI.Conversation.Id, conversationId); - } + AddIfNotEmpty(tags, OpenTelemetryConsts.GenAI.Request.Model, model); + AddIfNotEmpty(tags, OpenTelemetryConsts.GenAI.Conversation.Id, conversationId); if (!string.IsNullOrWhiteSpace(serverAddress)) { tags.Add(OpenTelemetryConsts.Server.Address, serverAddress); - if (serverPort is int port) - { - tags.Add(OpenTelemetryConsts.Server.Port, port); - } + AddIfNotNull(tags, OpenTelemetryConsts.Server.Port, serverPort); } + string displayName = string.IsNullOrWhiteSpace(model) + ? OpenTelemetryConsts.GenAI.OperationNames.Chat + : $"{OpenTelemetryConsts.GenAI.OperationNames.Chat} {model}"; + return ActivitySource.StartActivity(displayName, ActivityKind.Client, parentContext, tags); } @@ -198,8 +174,6 @@ public CopilotTelemetry(TelemetryConfig? config) return null; } - string displayName = $"{OpenTelemetryConsts.GenAI.OperationNames.ExecuteTool} {toolName}"; - ActivityTagsCollection tags = new() { { OpenTelemetryConsts.GenAI.Operation.Name, OpenTelemetryConsts.GenAI.OperationNames.ExecuteTool }, @@ -208,16 +182,15 @@ public CopilotTelemetry(TelemetryConfig? 
config) { OpenTelemetryConsts.GenAI.Tool.Type, "function" }, }; - if (!string.IsNullOrWhiteSpace(description)) - { - tags.Add(OpenTelemetryConsts.GenAI.Tool.Description, description); - } + AddIfNotEmpty(tags, OpenTelemetryConsts.GenAI.Tool.Description, description); - if (EnableSensitiveData && arguments is not null) + if (arguments is not null && EnableSensitiveData) { tags.Add(OpenTelemetryConsts.GenAI.Tool.CallArguments, SerializeTagValue(arguments)); } + string displayName = $"{OpenTelemetryConsts.GenAI.OperationNames.ExecuteTool} {toolName}"; + return ActivitySource.StartActivity(displayName, ActivityKind.Internal, parentContext, tags); } @@ -233,30 +206,21 @@ public void RecordTokenUsageMetrics( Exception? error, string operationName) { - if (!TokenUsageHistogram.Enabled) - { - return; - } - - TagList baseTags = CreateMetricTags(operationName, requestModel, responseModel, providerName, serverAddress, serverPort); - - if (error is not null) + if (TokenUsageHistogram.Enabled) { - baseTags.Add(OpenTelemetryConsts.Error.Type, error.GetType().Name); - } - - if (inputTokens is int inputCount) - { - TagList tags = baseTags; - tags.Add(OpenTelemetryConsts.GenAI.Token.Type, OpenTelemetryConsts.TokenTypeInput); - TokenUsageHistogram.Record(inputCount, tags); - } + if (inputTokens is int inputCount) + { + TagList tags = CreateMetricTags(operationName, requestModel, responseModel, providerName, serverAddress, serverPort, error); + tags.Add(OpenTelemetryConsts.GenAI.Token.Type, OpenTelemetryConsts.TokenTypeInput); + TokenUsageHistogram.Record(inputCount, tags); + } - if (outputTokens is int outputCount) - { - TagList tags = baseTags; - tags.Add(OpenTelemetryConsts.GenAI.Token.Type, OpenTelemetryConsts.TokenTypeOutput); - TokenUsageHistogram.Record(outputCount, tags); + if (outputTokens is int outputCount) + { + TagList tags = CreateMetricTags(operationName, requestModel, responseModel, providerName, serverAddress, serverPort, error); + 
tags.Add(OpenTelemetryConsts.GenAI.Token.Type, OpenTelemetryConsts.TokenTypeOutput); + TokenUsageHistogram.Record(outputCount, tags); + } } } @@ -273,14 +237,9 @@ public void RecordOperationDuration( { if (OperationDurationHistogram.Enabled) { - TagList tags = CreateMetricTags(operationName, requestModel, responseModel, providerName, serverAddress, serverPort); - - if (error is not null) - { - tags.Add(OpenTelemetryConsts.Error.Type, error.GetType().Name); - } - - OperationDurationHistogram.Record(durationSeconds, tags); + OperationDurationHistogram.Record( + durationSeconds, + CreateMetricTags(operationName, requestModel, responseModel, providerName, serverAddress, serverPort, error)); } } @@ -330,8 +289,8 @@ public void RecordTimePerOutputChunk( public void SetExecuteToolResult(Activity? activity, object? result) { - if (EnableSensitiveData && - result is not null && + if (result is not null && + EnableSensitiveData && activity is { IsAllDataRequested: true }) { activity.SetTag(OpenTelemetryConsts.GenAI.Tool.CallResult, SerializeTagValue(result)); @@ -373,13 +332,46 @@ private static (string? Address, int? Port) ParseServerAddress(string? baseUrl) return (null, null); } + private static void AddIfNotEmpty(ActivityTagsCollection tags, string key, string? value) + { + if (!string.IsNullOrWhiteSpace(value)) + { + tags.Add(key, value); + } + } + + private static void AddIfNotNull(ActivityTagsCollection tags, string key, int? value) + { + if (value is int v) + { + tags.Add(key, v); + } + } + + private static void AddAsInt64IfNotZero(ActivityTagsCollection tags, string key, double value) + { + if (value != 0) + { + tags.Add(key, (long)value); + } + } + + private static void AddAsInt64IfNotNull(ActivityTagsCollection tags, string key, double? value) + { + if (value is double d) + { + tags.Add(key, (long)d); + } + } + private static TagList CreateMetricTags( string operationName, string? requestModel, string? responseModel, string providerName, string? 
serverAddress, - int? serverPort) + int? serverPort, + Exception? error = null) { TagList tags = default; tags.Add(OpenTelemetryConsts.GenAI.Operation.Name, operationName); @@ -404,6 +396,11 @@ private static TagList CreateMetricTags( } } + if (error is not null) + { + tags.Add(OpenTelemetryConsts.Error.Type, error.GetType().Name); + } + return tags; } @@ -547,12 +544,9 @@ internal ActivityContext GetToolCallParentContext(string toolCallId) { lock (_lock) { - if (_pendingToolParents is not null && _pendingToolParents.Remove(toolCallId, out var ctx)) - { - return ctx; - } - - return _agentActivity?.Context ?? default; + return _pendingToolParents is not null && _pendingToolParents.Remove(toolCallId, out var ctx) + ? ctx + : _agentActivity?.Context ?? default; } } @@ -567,63 +561,44 @@ internal void CompleteOnDispose() if (_agentActivity is not null) { var disposeError = new ObjectDisposedException("Session disposed while agent turn was in progress"); - CompleteChatTurnLocked(disposeError); - CompleteAgentTurnLocked(disposeError); + CompleteChatTurn(disposeError); + CompleteAgentTurn(disposeError); } } } /// - /// Called at the start of each SendAsync. Starts a new invoke_agent span - /// if one isn't already active, and records a user message event. + /// Processes a dispatched session event, enriching the current span and + /// completing the turn on idle/error events. /// - internal void BeginSend(string? prompt) + internal void ProcessEvent(SessionEvent sessionEvent) { lock (_lock) { - if (_agentActivity is null) - { - _agentActivity = _telemetry.StartInvokeAgentActivity( - _sessionId, - _requestModel, - ProviderName, - ServerAddress, - ServerPort, - _agentName, - _agentDescription); - _agentTimestamp = Stopwatch.GetTimestamp(); - _agentInputMessages = []; - } - - // Agent-level input = what the caller sent (all user prompts). 
- if (_agentInputMessages is not null && !string.IsNullOrWhiteSpace(prompt)) + // A UserMessageEvent starts a new invoke_agent span (if not already + // active) and records the user prompt. + if (sessionEvent is UserMessageEvent userMsg) { - _agentInputMessages.Add(new("user", [new("text", Content: prompt)])); - } + var prompt = userMsg.Data?.Content; + EnsureAgentSpan(); - // Record the user prompt as an input message on the first chat - // turn's message list when that turn starts. Stash it here for now. - _pendingUserPrompt = prompt; - } - } + if (!string.IsNullOrWhiteSpace(prompt)) + { + var msg = new OtelMsg("user", [new("text", Content: prompt)]); + _agentInputMessages?.Add(msg); + (_inputMessages ??= []).Add(msg); + } - private string? _pendingUserPrompt; + return; + } - /// - /// Processes a dispatched session event, enriching the current span and - /// completing the turn on idle/error events. - /// - internal void ProcessEvent(SessionEvent sessionEvent) - { - lock (_lock) - { // Route subagent events by ParentToolCallId. 
var parentToolCallId = GetParentToolCallId(sessionEvent); if (!string.IsNullOrEmpty(parentToolCallId)) { - if (_activeSubagents?.TryGetValue(parentToolCallId, out var subagentState) == true) + if (_activeSubagents?.TryGetValue(parentToolCallId, out var subagentState) is true) { - ProcessSubagentEventLocked(subagentState, sessionEvent); + ProcessSubagentEvent(subagentState, sessionEvent); } return; @@ -633,19 +608,21 @@ internal void ProcessEvent(SessionEvent sessionEvent) switch (sessionEvent) { case SubagentStartedEvent started: - BeginSubagentLocked(started); + BeginSubagent(started); return; + case SubagentCompletedEvent completed when completed.Data is not null: - CompleteSubagentLocked(completed.Data.ToolCallId, error: null); + CompleteSubagent(completed.Data.ToolCallId, error: null); return; + case SubagentFailedEvent failed when failed.Data is not null: - CompleteSubagentLocked(failed.Data.ToolCallId, + CompleteSubagent(failed.Data.ToolCallId, new InvalidOperationException($"Subagent '{failed.Data.AgentName}' failed: {failed.Data.Error}")); return; } // Record chunk timing for main agent events during a turn. - RecordOutputChunkMetricLocked(); + RecordOutputChunkMetric(); // Per-turn event processing (writes to the chat child span). 
if (_turnActivity is not null) @@ -675,6 +652,7 @@ internal void ProcessEvent(SessionEvent sessionEvent) case AssistantUsageEvent usageEvent: _responseModel = usageEvent.Data.Model; + if (!string.IsNullOrWhiteSpace(usageEvent.Data.ApiCallId)) { _responseId = usageEvent.Data.ApiCallId; @@ -683,36 +661,28 @@ internal void ProcessEvent(SessionEvent sessionEvent) { _responseId = usageEvent.Data.ProviderCallId; } - if (usageEvent.Data.InputTokens is double inTok) - { - _inputTokens += (int)inTok; - } - if (usageEvent.Data.OutputTokens is double outTok) - { - _outputTokens += (int)outTok; - } - if (usageEvent.Data.CacheReadTokens is double cacheRead) - { - _cacheReadTokens += (int)cacheRead; - } - if (usageEvent.Data.CacheWriteTokens is double cacheWrite) - { - _cacheCreationTokens += (int)cacheWrite; - } + + _inputTokens += usageEvent.Data.InputTokens is double inTok ? (int)inTok : 0; + _outputTokens += usageEvent.Data.OutputTokens is double outTok ? (int)outTok : 0; + _cacheReadTokens += usageEvent.Data.CacheReadTokens is double cacheRead ? (int)cacheRead : 0; + _cacheCreationTokens += usageEvent.Data.CacheWriteTokens is double cacheWrite ? (int)cacheWrite : 0; // Copilot-specific vendor attributes if (usageEvent.Data.Cost is double cost) { _turnCost = (_turnCost ?? 0) + cost; } + if (usageEvent.Data.Duration is double dur) { _turnServerDuration = (_turnServerDuration ?? 0) + dur; } + if (!string.IsNullOrWhiteSpace(usageEvent.Data.Initiator)) { _turnInitiator = usageEvent.Data.Initiator; } + if (usageEvent.Data.CopilotUsage is { } copilotUsage) { _turnAiu = (_turnAiu ?? 
0) + copilotUsage.TotalNanoAiu; @@ -723,54 +693,45 @@ internal void ProcessEvent(SessionEvent sessionEvent) _responseModel = modelChangeEvent.Data.NewModel; break; - case ToolExecutionStartEvent toolStartEvent: + case ToolExecutionStartEvent { Data: { } startData }: { - if (toolStartEvent.Data is { } startData) + var isServerTool = startData.McpServerName is not null; + if (isServerTool && startData.ToolCallId is not null) { - var isServerTool = startData.McpServerName is not null; - if (isServerTool && startData.ToolCallId is not null) - { - _serverToolCallIds ??= []; - _serverToolCallIds[startData.ToolCallId] = startData.McpServerName!; - } + (_serverToolCallIds ??= [])[startData.ToolCallId] = startData.McpServerName!; + } - _outputMessages?.Add(new("assistant", - [ - new(isServerTool ? "server_tool_call" : "tool_call", + _outputMessages?.Add(new("assistant", + [ + new(isServerTool ? "server_tool_call" : "tool_call", Id: startData.ToolCallId, Name: startData.ToolName, Arguments: startData.Arguments, McpServerName: startData.McpServerName) - ])); + ])); - // For main agent tool calls, parent is the root invoke_agent. - if (_agentActivity is not null && startData.ToolCallId is not null) - { - _pendingToolParents ??= []; - _pendingToolParents[startData.ToolCallId] = _agentActivity.Context; - } + // For main agent tool calls, parent is the root invoke_agent. + if (_agentActivity is not null && startData.ToolCallId is not null) + { + _pendingToolParents ??= []; + _pendingToolParents[startData.ToolCallId] = _agentActivity.Context; } break; } - case ToolExecutionCompleteEvent toolCompleteEvent: + case ToolExecutionCompleteEvent { Data: { } toolData }: { - if (toolCompleteEvent.Data is { } toolData) - { - var resultContent = toolData.Result?.Content ?? toolData.Error?.Message; - string? 
serverName = null; - var isServerTool = _serverToolCallIds is not null - && _serverToolCallIds.Remove(toolData.ToolCallId, out serverName); - - _inputMessages?.Add(new("tool", - [ - new(isServerTool ? "server_tool_call_response" : "tool_call_response", + string? serverName = null; + var isServerTool = _serverToolCallIds is not null && _serverToolCallIds.Remove(toolData.ToolCallId, out serverName); + + _inputMessages?.Add(new("tool", + [ + new(isServerTool ? "server_tool_call_response" : "tool_call_response", Id: toolData.ToolCallId, - Response: resultContent, + Response: toolData.Result?.Content ?? toolData.Error?.Message, McpServerName: serverName) - ])); - } + ])); break; } @@ -788,38 +749,19 @@ internal void ProcessEvent(SessionEvent sessionEvent) case SessionTruncationEvent { Data: { } trunc }: { ActivityTagsCollection truncTags = []; - if (trunc.TokenLimit != 0) - { - truncTags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.TokenLimit, (long)trunc.TokenLimit); - } - if (trunc.PreTruncationTokensInMessages != 0) - { - truncTags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.PreTokens, (long)trunc.PreTruncationTokensInMessages); - } - if (trunc.PostTruncationTokensInMessages != 0) - { - truncTags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.PostTokens, (long)trunc.PostTruncationTokensInMessages); - } - if (trunc.PreTruncationMessagesLength != 0) - { - truncTags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.PreMessages, (long)trunc.PreTruncationMessagesLength); - } - if (trunc.PostTruncationMessagesLength != 0) - { - truncTags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.PostMessages, (long)trunc.PostTruncationMessagesLength); - } - if (trunc.TokensRemovedDuringTruncation != 0) - { - truncTags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.TokensRemoved, (long)trunc.TokensRemovedDuringTruncation); - } - if (trunc.MessagesRemovedDuringTruncation != 0) - { - truncTags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.MessagesRemoved, (long)trunc.MessagesRemovedDuringTruncation); - } + + 
AddAsInt64IfNotZero(truncTags, OpenTelemetryConsts.GenAI.CopilotEvent.TokenLimit, trunc.TokenLimit); + AddAsInt64IfNotZero(truncTags, OpenTelemetryConsts.GenAI.CopilotEvent.PreTokens, trunc.PreTruncationTokensInMessages); + AddAsInt64IfNotZero(truncTags, OpenTelemetryConsts.GenAI.CopilotEvent.PostTokens, trunc.PostTruncationTokensInMessages); + AddAsInt64IfNotZero(truncTags, OpenTelemetryConsts.GenAI.CopilotEvent.PreMessages, trunc.PreTruncationMessagesLength); + AddAsInt64IfNotZero(truncTags, OpenTelemetryConsts.GenAI.CopilotEvent.PostMessages, trunc.PostTruncationMessagesLength); + AddAsInt64IfNotZero(truncTags, OpenTelemetryConsts.GenAI.CopilotEvent.TokensRemoved, trunc.TokensRemovedDuringTruncation); + AddAsInt64IfNotZero(truncTags, OpenTelemetryConsts.GenAI.CopilotEvent.MessagesRemoved, trunc.MessagesRemovedDuringTruncation); if (trunc.PerformedBy is not null) { truncTags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.PerformedBy, trunc.PerformedBy); } + target.AddEvent(new(OpenTelemetryConsts.GenAI.CopilotEvent.SessionTruncation, tags: truncTags)); break; } @@ -831,34 +773,19 @@ internal void ProcessEvent(SessionEvent sessionEvent) case SessionCompactionCompleteEvent { Data: { } compaction }: { ActivityTagsCollection tags = new() - { - { OpenTelemetryConsts.GenAI.CopilotEvent.Success, compaction.Success }, - }; - - if (_telemetry.EnableSensitiveData && compaction.Error is not null) - { - tags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.Message, compaction.Error); - } - - if (compaction.PreCompactionTokens is { } preTokens) - { - tags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.PreTokens, (long)preTokens); - } - - if (compaction.PostCompactionTokens is { } postTokens) { - tags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.PostTokens, (long)postTokens); - } + { OpenTelemetryConsts.GenAI.CopilotEvent.Success, compaction.Success }, + }; - if (compaction.TokensRemoved is { } tokensRemoved) + if (compaction.Error is not null && _telemetry.EnableSensitiveData) { - 
tags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.TokensRemoved, (long)tokensRemoved); + tags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.Message, compaction.Error); } - if (compaction.MessagesRemoved is { } messagesRemoved) - { - tags.Add(OpenTelemetryConsts.GenAI.CopilotEvent.MessagesRemoved, (long)messagesRemoved); - } + AddAsInt64IfNotNull(tags, OpenTelemetryConsts.GenAI.CopilotEvent.PreTokens, compaction.PreCompactionTokens); + AddAsInt64IfNotNull(tags, OpenTelemetryConsts.GenAI.CopilotEvent.PostTokens, compaction.PostCompactionTokens); + AddAsInt64IfNotNull(tags, OpenTelemetryConsts.GenAI.CopilotEvent.TokensRemoved, compaction.TokensRemoved); + AddAsInt64IfNotNull(tags, OpenTelemetryConsts.GenAI.CopilotEvent.MessagesRemoved, compaction.MessagesRemoved); target.AddEvent(new(OpenTelemetryConsts.GenAI.CopilotEvent.SessionCompactionComplete, tags: tags)); break; @@ -867,10 +794,10 @@ internal void ProcessEvent(SessionEvent sessionEvent) case SkillInvokedEvent { Data: { } skill }: { ActivityTagsCollection tags = new() - { - { OpenTelemetryConsts.GenAI.CopilotEvent.SkillName, skill.Name }, - { OpenTelemetryConsts.GenAI.CopilotEvent.SkillPath, skill.Path }, - }; + { + { OpenTelemetryConsts.GenAI.CopilotEvent.SkillName, skill.Name }, + { OpenTelemetryConsts.GenAI.CopilotEvent.SkillPath, skill.Path }, + }; if (skill.PluginName is not null) { @@ -898,7 +825,7 @@ internal void ProcessEvent(SessionEvent sessionEvent) switch (sessionEvent) { case AssistantTurnStartEvent turnStartEvent: - BeginChatTurnLocked(); + BeginChatTurn(); if (turnStartEvent.Data is { } turnStartData) { _turnId = turnStartData.TurnId; @@ -907,32 +834,44 @@ internal void ProcessEvent(SessionEvent sessionEvent) break; case AssistantTurnEndEvent: - CompleteChatTurnLocked(error: null); + CompleteChatTurn(error: null); break; case SessionIdleEvent: - CompleteChatTurnLocked(error: null); - CompleteAgentTurnLocked(error: null); + CompleteChatTurn(error: null); + CompleteAgentTurn(error: null); break; case 
SessionErrorEvent errorEvent: var ex = new InvalidOperationException($"Session error: {errorEvent.Data?.Message ?? "unknown error"}"); - CompleteChatTurnLocked(ex); - CompleteAgentTurnLocked(ex); + CompleteChatTurn(ex); + CompleteAgentTurn(ex); break; } } } /// - /// Completes the current turn with an error (e.g. from a failed SendAsync). + /// Ensures the invoke_agent span exists, creating it on demand if needed. + /// This is called from both the user.message handler and BeginChatTurn + /// so that RPC-initiated turns (no user.message) still get an agent span. + /// Caller must hold . /// - internal void CompleteTurnWithError(Exception error) + private void EnsureAgentSpan() { - lock (_lock) + Debug.Assert(Monitor.IsEntered(_lock)); + if (_agentActivity is null) { - CompleteChatTurnLocked(error); - CompleteAgentTurnLocked(error); + _agentActivity = _telemetry.StartInvokeAgentActivity( + _sessionId, + _requestModel, + ProviderName, + ServerAddress, + ServerPort, + _agentName, + _agentDescription); + _agentTimestamp = Stopwatch.GetTimestamp(); + _agentInputMessages = []; } } @@ -940,12 +879,16 @@ internal void CompleteTurnWithError(Exception error) /// Starts a new chat child span for an LLM turn. /// Caller must hold . /// - private void BeginChatTurnLocked() + private void BeginChatTurn() { Debug.Assert(Monitor.IsEntered(_lock)); // If there's already an active turn, complete it first (shouldn't normally happen). - CompleteChatTurnLocked(error: null); + CompleteChatTurn(error: null); + + // Ensure the parent agent span exists — covers RPC-initiated turns + // where no user.message event preceded the assistant.turn_start. 
+ EnsureAgentSpan(); _responseModel = null; _responseId = null; @@ -955,7 +898,7 @@ private void BeginChatTurnLocked() _cacheCreationTokens = 0; _firstOutputChunkRecorded = false; _lastOutputChunkElapsed = TimeSpan.Zero; - _inputMessages = []; + _inputMessages ??= []; _outputMessages = []; _turnCost = null; _turnServerDuration = null; @@ -964,13 +907,6 @@ private void BeginChatTurnLocked() _turnId = null; _turnInteractionId = null; - // Add stashed user prompt as input message for the first turn. - if (!string.IsNullOrWhiteSpace(_pendingUserPrompt)) - { - _inputMessages.Add(new("user", [new("text", Content: _pendingUserPrompt)])); - _pendingUserPrompt = null; - } - var parentContext = _agentActivity?.Context ?? default; _turnActivity = _telemetry.StartChatActivity( _requestModel, @@ -987,48 +923,31 @@ private void BeginChatTurnLocked() /// Completes the current chat child span with per-turn attributes and metrics. /// Caller must hold . /// - private void CompleteChatTurnLocked(Exception? error) + private void CompleteChatTurn(Exception? 
error) { Debug.Assert(Monitor.IsEntered(_lock)); - var activity = _turnActivity; + var activity = GetAndReset(ref _turnActivity); if (activity is null) { return; } - var timestamp = _turnTimestamp; - var inputMessages = _inputMessages; - var outputMessages = _outputMessages; - var responseModel = _responseModel; - var responseId = _responseId; - var inputTokens = _inputTokens; - var outputTokens = _outputTokens; - var cacheReadTokens = _cacheReadTokens; - var cacheCreationTokens = _cacheCreationTokens; - var turnCost = _turnCost; - var turnServerDuration = _turnServerDuration; - var turnInitiator = _turnInitiator; - var turnAiu = _turnAiu; - var turnId = _turnId; - var turnInteractionId = _turnInteractionId; - - _turnActivity = null; - _turnTimestamp = 0; - _inputMessages = null; - _outputMessages = null; - _responseModel = null; - _responseId = null; - _inputTokens = 0; - _outputTokens = 0; - _cacheReadTokens = 0; - _cacheCreationTokens = 0; - _turnCost = null; - _turnServerDuration = null; - _turnInitiator = null; - _turnAiu = null; - _turnId = null; - _turnInteractionId = null; + var timestamp = GetAndReset(ref _turnTimestamp); + var inputMessages = GetAndReset(ref _inputMessages); + var outputMessages = GetAndReset(ref _outputMessages); + var responseModel = GetAndReset(ref _responseModel); + var responseId = GetAndReset(ref _responseId); + var inputTokens = GetAndReset(ref _inputTokens); + var outputTokens = GetAndReset(ref _outputTokens); + var cacheReadTokens = GetAndReset(ref _cacheReadTokens); + var cacheCreationTokens = GetAndReset(ref _cacheCreationTokens); + var turnCost = GetAndReset(ref _turnCost); + var turnServerDuration = GetAndReset(ref _turnServerDuration); + var turnInitiator = GetAndReset(ref _turnInitiator); + var turnAiu = GetAndReset(ref _turnAiu); + var turnId = GetAndReset(ref _turnId); + var turnInteractionId = GetAndReset(ref _turnInteractionId); if (error is not null) { @@ -1074,72 +993,25 @@ private void 
CompleteChatTurnLocked(Exception? error) // Set usage-related span attributes for this LLM turn if (activity.IsAllDataRequested) { - if (responseModel is not null) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Response.Model, responseModel); - } - if (responseId is not null) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Response.Id, responseId); - } - if (inputTokens > 0) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Usage.InputTokens, inputTokens); - } - if (outputTokens > 0) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Usage.OutputTokens, outputTokens); - } - if (cacheReadTokens > 0) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Usage.CacheReadInputTokens, cacheReadTokens); - } - if (cacheCreationTokens > 0) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Usage.CacheCreationInputTokens, cacheCreationTokens); - } - - // Copilot vendor-prefixed attributes on chat spans - if (turnCost is not null) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Copilot.Cost, turnCost.Value); - } - if (turnServerDuration is not null) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Copilot.ServerDuration, turnServerDuration.Value); - } - if (turnInitiator is not null) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Copilot.Initiator, turnInitiator); - } - if (turnAiu is not null) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Copilot.Aiu, turnAiu.Value); - } - if (turnId is not null) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Copilot.TurnId, turnId); - } - if (turnInteractionId is not null) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Copilot.InteractionId, turnInteractionId); - } + SetTagIfNotEmpty(activity, OpenTelemetryConsts.GenAI.Response.Model, responseModel); + SetTagIfNotEmpty(activity, OpenTelemetryConsts.GenAI.Response.Id, responseId); + SetTagIfPositive(activity, OpenTelemetryConsts.GenAI.Usage.InputTokens, inputTokens); + SetTagIfPositive(activity, OpenTelemetryConsts.GenAI.Usage.OutputTokens, outputTokens); + SetTagIfPositive(activity, 
OpenTelemetryConsts.GenAI.Usage.CacheReadInputTokens, cacheReadTokens); + SetTagIfPositive(activity, OpenTelemetryConsts.GenAI.Usage.CacheCreationInputTokens, cacheCreationTokens); + SetTagIfNotNull(activity, OpenTelemetryConsts.GenAI.Copilot.Cost, turnCost); + SetTagIfNotNull(activity, OpenTelemetryConsts.GenAI.Copilot.ServerDuration, turnServerDuration); + SetTagIfNotEmpty(activity, OpenTelemetryConsts.GenAI.Copilot.Initiator, turnInitiator); + SetTagIfNotNull(activity, OpenTelemetryConsts.GenAI.Copilot.Aiu, turnAiu); + SetTagIfNotEmpty(activity, OpenTelemetryConsts.GenAI.Copilot.TurnId, turnId); + SetTagIfNotEmpty(activity, OpenTelemetryConsts.GenAI.Copilot.InteractionId, turnInteractionId); } // Set input/output message content as span attributes (sensitive) if (_telemetry.EnableSensitiveData) { - var inputMessagesJson = BuildMessagesJson(inputMessages); - if (!string.IsNullOrWhiteSpace(inputMessagesJson)) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Input.Messages, inputMessagesJson); - } - - var outputMessagesJson = BuildMessagesJson(outputMessages, finishReason: finishReason); - if (!string.IsNullOrWhiteSpace(outputMessagesJson)) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Output.Messages, outputMessagesJson); - } + SetTagIfNotEmpty(activity, OpenTelemetryConsts.GenAI.Input.Messages, BuildMessagesJson(inputMessages)); + SetTagIfNotEmpty(activity, OpenTelemetryConsts.GenAI.Output.Messages, BuildMessagesJson(outputMessages, finishReason: finishReason)); } // Token usage metrics (per-turn) @@ -1177,32 +1049,26 @@ private void CompleteChatTurnLocked(Exception? error) /// Completes the invoke_agent span and records overall operation duration. /// Caller must hold . /// - private void CompleteAgentTurnLocked(Exception? error) + private void CompleteAgentTurn(Exception? 
error) { Debug.Assert(Monitor.IsEntered(_lock)); - var activity = _agentActivity; + var activity = GetAndReset(ref _agentActivity); if (activity is null) { return; } - var timestamp = _agentTimestamp; - - _agentActivity = null; - _agentTimestamp = 0; - _pendingUserPrompt = null; - var agentInputMessages = _agentInputMessages; - var agentOutputMessages = _agentOutputMessages; - _agentInputMessages = null; - _agentOutputMessages = null; + var timestamp = GetAndReset(ref _agentTimestamp); + var agentInputMessages = GetAndReset(ref _agentInputMessages); + var agentOutputMessages = GetAndReset(ref _agentOutputMessages); // Complete any remaining subagents before closing the parent. if (_activeSubagents is { Count: > 0 }) { - foreach (var key in _activeSubagents.Keys.ToList()) + foreach (var activeSubagent in _activeSubagents) { - CompleteSubagentLocked(key, error); + CompleteSubagent(activeSubagent.Key, error); } } @@ -1219,86 +1085,35 @@ private void CompleteAgentTurnLocked(Exception? error) activity.SetTag(OpenTelemetryConsts.GenAI.Response.FinishReasons, new[] { finishReason }); // Set accumulated usage across all chat turns on the invoke_agent span. 
- var agentResponseModel = _agentResponseModel; - var agentResponseId = _agentResponseId; - var agentTotalInputTokens = _agentTotalInputTokens; - var agentTotalOutputTokens = _agentTotalOutputTokens; - var agentTotalCacheReadTokens = _agentTotalCacheReadTokens; - var agentTotalCacheCreationTokens = _agentTotalCacheCreationTokens; - var agentTotalCost = _agentTotalCost; - var agentTotalAiu = _agentTotalAiu; - _agentResponseModel = null; - _agentResponseId = null; - _agentTotalInputTokens = 0; - _agentTotalOutputTokens = 0; - _agentTotalCacheReadTokens = 0; - _agentTotalCacheCreationTokens = 0; - _agentTotalCost = 0; - _agentTotalAiu = 0; + var agentResponseModel = GetAndReset(ref _agentResponseModel); + var agentResponseId = GetAndReset(ref _agentResponseId); + var agentTotalInputTokens = GetAndReset(ref _agentTotalInputTokens); + var agentTotalOutputTokens = GetAndReset(ref _agentTotalOutputTokens); + var agentTotalCacheReadTokens = GetAndReset(ref _agentTotalCacheReadTokens); + var agentTotalCacheCreationTokens = GetAndReset(ref _agentTotalCacheCreationTokens); + var agentTotalCost = GetAndReset(ref _agentTotalCost); + var agentTotalAiu = GetAndReset(ref _agentTotalAiu); if (activity.IsAllDataRequested) { - if (agentResponseModel is not null) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Response.Model, agentResponseModel); - } - if (agentResponseId is not null) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Response.Id, agentResponseId); - } - if (agentTotalInputTokens > 0) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Usage.InputTokens, agentTotalInputTokens); - } - if (agentTotalOutputTokens > 0) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Usage.OutputTokens, agentTotalOutputTokens); - } - if (agentTotalCacheReadTokens > 0) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Usage.CacheReadInputTokens, agentTotalCacheReadTokens); - } - if (agentTotalCacheCreationTokens > 0) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Usage.CacheCreationInputTokens, 
agentTotalCacheCreationTokens); - } - - // Copilot vendor-prefixed attributes on invoke_agent span - if (agentTotalCost > 0) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Copilot.Cost, agentTotalCost); - } - if (agentTotalAiu > 0) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Copilot.Aiu, agentTotalAiu); - } + SetTagIfNotEmpty(activity, OpenTelemetryConsts.GenAI.Response.Model, agentResponseModel); + SetTagIfNotEmpty(activity, OpenTelemetryConsts.GenAI.Response.Id, agentResponseId); + SetTagIfPositive(activity, OpenTelemetryConsts.GenAI.Usage.InputTokens, agentTotalInputTokens); + SetTagIfPositive(activity, OpenTelemetryConsts.GenAI.Usage.OutputTokens, agentTotalOutputTokens); + SetTagIfPositive(activity, OpenTelemetryConsts.GenAI.Usage.CacheReadInputTokens, agentTotalCacheReadTokens); + SetTagIfPositive(activity, OpenTelemetryConsts.GenAI.Usage.CacheCreationInputTokens, agentTotalCacheCreationTokens); + SetTagIfPositive(activity, OpenTelemetryConsts.GenAI.Copilot.Cost, agentTotalCost); + SetTagIfPositive(activity, OpenTelemetryConsts.GenAI.Copilot.Aiu, agentTotalAiu); } - // Agent-level input = caller's message; output = agent's final response. 
if (_telemetry.EnableSensitiveData) { - var inputJson = BuildMessagesJson(agentInputMessages); - if (inputJson is not null) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Input.Messages, inputJson); - } - - var outputJson = BuildMessagesJson(agentOutputMessages); - if (outputJson is not null) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Output.Messages, outputJson); - } - - if (!string.IsNullOrWhiteSpace(_systemInstructionsJson)) - { - activity.SetTag(OpenTelemetryConsts.GenAI.SystemInstructions, _systemInstructionsJson); - } + SetTagIfNotEmpty(activity, OpenTelemetryConsts.GenAI.Input.Messages, BuildMessagesJson(agentInputMessages)); + SetTagIfNotEmpty(activity, OpenTelemetryConsts.GenAI.Output.Messages, BuildMessagesJson(agentOutputMessages)); + SetTagIfNotEmpty(activity, OpenTelemetryConsts.GenAI.SystemInstructions, _systemInstructionsJson); } - if (!string.IsNullOrWhiteSpace(_toolDefinitionsJson)) - { - activity.SetTag(OpenTelemetryConsts.GenAI.Tool.Definitions, _toolDefinitionsJson); - } + SetTagIfNotEmpty(activity, OpenTelemetryConsts.GenAI.Tool.Definitions, _toolDefinitionsJson); if (_telemetry.OperationDurationHistogram.Enabled) { @@ -1320,7 +1135,7 @@ private void CompleteAgentTurnLocked(Exception? error) /// Records streaming chunk timing metrics. /// Caller must hold . /// - private void RecordOutputChunkMetricLocked() + private void RecordOutputChunkMetric() { Debug.Assert(Monitor.IsEntered(_lock)); @@ -1360,21 +1175,24 @@ private void RecordOutputChunkMetricLocked() /// Extracts ParentToolCallId from events that carry it. /// A non-null/non-empty value indicates the event belongs to a subagent. /// - private static string? GetParentToolCallId(SessionEvent evt) => evt switch + private static string? 
GetParentToolCallId(SessionEvent evt) { - AssistantUsageEvent e => e.Data?.ParentToolCallId, - AssistantMessageEvent e => e.Data?.ParentToolCallId, - AssistantMessageDeltaEvent e => e.Data?.ParentToolCallId, - ToolExecutionStartEvent e => e.Data?.ParentToolCallId, - ToolExecutionCompleteEvent e => e.Data?.ParentToolCallId, - _ => null, - }; + return evt switch + { + AssistantUsageEvent e => e.Data?.ParentToolCallId, + AssistantMessageEvent e => e.Data?.ParentToolCallId, + AssistantMessageDeltaEvent e => e.Data?.ParentToolCallId, + ToolExecutionStartEvent e => e.Data?.ParentToolCallId, + ToolExecutionCompleteEvent e => e.Data?.ParentToolCallId, + _ => null, + }; + } /// /// Creates a nested invoke_agent + chat span pair for a subagent. /// Caller must hold . /// - private void BeginSubagentLocked(SubagentStartedEvent started) + private void BeginSubagent(SubagentStartedEvent started) { Debug.Assert(Monitor.IsEntered(_lock)); @@ -1424,7 +1242,7 @@ private void BeginSubagentLocked(SubagentStartedEvent started) /// Routes an event to its owning subagent's spans. /// Caller must hold . /// - private void ProcessSubagentEventLocked(SubagentState subagent, SessionEvent sessionEvent) + private void ProcessSubagentEvent(SubagentState subagent, SessionEvent sessionEvent) { Debug.Assert(Monitor.IsEntered(_lock)); @@ -1449,26 +1267,10 @@ private void ProcessSubagentEventLocked(SubagentState subagent, SessionEvent ses subagent.ResponseId = usageEvent.Data.ProviderCallId; } - if (usageEvent.Data.InputTokens is double inTok) - { - subagent.InputTokens += (int)inTok; - } - - if (usageEvent.Data.OutputTokens is double outTok) - { - subagent.OutputTokens += (int)outTok; - } - - if (usageEvent.Data.CacheReadTokens is double cacheRead) - { - subagent.CacheReadTokens += (int)cacheRead; - } - - if (usageEvent.Data.CacheWriteTokens is double cacheWrite) - { - subagent.CacheCreationTokens += (int)cacheWrite; - } - + subagent.InputTokens += usageEvent.Data.InputTokens is double inTok ? 
(int)inTok : 0; + subagent.OutputTokens += usageEvent.Data.OutputTokens is double outTok ? (int)outTok : 0; + subagent.CacheReadTokens += usageEvent.Data.CacheReadTokens is double cacheRead ? (int)cacheRead : 0; + subagent.CacheCreationTokens += usageEvent.Data.CacheWriteTokens is double cacheWrite ? (int)cacheWrite : 0; break; case AssistantMessageEvent messageEvent: @@ -1551,7 +1353,7 @@ private void ProcessSubagentEventLocked(SubagentState subagent, SessionEvent ses /// Completes a subagent's chat and invoke_agent spans. /// Caller must hold . /// - private void CompleteSubagentLocked(string toolCallId, Exception? error) + private void CompleteSubagent(string toolCallId, Exception? error) { Debug.Assert(Monitor.IsEntered(_lock)); @@ -1575,55 +1377,24 @@ private void CompleteSubagentLocked(string toolCallId, Exception? error) if (chatActivity.IsAllDataRequested) { - if (subagent.ResponseModel is not null) - { - chatActivity.SetTag(OpenTelemetryConsts.GenAI.Response.Model, subagent.ResponseModel); - } - - if (subagent.ResponseId is not null) - { - chatActivity.SetTag(OpenTelemetryConsts.GenAI.Response.Id, subagent.ResponseId); - } - - if (subagent.InputTokens > 0) - { - chatActivity.SetTag(OpenTelemetryConsts.GenAI.Usage.InputTokens, subagent.InputTokens); - } - - if (subagent.OutputTokens > 0) - { - chatActivity.SetTag(OpenTelemetryConsts.GenAI.Usage.OutputTokens, subagent.OutputTokens); - } - - if (subagent.CacheReadTokens > 0) - { - chatActivity.SetTag(OpenTelemetryConsts.GenAI.Usage.CacheReadInputTokens, subagent.CacheReadTokens); - } - - if (subagent.CacheCreationTokens > 0) - { - chatActivity.SetTag(OpenTelemetryConsts.GenAI.Usage.CacheCreationInputTokens, subagent.CacheCreationTokens); - } + SetTagIfNotEmpty(chatActivity, OpenTelemetryConsts.GenAI.Response.Model, subagent.ResponseModel); + SetTagIfNotEmpty(chatActivity, OpenTelemetryConsts.GenAI.Response.Id, subagent.ResponseId); + SetTagIfPositive(chatActivity, OpenTelemetryConsts.GenAI.Usage.InputTokens, 
subagent.InputTokens); + SetTagIfPositive(chatActivity, OpenTelemetryConsts.GenAI.Usage.OutputTokens, subagent.OutputTokens); + SetTagIfPositive(chatActivity, OpenTelemetryConsts.GenAI.Usage.CacheReadInputTokens, subagent.CacheReadTokens); + SetTagIfPositive(chatActivity, OpenTelemetryConsts.GenAI.Usage.CacheCreationInputTokens, subagent.CacheCreationTokens); } if (_telemetry.EnableSensitiveData) { if (subagent.InputMessages.Count > 0) { - var inputJson = BuildMessagesJson(subagent.InputMessages); - if (inputJson is not null) - { - chatActivity.SetTag(OpenTelemetryConsts.GenAI.Input.Messages, inputJson); - } + SetTagIfNotEmpty(chatActivity, OpenTelemetryConsts.GenAI.Input.Messages, BuildMessagesJson(subagent.InputMessages)); } if (subagent.OutputMessages.Count > 0) { - var outputJson = BuildMessagesJson(subagent.OutputMessages, finishReason: finishReason); - if (outputJson is not null) - { - chatActivity.SetTag(OpenTelemetryConsts.GenAI.Output.Messages, outputJson); - } + SetTagIfNotEmpty(chatActivity, OpenTelemetryConsts.GenAI.Output.Messages, BuildMessagesJson(subagent.OutputMessages, finishReason: finishReason)); } } @@ -1654,45 +1425,18 @@ private void CompleteSubagentLocked(string toolCallId, Exception? 
error) if (invokeActivity.IsAllDataRequested) { - if (subagent.ResponseModel is not null) - { - invokeActivity.SetTag(OpenTelemetryConsts.GenAI.Response.Model, subagent.ResponseModel); - } - - if (subagent.ResponseId is not null) - { - invokeActivity.SetTag(OpenTelemetryConsts.GenAI.Response.Id, subagent.ResponseId); - } - - if (subagent.InputTokens > 0) - { - invokeActivity.SetTag(OpenTelemetryConsts.GenAI.Usage.InputTokens, subagent.InputTokens); - } - - if (subagent.OutputTokens > 0) - { - invokeActivity.SetTag(OpenTelemetryConsts.GenAI.Usage.OutputTokens, subagent.OutputTokens); - } - - if (subagent.CacheReadTokens > 0) - { - invokeActivity.SetTag(OpenTelemetryConsts.GenAI.Usage.CacheReadInputTokens, subagent.CacheReadTokens); - } - - if (subagent.CacheCreationTokens > 0) - { - invokeActivity.SetTag(OpenTelemetryConsts.GenAI.Usage.CacheCreationInputTokens, subagent.CacheCreationTokens); - } + SetTagIfNotEmpty(invokeActivity, OpenTelemetryConsts.GenAI.Response.Model, subagent.ResponseModel); + SetTagIfNotEmpty(invokeActivity, OpenTelemetryConsts.GenAI.Response.Id, subagent.ResponseId); + SetTagIfPositive(invokeActivity, OpenTelemetryConsts.GenAI.Usage.InputTokens, subagent.InputTokens); + SetTagIfPositive(invokeActivity, OpenTelemetryConsts.GenAI.Usage.OutputTokens, subagent.OutputTokens); + SetTagIfPositive(invokeActivity, OpenTelemetryConsts.GenAI.Usage.CacheReadInputTokens, subagent.CacheReadTokens); + SetTagIfPositive(invokeActivity, OpenTelemetryConsts.GenAI.Usage.CacheCreationInputTokens, subagent.CacheCreationTokens); } if (_telemetry.EnableSensitiveData && subagent.OutputMessages.Count > 0) { - var outputJson = BuildMessagesJson( - subagent.OutputMessages.Select(m => m with { FinishReason = finishReason }).ToList()); - if (outputJson is not null) - { - invokeActivity.SetTag(OpenTelemetryConsts.GenAI.Output.Messages, outputJson); - } + SetTagIfNotEmpty(invokeActivity, OpenTelemetryConsts.GenAI.Output.Messages, + 
BuildMessagesJson(subagent.OutputMessages.Select(m => m with { FinishReason = finishReason }).ToList())); } if (_telemetry.OperationDurationHistogram.Enabled) @@ -1877,6 +1621,45 @@ private static void WriteJsonValue(Utf8JsonWriter writer, object value) } } + private static T? GetAndReset(ref T? field) + { + var value = field; + field = default; + return value; + } + + private static void SetTagIfNotEmpty(Activity activity, string key, string? value) + { + if (!string.IsNullOrWhiteSpace(value)) + { + activity.SetTag(key, value); + } + } + + private static void SetTagIfPositive(Activity activity, string key, int value) + { + if (value > 0) + { + activity.SetTag(key, value); + } + } + + private static void SetTagIfPositive(Activity activity, string key, double value) + { + if (value > 0) + { + activity.SetTag(key, value); + } + } + + private static void SetTagIfNotNull(Activity activity, string key, T? value) where T : struct + { + if (value.HasValue) + { + activity.SetTag(key, value.Value); + } + } + private sealed record OtelMsg( string Role, List Parts, diff --git a/dotnet/src/Session.cs b/dotnet/src/Session.cs index acd6d9e31..83f85bc57 100644 --- a/dotnet/src/Session.cs +++ b/dotnet/src/Session.cs @@ -54,7 +54,7 @@ public partial class CopilotSession : IAsyncDisposable private event SessionEventHandler? _eventHandlers; private readonly Dictionary _toolHandlers = new(); private readonly JsonRpc _rpc; - private readonly CopilotTelemetry.AgentTurnTracker? _turnTracker; + private readonly CopilotTelemetry.AgentTurnTracker? _telemetryTracker; private volatile PermissionRequestHandler? _permissionHandler; private volatile UserInputHandler? _userInputHandler; private SessionHooks? _hooks; @@ -82,12 +82,12 @@ public partial class CopilotSession : IAsyncDisposable /// public string? WorkspacePath { get; internal set; } - internal string TelemetryProviderName => _turnTracker?.ProviderName ?? OpenTelemetryConsts.DefaultProviderName; - internal string? 
TelemetryServerAddress => _turnTracker?.ServerAddress; - internal int? TelemetryServerPort => _turnTracker?.ServerPort; - internal ActivityContext TelemetryActivityContext => _turnTracker?.GetActivityContext() ?? default; + internal string TelemetryProviderName => _telemetryTracker?.ProviderName ?? OpenTelemetryConsts.DefaultProviderName; + internal string? TelemetryServerAddress => _telemetryTracker?.ServerAddress; + internal int? TelemetryServerPort => _telemetryTracker?.ServerPort; + internal ActivityContext TelemetryActivityContext => _telemetryTracker?.GetActivityContext() ?? default; internal ActivityContext GetTelemetryToolCallParentContext(string toolCallId) => - _turnTracker?.GetToolCallParentContext(toolCallId) ?? default; + _telemetryTracker?.GetToolCallParentContext(toolCallId) ?? default; /// /// Initializes a new instance of the class. @@ -119,7 +119,7 @@ internal CopilotSession( { SessionId = sessionId; _rpc = rpc; - _turnTracker = telemetry is not null ? new CopilotTelemetry.AgentTurnTracker(telemetry, sessionId, model, provider, systemMessage, tools, streaming, agentName, agentDescription) : null; + _telemetryTracker = telemetry is not null ? new CopilotTelemetry.AgentTurnTracker(telemetry, sessionId, model, provider, systemMessage, tools, streaming, agentName, agentDescription) : null; WorkspacePath = workspacePath; } @@ -156,27 +156,18 @@ private Task InvokeRpcAsync(string method, object?[]? 
args, CancellationTo /// public async Task SendAsync(MessageOptions options, CancellationToken cancellationToken = default) { - _turnTracker?.BeginSend(options.Prompt); - try + var request = new SendMessageRequest { - var request = new SendMessageRequest - { - SessionId = SessionId, - Prompt = options.Prompt, - Attachments = options.Attachments, - Mode = options.Mode - }; + SessionId = SessionId, + Prompt = options.Prompt, + Attachments = options.Attachments, + Mode = options.Mode + }; - var response = await InvokeRpcAsync( - "session.send", [request], cancellationToken); + var response = await InvokeRpcAsync( + "session.send", [request], cancellationToken); - return response.MessageId; - } - catch (Exception ex) when (_turnTracker is { } tracker) - { - tracker.CompleteTurnWithError(ex); - throw; - } + return response.MessageId; } /// @@ -238,28 +229,18 @@ void Handler(SessionEvent evt) await SendAsync(options, cancellationToken); - try - { - using var cts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); - cts.CancelAfter(effectiveTimeout); - - using var registration = cts.Token.Register(() => - { - if (cancellationToken.IsCancellationRequested) - tcs.TrySetCanceled(cancellationToken); - else - tcs.TrySetException(new TimeoutException($"SendAndWaitAsync timed out after {effectiveTimeout}")); - }); + using var cts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); + cts.CancelAfter(effectiveTimeout); - return await tcs.Task; - } - catch (Exception ex) when (_turnTracker is { } tracker) + using var registration = cts.Token.Register(() => { - // If timeout/cancellation occurs before DispatchEvent handles the turn-ending event, - // complete the telemetry span with the error (idempotent if already completed). 
- tracker.CompleteTurnWithError(ex); - throw; - } + if (cancellationToken.IsCancellationRequested) + tcs.TrySetCanceled(cancellationToken); + else + tcs.TrySetException(new TimeoutException($"SendAndWaitAsync timed out after {effectiveTimeout}")); + }); + + return await tcs.Task; } /// @@ -309,7 +290,7 @@ public IDisposable On(SessionEventHandler handler) /// internal void DispatchEvent(SessionEvent sessionEvent) { - _turnTracker?.ProcessEvent(sessionEvent); + _telemetryTracker?.ProcessEvent(sessionEvent); // Reading the field once gives us a snapshot; delegates are immutable. _eventHandlers?.Invoke(sessionEvent); @@ -620,7 +601,7 @@ await InvokeRpcAsync( _eventHandlers = null; _toolHandlers.Clear(); - _turnTracker?.CompleteOnDispose(); + _telemetryTracker?.CompleteOnDispose(); _permissionHandler = null; } diff --git a/go/client.go b/go/client.go index 9658ad5ac..5f38084f7 100644 --- a/go/client.go +++ b/go/client.go @@ -1354,7 +1354,7 @@ func (c *Client) executeToolCall( if c.telemetry != nil { toolSpanCtx := context.Background() - if session.turnTracker != nil { + if session.telemetryTracker != nil { if tCtx := session.getToolCallParentContext(toolCallID); tCtx != nil { toolSpanCtx = tCtx } @@ -1380,10 +1380,10 @@ func (c *Client) executeToolCall( providerName := otelDefaultProviderName var serverAddress string var serverPort int - if session.turnTracker != nil { - providerName = session.turnTracker.getProviderName() - serverAddress = session.turnTracker.getServerAddress() - serverPort = session.turnTracker.getServerPort() + if session.telemetryTracker != nil { + providerName = session.telemetryTracker.getProviderName() + serverAddress = session.telemetryTracker.getServerAddress() + serverPort = session.telemetryTracker.getServerPort() } ctx := spanCtx if ctx == nil { diff --git a/go/copilot_telemetry.go b/go/copilot_telemetry.go index 6b6d6c1f4..12191dff4 100644 --- a/go/copilot_telemetry.go +++ b/go/copilot_telemetry.go @@ -573,8 +573,6 @@ type agentTurnTracker 
struct { turnID *string turnInteractionID *string - // Stashed user prompt for the first chat turn - pendingUserPrompt string } func newAgentTurnTracker(telemetry *copilotTelemetry, sessionID string, model string, provider *ProviderConfig, systemMessage *SystemMessageConfig, tools []Tool, streaming bool, agentName string, agentDescription string) *agentTurnTracker { @@ -641,45 +639,34 @@ func (t *agentTurnTracker) completeOnDispose() { } } -// beginSend is called at the start of Send() to start a span and record the user message. -func (t *agentTurnTracker) beginSend(ctx context.Context, prompt string) { - t.mu.Lock() - defer t.mu.Unlock() - if t.agentSpan == nil { - spanCtx, span := t.telemetry.startInvokeAgentSpan( - ctx, - t.sessionID, - t.requestModel, - t.providerName, - t.serverAddress, - t.serverPort, - t.agentName, - t.agentDescription, - ) - t.agentSpan = span - t.agentSpanCtx = spanCtx - t.agentStartTime = time.Now() - t.agentInputMsgs = nil - t.agentOutputMsgs = nil - } - - // Agent-level input = what the caller sent (all user prompts). - if prompt != "" { - t.agentInputMsgs = append(t.agentInputMsgs, otelMsg{ - Role: "user", - Parts: []otelPart{{Type: "text", Content: prompt}}, - }) - } - - // Stash user prompt for the first chat turn's input messages. - t.pendingUserPrompt = prompt -} - // processEvent handles telemetry enrichment for dispatched events. func (t *agentTurnTracker) processEvent(event SessionEvent) { t.mu.Lock() defer t.mu.Unlock() + // A user.message event starts a new invoke_agent span (if not already + // active) and records the user prompt. 
+ if event.Type == UserMessage { + prompt := "" + if event.Data.Content != nil { + prompt = *event.Data.Content + } + t.ensureAgentSpan() + + if prompt != "" { + t.agentInputMsgs = append(t.agentInputMsgs, otelMsg{ + Role: "user", + Parts: []otelPart{{Type: "text", Content: prompt}}, + }) + t.turnInputMsgs = append(t.turnInputMsgs, otelMsg{ + Role: "user", + Parts: []otelPart{{Type: "text", Content: prompt}}, + }) + } + + return + } + // Route subagent events by parentToolCallId. parentToolCallID := getParentToolCallID(event) if parentToolCallID != "" { @@ -968,23 +955,43 @@ func (t *agentTurnTracker) processEvent(event SessionEvent) { } } -// completeTurnWithError completes the current turn with an error. -func (t *agentTurnTracker) completeTurnWithError(err error) { - t.mu.Lock() - defer t.mu.Unlock() - t.completeChatTurnLocked(err) - t.completeAgentTurnLocked(err) -} - // ============================================================================ // Chat turn lifecycle // ============================================================================ // beginChatTurnLocked starts a new chat child span for an LLM turn. Caller must hold mu. +// ensureAgentSpan ensures the invoke_agent span exists, creating it on demand +// if needed. Called from both the user.message handler and beginChatTurnLocked +// so that RPC-initiated turns (no user.message) still get an agent span. +// Caller must hold mu. +func (t *agentTurnTracker) ensureAgentSpan() { + if t.agentSpan == nil { + spanCtx, span := t.telemetry.startInvokeAgentSpan( + context.Background(), + t.sessionID, + t.requestModel, + t.providerName, + t.serverAddress, + t.serverPort, + t.agentName, + t.agentDescription, + ) + t.agentSpan = span + t.agentSpanCtx = spanCtx + t.agentStartTime = time.Now() + t.agentInputMsgs = nil + t.agentOutputMsgs = nil + } +} + func (t *agentTurnTracker) beginChatTurnLocked() { // If there's already an active turn, complete it first. 
t.completeChatTurnLocked(nil) + // Ensure the parent agent span exists — covers RPC-initiated turns + // where no user.message event preceded the assistant.turn_start. + t.ensureAgentSpan() + t.turnResponseModel = "" t.turnResponseID = "" t.turnInputTokens = 0 @@ -993,7 +1000,6 @@ func (t *agentTurnTracker) beginChatTurnLocked() { t.turnCacheCreationTokens = 0 t.firstOutputChunkRecorded = false t.lastOutputChunkTime = time.Time{} - t.turnInputMsgs = nil t.turnOutputMsgs = nil t.turnCost = nil t.turnServerDuration = nil @@ -1002,15 +1008,6 @@ func (t *agentTurnTracker) beginChatTurnLocked() { t.turnID = nil t.turnInteractionID = nil - // Add stashed user prompt as input message for the first turn. - if t.pendingUserPrompt != "" { - t.turnInputMsgs = append(t.turnInputMsgs, otelMsg{ - Role: "user", - Parts: []otelPart{{Type: "text", Content: t.pendingUserPrompt}}, - }) - t.pendingUserPrompt = "" - } - parentCtx := t.agentSpanCtx if parentCtx == nil { parentCtx = context.Background() @@ -1222,7 +1219,6 @@ func (t *agentTurnTracker) completeAgentTurnLocked(err error) { t.agentSpan = nil t.agentSpanCtx = nil t.agentStartTime = time.Time{} - t.pendingUserPrompt = "" t.agentInputMsgs = nil t.agentOutputMsgs = nil diff --git a/go/copilot_telemetry_test.go b/go/copilot_telemetry_test.go index c7f34aa38..485c0556f 100644 --- a/go/copilot_telemetry_test.go +++ b/go/copilot_telemetry_test.go @@ -484,7 +484,7 @@ func TestOpenTelemetry_TwoLevelSpanHierarchy(t *testing.T) { session := newTestSession(ct, false) // Simulate send - session.turnTracker.beginSend(context.Background(), "Hello") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Hello")}}) // AssistantTurnStart creates the chat child span session.dispatchEvent(SessionEvent{ @@ -562,7 +562,7 @@ func TestOpenTelemetry_MultiTurnAccumulatedUsage(t *testing.T) { ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) session := newTestSession(ct, false) - 
session.turnTracker.beginSend(context.Background(), "Hello") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Hello")}}) // Turn 1 session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) @@ -600,7 +600,7 @@ func TestOpenTelemetry_VendorPrefixedAttributes(t *testing.T) { ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) session := newTestSession(ct, false) - session.turnTracker.beginSend(context.Background(), "Hello") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Hello")}}) session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) session.dispatchEvent(SessionEvent{Type: AssistantUsage, Data: Data{ @@ -645,7 +645,7 @@ func TestOpenTelemetry_RichMessageContent(t *testing.T) { ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{EnableSensitiveData: Bool(true)}) session := newTestSession(ct, false) - session.turnTracker.beginSend(context.Background(), "Hello") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Hello")}}) session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) @@ -694,7 +694,7 @@ func TestOpenTelemetry_ToolCallParts(t *testing.T) { ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{EnableSensitiveData: Bool(true)}) session := newTestSession(ct, false) - session.turnTracker.beginSend(context.Background(), "Hello") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Hello")}}) session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) @@ -763,7 +763,7 @@ func TestOpenTelemetry_MCPServerToolTracking(t *testing.T) { ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{EnableSensitiveData: Bool(true)}) session := newTestSession(ct, false) - session.turnTracker.beginSend(context.Background(), "Hello") + 
session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Hello")}}) session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) @@ -824,7 +824,7 @@ func TestOpenTelemetry_SessionTruncationEvent(t *testing.T) { ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) session := newTestSession(ct, false) - session.turnTracker.beginSend(context.Background(), "Hello") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Hello")}}) session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) session.dispatchEvent(SessionEvent{ @@ -863,7 +863,7 @@ func TestOpenTelemetry_CompactionEvents(t *testing.T) { ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) session := newTestSession(ct, false) - session.turnTracker.beginSend(context.Background(), "Hello") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Hello")}}) session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) session.dispatchEvent(SessionEvent{Type: SessionCompactionStart}) @@ -902,7 +902,7 @@ func TestOpenTelemetry_SkillInvokedEvent(t *testing.T) { ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{EnableSensitiveData: Bool(true)}) session := newTestSession(ct, false) - session.turnTracker.beginSend(context.Background(), "Hello") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Hello")}}) session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) session.dispatchEvent(SessionEvent{ @@ -944,7 +944,7 @@ func TestOpenTelemetry_SessionErrorCompletesTurn(t *testing.T) { ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) session := newTestSession(ct, false) - session.turnTracker.beginSend(context.Background(), "") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("")}}) 
errMsg := "model rate limited" session.dispatchEvent(SessionEvent{ @@ -969,11 +969,11 @@ func TestOpenTelemetry_CompleteOnDispose(t *testing.T) { ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) session := newTestSession(ct, false) - session.turnTracker.beginSend(context.Background(), "Hello") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Hello")}}) session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) // Simulate dispose while turn is active - session.turnTracker.completeOnDispose() + session.telemetryTracker.completeOnDispose() spans := exporter.GetSpans() // Should have chat span + invoke_agent span, both with error @@ -995,7 +995,7 @@ func TestOpenTelemetry_StreamingChunkMetrics(t *testing.T) { ct, _, reader := setupTestTelemetry(t, &TelemetryConfig{}) session := newTestSession(ct, true) // streaming=true - session.turnTracker.beginSend(context.Background(), "") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("")}}) // Start a chat turn session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) @@ -1032,7 +1032,7 @@ func TestOpenTelemetry_SubagentSpans(t *testing.T) { ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) session := newTestSession(ct, false) - session.turnTracker.beginSend(context.Background(), "Hello") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Hello")}}) session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) // Subagent started @@ -1100,7 +1100,7 @@ func TestOpenTelemetry_ToolCallParentContext(t *testing.T) { ct, _, _ := setupTestTelemetry(t, &TelemetryConfig{}) session := newTestSession(ct, false) - session.turnTracker.beginSend(context.Background(), "Hello") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Hello")}}) 
session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) // Simulate tool execution start to register pending parent @@ -1132,7 +1132,7 @@ func TestOpenTelemetry_ModelChangeUpdatesResponseModel(t *testing.T) { ct, _, _ := setupTestTelemetry(t, &TelemetryConfig{}) session := newTestSession(ct, false) - session.turnTracker.beginSend(context.Background(), "") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("")}}) session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) newModel := "gpt-4-turbo" @@ -1141,11 +1141,11 @@ func TestOpenTelemetry_ModelChangeUpdatesResponseModel(t *testing.T) { Data: Data{NewModel: &newModel}, }) - session.turnTracker.mu.Lock() - if session.turnTracker.turnResponseModel != "gpt-4-turbo" { - t.Errorf("Expected response model to be updated to gpt-4-turbo, got %q", session.turnTracker.turnResponseModel) + session.telemetryTracker.mu.Lock() + if session.telemetryTracker.turnResponseModel != "gpt-4-turbo" { + t.Errorf("Expected response model to be updated to gpt-4-turbo, got %q", session.telemetryTracker.turnResponseModel) } - session.turnTracker.mu.Unlock() + session.telemetryTracker.mu.Unlock() session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) session.dispatchEvent(SessionEvent{Type: SessionIdle}) @@ -1157,10 +1157,10 @@ func TestOpenTelemetry_SpanReuseWithinTurn(t *testing.T) { session := newTestSession(ct, false) // First send - session.turnTracker.beginSend(context.Background(), "First question") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("First question")}}) // Second send within the same turn (no SessionIdle yet) - session.turnTracker.beginSend(context.Background(), "Another question") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Another question")}}) // Complete session.dispatchEvent(SessionEvent{Type: SessionIdle}) @@ 
-1182,7 +1182,7 @@ func TestOpenTelemetry_FinishReasons(t *testing.T) { ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) session := newTestSession(ct, false) - session.turnTracker.beginSend(context.Background(), "Hello") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Hello")}}) session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) session.dispatchEvent(SessionEvent{Type: AssistantTurnEnd}) session.dispatchEvent(SessionEvent{Type: SessionIdle}) @@ -1214,7 +1214,7 @@ func TestOpenTelemetry_MessageContentExcludedWhenSensitiveDisabled(t *testing.T) tools := []Tool{{Name: "myTool", Description: "A tool", Parameters: map[string]any{"type": "object"}}} session.configureTelemetryContext("gpt-4", nil, sysMsg, tools, false, "", "") - session.turnTracker.beginSend(context.Background(), "Hello") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Hello")}}) session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) session.dispatchEvent(SessionEvent{ Type: AssistantMessage, @@ -1247,7 +1247,7 @@ func TestOpenTelemetry_CacheTokenAttributes(t *testing.T) { ct, exporter, _ := setupTestTelemetry(t, &TelemetryConfig{}) session := newTestSession(ct, false) - session.turnTracker.beginSend(context.Background(), "Hello") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Hello")}}) session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) session.dispatchEvent(SessionEvent{ @@ -1277,7 +1277,7 @@ func TestOpenTelemetry_ChunkTimingUsesChat(t *testing.T) { ct, _, reader := setupTestTelemetry(t, &TelemetryConfig{}) session := newTestSession(ct, true) // streaming=true - session.turnTracker.beginSend(context.Background(), "") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("")}}) 
session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) session.dispatchEvent(SessionEvent{ @@ -1311,7 +1311,7 @@ func TestOpenTelemetry_TokenUsageMetricsUseChat(t *testing.T) { ct, _, reader := setupTestTelemetry(t, &TelemetryConfig{}) session := newTestSession(ct, false) - session.turnTracker.beginSend(context.Background(), "Hello") + session.telemetryTracker.processEvent(SessionEvent{Type: UserMessage, Data: Data{Content: ptrString("Hello")}}) session.dispatchEvent(SessionEvent{Type: AssistantTurnStart, Data: Data{}}) session.dispatchEvent(SessionEvent{ Type: AssistantUsage, diff --git a/go/session.go b/go/session.go index 660623e5a..b34aebe02 100644 --- a/go/session.go +++ b/go/session.go @@ -66,7 +66,7 @@ type Session struct { hooks *SessionHooks hooksMux sync.RWMutex telemetry *copilotTelemetry - turnTracker *agentTurnTracker + telemetryTracker *agentTurnTracker // RPC provides typed session-scoped RPC methods. RPC *rpc.SessionRpc @@ -115,10 +115,6 @@ func newSession(sessionID string, client *jsonrpc2.Client, workspacePath string) // log.Printf("Failed to send message: %v", err) // } func (s *Session) Send(ctx context.Context, options MessageOptions) (string, error) { - if s.turnTracker != nil { - s.turnTracker.beginSend(ctx, options.Prompt) - } - req := sessionSendRequest{ SessionID: s.SessionID, Prompt: options.Prompt, @@ -128,9 +124,6 @@ func (s *Session) Send(ctx context.Context, options MessageOptions) (string, err result, err := s.client.Request("session.send", req) if err != nil { - if s.turnTracker != nil { - s.turnTracker.completeTurnWithError(err) - } return "", fmt.Errorf("failed to send message: %w", err) } @@ -217,18 +210,9 @@ func (s *Session) SendAndWait(ctx context.Context, options MessageOptions) (*Ses mu.Unlock() return result, nil case err := <-errCh: - // Complete telemetry spans on session error (idempotent if already completed). 
- if s.turnTracker != nil { - s.turnTracker.completeTurnWithError(err) - } return nil, err case <-ctx.Done(): - ctxErr := fmt.Errorf("waiting for session.idle: %w", ctx.Err()) - // Complete telemetry spans on timeout/cancellation (idempotent if already completed). - if s.turnTracker != nil { - s.turnTracker.completeTurnWithError(ctxErr) - } - return nil, ctxErr + return nil, fmt.Errorf("waiting for session.idle: %w", ctx.Err()) } } @@ -487,8 +471,8 @@ func (s *Session) handleHooksInvoke(hookType string, rawInput json.RawMessage) ( // This is an internal method; handlers are called synchronously and any panics // are recovered to prevent crashing the event dispatcher. func (s *Session) dispatchEvent(event SessionEvent) { - if s.turnTracker != nil { - s.turnTracker.processEvent(event) + if s.telemetryTracker != nil { + s.telemetryTracker.processEvent(event) } s.handlerMutex.RLock() @@ -561,8 +545,8 @@ func (s *Session) GetMessages(ctx context.Context) ([]SessionEvent, error) { // } func (s *Session) Destroy() error { // Close any open telemetry spans before destroying. - if s.turnTracker != nil { - s.turnTracker.completeOnDispose() + if s.telemetryTracker != nil { + s.telemetryTracker.completeOnDispose() } _, err := s.client.Request("session.destroy", sessionDestroyRequest{SessionID: s.SessionID}) @@ -629,17 +613,17 @@ func (s *Session) configureTelemetryContext( if s.telemetry == nil { return } - s.turnTracker = newAgentTurnTracker(s.telemetry, s.SessionID, model, provider, systemMessage, tools, streaming, agentName, agentDescription) + s.telemetryTracker = newAgentTurnTracker(s.telemetry, s.SessionID, model, provider, systemMessage, tools, streaming, agentName, agentDescription) } // getToolCallParentContext returns the parent context for a tool call span. // For subagent tool calls this returns the subagent's invoke_agent context; // for main agent tool calls this returns the root invoke_agent context. 
func (s *Session) getToolCallParentContext(toolCallID string) context.Context { - if s.turnTracker == nil { + if s.telemetryTracker == nil { return nil } - return s.turnTracker.getToolCallParentContext(toolCallID) + return s.telemetryTracker.getToolCallParentContext(toolCallID) } // SetModel changes the model for this session. diff --git a/nodejs/src/copilot-telemetry.ts b/nodejs/src/copilot-telemetry.ts index 8bf654d74..85c6bc8f1 100644 --- a/nodejs/src/copilot-telemetry.ts +++ b/nodejs/src/copilot-telemetry.ts @@ -781,9 +781,6 @@ export class AgentTurnTracker { private _turnId: string | undefined; private _turnInteractionId: string | undefined; - // Stashed user prompt for the first chat turn - private _pendingUserPrompt: string | undefined; - constructor( telemetry: CopilotTelemetry, sessionId: string, @@ -849,37 +846,26 @@ export class AgentTurnTracker { } } - /** Called at start of send(); starts the invoke_agent span if needed. */ - beginSend(prompt: string | undefined): void { - if (!this._agentSpan) { - this._agentSpan = this._telemetry.startInvokeAgentSpan( - this._sessionId, - this._requestModel, - this.providerName, - this.serverAddress, - this.serverPort, - this._agentName, - this._agentDescription - ); - this._agentSpanContext = trace.setSpan(context.active(), this._agentSpan); - this._agentStartTime = performance.now(); - this._agentInputMessages = []; - } + /** Called from _dispatchEvent; handles telemetry enrichment and turn completion. */ + processEvent(event: SessionEvent): void { + // A user.message event starts a new invoke_agent span (if not already + // active) and records the user prompt. 
+ if (event.type === "user.message") { + const prompt = (event as Extract).data?.content; + this._ensureAgentSpan(); + + if (prompt) { + const msg = { + role: "user" as const, + parts: [{ type: "text" as const, content: prompt }], + }; + this._agentInputMessages?.push(msg); + (this._inputMessages ??= []).push(msg); + } - // Agent-level input = what the caller sent (all user prompts). - if (this._agentInputMessages && prompt) { - this._agentInputMessages.push({ - role: "user", - parts: [{ type: "text", content: prompt }], - }); + return; } - // Stash user prompt for the first chat turn's input messages. - this._pendingUserPrompt = prompt; - } - - /** Called from _dispatchEvent; handles telemetry enrichment and turn completion. */ - processEvent(event: SessionEvent): void { // Route subagent events by parentToolCallId. const parentToolCallId = getParentToolCallId(event); if (parentToolCallId) { @@ -1167,21 +1153,41 @@ export class AgentTurnTracker { } } - /** Called from send() error path; completes turn with error. */ - completeTurnWithError(error: Error): void { - this._completeChatTurn(error); - this._completeAgentTurn(error); - } - // ======================================================================== // Chat turn lifecycle // ======================================================================== /** Starts a new chat child span for an LLM turn. */ + /** + * Ensures the invoke_agent span exists, creating it on demand if needed. + * Called from both the user.message handler and _beginChatTurn so that + * RPC-initiated turns (no user.message) still get an agent span. 
+ */ + private _ensureAgentSpan(): void { + if (!this._agentSpan) { + this._agentSpan = this._telemetry.startInvokeAgentSpan( + this._sessionId, + this._requestModel, + this.providerName, + this.serverAddress, + this.serverPort, + this._agentName, + this._agentDescription + ); + this._agentSpanContext = trace.setSpan(context.active(), this._agentSpan); + this._agentStartTime = performance.now(); + this._agentInputMessages = []; + } + } + private _beginChatTurn(): void { // If there's already an active turn, complete it first. this._completeChatTurn(undefined); + // Ensure the parent agent span exists — covers RPC-initiated turns + // where no user.message event preceded the assistant.turn_start. + this._ensureAgentSpan(); + this._responseModel = undefined; this._responseId = undefined; this._inputTokens = 0; @@ -1190,7 +1196,7 @@ export class AgentTurnTracker { this._cacheCreationTokens = 0; this._firstOutputChunkRecorded = false; this._lastOutputChunkTime = 0; - this._inputMessages = []; + this._inputMessages ??= []; this._outputMessages = []; this._turnCost = undefined; this._turnServerDuration = undefined; @@ -1199,15 +1205,6 @@ export class AgentTurnTracker { this._turnId = undefined; this._turnInteractionId = undefined; - // Add stashed user prompt as input message for the first turn. - if (this._pendingUserPrompt) { - this._inputMessages.push({ - role: "user", - parts: [{ type: "text", content: this._pendingUserPrompt }], - }); - this._pendingUserPrompt = undefined; - } - const parentContext = this._agentSpanContext ?? 
context.active(); this._turnSpan = this._telemetry.startChatSpan( this._requestModel, @@ -1390,7 +1387,6 @@ export class AgentTurnTracker { this._agentSpan = undefined; this._agentSpanContext = undefined; this._agentStartTime = undefined; - this._pendingUserPrompt = undefined; this._agentInputMessages = undefined; this._agentOutputMessages = undefined; diff --git a/nodejs/src/session.ts b/nodejs/src/session.ts index 9f1231f7e..6cfa4f9a6 100644 --- a/nodejs/src/session.ts +++ b/nodejs/src/session.ts @@ -72,7 +72,7 @@ export class CopilotSession { // Telemetry state private readonly _telemetry: CopilotTelemetry | undefined; - private readonly _turnTracker: AgentTurnTracker | undefined; + private readonly _telemetryTracker: AgentTurnTracker | undefined; /** * Creates a new CopilotSession instance. @@ -102,7 +102,7 @@ export class CopilotSession { agentDescription?: string ) { this._telemetry = telemetry; - this._turnTracker = telemetry + this._telemetryTracker = telemetry ? new AgentTurnTracker( telemetry, sessionId, @@ -141,20 +141,20 @@ export class CopilotSession { return this._telemetry; } get telemetrySpanContext(): Context | undefined { - return this._turnTracker?.getSpanContext(); + return this._telemetryTracker?.getSpanContext(); } get telemetryProviderName(): string { - return this._turnTracker?.providerName ?? "github"; + return this._telemetryTracker?.providerName ?? "github"; } get telemetryServerAddress(): string | undefined { - return this._turnTracker?.serverAddress; + return this._telemetryTracker?.serverAddress; } get telemetryServerPort(): number | undefined { - return this._turnTracker?.serverPort; + return this._telemetryTracker?.serverPort; } /** @internal Gets the parent context for a tool call span (may be subagent context). 
*/ getTelemetryToolCallParentContext(toolCallId: string): Context | undefined { - return this._turnTracker?.getToolCallParentContext(toolCallId); + return this._telemetryTracker?.getToolCallParentContext(toolCallId); } /** @@ -176,24 +176,14 @@ export class CopilotSession { * ``` */ async send(options: MessageOptions): Promise { - // Start telemetry span on first send after idle - this._turnTracker?.beginSend(options.prompt); - - try { - const response = await this.connection.sendRequest("session.send", { - sessionId: this.sessionId, - prompt: options.prompt, - attachments: options.attachments, - mode: options.mode, - }); + const response = await this.connection.sendRequest("session.send", { + sessionId: this.sessionId, + prompt: options.prompt, + attachments: options.attachments, + mode: options.mode, + }); - return (response as { messageId: string }).messageId; - } catch (error) { - this._turnTracker?.completeTurnWithError( - error instanceof Error ? error : new Error(String(error)) - ); - throw error; - } + return (response as { messageId: string }).messageId; } /** @@ -266,12 +256,6 @@ export class CopilotSession { await Promise.race([idlePromise, timeoutPromise]); return lastAssistantMessage; - } catch (ex) { - // Complete telemetry spans on timeout/cancellation (idempotent if already completed). 
- if (ex instanceof Error) { - this._turnTracker?.completeTurnWithError(ex); - } - throw ex; } finally { if (timeoutId !== undefined) { clearTimeout(timeoutId); @@ -365,7 +349,7 @@ export class CopilotSession { */ _dispatchEvent(event: SessionEvent): void { // Delegate telemetry enrichment and turn completion to the tracker - this._turnTracker?.processEvent(event); + this._telemetryTracker?.processEvent(event); // Dispatch to typed handlers for this specific event type const typedHandlers = this.typedEventHandlers.get(event.type); @@ -616,7 +600,7 @@ export class CopilotSession { this.toolHandlers.clear(); this.toolDescriptions.clear(); this.permissionHandler = undefined; - this._turnTracker?.completeOnDispose(); + this._telemetryTracker?.completeOnDispose(); } /** diff --git a/nodejs/test/opentelemetry.test.ts b/nodejs/test/opentelemetry.test.ts index 18d41e369..35ce43fa7 100644 --- a/nodejs/test/opentelemetry.test.ts +++ b/nodejs/test/opentelemetry.test.ts @@ -121,6 +121,13 @@ function dispatchEvent(session: CopilotSession, event: SessionEvent) { session._dispatchEvent(event); } +/** Send a message and dispatch the matching user.message event (unit tests + * use mock connections that don't produce real events). 
*/ +async function sendWithEvent(session: CopilotSession, prompt: string, extra: Record = {}) { + await session.send({ prompt, ...extra }); + dispatchEvent(session, makeEvent("user.message", { content: prompt })); +} + function makeEvent(type: string, data: Record = {}): SessionEvent { return { id: "evt-1", @@ -233,7 +240,7 @@ describe("CopilotTelemetry", () => { baseUrl: "https://api.openai.com:8080/v1", }); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("session.idle")); const spans = getSpans(); @@ -254,8 +261,8 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "First" }); - await session.send({ prompt: "Second" }); + await sendWithEvent(session, "First"); + await sendWithEvent(session, "Second"); dispatchEvent(session, makeEvent("session.idle")); const spans = getSpans(); @@ -267,11 +274,11 @@ describe("CopilotTelemetry", () => { const session = createTestSession(telemetry); // First turn - await session.send({ prompt: "Turn 1" }); + await sendWithEvent(session, "Turn 1"); dispatchEvent(session, makeEvent("session.idle")); // Second turn - await session.send({ prompt: "Turn 2" }); + await sendWithEvent(session, "Turn 2"); dispatchEvent(session, makeEvent("session.idle")); const spans = getSpans(); @@ -341,7 +348,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry, { model: "gpt-4o" }); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("session.idle")); const scopeMetrics = await getMetrics(); @@ -354,7 +361,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry, { model: "gpt-4o" }); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); 
dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent( session, @@ -392,7 +399,7 @@ describe("CopilotTelemetry", () => { streaming: true, }); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); // Simulate streaming chunks @@ -477,7 +484,7 @@ describe("CopilotTelemetry", () => { ], }); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent(session, makeEvent("assistant.message", { content: "Hi there!" })); dispatchEvent(session, makeEvent("session.idle")); @@ -538,7 +545,7 @@ describe("CopilotTelemetry", () => { ], }); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent(session, makeEvent("assistant.message", { content: "Hi there!" 
})); dispatchEvent(session, makeEvent("session.idle")); @@ -570,7 +577,7 @@ describe("CopilotTelemetry", () => { const conn = createMockConnection(); const session = new CopilotSession("test-id", conn); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("session.idle")); const spans = getSpans(); @@ -589,7 +596,7 @@ describe("CopilotTelemetry", () => { }); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("session.idle")); const spans = getSpans(); @@ -612,7 +619,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent( session, makeEvent("session.error", { @@ -631,7 +638,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent( session, @@ -654,7 +661,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent( session, @@ -683,7 +690,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry, { model: "gpt-4o" }); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent( session, @@ -718,7 +725,7 @@ describe("CopilotTelemetry", () => 
{ const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent( session, @@ -773,7 +780,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("session.idle")); const spans = getSpans(); @@ -804,7 +811,7 @@ describe("CopilotTelemetry", () => { "A helpful agent" // agentDescription ); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("session.idle")); const spans = getSpans(); @@ -844,7 +851,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent(session, makeEvent("session.idle")); @@ -863,7 +870,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); // Turn 1 dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); @@ -900,7 +907,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); // Turn 1 dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); @@ -953,7 +960,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({ enableSensitiveData: true }); const session = 
createTestSession(telemetry); - await session.send({ prompt: "Think step by step" }); + await sendWithEvent(session, "Think step by step"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent( session, @@ -983,7 +990,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({ enableSensitiveData: true }); const session = createTestSession(telemetry); - await session.send({ prompt: "Get weather" }); + await sendWithEvent(session, "Get weather"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); // Tool execution start -> output message with tool_call part @@ -1034,7 +1041,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({ enableSensitiveData: true }); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello world" }); + await sendWithEvent(session, "Hello world"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent(session, makeEvent("session.idle")); @@ -1058,7 +1065,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent( session, @@ -1093,7 +1100,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent( session, @@ -1138,7 +1145,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, 
makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent( session, @@ -1176,7 +1183,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent( session, @@ -1224,7 +1231,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({ enableSensitiveData: true }); const session = createTestSession(telemetry); - await session.send({ prompt: "Use MCP tool" }); + await sendWithEvent(session, "Use MCP tool"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent( @@ -1289,7 +1296,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent( session, makeEvent("assistant.turn_start", { @@ -1333,7 +1340,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); // Turn 1 with cost and AIU dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); @@ -1395,7 +1402,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent( session, @@ -1432,7 +1439,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); 
dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent(session, makeEvent("session.compaction_start", {})); dispatchEvent(session, makeEvent("session.idle")); @@ -1449,7 +1456,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent( session, @@ -1486,7 +1493,7 @@ describe("CopilotTelemetry", () => { const telemetry = new CopilotTelemetry({}); const session = createTestSession(telemetry); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); // Spans should not be finished yet @@ -1522,7 +1529,7 @@ describe("CopilotTelemetry", () => { streaming: true, }); - await session.send({ prompt: "Hello" }); + await sendWithEvent(session, "Hello"); dispatchEvent(session, makeEvent("assistant.turn_start", { turnId: "turn-1" })); dispatchEvent( session, diff --git a/python/copilot/_copilot_telemetry.py b/python/copilot/_copilot_telemetry.py index 718718a64..eaf8bbb06 100644 --- a/python/copilot/_copilot_telemetry.py +++ b/python/copilot/_copilot_telemetry.py @@ -788,9 +788,6 @@ def __init__( self._turn_id: str | None = None self._turn_interaction_id: str | None = None - # Stashed user prompt for the first chat turn - self._pending_user_prompt: str | None = None - # -- Property accessors -------------------------------------------------- @property @@ -830,44 +827,11 @@ def get_tool_call_parent_context(self, tool_call_id: str) -> Any | None: # -- Public methods ------------------------------------------------------ - def begin_send(self, prompt: str | None) -> None: - """Called at the start of send(); starts the invoke_agent span if needed.""" - with self._lock: - if 
self._agent_span is None: - self._agent_span, self._agent_span_context = ( - self._telemetry.start_invoke_agent_span( - self._session_id, - self._request_model, - self._provider_name, - self._server_address, - self._server_port, - self._agent_name, - self._agent_description, - ) - ) - self._agent_start_time = time.monotonic() - self._agent_input_messages = [] - - # Agent-level input = what the caller sent (all user prompts). - if self._agent_input_messages is not None and prompt: - self._agent_input_messages.append( - OtelMsg(role="user", parts=[OtelPart(type="text", content=prompt)]) - ) - - # Stash user prompt for the first chat turn's input messages. - self._pending_user_prompt = prompt - def process_event(self, event: SessionEvent) -> None: """Called from _dispatch_event; handles telemetry enrichment and turn completion.""" with self._lock: self._process_event_locked(event) - def complete_turn_with_error(self, error: Exception) -> None: - """Called from send() error path; completes turn with error.""" - with self._lock: - self._complete_chat_turn(error) - self._complete_agent_turn(error) - def complete_on_dispose(self) -> None: """Closes any active spans with an error status. @@ -885,6 +849,22 @@ def complete_on_dispose(self) -> None: def _process_event_locked(self, event: SessionEvent) -> None: """Process a single event under the lock.""" + # A user.message event starts a new invoke_agent span (if not already + # active) and records the user prompt. + if event.type == SessionEventType.USER_MESSAGE: + prompt = getattr(event.data, "content", None) if event.data else None + self._ensure_agent_span() + + if prompt: + msg = OtelMsg(role="user", parts=[OtelPart(type="text", content=prompt)]) + if self._agent_input_messages is not None: + self._agent_input_messages.append(msg) + if self._input_messages is None: + self._input_messages = [] + self._input_messages.append(msg) + + return + # Route subagent events by parentToolCallId. 
parent_tool_call_id = _get_parent_tool_call_id(event) if parent_tool_call_id: @@ -1157,11 +1137,36 @@ def _process_event_locked(self, event: SessionEvent) -> None: # Chat turn lifecycle # ======================================================================== + def _ensure_agent_span(self) -> None: + """Ensures the invoke_agent span exists, creating it on demand if needed. + + Called from both the user.message handler and _begin_chat_turn so that + RPC-initiated turns (no user.message) still get an agent span. + """ + if self._agent_span is None: + self._agent_span, self._agent_span_context = ( + self._telemetry.start_invoke_agent_span( + self._session_id, + self._request_model, + self._provider_name, + self._server_address, + self._server_port, + self._agent_name, + self._agent_description, + ) + ) + self._agent_start_time = time.monotonic() + self._agent_input_messages = [] + def _begin_chat_turn(self) -> None: """Starts a new chat child span for an LLM turn.""" # If there's already an active turn, complete it first. self._complete_chat_turn(None) + # Ensure the parent agent span exists — covers RPC-initiated turns + # where no user.message event preceded the assistant.turn_start. + self._ensure_agent_span() + self._response_model = None self._response_id = None self._input_tokens = 0 @@ -1170,7 +1175,8 @@ def _begin_chat_turn(self) -> None: self._cache_creation_tokens = 0 self._first_output_chunk_recorded = False self._last_output_chunk_time = 0.0 - self._input_messages = [] + if self._input_messages is None: + self._input_messages = [] self._output_messages = [] self._turn_cost = None self._turn_server_duration = None @@ -1179,16 +1185,6 @@ def _begin_chat_turn(self) -> None: self._turn_id = None self._turn_interaction_id = None - # Add stashed user prompt as input message for the first turn. 
- if self._pending_user_prompt: - self._input_messages.append( - OtelMsg( - role="user", - parts=[OtelPart(type="text", content=self._pending_user_prompt)], - ) - ) - self._pending_user_prompt = None - parent_context = self._agent_span_context or otel_context.get_current() self._turn_span = self._telemetry.start_chat_span( self._request_model, @@ -1353,7 +1349,6 @@ def _complete_agent_turn(self, error: Exception | None) -> None: self._agent_span = None self._agent_span_context = None self._agent_start_time = None - self._pending_user_prompt = None self._agent_input_messages = None self._agent_output_messages = None diff --git a/python/copilot/session.py b/python/copilot/session.py index 7ad4f0713..f3fe554e4 100644 --- a/python/copilot/session.py +++ b/python/copilot/session.py @@ -110,7 +110,7 @@ def __init__( # Telemetry — tracker encapsulates per-turn state and logic self._telemetry = telemetry # CopilotTelemetry | None (kept for client.py access) - self._turn_tracker: AgentTurnTracker | None = ( + self._telemetry_tracker: AgentTurnTracker | None = ( AgentTurnTracker( telemetry, session_id, @@ -146,17 +146,17 @@ def workspace_path(self) -> str | None: @property def telemetry_provider_name(self) -> str: """Provider name for telemetry metrics (internal).""" - return self._turn_tracker.provider_name if self._turn_tracker else "github" + return self._telemetry_tracker.provider_name if self._telemetry_tracker else "github" @property def telemetry_server_address(self) -> str | None: """Server address for telemetry metrics (internal).""" - return self._turn_tracker.server_address if self._turn_tracker else None + return self._telemetry_tracker.server_address if self._telemetry_tracker else None @property def telemetry_server_port(self) -> int | None: """Server port for telemetry metrics (internal).""" - return self._turn_tracker.server_port if self._turn_tracker else None + return self._telemetry_tracker.server_port if self._telemetry_tracker else None def 
get_telemetry_tool_call_parent_context(self, tool_call_id: str) -> Any | None: """Get the parent context for a tool call span (internal). @@ -164,8 +164,8 @@ def get_telemetry_tool_call_parent_context(self, tool_call_id: str) -> Any | Non Uses tool-call-specific context when available (e.g. subagent context), otherwise falls back to the root invoke_agent context. """ - if self._turn_tracker is not None: - return self._turn_tracker.get_tool_call_parent_context(tool_call_id) + if self._telemetry_tracker is not None: + return self._telemetry_tracker.get_tool_call_parent_context(tool_call_id) return None async def send(self, options: MessageOptions) -> str: @@ -192,25 +192,16 @@ async def send(self, options: MessageOptions) -> str: ... "attachments": [{"type": "file", "path": "./src/main.py"}] ... }) """ - # Start or continue telemetry span for this turn - if self._turn_tracker is not None: - self._turn_tracker.begin_send(options["prompt"]) - - try: - response = await self._client.request( - "session.send", - { - "sessionId": self.session_id, - "prompt": options["prompt"], - "attachments": options.get("attachments"), - "mode": options.get("mode"), - }, - ) - return response["messageId"] - except Exception as exc: - if self._turn_tracker is not None: - self._turn_tracker.complete_turn_with_error(exc) - raise + response = await self._client.request( + "session.send", + { + "sessionId": self.session_id, + "prompt": options["prompt"], + "attachments": options.get("attachments"), + "mode": options.get("mode"), + }, + ) + return response["messageId"] async def send_and_wait( self, options: MessageOptions, timeout: float | None = None @@ -266,17 +257,6 @@ def handler(event: SessionEventTypeAlias) -> None: if error_event: raise error_event return last_assistant_message - except TimeoutError: - ex = TimeoutError(f"Timeout after {effective_timeout}s waiting for session.idle") - # Complete telemetry spans on timeout (idempotent if already completed). 
- if self._turn_tracker is not None: - self._turn_tracker.complete_turn_with_error(ex) - raise ex - except Exception as ex: - # Complete telemetry spans on error (idempotent if already completed). - if self._turn_tracker is not None: - self._turn_tracker.complete_turn_with_error(ex) - raise finally: unsubscribe() @@ -327,8 +307,8 @@ def _dispatch_event(self, event: SessionEvent) -> None: event: The session event to dispatch to all handlers. """ # Telemetry enrichment before user handlers - if self._turn_tracker is not None: - self._turn_tracker.process_event(event) + if self._telemetry_tracker is not None: + self._telemetry_tracker.process_event(event) with self._event_handlers_lock: handlers = list(self._event_handlers) @@ -586,8 +566,8 @@ async def destroy(self) -> None: >>> await session.destroy() """ # Close any open telemetry spans before destroying - if self._turn_tracker is not None: - self._turn_tracker.complete_on_dispose() + if self._telemetry_tracker is not None: + self._telemetry_tracker.complete_on_dispose() await self._client.request("session.destroy", {"sessionId": self.session_id}) with self._event_handlers_lock: diff --git a/python/test_opentelemetry.py b/python/test_opentelemetry.py index be9d9982a..eac0c0285 100644 --- a/python/test_opentelemetry.py +++ b/python/test_opentelemetry.py @@ -191,6 +191,19 @@ def _make_session_with_telemetry( ) +async def _send_with_event(session: CopilotSession, prompt: str = "Hello") -> None: + """Send a message and dispatch the matching user.message event. + + Unit tests use mock connections that don't produce real events, so we + manually dispatch the user.message event that would normally arrive from + the CLI. 
+ """ + await session.send({"prompt": prompt}) + session._dispatch_event( + _make_session_event(SessionEventType.USER_MESSAGE, content=prompt) + ) + + def _get_metric_names(reader): """Collect all metric names from the reader.""" data = reader.get_metrics_data() @@ -638,7 +651,7 @@ async def test_includes_messages_when_sensitive_enabled(self, _reset_otel_global tools=[tool], ) - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -702,7 +715,7 @@ async def test_excludes_messages_when_sensitive_disabled(self, _reset_otel_globa tools=[tool], ) - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -776,7 +789,7 @@ def test_no_telemetry_when_not_configured(self, _reset_otel_globals): def test_session_without_telemetry_has_no_span_state(self): session = _make_session_with_telemetry(telemetry=None) assert session._telemetry is None - assert session._turn_tracker is None + assert session._telemetry_tracker is None # --------------------------------------------------------------------------- @@ -791,10 +804,10 @@ async def test_send_creates_invoke_agent_span(self, _reset_otel_globals): telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") # Span started but not ended yet (need idle event) - assert session._turn_tracker._agent_span is not None + assert session._telemetry_tracker._agent_span is not None # Dispatch turn start + turn end + idle to close spans session._dispatch_event( @@ -819,7 +832,7 @@ async def test_two_level_span_hierarchy(self, _reset_otel_globals): telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = 
_make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -844,7 +857,7 @@ async def test_usage_event_sets_per_turn_and_accumulated(self, _reset_otel_globa telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -885,7 +898,7 @@ async def test_multi_turn_accumulates_usage(self, _reset_otel_globals): telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") # Turn 1 session._dispatch_event( @@ -940,14 +953,14 @@ async def test_model_change_event_updates_response_model(self, _reset_otel_globa telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) session._dispatch_event( _make_session_event(SessionEventType.SESSION_MODEL_CHANGE, new_model="gpt-4o-mini") ) - assert session._turn_tracker._response_model == "gpt-4o-mini" + assert session._telemetry_tracker._response_model == "gpt-4o-mini" session._dispatch_event(_make_session_event(SessionEventType.ASSISTANT_TURN_END)) session._dispatch_event(_make_session_event(SessionEventType.SESSION_IDLE)) @@ -957,7 +970,7 @@ async def test_error_event_completes_span_with_error(self, _reset_otel_globals): telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = 
_make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -977,7 +990,7 @@ async def test_idle_event_records_duration_metric(self, _reset_otel_globals): telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -995,12 +1008,12 @@ async def test_subsequent_send_reuses_agent_span(self, _reset_otel_globals): session = _make_session_with_telemetry(telemetry, "s1") # First send creates the agent span - await session.send({"prompt": "First"}) - span_after_first = session._turn_tracker._agent_span + await _send_with_event(session, "First") + span_after_first = session._telemetry_tracker._agent_span # Second send reuses the same span - await session.send({"prompt": "Second"}) - assert session._turn_tracker._agent_span is span_after_first + await _send_with_event(session, "Second") + assert session._telemetry_tracker._agent_span is span_after_first session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") @@ -1023,7 +1036,7 @@ async def test_streaming_records_time_to_first_chunk(self, _reset_otel_globals): telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1", streaming=True) - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -1050,7 +1063,7 @@ async def test_streaming_records_time_per_output_chunk(self, _reset_otel_globals telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = 
_make_session_with_telemetry(telemetry, "s1", streaming=True) - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -1086,10 +1099,10 @@ def test_sets_provider_and_server(self, _reset_otel_globals): provider={"type": "azure", "base_url": "https://myendpoint.openai.azure.com:443/v1"}, ) - assert session._turn_tracker._request_model == "gpt-4" - assert session._turn_tracker._provider_name == "azure.ai.openai" - assert session._turn_tracker._server_address == "myendpoint.openai.azure.com" - assert session._turn_tracker._server_port == 443 + assert session._telemetry_tracker._request_model == "gpt-4" + assert session._telemetry_tracker._provider_name == "azure.ai.openai" + assert session._telemetry_tracker._server_address == "myendpoint.openai.azure.com" + assert session._telemetry_tracker._server_port == 443 def test_noop_without_telemetry(self): session = _make_session_with_telemetry(telemetry=None, model="gpt-4") @@ -1108,7 +1121,7 @@ async def test_finish_reasons_on_success(self, _reset_otel_globals): telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -1162,7 +1175,7 @@ async def test_chat_span_copilot_attrs(self, _reset_otel_globals): telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event( SessionEventType.ASSISTANT_TURN_START, @@ -1205,7 +1218,7 @@ async def test_invoke_agent_accumulated_cost_and_aiu(self, _reset_otel_globals): telemetry = _make_telemetry(tracer_provider=tp, 
meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") # Turn 1 session._dispatch_event( @@ -1262,7 +1275,7 @@ async def test_truncation_span_event(self, _reset_otel_globals): telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -1300,7 +1313,7 @@ async def test_compaction_start_span_event(self, _reset_otel_globals): telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -1322,7 +1335,7 @@ async def test_compaction_complete_span_event(self, _reset_otel_globals): telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -1361,7 +1374,7 @@ async def test_skill_invoked_span_event(self, _reset_otel_globals): ) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -1407,7 +1420,7 @@ async def test_reasoning_parts_in_output(self, _reset_otel_globals): ) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Think about this"}) + await _send_with_event(session, "Think about this") 
session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -1444,7 +1457,7 @@ async def test_tool_call_parts(self, _reset_otel_globals): ) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Use a tool"}) + await _send_with_event(session, "Use a tool") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -1498,7 +1511,7 @@ async def test_server_tool_call_parts(self, _reset_otel_globals): ) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Use MCP tool"}) + await _send_with_event(session, "Use MCP tool") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -1560,7 +1573,7 @@ async def test_subagent_creates_nested_hierarchy(self, _reset_otel_globals): ) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -1645,7 +1658,7 @@ async def test_subagent_failed(self, _reset_otel_globals): telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -1690,13 +1703,13 @@ async def test_complete_on_dispose_closes_spans_with_error(self, _reset_otel_glo telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) # Dispose while turn is in progress - 
session._turn_tracker.complete_on_dispose() + session._telemetry_tracker.complete_on_dispose() spans = exporter.get_finished_spans() # Should have both chat and invoke_agent spans @@ -1713,7 +1726,7 @@ async def test_destroy_calls_complete_on_dispose(self, _reset_otel_globals): telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -1739,7 +1752,7 @@ async def test_get_tool_call_parent_context_returns_agent_context(self, _reset_o telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) @@ -1777,7 +1790,7 @@ async def test_per_turn_token_metrics_use_chat_op(self, _reset_otel_globals): telemetry = _make_telemetry(tracer_provider=tp, meter_provider=mp) session = _make_session_with_telemetry(telemetry, "s1") - await session.send({"prompt": "Hello"}) + await _send_with_event(session, "Hello") session._dispatch_event( _make_session_event(SessionEventType.ASSISTANT_TURN_START, turn_id="t1") ) From bce39e7d9998e5d7b44779939b7a332d4b2df220 Mon Sep 17 00:00:00 2001 From: Stephen Toub Date: Tue, 3 Mar 2026 18:15:41 -0500 Subject: [PATCH 6/7] format --- go/copilot_telemetry.go | 1 - go/session.go | 2 +- nodejs/test/opentelemetry.test.ts | 6 +++++- python/copilot/_copilot_telemetry.py | 18 ++++++++---------- python/test_opentelemetry.py | 4 +--- 5 files changed, 15 insertions(+), 16 deletions(-) diff --git a/go/copilot_telemetry.go b/go/copilot_telemetry.go index 12191dff4..741bfcc57 100644 --- a/go/copilot_telemetry.go +++ b/go/copilot_telemetry.go @@ -572,7 +572,6 @@ type 
agentTurnTracker struct { turnAIU *float64 turnID *string turnInteractionID *string - } func newAgentTurnTracker(telemetry *copilotTelemetry, sessionID string, model string, provider *ProviderConfig, systemMessage *SystemMessageConfig, tools []Tool, streaming bool, agentName string, agentDescription string) *agentTurnTracker { diff --git a/go/session.go b/go/session.go index b34aebe02..977021ec9 100644 --- a/go/session.go +++ b/go/session.go @@ -66,7 +66,7 @@ type Session struct { hooks *SessionHooks hooksMux sync.RWMutex telemetry *copilotTelemetry - telemetryTracker *agentTurnTracker + telemetryTracker *agentTurnTracker // RPC provides typed session-scoped RPC methods. RPC *rpc.SessionRpc diff --git a/nodejs/test/opentelemetry.test.ts b/nodejs/test/opentelemetry.test.ts index 35ce43fa7..d16502d30 100644 --- a/nodejs/test/opentelemetry.test.ts +++ b/nodejs/test/opentelemetry.test.ts @@ -123,7 +123,11 @@ function dispatchEvent(session: CopilotSession, event: SessionEvent) { /** Send a message and dispatch the matching user.message event (unit tests * use mock connections that don't produce real events). */ -async function sendWithEvent(session: CopilotSession, prompt: string, extra: Record = {}) { +async function sendWithEvent( + session: CopilotSession, + prompt: string, + extra: Record = {} +) { await session.send({ prompt, ...extra }); dispatchEvent(session, makeEvent("user.message", { content: prompt })); } diff --git a/python/copilot/_copilot_telemetry.py b/python/copilot/_copilot_telemetry.py index eaf8bbb06..143a415e6 100644 --- a/python/copilot/_copilot_telemetry.py +++ b/python/copilot/_copilot_telemetry.py @@ -1144,16 +1144,14 @@ def _ensure_agent_span(self) -> None: RPC-initiated turns (no user.message) still get an agent span. 
""" if self._agent_span is None: - self._agent_span, self._agent_span_context = ( - self._telemetry.start_invoke_agent_span( - self._session_id, - self._request_model, - self._provider_name, - self._server_address, - self._server_port, - self._agent_name, - self._agent_description, - ) + self._agent_span, self._agent_span_context = self._telemetry.start_invoke_agent_span( + self._session_id, + self._request_model, + self._provider_name, + self._server_address, + self._server_port, + self._agent_name, + self._agent_description, ) self._agent_start_time = time.monotonic() self._agent_input_messages = [] diff --git a/python/test_opentelemetry.py b/python/test_opentelemetry.py index eac0c0285..e6dbfa462 100644 --- a/python/test_opentelemetry.py +++ b/python/test_opentelemetry.py @@ -199,9 +199,7 @@ async def _send_with_event(session: CopilotSession, prompt: str = "Hello") -> No the CLI. """ await session.send({"prompt": prompt}) - session._dispatch_event( - _make_session_event(SessionEventType.USER_MESSAGE, content=prompt) - ) + session._dispatch_event(_make_session_event(SessionEventType.USER_MESSAGE, content=prompt)) def _get_metric_names(reader): From b7f0fb894a168b478cf369103f92a57c5a33ac51 Mon Sep 17 00:00:00 2001 From: Stephen Toub Date: Tue, 3 Mar 2026 21:39:05 -0500 Subject: [PATCH 7/7] Fix CI test failures: mock session.resume and go mod tidy scenarios - Node.js/Python: Mock sendRequest for session.resume in overridesBuiltInTool test to avoid 'No authentication info available' error (matching pattern used by existing clientName resume tests) - Go scenarios: Run go mod tidy to add missing google/uuid indirect dependency introduced by new OTel SDK packages Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- nodejs/test/client.test.ts | 4 ++- python/test_client.py | 3 ++ test/scenarios/auth/byok-anthropic/go/go.mod | 1 + test/scenarios/auth/byok-azure/go/go.mod | 1 + test/scenarios/auth/byok-ollama/go/go.mod | 1 + 
test/scenarios/auth/byok-openai/go/go.mod | 1 + test/scenarios/auth/gh-app/go/go.mod | 1 + .../bundling/app-backend-to-server/go/go.mod | 1 + .../bundling/app-direct-server/go/go.mod | 1 + .../bundling/container-proxy/go/go.mod | 1 + .../bundling/fully-bundled/go/go.mod | 1 + test/scenarios/callbacks/hooks/go/go.mod | 1 + .../scenarios/callbacks/permissions/go/go.mod | 1 + test/scenarios/callbacks/user-input/go/go.mod | 1 + test/scenarios/modes/default/go/go.mod | 1 + test/scenarios/modes/minimal/go/go.mod | 1 + test/scenarios/prompts/attachments/go/go.mod | 1 + .../prompts/reasoning-effort/go/go.mod | 1 + .../prompts/system-message/go/go.mod | 1 + .../sessions/concurrent-sessions/go/go.mod | 1 + .../sessions/infinite-sessions/go/go.mod | 1 + .../sessions/session-resume/go/go.mod | 1 + test/scenarios/sessions/streaming/go/go.mod | 1 + test/scenarios/tools/custom-agents/go/go.mod | 1 + test/scenarios/tools/mcp-servers/go/go.mod | 1 + test/scenarios/tools/no-tools/go/go.mod | 1 + test/scenarios/tools/skills/go/go.mod | 1 + test/scenarios/tools/tool-filtering/go/go.mod | 1 + test/scenarios/tools/tool-overrides/go/go.mod | 16 ++++++++-- test/scenarios/tools/tool-overrides/go/go.sum | 31 +++++++++++++++++++ .../tools/virtual-filesystem/go/go.mod | 1 + test/scenarios/transport/reconnect/go/go.mod | 1 + test/scenarios/transport/stdio/go/go.mod | 1 + test/scenarios/transport/tcp/go/go.mod | 1 + 34 files changed, 81 insertions(+), 3 deletions(-) diff --git a/nodejs/test/client.test.ts b/nodejs/test/client.test.ts index d0034d87c..39f85ddf6 100644 --- a/nodejs/test/client.test.ts +++ b/nodejs/test/client.test.ts @@ -327,7 +327,9 @@ describe("CopilotClient", () => { onTestFinished(() => client.forceStop()); const session = await client.createSession({ onPermissionRequest: approveAll }); - const spy = vi.spyOn((client as any).connection!, "sendRequest"); + const spy = vi + .spyOn((client as any).connection!, "sendRequest") + .mockResolvedValueOnce({ sessionId: 
session.sessionId }); await client.resumeSession(session.sessionId, { onPermissionRequest: approveAll, tools: [ diff --git a/python/test_client.py b/python/test_client.py index 05b324228..4fd34342c 100644 --- a/python/test_client.py +++ b/python/test_client.py @@ -221,6 +221,9 @@ async def test_resume_session_sends_overrides_built_in_tool(self): async def mock_request(method, params): captured[method] = params + if method == "session.resume": + # Return a fake response to avoid needing real auth + return {"sessionId": session.session_id} return await original_request(method, params) client._client.request = mock_request diff --git a/test/scenarios/auth/byok-anthropic/go/go.mod b/test/scenarios/auth/byok-anthropic/go/go.mod index 2b6d2f8ca..d9366e575 100644 --- a/test/scenarios/auth/byok-anthropic/go/go.mod +++ b/test/scenarios/auth/byok-anthropic/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/auth/byok-azure/go/go.mod b/test/scenarios/auth/byok-azure/go/go.mod index ae0961138..b13cb4e4c 100644 --- a/test/scenarios/auth/byok-azure/go/go.mod +++ b/test/scenarios/auth/byok-azure/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/auth/byok-ollama/go/go.mod b/test/scenarios/auth/byok-ollama/go/go.mod index 4f06202ae..3bbf3d45e 100644 --- a/test/scenarios/auth/byok-ollama/go/go.mod +++ 
b/test/scenarios/auth/byok-ollama/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/auth/byok-openai/go/go.mod b/test/scenarios/auth/byok-openai/go/go.mod index e7cee801b..12c7197c4 100644 --- a/test/scenarios/auth/byok-openai/go/go.mod +++ b/test/scenarios/auth/byok-openai/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/auth/gh-app/go/go.mod b/test/scenarios/auth/gh-app/go/go.mod index f0a9c9cef..33b9e8920 100644 --- a/test/scenarios/auth/gh-app/go/go.mod +++ b/test/scenarios/auth/gh-app/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/bundling/app-backend-to-server/go/go.mod b/test/scenarios/bundling/app-backend-to-server/go/go.mod index 69de0828a..7e528f6c3 100644 --- a/test/scenarios/bundling/app-backend-to-server/go/go.mod +++ b/test/scenarios/bundling/app-backend-to-server/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + 
github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/bundling/app-direct-server/go/go.mod b/test/scenarios/bundling/app-direct-server/go/go.mod index 6b77c20db..bc9f7c549 100644 --- a/test/scenarios/bundling/app-direct-server/go/go.mod +++ b/test/scenarios/bundling/app-direct-server/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/bundling/container-proxy/go/go.mod b/test/scenarios/bundling/container-proxy/go/go.mod index f1d2acdc9..69cacf610 100644 --- a/test/scenarios/bundling/container-proxy/go/go.mod +++ b/test/scenarios/bundling/container-proxy/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/bundling/fully-bundled/go/go.mod b/test/scenarios/bundling/fully-bundled/go/go.mod index 159b1c170..d0ad95501 100644 --- a/test/scenarios/bundling/fully-bundled/go/go.mod +++ b/test/scenarios/bundling/fully-bundled/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect 
go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/callbacks/hooks/go/go.mod b/test/scenarios/callbacks/hooks/go/go.mod index 995fc7883..daa2c0291 100644 --- a/test/scenarios/callbacks/hooks/go/go.mod +++ b/test/scenarios/callbacks/hooks/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/callbacks/permissions/go/go.mod b/test/scenarios/callbacks/permissions/go/go.mod index 84fee8ccc..16d0c33f6 100644 --- a/test/scenarios/callbacks/permissions/go/go.mod +++ b/test/scenarios/callbacks/permissions/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/callbacks/user-input/go/go.mod b/test/scenarios/callbacks/user-input/go/go.mod index 923dfa0d6..2e38a9cc7 100644 --- a/test/scenarios/callbacks/user-input/go/go.mod +++ b/test/scenarios/callbacks/user-input/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/modes/default/go/go.mod b/test/scenarios/modes/default/go/go.mod index b909ea28f..2f972c45b 100644 --- 
a/test/scenarios/modes/default/go/go.mod +++ b/test/scenarios/modes/default/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/modes/minimal/go/go.mod b/test/scenarios/modes/minimal/go/go.mod index f5dc24b54..65fff4606 100644 --- a/test/scenarios/modes/minimal/go/go.mod +++ b/test/scenarios/modes/minimal/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/prompts/attachments/go/go.mod b/test/scenarios/prompts/attachments/go/go.mod index d0347fb34..9f782edc2 100644 --- a/test/scenarios/prompts/attachments/go/go.mod +++ b/test/scenarios/prompts/attachments/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/prompts/reasoning-effort/go/go.mod b/test/scenarios/prompts/reasoning-effort/go/go.mod index c1ff1cc9d..dc5331a29 100644 --- a/test/scenarios/prompts/reasoning-effort/go/go.mod +++ b/test/scenarios/prompts/reasoning-effort/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect 
github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/prompts/system-message/go/go.mod b/test/scenarios/prompts/system-message/go/go.mod index 30363d2f2..4e8d495c4 100644 --- a/test/scenarios/prompts/system-message/go/go.mod +++ b/test/scenarios/prompts/system-message/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/sessions/concurrent-sessions/go/go.mod b/test/scenarios/sessions/concurrent-sessions/go/go.mod index 184552b26..f6c9eb7b1 100644 --- a/test/scenarios/sessions/concurrent-sessions/go/go.mod +++ b/test/scenarios/sessions/concurrent-sessions/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/sessions/infinite-sessions/go/go.mod b/test/scenarios/sessions/infinite-sessions/go/go.mod index 9c488fcc7..e59f8b08c 100644 --- a/test/scenarios/sessions/infinite-sessions/go/go.mod +++ b/test/scenarios/sessions/infinite-sessions/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // 
indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/sessions/session-resume/go/go.mod b/test/scenarios/sessions/session-resume/go/go.mod index dfafc90e9..be193691d 100644 --- a/test/scenarios/sessions/session-resume/go/go.mod +++ b/test/scenarios/sessions/session-resume/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/sessions/streaming/go/go.mod b/test/scenarios/sessions/streaming/go/go.mod index cd639294a..1c9b797ac 100644 --- a/test/scenarios/sessions/streaming/go/go.mod +++ b/test/scenarios/sessions/streaming/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/tools/custom-agents/go/go.mod b/test/scenarios/tools/custom-agents/go/go.mod index a1672c1bb..36dd6676d 100644 --- a/test/scenarios/tools/custom-agents/go/go.mod +++ b/test/scenarios/tools/custom-agents/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/tools/mcp-servers/go/go.mod 
b/test/scenarios/tools/mcp-servers/go/go.mod index 8cbf69fd0..0e6f0f3be 100644 --- a/test/scenarios/tools/mcp-servers/go/go.mod +++ b/test/scenarios/tools/mcp-servers/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/tools/no-tools/go/go.mod b/test/scenarios/tools/no-tools/go/go.mod index c9d1363fe..0dfb92f3e 100644 --- a/test/scenarios/tools/no-tools/go/go.mod +++ b/test/scenarios/tools/no-tools/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/tools/skills/go/go.mod b/test/scenarios/tools/skills/go/go.mod index 287f749f5..e4b864777 100644 --- a/test/scenarios/tools/skills/go/go.mod +++ b/test/scenarios/tools/skills/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/tools/tool-filtering/go/go.mod b/test/scenarios/tools/tool-filtering/go/go.mod index 432ebfb71..fe59efb5a 100644 --- a/test/scenarios/tools/tool-filtering/go/go.mod +++ b/test/scenarios/tools/tool-filtering/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect 
github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/tools/tool-overrides/go/go.mod b/test/scenarios/tools/tool-overrides/go/go.mod index 353066761..02c25eef9 100644 --- a/test/scenarios/tools/tool-overrides/go/go.mod +++ b/test/scenarios/tools/tool-overrides/go/go.mod @@ -1,9 +1,21 @@ module github.com/github/copilot-sdk/samples/tools/tool-overrides/go -go 1.24 +go 1.24.0 + +toolchain go1.24.4 require github.com/github/copilot-sdk/go v0.0.0 -require github.com/google/jsonschema-go v0.4.2 // indirect +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect +) replace github.com/github/copilot-sdk/go => ../../../../../go diff --git a/test/scenarios/tools/tool-overrides/go/go.sum b/test/scenarios/tools/tool-overrides/go/go.sum index 6e171099c..7d0052ed7 100644 --- a/test/scenarios/tools/tool-overrides/go/go.sum +++ b/test/scenarios/tools/tool-overrides/go/go.sum @@ -1,4 +1,35 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8= github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 
h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/scenarios/tools/virtual-filesystem/go/go.mod b/test/scenarios/tools/virtual-filesystem/go/go.mod index 9d75447ae..309c6d579 100644 --- a/test/scenarios/tools/virtual-filesystem/go/go.mod +++ b/test/scenarios/tools/virtual-filesystem/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/transport/reconnect/go/go.mod b/test/scenarios/transport/reconnect/go/go.mod index 9cd49b4c5..d24eb820d 100644 --- a/test/scenarios/transport/reconnect/go/go.mod +++ b/test/scenarios/transport/reconnect/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/transport/stdio/go/go.mod b/test/scenarios/transport/stdio/go/go.mod index 9f9412e8e..8f5d5a17e 100644 --- 
a/test/scenarios/transport/stdio/go/go.mod +++ b/test/scenarios/transport/stdio/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect diff --git a/test/scenarios/transport/tcp/go/go.mod b/test/scenarios/transport/tcp/go/go.mod index 69a7e9ae4..b9af30b67 100644 --- a/test/scenarios/transport/tcp/go/go.mod +++ b/test/scenarios/transport/tcp/go/go.mod @@ -11,6 +11,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/jsonschema-go v0.4.2 // indirect + github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect