diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c10c3496..60bb1c95 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -69,6 +69,8 @@ jobs: test: "*.TestThreads" - name: Vector Stores test: "*.TestVectorStores" + - name: Responses + test: "*.TestResponses" - name: Misc. test: "*.misc.*" steps: diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/OpenAI.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/OpenAI.kt index ff7b7560..05a92f59 100644 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/OpenAI.kt +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/OpenAI.kt @@ -11,7 +11,7 @@ import kotlin.time.Duration.Companion.seconds * OpenAI API. */ public interface OpenAI : Completions, Files, Edits, Embeddings, Models, Moderations, FineTunes, Images, Chat, Audio, - FineTuning, Assistants, Threads, Runs, Messages, VectorStores, Batch, AutoCloseable + FineTuning, Assistants, Threads, Runs, Messages, VectorStores, Batch, Responses, AutoCloseable /** * Creates an instance of [OpenAI]. diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/Responses.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/Responses.kt new file mode 100644 index 00000000..32967a17 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/Responses.kt @@ -0,0 +1,75 @@ +package com.aallam.openai.client + +import com.aallam.openai.api.core.RequestOptions +import com.aallam.openai.api.responses.Response +import com.aallam.openai.api.responses.ResponseIncludable +import com.aallam.openai.api.responses.ResponseItem +import com.aallam.openai.api.responses.ResponseRequest + +/** Interface for OpenAI's Responses API */ +public interface Responses { + /** + * Create a new response. 
+ * + * @param request The request for creating a response + * @param requestOptions Optional request configuration + * @return The created response + */ + public suspend fun createResponse( + request: ResponseRequest, + requestOptions: RequestOptions? = null + ): Response + + /** + * Retrieves a model response with the given ID. + * + * @param responseId The ID of the response to retrieve + * @param include Additional fields to include in the response. + * @param requestOptions Optional request configuration + */ + public suspend fun getResponse( + responseId: String, + include: List? = null, + requestOptions: RequestOptions? = null): Response + + /** + * Deletes a model response with the given ID. + * + * @param responseId The ID of the response to delete + * @param requestOptions Optional request configuration + */ + public suspend fun deleteResponse( + responseId: String, + requestOptions: RequestOptions? = null): Boolean + + /** + * Cancels a model response with the given ID. Only responses created with the background parameter set to true can be cancelled. + * + * @param responseId The ID of the response to cancel + */ + public suspend fun cancelResponse( + responseId: String, + requestOptions: RequestOptions? = null): Response + + /** + * Returns a list of input items for a given response. + * + * @param responseId The ID of the response + * @param after An item ID to list items after, used in pagination. + * @param before An item ID to list items before, used in pagination. + * @param include Additional fields to include in the response. + * @param limit A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. + * @param order The order to return the input items in. Can be either "asc" or "desc". Default is "desc". + * @param requestOptions Optional request configuration + */ + public suspend fun listInputItems( + responseId: String, + after: String? = null, + before: String? = null, + include: List? 
= null, + limit: Int? = null, + order: String? = null, + requestOptions: RequestOptions? = null): List + + //TODO Streaming +} diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/OpenAIApi.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/OpenAIApi.kt index 4612c433..e86dba01 100644 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/OpenAIApi.kt +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/OpenAIApi.kt @@ -29,4 +29,5 @@ internal class OpenAIApi( Messages by MessagesApi(requester), VectorStores by VectorStoresApi(requester), Batch by BatchApi(requester), + Responses by ResponsesApi(requester), AutoCloseable by requester diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/ApiPath.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/ApiPath.kt index 72fd0e62..46334f40 100644 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/ApiPath.kt +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/ApiPath.kt @@ -23,4 +23,5 @@ internal object ApiPath { const val Threads = "threads" const val VectorStores = "vector_stores" const val Batches = "batches" + const val Responses = "responses" } diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/ResponsesApi.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/ResponsesApi.kt new file mode 100644 index 00000000..cc2ad703 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/ResponsesApi.kt @@ -0,0 +1,94 @@ +package com.aallam.openai.client.internal.api + +import com.aallam.openai.api.core.DeleteResponse +import com.aallam.openai.api.core.ListResponse +import com.aallam.openai.api.core.RequestOptions +import com.aallam.openai.api.responses.Response +import com.aallam.openai.api.responses.ResponseIncludable 
+import com.aallam.openai.api.responses.ResponseItem +import com.aallam.openai.api.responses.ResponseRequest +import com.aallam.openai.client.Responses +import com.aallam.openai.client.internal.extension.requestOptions +import com.aallam.openai.client.internal.http.HttpRequester +import com.aallam.openai.client.internal.http.perform + +import io.ktor.client.* +import io.ktor.client.call.* +import io.ktor.client.request.* +import io.ktor.client.statement.* +import io.ktor.http.* + +internal class ResponsesApi(private val requester: HttpRequester) : Responses { + override suspend fun createResponse(request: ResponseRequest, requestOptions: RequestOptions?): Response { + return requester.perform { client: HttpClient -> + client.post { + url(path = ApiPath.Responses) + setBody(request.copy(stream = false)) + contentType(ContentType.Application.Json) + requestOptions(requestOptions) + }.body() + } + } + + override suspend fun getResponse( + responseId: String, + include: List?, + requestOptions: RequestOptions? 
+ ): Response { + return requester.perform { client: HttpClient -> + client.get { + url(path = "${ApiPath.Responses}/$responseId") + parameter("include", include) + requestOptions(requestOptions) + }.body() + } + } + + override suspend fun deleteResponse(responseId: String, requestOptions: RequestOptions?): Boolean { + val response = requester.perform { + it.delete { + url(path = "${ApiPath.Responses}/$responseId") + requestOptions(requestOptions) + } + } + + return when (response.status) { + HttpStatusCode.NotFound -> false + else -> response.body().deleted + } + } + + override suspend fun cancelResponse(responseId: String, requestOptions: RequestOptions?): Response { + return requester.perform { + it.post { + url(path = "${ApiPath.Responses}/$responseId/cancel") + requestOptions(requestOptions) + } + }.body() + } + + override suspend fun listInputItems( + responseId: String, + after: String?, + before: String?, + include: List?, + limit: Int?, + order: String?, + requestOptions: RequestOptions? 
+ ): List { + return requester.perform> { + it.get { + url(path = "${ApiPath.Responses}/$responseId/items") + parameter("after", after) + parameter("before", before) + parameter("include", include) + parameter("limit", limit) + parameter("order", order) + requestOptions(requestOptions) + } + }.data + } + + //TODO Add streaming + +} diff --git a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestResponses.kt b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestResponses.kt new file mode 100644 index 00000000..452730ea --- /dev/null +++ b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestResponses.kt @@ -0,0 +1,81 @@ +package com.aallam.openai.client + +import com.aallam.openai.api.core.Parameters.Companion.buildJsonObject +import com.aallam.openai.api.model.ModelId +import com.aallam.openai.api.responses.* +import kotlinx.serialization.json.add +import kotlinx.serialization.json.put +import kotlinx.serialization.json.putJsonArray +import kotlinx.serialization.json.putJsonObject +import kotlin.test.Test +import kotlin.test.assertNotNull + +class TestResponses : TestOpenAI() { + + @Test + fun basicResponse() = test { + val response = openAI.createResponse( + request = responseRequest { + model = ModelId("gpt-4o") + input = ResponseInput.from("What is the capital of France?") + } + ) + + assertNotNull(response) + assertNotNull(response.output) + } + + @Test + fun responseWithTools() = test { + val response = openAI.createResponse( + request = responseRequest { + model = ModelId("gpt-4o") + input = ResponseInput.from("What's the weather like in Paris?") + tools { + add( + ResponseTool.Function( + name = "get_weather", + description = "Get the current weather", + parameters = buildJsonObject { + put("type", "object") + putJsonObject("properties") { + putJsonObject("location") { + put("type", "string") + put("description", "The city and state, e.g. 
San Francisco, CA") + } + putJsonObject("unit") { + put("type", "string") + putJsonArray("enum") { + add("celsius") + add("fahrenheit") + } + } + } + putJsonArray("required") { + add("location") + } + }) + ) + } + }) + + + assertNotNull(response) + assertNotNull(response.output) + } + + @Test + fun responseWithInstructions() = test { + val response = openAI.createResponse( + request = responseRequest { + model = ModelId("gpt-4o") + input = ResponseInput.from("Tell me about artificial intelligence") + instructions = "Provide a concise answer focusing on recent developments" + maxOutputTokens = 200 + } + ) + + assertNotNull(response) + assertNotNull(response.output) + } +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/core/Role.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/core/Role.kt index 74640d6e..470d7896 100644 --- a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/core/Role.kt +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/core/Role.kt @@ -11,6 +11,7 @@ import kotlin.jvm.JvmInline public value class Role(public val role: String) { public companion object { public val System: Role = Role("system") + public val Developer: Role = Role("developer") public val User: Role = Role("user") public val Assistant: Role = Role("assistant") public val Function: Role = Role("function") diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/CodeInterpreterTool.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/CodeInterpreterTool.kt new file mode 100644 index 00000000..d5553877 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/CodeInterpreterTool.kt @@ -0,0 +1,111 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.DeserializationStrategy +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable +import kotlinx.serialization.SerializationException +import 
kotlinx.serialization.json.JsonContentPolymorphicSerializer +import kotlinx.serialization.json.JsonElement +import kotlinx.serialization.json.JsonObject +import kotlinx.serialization.json.JsonPrimitive +import kotlin.jvm.JvmInline + +@Serializable(with = CodeInterpreterContainerSerializer::class) +public sealed interface CodeInterpreterContainer + + +internal class CodeInterpreterContainerSerializer : + JsonContentPolymorphicSerializer(CodeInterpreterContainer::class) { + override fun selectDeserializer(element: JsonElement): DeserializationStrategy { + return when (element) { + is JsonPrimitive -> CodeInterpreterContainerId.serializer() + is JsonObject -> CodeInterpreterContainerAuto.serializer() + else -> throw SerializationException("Unsupported JSON element: $element") + } + } +} + +@Serializable +@JvmInline +public value class CodeInterpreterContainerId( + public val value: String +) : CodeInterpreterContainer + +@Serializable +@SerialName("auto") +public data class CodeInterpreterContainerAuto( + /** + * An optional list of uploaded files to make available to your code. + */ + @SerialName("file_ids") val fileIds: List = emptyList() +) : CodeInterpreterContainer + +/** + * A tool call to run code. + */ +@Serializable +@SerialName("code_interpreter_call") +public data class CodeInterpreterToolCall( + + /** + * The code to run, or null if not available. + */ + @SerialName("code") + val code: String? = null, + + /** + * The ID of the container used to run the code. + */ + @SerialName("container_id") + val containerId: String, + + /** + * The unique ID of the code interpreter tool call. + */ + @SerialName("id") + override val id: String, + + /** + * The outputs generated by the code interpreter, such as logs or images. Can be null if no outputs are available. + */ + @SerialName("outputs") + val outputs: List? = null, + + /** + * The status of the function tool call. 
+ */ + @SerialName("status") + val status: ResponseStatus + +) : ResponseOutput + +@Serializable +public sealed interface CodeInterpreterOutput + +/** + * The logs output from the code interpreter. + */ +@Serializable +@SerialName("logs") +public data class CodeInterpreterOutputLogs( + + /** + * The logs output from the code interpreter. + */ + @SerialName("logs") + val logs: List + +) : CodeInterpreterOutput + +/** + * The image output from the code interpreter. + */ +@Serializable +@SerialName("image") +public data class CodeInterpreterOutputImage( + /** + * The URL of the image output from the code interpreter. + */ + @SerialName("url") + val url: String, +) : CodeInterpreterOutput diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ComputerUseTool.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ComputerUseTool.kt new file mode 100644 index 00000000..1519267e --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ComputerUseTool.kt @@ -0,0 +1,303 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +/** + * Computer action for the computer use tool + */ +@Serializable +public sealed interface ComputerAction { + + /** + * A click action + */ + @Serializable + @SerialName("click") + public data class Click( + + /** + * The mouse button used for the click. 
+ * One of "left", "right", "wheel", "back", or "forward" + */ + @SerialName("button") + val button: String, + + /** + * The x-coordinate where the click occurred + */ + @SerialName("x") + val x: Int, + + /** + * The y-coordinate where the click occurred + */ + @SerialName("y") + val y: Int + ) : ComputerAction + + /** + * A double click action + */ + @Serializable + @SerialName("double_click") + public data class DoubleClick( + + /** + * The x-coordinate where the double click occurred + */ + @SerialName("x") + val x: Int, + + /** + * The y-coordinate where the double click occurred + */ + @SerialName("y") + val y: Int + ) : ComputerAction + + /** + * A drag action + */ + @Serializable + @SerialName("drag") + public data class Drag( + + /** + * An array of coordinates representing the path of the drag action + */ + @SerialName("path") + val path: List + ) : ComputerAction + + /** + * A keypress action + */ + @Serializable + @SerialName("keypress") + public data class KeyPress( + + /** + * The combination of keys to press + */ + @SerialName("keys") + val keys: List + ) : ComputerAction + + /** + * A move action + */ + @Serializable + @SerialName("move") + public data class Move( + + /** + * The x-coordinate to move to + */ + @SerialName("x") + val x: Int, + + /** + * The y-coordinate to move to + */ + @SerialName("y") + val y: Int + ) : ComputerAction + + /** + * A screenshot action + */ + @Serializable + @SerialName("screenshot") + public data object Screenshot : ComputerAction + + /** + * A scroll action + */ + @Serializable + @SerialName("scroll") + public data class Scroll( + + /** + * The x-coordinate where the scroll occurred + */ + @SerialName("x") + val x: Int, + + /** + * The y-coordinate where the scroll occurred + */ + @SerialName("y") + val y: Int, + + /** + * The horizontal scroll distance + */ + @SerialName("scroll_x") + val scrollX: Int, + + /** + * The vertical scroll distance + */ + @SerialName("scroll_y") + val scrollY: Int + ) : ComputerAction + 
+ /** + * A typing action + */ + @Serializable + @SerialName("type") + public data class Type( + + /** + * The text to type + */ + @SerialName("text") + val text: String + ) : ComputerAction + + /** + * A wait action + */ + @Serializable + @SerialName("wait") + public data object Wait : ComputerAction + +} + +/** + * A coordinate pair (x, y) + */ +@Serializable +public data class Coordinate( + /** + * The x-coordinate + */ + @SerialName("x") + val x: Int, + + /** + * The y-coordinate + */ + @SerialName("y") + val y: Int +) + +/** + * Computer tool call in a response + */ +@Serializable +@SerialName("computer_call") +public data class ComputerToolCall( + /** + * The unique ID of the computer tool call. + */ + @SerialName("id") + override val id: String, + + /** + * The status of the computer tool call. + */ + @SerialName("status") + val status: ResponseStatus, + + /** + * An identifier used when responding to the tool call with output. + */ + @SerialName("call_id") + val callId: String, + + /** + * The action to be performed + */ + @SerialName("action") + val action: ComputerAction, + + /** + * The pending safety checks for the computer call. + */ + @SerialName("pending_safety_checks") + val pendingSafetyChecks: List = emptyList() +) : ResponseOutput + +/** + * A safety check for a computer call + */ +@Serializable +public data class SafetyCheck( + /** + * The ID of the safety check + */ + @SerialName("id") + val id: String, + + /** + * The type code of the safety check + */ + @SerialName("code") + val code: String, + + /** + * The message about the safety check + */ + @SerialName("message") + val message: String +) + +/** + * The output of a computer tool call. + */ +@Serializable +@SerialName("computer_call_output") +public data class ComputerToolCallOutput( + /** + * The unique ID of the computer tool call output. + */ + @SerialName("id") + val id: String? = null, + + /** + * The ID of the computer tool call that produced the output. 
+ */ + @SerialName("call_id") + val callId: String, + + /** + * A computer screenshot image used with the computer use tool. + */ + @SerialName("output") + val output: ComputerScreenshot, + + /** + * The safety checks reported by the API that have been acknowledged by the developer. + */ + @SerialName("acknowledged_safety_checks") + val acknowledgedSafetyChecks: List = emptyList(), + + /** + * The status of the item. One of in_progress, completed, or incomplete. Populated when items are returned via API. + */ + @SerialName("status") + val status: ResponseStatus? = null +) : ResponseItem + +/** + * A computer screenshot image used with the computer use tool. + */ +@Serializable +@SerialName("computer_screenshot") +public data class ComputerScreenshot( + + /** + * The identifier of an uploaded file that contains the screenshot. + */ + @SerialName("file_id") + val fileId: String? = null, + + /** + * The URL of the screenshot image. + */ + @SerialName("image_url") + val imageUrl: String? = null +) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/CustomTool.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/CustomTool.kt new file mode 100644 index 00000000..a8137296 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/CustomTool.kt @@ -0,0 +1,109 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable +import kotlin.jvm.JvmInline + +/** + * Represents the format of input for a custom tool. + */ +@Serializable +public sealed interface CustomToolFormat + +/** + * Unconstrained free-form text. + */ +@Serializable +@SerialName("text") +public object CustomToolTextFormat : CustomToolFormat + +/** + * A grammar defined by the user. 
+ */ +@Serializable +@SerialName("grammar") +public data class CustomToolGrammarFormat( + /** The grammar definition.*/ + @SerialName("definition") + val definition: String, + + /** The syntax of the grammar definition. One of `lark` or `regex`. */ + @SerialName("syntax") + val syntax: Syntax +) : CustomToolFormat { + + /** + * Represents the syntax of a grammar definition. + */ + @JvmInline + @Serializable + public value class Syntax(public val value: String) { + public companion object { + /** Lark syntax for defining grammars. */ + public val Lark: Syntax = Syntax("lark") + + /** Regular expression syntax for defining grammars. */ + public val Regex: Syntax = Syntax("regex") + } + } +} + +/** + * A call to a custom tool created by the model. + */ +@Serializable +@SerialName("custom_tool_call") +public data class CustomToolCall( + + /** + * The unique ID of the custom tool call generated by the model. + */ + @SerialName("call_id") + val callId: String, + + /** + * The input for the custom tool call generated by the model. + */ + @SerialName("input") + val input: String, + + /** + * The name of the custom tool being called. + */ + @SerialName("name") + val name: String, + + /** + * The unique ID of the custom tool call in the OpenAI platform. + */ + @SerialName("id") + override val id: String + +) : ResponseOutput + + +/** + * The output of a custom tool call from your code, being sent back to the model. + */ +@Serializable +@SerialName("custom_tool_call_output") +public data class CustomToolCallOutput( + /** + * The unique ID of the custom tool call generated by the model. + */ + @SerialName("call_id") + val callId: String, + + /** + * The output from the custom tool call generated by your code. + */ + @SerialName("output") + val output: String, + + /** + * The unique ID of the custom tool call in the OpenAI platform. + */ + @SerialName("id") + val id: String? 
= null + +) : ResponseItem diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/FileSearchTool.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/FileSearchTool.kt new file mode 100644 index 00000000..dfefacea --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/FileSearchTool.kt @@ -0,0 +1,174 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.KSerializer +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable +import kotlinx.serialization.descriptors.buildClassSerialDescriptor +import kotlinx.serialization.encoding.Decoder +import kotlinx.serialization.encoding.Encoder +import kotlinx.serialization.json.JsonDecoder +import kotlinx.serialization.json.JsonEncoder +import kotlinx.serialization.json.jsonObject +import kotlinx.serialization.json.jsonPrimitive + +@Serializable(with = FileSearchFilterSerializer::class) +public sealed interface FileSearchFilter + +/** + * A filter used to compare a specified attribute key to a given value using a defined comparison operation. + */ +@Serializable +public data class ComparisonFilter( + + /** + * Specifies the comparison operator: eq, ne, gt, gte, lt, lte. + */ + @SerialName("type") + public val type: String, + + /** + * The key to compare against the value. + */ + @SerialName("key") + public val key: String, + + /** + * The value to compare the attribute key to. + */ + @SerialName("value") + public val value: String + +) : FileSearchFilter + +/** + * Combine multiple filters using 'and' or 'or'. + */ +@Serializable +public data class CompoundFilter( + /** + * The logical operator to use: 'and' or 'or'. + */ + @SerialName("type") + public val type: String, + + /** + * Array of filters to combine. Items can be ComparisonFilter or CompoundFilter. + */ + @SerialName("filters") + public val filters: List + +) : FileSearchFilter + +/** + * Ranking options for search. 
+ */ +@Serializable +public data class FileSearchRankingOptions( + /** + * The ranker to use for the file search. + * Defaults to "auto" + */ + @SerialName("ranker") + val ranker: String? = null, + + /** + * The score threshold for the file search, a number between 0 and 1. + * Numbers closer to 1 will attempt to return only the most relevant results, but may return fewer results. + * Defaults to 0 + */ + @SerialName("score_threshold") + val scoreThreshold: Int? = null, +) + +internal class FileSearchFilterSerializer : KSerializer { + + override val descriptor = buildClassSerialDescriptor("FileSearchFilter") + + override fun serialize(encoder: Encoder, value: FileSearchFilter) { + val jsonEncoder = encoder as? JsonEncoder + ?: throw IllegalArgumentException("This serializer can only be used with JSON") + + when (value) { + is ComparisonFilter -> ComparisonFilter.serializer().serialize(jsonEncoder, value) + is CompoundFilter -> CompoundFilter.serializer().serialize(jsonEncoder, value) + } + } + + override fun deserialize(decoder: Decoder): FileSearchFilter { + val jsonDecoder = decoder as? 
JsonDecoder + ?: throw IllegalArgumentException("This serializer can only be used with JSON") + + return when (val type = jsonDecoder.decodeJsonElement().jsonObject["type"]?.jsonPrimitive?.content) { + "and" -> CompoundFilter.serializer().deserialize(jsonDecoder) + "or" -> CompoundFilter.serializer().deserialize(jsonDecoder) + "eq" -> ComparisonFilter.serializer().deserialize(jsonDecoder) + "ne" -> ComparisonFilter.serializer().deserialize(jsonDecoder) + "gt" -> ComparisonFilter.serializer().deserialize(jsonDecoder) + "gte" -> ComparisonFilter.serializer().deserialize(jsonDecoder) + "lt" -> ComparisonFilter.serializer().deserialize(jsonDecoder) + "lte" -> ComparisonFilter.serializer().deserialize(jsonDecoder) + else -> throw IllegalArgumentException("Unknown filter type: $type") + } + } +} + +/** + * File search tool call in a response + */ +@Serializable +@SerialName("file_search_call") +public data class FileSearchToolCall( + /** + * The unique ID of the file search tool call. + */ + @SerialName("id") + override val id: String, + + /** + * The status of the file search tool call. + */ + @SerialName("status") + val status: ResponseStatus, + + /** + * The queries used to search for files. + */ + @SerialName("queries") + val queries: List, + + /** + * The results of the file search tool call. + */ + @SerialName("results") + val results: List? 
= null +) : ResponseOutput + +/** + * Result of a file search + */ +@Serializable +public data class FileSearchResult( + /** + * The ID of the file + */ + @SerialName("file_id") + val fileId: String, + + /** + * The text content from the file + */ + @SerialName("text") + val text: String, + + /** + * The filename + */ + @SerialName("filename") + val filename: String, + + /** + * The score or relevance rating + */ + @SerialName("score") + val score: Double +) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/FunctionTool.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/FunctionTool.kt new file mode 100644 index 00000000..14dc78a5 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/FunctionTool.kt @@ -0,0 +1,86 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable +import kotlinx.serialization.json.Json +import kotlinx.serialization.json.JsonObject + +/** + * Function tool call in a response + */ +@Serializable +@SerialName("function_call") +public data class FunctionToolCall( + /** + * The unique ID of the function tool call. + */ + @SerialName("id") + override val id: String, + + /** + * The status of the function tool call. + */ + @SerialName("status") + val status: ResponseStatus, + + /** + * The unique ID of the function tool call generated by the model. + */ + @SerialName("call_id") + val callId: String, + + /** + * The name of the function to run. + */ + @SerialName("name") + val name: String, + + /** + * A JSON string of the arguments to pass to the function. + */ + @SerialName("arguments") + val arguments: String, +) : ResponseOutput { + + /** + * Decodes the [arguments] JSON string into a JsonObject. + * If [arguments] is null, the function will return null. 
+ * + * @param json The Json object to be used for decoding, defaults to a default Json instance + */ + public fun argumentsAsJson(json: Json = Json): JsonObject = json.decodeFromString(arguments) + +} + +/** + * The output of a function tool call. + * + */ +@Serializable +@SerialName("function_call_output") +public data class FunctionToolCallOutput( + + /** + * The unique ID of the function tool call output. Populated when this item is returned via API. + */ + @SerialName("id") + val id: String? = null, + + /** + * The unique ID of the function tool call generated by the model. + */ + @SerialName("call_id") + val callId: String, + + /** + * A JSON string of the output of the function tool call. + */ + @SerialName("output") + val output: String, + + /** + * The status of the item. One of in_progress, completed, or incomplete. Populated when items are returned via API. + */ + @SerialName("status") + val status: ResponseStatus? = null +) : ResponseItem diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/LocalShellTool.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/LocalShellTool.kt new file mode 100644 index 00000000..cf40d18a --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/LocalShellTool.kt @@ -0,0 +1,103 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +/** + * A tool call to run a command on the local shell. + */ +@Serializable +@SerialName("local_shell_call") +public data class LocalShellToolCall( + + /** + * Execute a shell command on the server. + */ + @SerialName("action") + val action: LocalShellToolAction, + + /** + * The unique ID of the local shell tool call generated by the model. + */ + @SerialName("call_id") + val callId: String, + + /** + * The unique ID of the local shell call. + */ + @SerialName("id") + override val id: String, + + /** + * The status of the local shell call. 
+ */ + @SerialName("status") + val status: ResponseStatus + +) : ResponseOutput + +/** + * The action to be performed by the local shell tool. + */ +@Serializable +@SerialName("exec") +public data class LocalShellToolAction( + + /** + * The command to run. + */ + @SerialName("command") + val command: String, + + /** + * Environment variables to set for the command. + */ + @SerialName("env") + val env: Map = emptyMap(), + + /** + * Optional timeout in milliseconds for the command. + */ + @SerialName("timeout_ms") + val timeoutMs: Int? = null, + + /** + * Optional user to run the command as. + */ + @SerialName("user") + val user: String? = null, + + /** + * The working directory to run the command in. + */ + @SerialName("working_directory") + val workingDirectory: String? = null + +) + +/** + * The output of a local shell tool call. + */ +@Serializable +@SerialName("local_shell_call_output") +public data class LocalShellToolCallOutput( + + /** + * The unique ID of the local shell tool call generated by the model. + */ + @SerialName("id") + val id: String, + + /** + * A JSON string of the output of the local shell tool call. + */ + @SerialName("output") + val output: String, + + /** + * The status of the local shell call. 
+ */ + @SerialName("status") + val status: ResponseStatus + +) : ResponseItem diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/MCPTool.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/MCPTool.kt new file mode 100644 index 00000000..50eb542a --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/MCPTool.kt @@ -0,0 +1,254 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.DeserializationStrategy +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable +import kotlinx.serialization.SerializationException +import kotlinx.serialization.json.JsonArray +import kotlinx.serialization.json.JsonContentPolymorphicSerializer +import kotlinx.serialization.json.JsonElement +import kotlinx.serialization.json.JsonObject +import kotlinx.serialization.json.JsonPrimitive +import kotlin.jvm.JvmInline + +@Serializable(with = MCPAllowedToolsSerializer::class) +public sealed interface MCPAllowedTools + +@Serializable +@JvmInline +public value class MCPAllowedToolsList( + public val value: List<String> +) : MCPAllowedTools + +internal class MCPAllowedToolsSerializer : JsonContentPolymorphicSerializer<MCPAllowedTools>(MCPAllowedTools::class) { + override fun selectDeserializer(element: JsonElement): DeserializationStrategy<MCPAllowedTools> { + return when (element) { + is JsonArray -> MCPAllowedToolsList.serializer() + is JsonObject -> MCPToolFilter.serializer() + else -> throw SerializationException("Unsupported JSON element: $element") + } + } +} + + +@Serializable(with = MCPRequireApprovalSerializer::class) +public sealed interface MCPRequireApproval + +internal class MCPRequireApprovalSerializer : + JsonContentPolymorphicSerializer<MCPRequireApproval>(MCPRequireApproval::class) { + override fun selectDeserializer(element: JsonElement): DeserializationStrategy<MCPRequireApproval> { + return when (element) { + is JsonPrimitive -> MCPToolApprovalSetting.serializer() + is JsonObject -> MCPToolApprovalFilter.serializer() + else -> throw SerializationException("Unsupported JSON 
element: $element") + } + } +} + +/** + * Specify a single approval policy for all tools. + * One of always or never. + * When set to always, all tools will require approval. + * When set to never, all tools will not require approval. + * + * @see [ResponseTool.MCP.requireApproval] + */ +@Serializable +@JvmInline +public value class MCPToolApprovalSetting( + public val value: String, +) : MCPRequireApproval { + public companion object { + public val Always: MCPToolApprovalSetting = MCPToolApprovalSetting("always") + public val Never: MCPToolApprovalSetting = MCPToolApprovalSetting("never") + } +} + +@Serializable +public data class MCPToolApprovalFilter( + + /** + * A list of tools that always require approval. + */ + @SerialName("always") val always: MCPToolFilter? = null, + + /** + * A list of tools that never require approval. + */ + @SerialName("never") val never: MCPToolFilter? = null + +) : MCPRequireApproval + + +/** + * A filter object to specify which tools are allowed. + */ +@Serializable +public data class MCPToolFilter( + /** + * List of allowed tool names. + */ + @SerialName("tool_names") val toolNames: List? = null + +) : MCPAllowedTools + +/** + * A list of tools available on an MCP server. + */ +@Serializable +@SerialName("mcp_list_tools") +public data class MCPListTools( + + /** + * The unique ID of the list. + */ + @SerialName("id") + override val id: String, + + /** + * The label of the MCP server. + */ + @SerialName("server_label") + val serverLabel: String, + + /** + * The list of tools available on the MCP server. + */ + @SerialName("tools") + val tools: List, + + /** + * Error message if the server could not list tools. + */ + @SerialName("error") + val error: String? = null +) : ResponseOutput + +/** + * A tool available on an MCP server. + */ +@Serializable +public data class MCPAvailableTool( + /** + * The name of the tool. + */ + @SerialName("name") val name: String, + + /** + * The description of the tool. 
+ */ + @SerialName("description") val description: String? = null, + + /** + * The parameters required by the tool. + */ + @SerialName("parameters") val parameters: JsonObject? = null +) + +/** + * An invocation of a tool on an MCP server. + */ +@Serializable +@SerialName("mcp_call") +public data class MCPToolCall( + + /** + * A JSON string of the arguments passed to the tool. + */ + @SerialName("arguments") + val arguments: String, + + /** + * The unique ID of the tool call. + */ + @SerialName("id") + override val id: String, + + /** + * The name of the tool that was run. + */ + @SerialName("name") + val name: String, + + /** + * The label of the MCP server. + */ + @SerialName("server_label") + val serverLabel: String, + + /** + * Error message if the server could not list tools. + */ + @SerialName("error") + val error: String? = null, + + /** + * The output from the tool call. + */ + @SerialName("output") + val output: String? = null +) : ResponseOutput + +/** + * A request for human approval of a tool invocation. + */ +@Serializable +@SerialName("mcp_approval_request") +public data class MCPToolApprovalRequest( + + /** + * A JSON string of the arguments for the tool. + */ + @SerialName("arguments") + val arguments: String, + + /** + * The unique ID of the approval request. + */ + @SerialName("id") + override val id: String, + + /** + * The name of the tool to run. + */ + @SerialName("name") + val name: String, + + /** + * The label of the MCP server. + */ + @SerialName("server_label") + val serverLabel: String +) : ResponseOutput + +/** + * A response to an MCP approval request. + */ +@Serializable +@SerialName("mcp_approval_response") +public data class MCPToolApprovalResponse( + + /** + * The ID of the approval request being answered. + */ + @SerialName("approval_request_id") + val approvalRequestId: String, + + /** + * Whether the request was approved. + */ + @SerialName("approve") + val approve: Boolean, + + /** + * The unique ID of the approval response. 
+ */ + @SerialName("id") + val id: String? = null, + + /** + * Optional reason for the decision. + */ + @SerialName("reason") + val reason: String? = null +) : ResponseItem diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/PromptTemplate.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/PromptTemplate.kt new file mode 100644 index 00000000..ffb9c43b --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/PromptTemplate.kt @@ -0,0 +1,22 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.Serializable + +/** + * Reference to a prompt template and its variables. + */ +@Serializable +public data class PromptTemplate( + + /** The unique identifier of the prompt template to use. */ + val id: String, + + /** + * Optional map of values to substitute in for variables in your prompt. + * The substitution values can either be strings, or other Response input types like images or files. + */ + val variables: Map? = null, + + /** Optional version of the prompt template. */ + val version: String? = null +) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ReasoningConfig.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ReasoningConfig.kt new file mode 100644 index 00000000..6bbc97f1 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ReasoningConfig.kt @@ -0,0 +1,79 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable +import kotlin.jvm.JvmInline + +/** + * Configuration options for reasoning models + */ +@Serializable +public data class ReasoningConfig( + /** + * Constrains effort on reasoning for reasoning models. + * Currently supported values are `low`, `medium`, and `high`. + * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. 
+ */ + @SerialName("effort") + val effort: ReasoningEffort? = null, + + /** + * A summary of the reasoning performed by the model. + * This can be useful for debugging and understanding the model's reasoning process. + * One of `concise` or `detailed`. + */ + @Deprecated("Use summary instead.") + @SerialName("generate_summary") + val generateSummary: ReasoningSummary? = null, + + /** + * A summary of the reasoning performed by the model. + * This can be useful for debugging and understanding the model's reasoning process. + * One of `concise` or `detailed`. + */ + @SerialName("summary") + val summary: ReasoningSummary? = null +) + + +/** + * Reasoning effort levels for models with reasoning capabilities + */ +@JvmInline +@Serializable +public value class ReasoningEffort(public val value: String) { + public companion object { + /** + * Minimal reasoning effort + */ + public val Minimal: ReasoningEffort = ReasoningEffort("minimal") + + /** + * Low reasoning effort + */ + public val Low: ReasoningEffort = ReasoningEffort("low") + + /** + * Medium reasoning effort (default) + */ + public val Medium: ReasoningEffort = ReasoningEffort("medium") + + /** + * High reasoning effort + */ + public val High: ReasoningEffort = ReasoningEffort("high") + } +} + +/** + * Reasoning summary levels for models with reasoning capabilities + */ +@JvmInline +@Serializable +public value class ReasoningSummary(public val value: String) { + public companion object { + public val Auto: ReasoningSummary = ReasoningSummary("auto") + public val Concise: ReasoningSummary = ReasoningSummary("concise") + public val Detailed: ReasoningSummary = ReasoningSummary("detailed") + } +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/Response.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/Response.kt new file mode 100644 index 00000000..5ca4955c --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/Response.kt @@ -0,0 
+1,220 @@ +package com.aallam.openai.api.responses + +import com.aallam.openai.api.core.Status +import com.aallam.openai.api.model.ModelId +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +/** + * Response from the OpenAI Responses API + */ +@Serializable +public data class Response( + + /** + * Whether to run the model response in the background. + */ + @SerialName("background") + val background: Boolean?, + + /** + * The Unix timestamp (in seconds) of when the response was created + */ + @SerialName("created_at") + val createdAt: Long, + + /** + * An error object returned when the model fails to generate a Response. + * + */ + @SerialName("error") + val error: ResponseError?, + + /** + * Unique identifier for this Response. + */ + @SerialName("id") + val id: String, + + /** + * Details about why the response is incomplete. + * + */ + @SerialName("incomplete_details") + val incompleteDetails: IncompleteDetails?, + + /** + * Inserts a system (or developer) message as the first item in the model's context. + * + * When using along with previous_response_id, the instructions from a previous response will not be carried over to the next response. This makes it simple to swap out system (or developer) messages in new responses. + */ + @SerialName("instructions") + val instructions: String?, + + /** + * An upper bound for the number of tokens that can be generated for a response, including visible output tokens and reasoning tokens. + */ + @SerialName("max_output_tokens") + val maxOutputTokens: Long? = null, + + /** + * The maximum number of total calls to built-in tools that can be processed in a response. This maximum number applies across all built-in tool calls, not per individual tool. Any further attempts to call a tool by the model will be ignored. + */ + @SerialName("max_tool_calls") + val maxToolCalls: Long? = null, + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. + */ + @SerialName("metadata") + val metadata: Map = emptyMap(), + + /** + * Model ID used to generate the response, like gpt-4o or o1. OpenAI offers a wide range of models with different capabilities, performance characteristics, and price points. Refer to the model guide to browse and compare available models. + */ + @SerialName("model") + val model: ModelId, + + /** + * The object type, always "response" + */ + @SerialName("object") + val objectType: String = "response", + + /** + * An array of content items generated by the model. + * + * The length and order of items in the output array is dependent on the model's response. + */ + @SerialName("output") + val output: List = emptyList(), + + /** + * Whether parallel tool calls were enabled + */ + @SerialName("parallel_tool_calls") + val parallelToolCalls: Boolean, + + /** + * The unique ID of the previous response to the model. Use this to create multi-turn conversations. + */ + @SerialName("previous_response_id") + val previousResponseId: String?, + + /** + * Reference to a prompt template and its variables. + */ + @SerialName("prompt") + val prompt: PromptTemplate? = null, + + /** Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces the `user` field. */ + @SerialName("prompt_cache_key") + val promptCacheKey: String?, + + /** + * Configuration options for reasoning models. + */ + @SerialName("reasoning") + val reasoning: ReasoningConfig?, + + /** + * A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies. + * The IDs should be a string that uniquely identifies each user. 
+ * We recommend hashing their username or email address, in order to avoid sending us any identifying information. + */ + @SerialName("safety_identifier") + val safetyIdentifier: String?, + + /** + * Specifies the processing type used for serving the request. + * + * If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. + * If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. + * If set to 'flex' or 'priority', then the request will be processed with the corresponding service tier. Contact sales to learn more about Priority processing. + * When not set, the default behavior is 'auto'. + * When the service_tier parameter is set, the response body will include the service_tier value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter. + */ + @SerialName("service_tier") + val serviceTier: ServiceTier?, + + /** + * The status of the response generation. One of `completed`, `failed`, `in_progress`, or `incomplete`. + */ + @SerialName("status") + val status: Status, + + /** + * The temperature used for sampling + */ + @SerialName("temperature") + val temperature: Double, + + /** + * Configuration options for a text response from the model. Can be plain text or structured JSON data. + */ + @SerialName("text") + val text: ResponseTextConfig? = null, + + /** + * How the model should select which tool (or tools) to use when generating a response. See the tools parameter to see how to specify which tools the model can call. + */ + @SerialName("tool_choice") + val toolChoice: ResponseToolChoiceConfig, + + /** + * An array of tools the model may call while generating a response. You can specify which tool to use by setting the tool_choice parameter. 
+ * + * The two categories of tools you can provide the model are: + * + * Built-in tools: Tools that are provided by OpenAI that extend the model's capabilities, like web search or file search. Learn more about built-in tools. + * Function calls (custom tools): Functions that are defined by you, enabling the model to call your own code. Learn more about function calling. + */ + @SerialName("tools") + val tools: List, + + /** An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. */ + @SerialName("top_logprobs") + val topLogprobs: Int? = null, + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. + */ + @SerialName("top_p") + val topP: Double, + + /** + * The truncation strategy used for the model response. + */ + @SerialName("truncation") + val truncation: Truncation?, + + /** + * Represents token usage details including input tokens, output tokens, a breakdown of output tokens, and the total tokens used. + */ + @SerialName("usage") + val usage: ResponseUsage?, + + /** + * A stable identifier for your end-users. Used to boost cache hit rates by better bucketing similar requests and to help OpenAI detect and prevent abuse. + */ + @Deprecated("This field is being replaced by safety_identifier and prompt_cache_key. Use prompt_cache_key instead to maintain caching optimizations.") + @SerialName("user") + val user: String? 
+ +) + +/** + * Details about why the response is incomplete + */ +@Serializable +public data class IncompleteDetails( + /** + * The reason why the response is incomplete + */ + @SerialName("reason") + val reason: String +) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseError.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseError.kt new file mode 100644 index 00000000..6558cdb0 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseError.kt @@ -0,0 +1,22 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +/** + * Information about an error during response generation + */ +@Serializable +public data class ResponseError( + /** + * The error code for the response. + */ + @SerialName("code") + val code: String? = null, + + /** + * A human-readable description of the error. + */ + @SerialName("message") + val message: String? = null, +) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseIncludable.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseIncludable.kt new file mode 100644 index 00000000..4b6f2a1d --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseIncludable.kt @@ -0,0 +1,48 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.Serializable +import kotlin.jvm.JvmInline + +/** + * Additional data to include in the response + * + * Specify additional output data to include in the model response. + */ +@JvmInline +@Serializable +public value class ResponseIncludable(public val value: String) { + public companion object { + + /** + * Includes the outputs of python code execution in code interpreter tool call items. 
+ */ + public val CodeInterpreterCallOutputs: ResponseIncludable = ResponseIncludable("code_interpreter_call.outputs") + + /** + * Include image urls from the computer call output. + */ + public val ComputerCallOutputImageUrl: ResponseIncludable = ResponseIncludable("computer_call_output.output.image_url") + + /** + * Include the search results of the file search tool call + */ + public val FileSearchCallResults: ResponseIncludable = ResponseIncludable("file_search_call.results") + + /** + * Include image urls from the input message + */ + public val MessageInputImageUrl: ResponseIncludable = ResponseIncludable("message.input_image.image_url") + + /** + * Include logprobs with assistant messages. + */ + public val MessageOutputTextLogprobs: ResponseIncludable = ResponseIncludable("message.output_text.logprobs") + + /** + * Includes an encrypted version of reasoning tokens in reasoning item outputs. + * This enables reasoning items to be used in multi-turn conversations when using the Responses API statelessly + * (like when the store parameter is set to false, or when an organization is enrolled in the zero data retention program). 
+ */ + public val ReasoningEncryptedContent: ResponseIncludable = ResponseIncludable("reasoning.encrypted_content") + } +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseInput.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseInput.kt new file mode 100644 index 00000000..3ae4e50c --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseInput.kt @@ -0,0 +1,130 @@ +package com.aallam.openai.api.responses + +import com.aallam.openai.api.responses.ResponseInput.ListInput +import com.aallam.openai.api.responses.ResponseInput.TextInput +import kotlinx.serialization.DeserializationStrategy +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable +import kotlinx.serialization.SerializationException +import kotlinx.serialization.json.JsonArray +import kotlinx.serialization.json.JsonContentPolymorphicSerializer +import kotlinx.serialization.json.JsonElement +import kotlinx.serialization.json.JsonPrimitive +import kotlin.jvm.JvmInline + +/** + * Text, image, or file inputs to the model, used to generate a response. + * + * Can be either a simple text string or a list of messages. + */ +@Serializable(with = InputSerializer::class) +public sealed interface ResponseInput { + /** + * A text input to the model, equivalent to a text input with the `user` role. + */ + @Serializable + @JvmInline + public value class TextInput(public val value: String) : ResponseInput + + /** + * A list of chat messages as input to the model. + */ + @Serializable + @JvmInline + public value class ListInput(public val values: List) : ResponseInput + + public companion object { + /** + * Create a text input from a string. + */ + public fun from(text: String): ResponseInput = TextInput(text) + + /** + * Create an input list from a list of items. 
+ */ + public fun from(items: List): ResponseInput = ListInput(items) + } +} + +/** + * Custom serializer for Input that handles direct string or array serialization. + */ +internal class InputSerializer : JsonContentPolymorphicSerializer(ResponseInput::class) { + + override fun selectDeserializer(element: JsonElement): DeserializationStrategy { + return when (element) { + is JsonPrimitive -> TextInput.serializer() + is JsonArray -> ListInput.serializer() + else -> throw SerializationException("Unsupported JSON element: $element") + } + } +} + +/** + * A text input to the model. + * + * @param text the text content. + */ +@Serializable +@SerialName("input_text") +public data class ResponseInputText(@SerialName("text") val text: String) : ResponseContent + +/** + * An image input to the model. + * + * @param imageUrl the image url. + */ +@Serializable +@SerialName("input_image") +public data class ResponseInputImage( + /** + * The detail level of the image to be sent to the model. One of high, low, or auto. Defaults to auto. + * */ + @SerialName("detail") val detail: ImageDetail? = null, + /** + * The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image in a data URL. + * */ + @SerialName("image_url") val imageUrl: String? = null, + /** + * The ID of the file to be sent to the model. + */ + @SerialName("file_id") val fileId: String? = null, +) : ResponseContent + + +/** + * The detail level of the image to be sent to the model. + */ +@JvmInline +@Serializable +public value class ImageDetail(public val value: String) { + public companion object { + public val High: ImageDetail = ImageDetail("high") + public val Low: ImageDetail = ImageDetail("low") + public val Auto: ImageDetail = ImageDetail("auto") + } +} + +/** + * A file input to the model. + */ +@Serializable +@SerialName("input_file") +public data class ResponseInputFile( + + /** + * The content of the file to be sent to the model. 
+ * + */ + @SerialName("file_data") val fileData: String? = null, + + /** + * The ID of the file to be sent to the model. + */ + @SerialName("file_id") val fileId: String? = null, + + /** + * The name of the file to be sent to the model. + */ + @SerialName("filename") val fileName: String? = null, +) : ResponseContent diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseItem.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseItem.kt new file mode 100644 index 00000000..69b449c4 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseItem.kt @@ -0,0 +1,6 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.Serializable + +@Serializable +public sealed interface ResponseItem diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseOutput.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseOutput.kt new file mode 100644 index 00000000..d42bb7f5 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseOutput.kt @@ -0,0 +1,251 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.Required +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +/** + * A single output item in the response + */ +@Serializable +public sealed interface ResponseOutput : ResponseItem { + + /** + * The ID of the output item. + * Will always be populated when coming from the API. It is optional here, so you can construct your own ResponseMessages. + */ + @SerialName("id") + public val id: String? +} + + +/** + * A message input to the model with a role indicating instruction following hierarchy. Instructions given with the developer or system role take precedence over instructions given with the user role. Messages with the assistant role are presumed to have been generated by the model in previous interactions. 
+ * + */ +@Serializable +@SerialName("message") +public data class ResponseMessage( + + + /** + * The role of the author of this message. + */ + @SerialName("role") public val role: ResponseRole, + + /** + * A list of one or many input items to the model, containing different content types. + * + * Important: + * If the role is "Assistant", only ResponseOutputText and Refusal are allowed in the content. + * If the role is "System", only ResponseInputText is allowed in the content. + * If the role is "User", only ResponseInputText, ResponseInputImage, + * and ResponseInputFile are allowed in the content. + * + */ + @SerialName("content") public val content: List = emptyList(), + //Implementation note: If we were to implement this distinction between input and output messages with proper polymorphism, serialization would break because of the common "message" type. + + /** + * The unique ID of the input message. + */ + @SerialName("id") public override val id: String? = null, + + /** + * The status of item. One of in_progress, completed, or incomplete. Populated when items are returned via API. + */ + @SerialName("status") public val status: ResponseStatus? = null, +) : ResponseOutput + +/** + * Represents a chat message part. + */ +@Serializable +public sealed interface ResponseContent + +/** + * Text output from the model + */ +@Serializable +@SerialName("output_text") +public data class ResponseOutputText( + + /** + * The text output from the model. + */ + @SerialName("text") + val text: String, + + /** + * The annotations of the text output. + */ + @SerialName("annotations") + val annotations: List = emptyList() +) : ResponseContent + +/** + * Refusal message from the model + */ +@Serializable +@SerialName("refusal") +public data class Refusal( + + /** + * The refusal explanation from the model. 
+ */ + @SerialName("refusal") + val refusal: String +) : ResponseContent + + +/** + * An annotation in text output + */ +@Serializable +public sealed interface Annotation + +/** + * A citation to a file. + */ +@Serializable +@SerialName("file_citation") +public data class FileCitation( + /** + * The ID of the file. + * + */ + @SerialName("file_id") + val fileId: String, + + /** + * The index of the file in the list of files. + */ + @SerialName("index") + val index: Int + +) : Annotation + +/** + * A citation for a web resource used to generate a model response. + */ +@Serializable +@SerialName("url_citation") +public data class UrlCitation( + + /** + * The title of the web resource. + */ + @SerialName("title") + val title: String, + + /** + * The URL of the web resource. + */ + @SerialName("url") + val url: String, + + /** + * The index of the first character of the URL citation in the message. + */ + @SerialName("start_index") + val startIndex: Int, + + /** + * The index of the last character of the URL citation in the message. + */ + @SerialName("end_index") + val endIndex: Int +) : Annotation + +/** + * A path to a file. + */ +@Serializable +@SerialName("file_path") +public data class FilePath( + + /** + * The ID of the file. + */ + @SerialName("file_id") + val fileId: String, + + /** + * The index of the file in the list of files. + */ + @SerialName("index") + val index: Int +) : Annotation + + +/** + * Reasoning item for model reasoning + */ +@Serializable +@SerialName("reasoning") +public data class Reasoning( + /** + * The unique ID of the reasoning item. + */ + @SerialName("id") + override val id: String, + + /** + * Reasoning summary content. + */ + @SerialName("summary") + val summary: List, + + /** + * Reasoning text contents. + */ + @SerialName("content") + val content: List? 
= null, // Implementation note: OpenAI doc says this is always present, but it isn't + + /** + * The encrypted content of the reasoning item - populated when a response is generated with `reasoning.encrypted_content` in the `include` parameter. + */ + @SerialName("encrypted_content") + val encryptedContent: String? = null, + + /** + * The status of the reasoning item. + */ + @SerialName("status") + val status: ResponseStatus? = null + +) : ResponseOutput + +/** + * A summary item in the reasoning output + */ +@Serializable +@SerialName("summary_text") +public data class SummaryText( + /** + * A short summary of the reasoning used by the model. + */ + @SerialName("text") + val text: String, +) { + @SerialName("type") + @Required + val type: String = "summary_text" // Implementation note: We need to specify the type explicitly because this is not a polymorphic type +} + +/** + * A summary text item in the reasoning output + */ +@Serializable +@SerialName("reasoning_text") +public data class ReasoningText( + /** + * Reasoning text output from the model. 
+ */ + @SerialName("text") + val text: String, +) { + @SerialName("type") + @Required + val type: String = "reasoning_text" // Implementation note: We need to specify the type explicitly because this is not a polymorphic type +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseRequest.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseRequest.kt new file mode 100644 index 00000000..076deb07 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseRequest.kt @@ -0,0 +1,302 @@ +package com.aallam.openai.api.responses + +import com.aallam.openai.api.OpenAIDsl +import com.aallam.openai.api.model.ModelId +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +/** Request object for the OpenAI Responses API */ +@Serializable +public data class ResponseRequest( + + /** Text, image, or file inputs to the model, used to generate a response. */ + @SerialName("input") val input: ResponseInput, + + /** + * Model ID used to generate the response, like gpt-4o or o1. + * OpenAI offers a wide range of models with different capabilities, performance characteristics, and price points. + * Refer to the model guide to browse and compare available models. + */ + @SerialName("model") val model: ModelId, + + /** + * Whether to run the model response in the background. + * Defaults to false. + */ + @SerialName("background") + val background: Boolean? = null, + + /** Specify additional output data to include in the model response. */ + @SerialName("include") val include: List? = null, + + /** + * Inserts a system (or developer) message as the first item in the model's context. + * + * When using along with previous_response_id, the instructions from a previous response will not be carried over to the next response. + * This makes it simple to swap out system (or developer) messages in new responses. 
+ */
+    @SerialName("instructions") val instructions: String? = null,
+
+    /** An upper bound for the number of tokens that can be generated for a response, including visible output tokens and reasoning tokens. */
+    @SerialName("max_output_tokens") val maxOutputTokens: Int? = null,
+
+    /**
+     * The maximum number of total calls to built-in tools that can be processed in a response.
+     * This maximum number applies across all built-in tool calls, not per individual tool.
+     * Any further attempts to call a tool by the model will be ignored.
+     */
+    @SerialName("max_tool_calls") val maxToolCalls: Int? = null,
+
+    /**
+     * Set of key-value pairs that can be attached to an object. This can be
+     * useful for storing additional information about the object in a structured
+     * format, and querying for objects via API or the dashboard.
+     *
+     * Keys are strings with a maximum length of 64 characters. Values are strings
+     * with a maximum length of 512 characters.
+     * */
+    @SerialName("metadata") val metadata: Map<String, String>? = null,
+
+    /** Whether to allow the model to run tool calls in parallel. */
+    @SerialName("parallel_tool_calls") val parallelToolCalls: Boolean? = null,
+
+    /** The unique ID of the previous response to the model. Use this to create multi-turn conversations. */
+    @SerialName("previous_response_id") val previousResponseId: String? = null,
+
+    /** Reference to a prompt template and its variables. */
+    @SerialName("prompt")
+    val prompt: PromptTemplate? = null,
+
+    /** Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces the `user` field. */
+    @SerialName("prompt_cache_key") val promptCacheKey: String? = null,
+
+    /** Configuration for reasoning models. */
+    @SerialName("reasoning") val reasoning: ReasoningConfig? = null,
+
+    /**
+     * A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies.
+     * The IDs should be a string that uniquely identifies each user.
+ * We recommend hashing their username or email address, in order to avoid sending us any identifying information. + */ + @SerialName("safety_identifier") val safetyIdentifier: String? = null, + + /** + * Specifies the processing type used for serving the request. + * + * - If set to `auto`, then the request will be processed with the service tier configured in the Project settings. + * Unless otherwise configured, the Project will use 'default'. + * - If set to `default`, then the request will be processed with the standard pricing and performance for the selected model. + * - If set to `flex` or `priority`, then the request will be processed with the corresponding service tier. + * - When not set, the default behavior is `auto`. + * + * When the service_tier parameter is set, the response body will include the service_tier value based on the processing mode actually used to serve the request. + * This response value may be different from the value set in the parameter. + */ + @SerialName("service_tier") + val serviceTier: ServiceTier? = null, + + /** Whether to store the generated model response for later retrieval via API. */ + @SerialName("store") val store: Boolean? = null, + + /** + * If set to true, the model response data will be streamed to the client as it is generated using server-sent events. See the Streaming section below for more information. + */ + @SerialName("stream") val stream: Boolean? = null, + + /** + * What sampling temperature to use, between 0 and 2. + * Higher values like 0.8 will make the output more random, + * while lower values like 0.2 will make it more focused and deterministic. + * We generally recommend altering this or top_p but not both. + */ + @SerialName("temperature") val temperature: Double? = null, + + /** Configuration options for a text response from the model. Can be plain text or structured JSON data. */ + @SerialName("text") val text: ResponseTextConfig? 
= null,
+
+    /** How the model should select which tool (or tools) to use when generating a response. See the tools parameter to see how to specify which tools the model can call. */
+    @SerialName("tool_choice") val toolChoice: ResponseToolChoiceConfig? = null,
+
+    /**
+     * An array of tools the model may call while generating a response. You can specify which tool to use by setting the `tool_choice` parameter.
+     *
+     * The two categories of tools you can provide the model are:
+     *
+     * - Built-in tools: Tools that are provided by OpenAI that extend the model's capabilities, like web search or file search. Learn more about built-in tools.
+     * - Function calls (custom tools): Functions that are defined by you, enabling the model to call your own code. Learn more about function calling.
+     */
+    @SerialName("tools") val tools: List<ResponseTool>? = null,
+
+    /** An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. */
+    @SerialName("top_logprobs") val topLogprobs: Int? = null,
+
+    /**
+     * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+     *
+     * We generally recommend altering this or temperature but not both.
+     */
+    @SerialName("top_p") val topP: Double? = null,
+
+    /**
+     * The truncation strategy to use for the model response.
+     * - `auto`: If the context exceeds the model's context window size, the model will truncate
+     * the response by dropping input items in the middle of the conversation.
+     * - `disabled` (default): If a model response will exceed the context window size,
+     * the request will fail with a 400 error.
+     */
+    @SerialName("truncation") val truncation: Truncation? = null,
+
+    /**
+     * A stable identifier for your end-users.
+ * Used to boost cache hit rates by better bucketing similar requests and to help OpenAI detect and prevent abuse.
+ */
+    @Deprecated("This field is being replaced by safety_identifier and prompt_cache_key. Use prompt_cache_key instead to maintain caching optimizations.")
+    @SerialName("user") val user: String? = null
+)
+
+/** Builder for ResponseRequest objects */
+@OpenAIDsl
+public class ResponseRequestBuilder {
+    /** Whether to run the model response in the background */
+    public var background: Boolean? = null
+
+    /** ID of the model to use */
+    public var model: ModelId? = null
+
+    /** The input to the model */
+    public var input: ResponseInput? = null
+
+    /** Specify additional output data to include in the model response */
+    public var include: List<ResponseIncludable>? = null
+
+    /** Instructions for the model */
+    public var instructions: String? = null
+
+    /** Maximum number of tokens to generate */
+    public var maxOutputTokens: Int? = null
+
+    /** The maximum number of total calls to built-in tools that can be processed in a response */
+    public var maxToolCalls: Int? = null
+
+    /** Custom metadata */
+    public var metadata: Map<String, String>? = null
+
+    /** Whether to allow parallel tool calls */
+    public var parallelToolCalls: Boolean? = null
+
+    /** ID of a previous response to continue from */
+    public var previousResponseId: String? = null
+
+    /** Reference to a prompt template and its variables */
+    public var prompt: PromptTemplate? = null
+
+    /** Used to optimize cache hit rates; replaces the `user` field */
+    public var promptCacheKey: String? = null
+
+    /** Reasoning configuration */
+    public var reasoning: ReasoningConfig? = null
+
+    /** A stable identifier used to help detect policy violations */
+    public var safetyIdentifier: String? = null
+
+    /** Specifies the processing tier used for serving the request */
+    public var serviceTier: ServiceTier? = null
+
+    /** Whether to store the response */
+    public var store: Boolean?
= null
+
+    /** Whether to stream the response */
+    public var stream: Boolean? = null
+
+    /** Sampling temperature */
+    public var temperature: Double? = null
+
+    /** Text response configuration */
+    public var text: ResponseTextConfig? = null
+
+    /** Tool choice configuration */
+    public var toolChoice: ResponseToolChoiceConfig? = null
+
+    /** Tools that the model may use */
+    public var tools: MutableList<ResponseTool>? = null
+
+    /** Number of most likely tokens to return with log probabilities */
+    public var topLogprobs: Int? = null
+
+    /** Top-p sampling parameter */
+    public var topP: Double? = null
+
+    /**
+     * Truncation configuration
+     * - `auto`: If the context exceeds the model's context window size, the model will truncate
+     * the response by dropping input items in the middle of the conversation.
+     * - `disabled` (default): If a model response will exceed the context window size,
+     * the request will fail with a 400 error.
+     */
+    public var truncation: Truncation? = null
+
+    /** End-user identifier */
+    public var user: String?
= null
+
+    /** Add a tool to the request */
+    public fun tool(tool: ResponseTool) {
+        if (tools == null) {
+            tools = mutableListOf()
+        }
+        tools?.add(tool)
+    }
+
+    /** Add multiple tools to the request */
+    public fun tools(init: MutableList<ResponseTool>.() -> Unit) {
+        if (tools == null) {
+            tools = mutableListOf()
+        }
+        tools?.init()
+    }
+
+    /** Add an includable option */
+    public fun include(includable: ResponseIncludable) {
+        include = include.orEmpty() + includable
+    }
+
+    /** Build the ResponseRequest object */
+    public fun build(): ResponseRequest {
+        requireNotNull(model) { "Model must be set" }
+        requireNotNull(input) { "Input must be set" }
+
+        return ResponseRequest(
+            input = input!!,
+            model = model!!,
+            background = background,
+            include = include,
+            instructions = instructions,
+            maxOutputTokens = maxOutputTokens,
+            maxToolCalls = maxToolCalls,
+            metadata = metadata,
+            parallelToolCalls = parallelToolCalls,
+            previousResponseId = previousResponseId,
+            prompt = prompt,
+            promptCacheKey = promptCacheKey,
+            reasoning = reasoning,
+            safetyIdentifier = safetyIdentifier,
+            serviceTier = serviceTier,
+            store = store,
+            stream = stream,
+            temperature = temperature,
+            text = text,
+            toolChoice = toolChoice,
+            tools = tools,
+            topLogprobs = topLogprobs,
+            topP = topP,
+            truncation = truncation,
+            user = user,
+        )
+    }
+}
+
+/** Creates a new ResponseRequest using a builder DSL */
+public fun responseRequest(init: ResponseRequestBuilder.() -> Unit): ResponseRequest {
+    val builder = ResponseRequestBuilder()
+    builder.init()
+    return builder.build()
+}
diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseRole.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseRole.kt
new file mode 100644
index 00000000..dc1bd07f
--- /dev/null
+++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseRole.kt
@@ -0,0 +1,8 @@
+package com.aallam.openai.api.responses
+
+import com.aallam.openai.api.core.Role
+
+/** + * The role of the author of a message. + */ +public typealias ResponseRole = Role diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseStatus.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseStatus.kt new file mode 100644 index 00000000..e26d8c1a --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseStatus.kt @@ -0,0 +1,17 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.Serializable +import kotlin.jvm.JvmInline + +/** + * Status of an output item + */ +@JvmInline +@Serializable +public value class ResponseStatus(public val value: String) { + public companion object { + public val InProgress: ResponseStatus = ResponseStatus("in_progress") + public val Completed: ResponseStatus = ResponseStatus("completed") + public val Incomplete: ResponseStatus = ResponseStatus("incomplete") + } +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseTextConfig.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseTextConfig.kt new file mode 100644 index 00000000..ade6deee --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseTextConfig.kt @@ -0,0 +1,84 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable +import kotlinx.serialization.json.JsonObject + +/** Configuration for text responses */ +@Serializable +public data class ResponseTextConfig( + /** The format to use for text responses */ + @SerialName("format") val format: TextResponseFormatConfiguration? = null, + + /** + * Constrains the verbosity of the model's response. + * Lower values will result in more concise responses, while higher values will result in more verbose responses. + * Currently supported values are `low`, `medium`, and `high`. + */ + @SerialName("verbosity") val verbosity: Verbosity? 
= null, + // Implementation note: In the OpenAI doc, this is wrongly placed at the top level of response +) + +/** + * Configuration for text response format + */ +@Serializable +public sealed interface TextResponseFormatConfiguration + +/** + * Plain text format - default response format. + * Used to generate text responses. + */ +@Serializable +@SerialName("text") +public data object TextFormat : TextResponseFormatConfiguration + +/** + * JSON object response format. An older method of generating JSON responses. + * Using `json_schema` is recommended for models that support it. + * Note that the model will not generate JSON without a system or user message + * instructing it to do so. + */ +@Serializable +@SerialName("json_object") +public data object JsonObjectFormat : TextResponseFormatConfiguration + +/** + * JSON Schema response format. Used to generate structured JSON responses. + */ +@Serializable +@SerialName("json_schema") +public data class JsonSchemaFormat( + /** Structured Outputs configuration options, including a JSON Schema */ + @SerialName("json_schema") val jsonSchema: ResponseJsonSchema, +) : TextResponseFormatConfiguration + +/** + * Structured Outputs configuration options, including a JSON Schema + */ +@Serializable +public data class ResponseJsonSchema( + /** + * A description of what the response format is for, used by the model to + * determine how to respond in the format. + */ + @SerialName("description") val description: String? = null, + + /** + * The name of the response format. Must be a-z, A-Z, 0-9, or contain + * underscores and dashes, with a maximum length of 64. + */ + @SerialName("name") val name: String? = null, + + /** + * The schema for the response format, described as a JSON Schema object. + */ + @SerialName("schema") val schema: JsonObject, + + /** + * Whether to enable strict schema adherence when generating the output. + * If set to true, the model will always follow the exact schema defined + * in the `schema` field. 
+ */ + @SerialName("strict") val strict: Boolean? = null, +) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseTool.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseTool.kt new file mode 100644 index 00000000..d5ae20fe --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseTool.kt @@ -0,0 +1,220 @@ +package com.aallam.openai.api.responses + +import com.aallam.openai.api.core.Parameters +import com.aallam.openai.api.vectorstore.VectorStoreId +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +@Serializable +public sealed interface ResponseTool { + + /** + * Function tool for function calling + */ + @Serializable + @SerialName("function") + public data class Function( + /** + * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + * length of 64. + */ + @SerialName("name") val name: String, + + /** + * The parameters the functions accept, described as a JSON Schema object. + * See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, + * and the [JSON Schema reference](https://json-schema.org/understanding-json-schema) for documentation about + * the format. + * + * Omitting `parameters` defines a function with an empty parameter list. + */ + @SerialName("parameters") val parameters: Parameters? = null, + + /** + * A description of what the function does, used by the model to choose when and how to call the function. + */ + @SerialName("description") val description: String? = null, + + /** + * Whether to enforce strict parameter validation. Default true. + */ + @SerialName("strict") val strict: Boolean? = null, + ) : ResponseTool + + /** + * A tool that searches for relevant content from uploaded files. 
+ */
+    @Serializable
+    @SerialName("file_search")
+    public data class FileSearch(
+        /**
+         * The IDs of the vector stores to search.
+         */
+        @SerialName("vector_store_ids")
+        val vectorStoreIds: List<VectorStoreId> = emptyList(),
+        /**
+         * A filter to apply based on file attributes.
+         */
+        @SerialName("filters")
+        val filters: FileSearchFilter? = null,
+
+        /**
+         * Ranking options for search.
+         */
+        @SerialName("ranking_options")
+        val rankingOptions: FileSearchRankingOptions? = null,
+
+        /**
+         * The maximum number of results to return. This number should be between 1 and 50 inclusive.
+         */
+        @SerialName("max_num_results")
+        val maxNumResults: Int? = null,
+    ) : ResponseTool
+
+    /**
+     * Web search tool (preview)
+     */
+    @Serializable
+    @SerialName("web_search_preview")
+    public data class WebSearchPreview(
+        /**
+         * Approximate location parameters for the search.
+         */
+        @SerialName("user_location")
+        val userLocation: WebSearchLocation? = null,
+
+        /**
+         * High level guidance for the amount of context window space to use for the search.
+         * One of 'low', 'medium', or 'high'.
+         * 'medium' is the default.
+         */
+        @SerialName("search_context_size")
+        val searchContextSize: WebSearchContextSize? = null,
+    ) : ResponseTool
+
+    /**
+     * Web search tool (preview 2025-03-11)
+     */
+    @Serializable
+    @SerialName("web_search_preview_2025_03_11")
+    public data class WebSearchPreview2025(
+
+        /**
+         * Approximate location parameters for the search.
+         */
+        @SerialName("user_location")
+        val userLocation: WebSearchLocation? = null,
+
+        /**
+         * High level guidance for the amount of context window space to use for the search.
+         * One of 'low', 'medium', or 'high'.
+         * 'medium' is the default.
+         */
+        @SerialName("search_context_size")
+        val searchContextSize: WebSearchContextSize?
= null,
+    ) : ResponseTool
+
+    /**
+     * Computer tool for computational tasks (preview)
+     */
+    @Serializable
+    @SerialName("computer_use_preview")
+    public data class ComputerUsePreview(
+
+        /**
+         * The width of the computer display
+         */
+        @SerialName("display_width")
+        val displayWidth: Int,
+
+        /**
+         * The height of the computer display
+         */
+        @SerialName("display_height")
+        val displayHeight: Int,
+
+        /**
+         * The type of computer environment to control
+         */
+        @SerialName("environment")
+        val environment: String,
+    ) : ResponseTool
+
+    /**
+     * Config for a remote MCP server.
+     */
+    @Serializable
+    @SerialName("mcp")
+    public data class MCP(
+        /**
+         * A label for this MCP server, used to identify it in tool calls.
+         */
+        @SerialName("server_label") val name: String,
+
+        /**
+         * The URL for the MCP server.
+         */
+        @SerialName("server_url") val serverUrl: String,
+
+        /**
+         * List of allowed tool names or a filter object.
+         */
+        @SerialName("allowed_tools") val allowedTools: MCPAllowedTools? = null,
+
+        /**
+         * Optional HTTP headers to send to the MCP server. Use for authentication or other purposes.
+         */
+        @SerialName("headers") val headers: Map<String, String>? = null,
+
+        /**
+         * Specify which of the MCP server's tools require approval.
+         */
+        @SerialName("require_approval") val requireApproval: MCPRequireApproval? = null,
+
+        /**
+         * Optional description of the MCP server, used to provide more context.
+         */
+        @SerialName("server_description") val serverDescription: String? = null,
+
+    ) : ResponseTool
+
+    /**
+     * A tool that runs Python code to help generate a response to a prompt.
+     */
+    @Serializable
+    @SerialName("code_interpreter")
+    public data class CodeInterpreter(
+        /**
+         * The code interpreter container. Can be a container ID or an object that specifies uploaded file IDs to make available to your code.
+ */ + @SerialName("container") val container: CodeInterpreterContainer, + ) : ResponseTool + + /** + * A tool that allows the model to execute shell commands in a local environment. + */ + @Serializable + @SerialName("local_shell") + public class LocalShell : ResponseTool + + /** + * A custom tool that processes input using a specified format. + */ + @Serializable + @SerialName("custom") + public data class Custom( + + /** The name of the custom tool, used to identify it in tool calls. */ + @SerialName("name") + val name: String, + + /** Optional description of the custom tool, used to provide more context. */ + @SerialName("description") + val description: String? = null, + + /** The input format for the custom tool. Default is unconstrained text. */ + @SerialName("format") + val format: CustomToolFormat? = null + + ) : ResponseTool +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseToolChoiceConfig.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseToolChoiceConfig.kt new file mode 100644 index 00000000..f92a8732 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseToolChoiceConfig.kt @@ -0,0 +1,100 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable +import kotlinx.serialization.json.JsonContentPolymorphicSerializer +import kotlinx.serialization.json.JsonElement +import kotlinx.serialization.json.JsonObject +import kotlinx.serialization.json.JsonPrimitive +import kotlin.jvm.JvmInline + +/** + * How the model should select which tool (or tools) to use when generating a response. + * See the tools parameter to see how to specify which tools the model can call. + */ +@Serializable(with = ResponseToolChoiceSerializer::class) +public sealed interface ResponseToolChoiceConfig { + + /** + * Represents a tool choice mode. 
+ * - `none` means the model will not call any tool and instead generates a message. + * - `auto` means the model can pick between generating a message or calling one or more tools. + * - `required` means the model must call one or more tools. + */ + @JvmInline + @Serializable + public value class Mode(public val value: String) : ResponseToolChoiceConfig { + public companion object { + public val Auto: Mode = Mode("auto") + public val None: Mode = Mode("none") + public val Required: Mode = Mode("required") + } + } + + @Serializable + @SerialName("file_search") + public class FileSearch : ResponseToolChoice + + @Serializable + @SerialName("web_search_preview") + public class WebSearchPreview : ResponseToolChoice + + @Serializable + @SerialName("computer_use_preview") + public class ComputerUsePreview : ResponseToolChoice + + @Serializable + @SerialName("code_interpreter") + public class CodeInterpreter : ResponseToolChoice + + //TODO add image_generation after updates to image API are implemented + + /** + * Use this option to force the model to call a specific function. + */ + @Serializable + @SerialName("function") + public data class Function( + /** The name of the function to call. */ + @SerialName("name") val name: String + ) : ResponseToolChoice + + /** + * Use this option to force the model to call a specific tool on a remote MCP server. + */ + @Serializable + @SerialName("mcp") + public data class MCPTool( + /** The label of the MCP server to use. */ + @SerialName("server_label") val serverLabel: String, + + /** The name of the tool to call on the server.*/ + @SerialName("name") val name: String? = null + + ) : ResponseToolChoice + + /** + * Use this option to force the model to call a specific custom tool. + */ + @Serializable + @SerialName("custom") + public data class Custom( + /** The name of the custom tool to call. 
*/
+        @SerialName("name") val name: String
+    ) : ResponseToolChoice
+}
+
+@Serializable
+public sealed interface ResponseToolChoice : ResponseToolChoiceConfig
+
+/**
+ * Serializer for [ResponseToolChoiceConfig].
+ *
+ * The tool-choice field is either a bare JSON string ("auto"/"none"/"required")
+ * or a polymorphic JSON object discriminated by its "type" key, so deserialization
+ * is selected by JSON element shape rather than by a class discriminator alone.
+ */
+internal class ResponseToolChoiceSerializer :
+    JsonContentPolymorphicSerializer<ResponseToolChoiceConfig>(ResponseToolChoiceConfig::class) {
+    override fun selectDeserializer(element: JsonElement) = when (element) {
+        is JsonPrimitive -> ResponseToolChoiceConfig.Mode.serializer()
+        is JsonObject -> ResponseToolChoice.serializer()
+        else -> throw IllegalArgumentException("Unknown element type: $element")
+    }
+}
diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseUsage.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseUsage.kt
new file mode 100644
index 00000000..018c9ad1
--- /dev/null
+++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseUsage.kt
@@ -0,0 +1,65 @@
+package com.aallam.openai.api.responses
+
+import kotlinx.serialization.SerialName
+import kotlinx.serialization.Serializable
+
+/**
+ * Represents token usage details including input tokens, output tokens,
+ * a breakdown of output tokens, and the total tokens used.
+ */
+@Serializable
+public data class ResponseUsage(
+    /**
+     * The number of input tokens.
+     */
+    @SerialName("input_tokens")
+    val inputTokens: Int,
+
+    /**
+     * A detailed breakdown of the input tokens.
+     */
+    @SerialName("input_tokens_details")
+    val inputTokensDetails: InputTokensDetails,
+
+    /**
+     * The number of output tokens.
+     */
+    @SerialName("output_tokens")
+    val outputTokens: Int,
+
+    /**
+     * A detailed breakdown of the output tokens.
+     */
+    @SerialName("output_tokens_details")
+    val outputTokensDetails: OutputTokensDetails,
+
+    /**
+     * The total number of tokens used.
+     */
+    @SerialName("total_tokens")
+    val totalTokens: Int
+)
+
+/**
+ * A detailed breakdown of the input tokens.
+ */ +@Serializable +public data class InputTokensDetails( + /** + * The number of tokens that were retrieved from the cache. + */ + @SerialName("cached_tokens") + val cachedTokens: Int +) + +/** + * A detailed breakdown of the output tokens. + */ +@Serializable +public data class OutputTokensDetails( + /** + * The number of reasoning tokens. + */ + @SerialName("reasoning_tokens") + val reasoningTokens: Int +) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ServiceTier.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ServiceTier.kt new file mode 100644 index 00000000..7251f162 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ServiceTier.kt @@ -0,0 +1,22 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.Serializable +import kotlin.jvm.JvmInline + +/** + * Specifies the processing tier used for serving the request. + */ +@JvmInline +@Serializable +public value class ServiceTier(public val value: String) { + public companion object { + /** Use the project default service tier (usually "default"). */ + public val Auto: ServiceTier = ServiceTier("auto") + /** Standard pricing and performance for the selected model. */ + public val Default: ServiceTier = ServiceTier("default") + /** Flexible, lower-priority processing. */ + public val Flex: ServiceTier = ServiceTier("flex") + /** Priority processing. 
*/ + public val Priority: ServiceTier = ServiceTier("priority") + } +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/Truncation.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/Truncation.kt new file mode 100644 index 00000000..925d0714 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/Truncation.kt @@ -0,0 +1,35 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.Serializable +import kotlin.jvm.JvmInline + +/** + * Controls truncation behavior for the model + * + * - `auto`: If the context of this response and previous ones exceeds + * the model's context window size, the model will truncate the + * response to fit the context window by dropping input items in the + * middle of the conversation. + * - `disabled` (default): If a model response will exceed the context window + * size for a model, the request will fail with a 400 error. + */ +@JvmInline +@Serializable +public value class Truncation(public val value: String) { + public companion object { + /** + * If the context of this response and previous ones exceeds + * the model's context window size, the model will truncate the + * response to fit the context window by dropping input items in the + * middle of the conversation. + */ + public val Auto: Truncation = Truncation("auto") + + /** + * If a model response will exceed the context window + * size for a model, the request will fail with a 400 error. + * This is the default. 
+ */ + public val Disabled: Truncation = Truncation("disabled") + } +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/Verbosity.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/Verbosity.kt new file mode 100644 index 00000000..46f655c1 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/Verbosity.kt @@ -0,0 +1,22 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.Serializable +import kotlin.jvm.JvmInline + +/** + * Constrains the verbosity of the model's response. + * + * Lower values are more concise; higher values are more detailed. + */ +@JvmInline +@Serializable +public value class Verbosity(public val value: String) { + public companion object { + /** Concise responses. */ + public val Low: Verbosity = Verbosity("low") + /** Balanced level of detail. */ + public val Medium: Verbosity = Verbosity("medium") + /** Detailed, more verbose responses. */ + public val High: Verbosity = Verbosity("high") + } +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/WebSearchTool.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/WebSearchTool.kt new file mode 100644 index 00000000..617ebe03 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/WebSearchTool.kt @@ -0,0 +1,87 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.Required +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable +import kotlin.jvm.JvmInline + +/** + * Web search context size + */ +@JvmInline +@Serializable +public value class WebSearchContextSize(public val value: String) { + public companion object { + /** + * Low context size + */ + public val Low: WebSearchContextSize = WebSearchContextSize("low") + + /** + * Medium context size + */ + public val Medium: WebSearchContextSize = WebSearchContextSize("medium") + + /** + * High context size + */ + public 
val High: WebSearchContextSize = WebSearchContextSize("high") + } +} + +/** + * Web search location + */ +@Serializable +public data class WebSearchLocation( + /** + * Free text input for the city of the user, e.g., San Francisco. + */ + @SerialName("city") + val city: String? = null, + + /** + * The two-letter ISO-country code of the user, e.g., US. + */ + @SerialName("country") + val country: String? = null, + + /** + * Free text input for the region of the user, e.g., California. + */ + @SerialName("region") + val region: String? = null, + + /** + * The IANA time zone of the user, e.g., America/Los_Angeles. + */ + @SerialName("timezone") + val timezone: String? = null, + + ) { + /** + * The type of location approximation. Always approximate. + */ + @SerialName("type") + @Required + val type: String = "approximate" +} + +/** + * Web search tool call in a response + */ +@Serializable +@SerialName("web_search_call") +public data class WebSearchToolCall( + /** + * The unique ID of the web search tool call. + */ + @SerialName("id") + override val id: String, + + /** + * The status of the web search tool call. + */ + @SerialName("status") + val status: ResponseStatus +) : ResponseOutput