From 420281387544581b5f0ef12b5a55e7b34d1aaad6 Mon Sep 17 00:00:00 2001 From: Alex Kuleshov Date: Mon, 23 Mar 2026 13:05:51 -0400 Subject: [PATCH 1/9] feat(models): add example registry configs --- README.md | 17 +++++++++++++++++ models/gpt-5.1.json | 6 ++++++ models/o3.json | 20 ++++++++++++++++++++ models/openai/gpt-4o.json | 6 ++++++ providers/openrouter/openai/gpt-4o.json | 6 ++++++ 5 files changed, 55 insertions(+) create mode 100644 README.md create mode 100644 models/gpt-5.1.json create mode 100644 models/o3.json create mode 100644 models/openai/gpt-4o.json create mode 100644 providers/openrouter/openai/gpt-4o.json diff --git a/README.md b/README.md new file mode 100644 index 0000000..f61e97f --- /dev/null +++ b/README.md @@ -0,0 +1,17 @@ +# golemcore-models + +Example registry layout for GolemCore model defaults. + +Lookup order: + +1. `providers/<provider>/<model-id>.json` +2. `models/<model-id>.json` + +Each JSON file contains full `ModelSettings` without the `provider` field. + +Examples in this repository: + +- `models/gpt-5.1.json` +- `models/o3.json` +- `models/openai/gpt-4o.json` +- `providers/openrouter/openai/gpt-4o.json` diff --git a/models/gpt-5.1.json b/models/gpt-5.1.json new file mode 100644 index 0000000..da66833 --- /dev/null +++ b/models/gpt-5.1.json @@ -0,0 +1,6 @@ +{ + "displayName": "GPT-5.1", + "supportsVision": true, + "supportsTemperature": false, + "maxInputTokens": 1000000 +} diff --git a/models/o3.json b/models/o3.json new file mode 100644 index 0000000..e0d2ee9 --- /dev/null +++ b/models/o3.json @@ -0,0 +1,20 @@ +{ + "displayName": "o3", + "supportsVision": true, + "supportsTemperature": false, + "maxInputTokens": 200000, + "reasoning": { + "default": "medium", + "levels": { + "low": { + "maxInputTokens": 120000 + }, + "medium": { + "maxInputTokens": 200000 + }, + "high": { + "maxInputTokens": 200000 + } + } + } +} diff --git a/models/openai/gpt-4o.json b/models/openai/gpt-4o.json new file mode 100644 index 0000000..743a42f --- /dev/null +++ 
b/models/openai/gpt-4o.json @@ -0,0 +1,6 @@ +{ + "displayName": "GPT-4o", + "supportsVision": true, + "supportsTemperature": true, + "maxInputTokens": 128000 +} diff --git a/providers/openrouter/openai/gpt-4o.json b/providers/openrouter/openai/gpt-4o.json new file mode 100644 index 0000000..9a394b9 --- /dev/null +++ b/providers/openrouter/openai/gpt-4o.json @@ -0,0 +1,6 @@ +{ + "displayName": "OpenRouter GPT-4o", + "supportsVision": true, + "supportsTemperature": false, + "maxInputTokens": 200000 +} From adff06187803cee18f5a36ee569b4e3d3b11090c Mon Sep 17 00:00:00 2001 From: Alex Kuleshov Date: Mon, 23 Mar 2026 13:16:32 -0400 Subject: [PATCH 2/9] feat(models): add current openai anthropic and gemini configs --- README.md | 19 +++++++++----- models/claude-haiku-4-5.json | 6 +++++ models/claude-opus-4-6.json | 6 +++++ models/claude-sonnet-4-6.json | 6 +++++ ...gemini-2.5-flash-lite-preview-09-2025.json | 6 +++++ models/gemini-2.5-flash-lite.json | 6 +++++ models/gemini-2.5-flash-preview-09-2025.json | 6 +++++ models/gemini-2.5-flash.json | 6 +++++ models/gemini-2.5-pro.json | 6 +++++ models/gemini-3-flash-preview.json | 23 ++++++++++++++++ models/gemini-3.1-flash-lite-preview.json | 23 ++++++++++++++++ models/gemini-3.1-pro-preview.json | 20 ++++++++++++++ models/gpt-4.1-mini.json | 6 +++++ models/gpt-4.1-nano.json | 6 +++++ models/{openai/gpt-4o.json => gpt-4.1.json} | 4 +-- models/gpt-5.1.json | 19 +++++++++++++- models/gpt-5.2-pro.json | 20 ++++++++++++++ models/gpt-5.2.json | 26 +++++++++++++++++++ models/gpt-5.4-mini.json | 6 +++++ models/gpt-5.4-nano.json | 6 +++++ models/gpt-5.4-pro.json | 20 ++++++++++++++ models/gpt-5.4.json | 26 +++++++++++++++++++ models/o3.json | 20 -------------- providers/openrouter/openai/gpt-4o.json | 6 ----- 24 files changed, 263 insertions(+), 35 deletions(-) create mode 100644 models/claude-haiku-4-5.json create mode 100644 models/claude-opus-4-6.json create mode 100644 models/claude-sonnet-4-6.json create mode 100644 
models/gemini-2.5-flash-lite-preview-09-2025.json create mode 100644 models/gemini-2.5-flash-lite.json create mode 100644 models/gemini-2.5-flash-preview-09-2025.json create mode 100644 models/gemini-2.5-flash.json create mode 100644 models/gemini-2.5-pro.json create mode 100644 models/gemini-3-flash-preview.json create mode 100644 models/gemini-3.1-flash-lite-preview.json create mode 100644 models/gemini-3.1-pro-preview.json create mode 100644 models/gpt-4.1-mini.json create mode 100644 models/gpt-4.1-nano.json rename models/{openai/gpt-4o.json => gpt-4.1.json} (52%) create mode 100644 models/gpt-5.2-pro.json create mode 100644 models/gpt-5.2.json create mode 100644 models/gpt-5.4-mini.json create mode 100644 models/gpt-5.4-nano.json create mode 100644 models/gpt-5.4-pro.json create mode 100644 models/gpt-5.4.json delete mode 100644 models/o3.json delete mode 100644 providers/openrouter/openai/gpt-4o.json diff --git a/README.md b/README.md index f61e97f..acfbc07 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # golemcore-models -Example registry layout for GolemCore model defaults. +Shared registry layout for GolemCore model defaults. Lookup order: @@ -9,9 +9,16 @@ Lookup order: Each JSON file contains full `ModelSettings` without the `provider` field. -Examples in this repository: +This repository intentionally contains only current general-purpose text/vision models used in Model Catalog discovery. 
-- `models/gpt-5.1.json` -- `models/o3.json` -- `models/openai/gpt-4o.json` -- `providers/openrouter/openai/gpt-4o.json` +Excluded on purpose: + +- deprecated models +- legacy `gpt-4o` / `o3` style families +- audio-only, image-only, TTS, live, and other specialized endpoints + +Current families included: + +- OpenAI GPT versioned models +- Anthropic Claude current aliases +- Gemini current text multimodal models diff --git a/models/claude-haiku-4-5.json b/models/claude-haiku-4-5.json new file mode 100644 index 0000000..387f5d0 --- /dev/null +++ b/models/claude-haiku-4-5.json @@ -0,0 +1,6 @@ +{ + "displayName": "Claude Haiku 4.5", + "supportsVision": true, + "supportsTemperature": true, + "maxInputTokens": 200000 +} diff --git a/models/claude-opus-4-6.json b/models/claude-opus-4-6.json new file mode 100644 index 0000000..321643f --- /dev/null +++ b/models/claude-opus-4-6.json @@ -0,0 +1,6 @@ +{ + "displayName": "Claude Opus 4.6", + "supportsVision": true, + "supportsTemperature": true, + "maxInputTokens": 1000000 +} diff --git a/models/claude-sonnet-4-6.json b/models/claude-sonnet-4-6.json new file mode 100644 index 0000000..861759f --- /dev/null +++ b/models/claude-sonnet-4-6.json @@ -0,0 +1,6 @@ +{ + "displayName": "Claude Sonnet 4.6", + "supportsVision": true, + "supportsTemperature": true, + "maxInputTokens": 1000000 +} diff --git a/models/gemini-2.5-flash-lite-preview-09-2025.json b/models/gemini-2.5-flash-lite-preview-09-2025.json new file mode 100644 index 0000000..aa34a24 --- /dev/null +++ b/models/gemini-2.5-flash-lite-preview-09-2025.json @@ -0,0 +1,6 @@ +{ + "displayName": "Gemini 2.5 Flash-Lite Preview", + "supportsVision": true, + "supportsTemperature": true, + "maxInputTokens": 1048576 +} diff --git a/models/gemini-2.5-flash-lite.json b/models/gemini-2.5-flash-lite.json new file mode 100644 index 0000000..2f64229 --- /dev/null +++ b/models/gemini-2.5-flash-lite.json @@ -0,0 +1,6 @@ +{ + "displayName": "Gemini 2.5 Flash-Lite", + "supportsVision": true, 
+ "supportsTemperature": true, + "maxInputTokens": 1048576 +} diff --git a/models/gemini-2.5-flash-preview-09-2025.json b/models/gemini-2.5-flash-preview-09-2025.json new file mode 100644 index 0000000..72f9fd9 --- /dev/null +++ b/models/gemini-2.5-flash-preview-09-2025.json @@ -0,0 +1,6 @@ +{ + "displayName": "Gemini 2.5 Flash Preview", + "supportsVision": true, + "supportsTemperature": true, + "maxInputTokens": 1048576 +} diff --git a/models/gemini-2.5-flash.json b/models/gemini-2.5-flash.json new file mode 100644 index 0000000..ae04907 --- /dev/null +++ b/models/gemini-2.5-flash.json @@ -0,0 +1,6 @@ +{ + "displayName": "Gemini 2.5 Flash", + "supportsVision": true, + "supportsTemperature": true, + "maxInputTokens": 1048576 +} diff --git a/models/gemini-2.5-pro.json b/models/gemini-2.5-pro.json new file mode 100644 index 0000000..118ccf5 --- /dev/null +++ b/models/gemini-2.5-pro.json @@ -0,0 +1,6 @@ +{ + "displayName": "Gemini 2.5 Pro", + "supportsVision": true, + "supportsTemperature": true, + "maxInputTokens": 1048576 +} diff --git a/models/gemini-3-flash-preview.json b/models/gemini-3-flash-preview.json new file mode 100644 index 0000000..ca75822 --- /dev/null +++ b/models/gemini-3-flash-preview.json @@ -0,0 +1,23 @@ +{ + "displayName": "Gemini 3 Flash Preview", + "supportsVision": true, + "supportsTemperature": true, + "maxInputTokens": 1048576, + "reasoning": { + "default": "high", + "levels": { + "minimal": { + "maxInputTokens": 1048576 + }, + "low": { + "maxInputTokens": 1048576 + }, + "medium": { + "maxInputTokens": 1048576 + }, + "high": { + "maxInputTokens": 1048576 + } + } + } +} diff --git a/models/gemini-3.1-flash-lite-preview.json b/models/gemini-3.1-flash-lite-preview.json new file mode 100644 index 0000000..b1a0f14 --- /dev/null +++ b/models/gemini-3.1-flash-lite-preview.json @@ -0,0 +1,23 @@ +{ + "displayName": "Gemini 3.1 Flash-Lite Preview", + "supportsVision": true, + "supportsTemperature": true, + "maxInputTokens": 1048576, + "reasoning": { + 
"default": "minimal", + "levels": { + "minimal": { + "maxInputTokens": 1048576 + }, + "low": { + "maxInputTokens": 1048576 + }, + "medium": { + "maxInputTokens": 1048576 + }, + "high": { + "maxInputTokens": 1048576 + } + } + } +} diff --git a/models/gemini-3.1-pro-preview.json b/models/gemini-3.1-pro-preview.json new file mode 100644 index 0000000..7265183 --- /dev/null +++ b/models/gemini-3.1-pro-preview.json @@ -0,0 +1,20 @@ +{ + "displayName": "Gemini 3.1 Pro Preview", + "supportsVision": true, + "supportsTemperature": true, + "maxInputTokens": 1048576, + "reasoning": { + "default": "high", + "levels": { + "low": { + "maxInputTokens": 1048576 + }, + "medium": { + "maxInputTokens": 1048576 + }, + "high": { + "maxInputTokens": 1048576 + } + } + } +} diff --git a/models/gpt-4.1-mini.json b/models/gpt-4.1-mini.json new file mode 100644 index 0000000..f7bd443 --- /dev/null +++ b/models/gpt-4.1-mini.json @@ -0,0 +1,6 @@ +{ + "displayName": "GPT-4.1 mini", + "supportsVision": true, + "supportsTemperature": true, + "maxInputTokens": 1047576 +} diff --git a/models/gpt-4.1-nano.json b/models/gpt-4.1-nano.json new file mode 100644 index 0000000..1ecb959 --- /dev/null +++ b/models/gpt-4.1-nano.json @@ -0,0 +1,6 @@ +{ + "displayName": "GPT-4.1 nano", + "supportsVision": true, + "supportsTemperature": true, + "maxInputTokens": 1047576 +} diff --git a/models/openai/gpt-4o.json b/models/gpt-4.1.json similarity index 52% rename from models/openai/gpt-4o.json rename to models/gpt-4.1.json index 743a42f..326002c 100644 --- a/models/openai/gpt-4o.json +++ b/models/gpt-4.1.json @@ -1,6 +1,6 @@ { - "displayName": "GPT-4o", + "displayName": "GPT-4.1", "supportsVision": true, "supportsTemperature": true, - "maxInputTokens": 128000 + "maxInputTokens": 1047576 } diff --git a/models/gpt-5.1.json b/models/gpt-5.1.json index da66833..e54a2d7 100644 --- a/models/gpt-5.1.json +++ b/models/gpt-5.1.json @@ -2,5 +2,22 @@ "displayName": "GPT-5.1", "supportsVision": true, "supportsTemperature": 
false, - "maxInputTokens": 1000000 + "maxInputTokens": 400000, + "reasoning": { + "default": "none", + "levels": { + "none": { + "maxInputTokens": 400000 + }, + "low": { + "maxInputTokens": 400000 + }, + "medium": { + "maxInputTokens": 400000 + }, + "high": { + "maxInputTokens": 400000 + } + } + } } diff --git a/models/gpt-5.2-pro.json b/models/gpt-5.2-pro.json new file mode 100644 index 0000000..156c2d1 --- /dev/null +++ b/models/gpt-5.2-pro.json @@ -0,0 +1,20 @@ +{ + "displayName": "GPT-5.2 pro", + "supportsVision": true, + "supportsTemperature": false, + "maxInputTokens": 400000, + "reasoning": { + "default": "high", + "levels": { + "medium": { + "maxInputTokens": 400000 + }, + "high": { + "maxInputTokens": 400000 + }, + "xhigh": { + "maxInputTokens": 400000 + } + } + } +} diff --git a/models/gpt-5.2.json b/models/gpt-5.2.json new file mode 100644 index 0000000..95eac49 --- /dev/null +++ b/models/gpt-5.2.json @@ -0,0 +1,26 @@ +{ + "displayName": "GPT-5.2", + "supportsVision": true, + "supportsTemperature": false, + "maxInputTokens": 400000, + "reasoning": { + "default": "none", + "levels": { + "none": { + "maxInputTokens": 400000 + }, + "low": { + "maxInputTokens": 400000 + }, + "medium": { + "maxInputTokens": 400000 + }, + "high": { + "maxInputTokens": 400000 + }, + "xhigh": { + "maxInputTokens": 400000 + } + } + } +} diff --git a/models/gpt-5.4-mini.json b/models/gpt-5.4-mini.json new file mode 100644 index 0000000..1478ad6 --- /dev/null +++ b/models/gpt-5.4-mini.json @@ -0,0 +1,6 @@ +{ + "displayName": "GPT-5.4 mini", + "supportsVision": true, + "supportsTemperature": false, + "maxInputTokens": 400000 +} diff --git a/models/gpt-5.4-nano.json b/models/gpt-5.4-nano.json new file mode 100644 index 0000000..ec0b6ff --- /dev/null +++ b/models/gpt-5.4-nano.json @@ -0,0 +1,6 @@ +{ + "displayName": "GPT-5.4 nano", + "supportsVision": true, + "supportsTemperature": false, + "maxInputTokens": 400000 +} diff --git a/models/gpt-5.4-pro.json b/models/gpt-5.4-pro.json new 
file mode 100644 index 0000000..aa9cf39 --- /dev/null +++ b/models/gpt-5.4-pro.json @@ -0,0 +1,20 @@ +{ + "displayName": "GPT-5.4 pro", + "supportsVision": true, + "supportsTemperature": false, + "maxInputTokens": 1050000, + "reasoning": { + "default": "high", + "levels": { + "medium": { + "maxInputTokens": 1050000 + }, + "high": { + "maxInputTokens": 1050000 + }, + "xhigh": { + "maxInputTokens": 1050000 + } + } + } +} diff --git a/models/gpt-5.4.json b/models/gpt-5.4.json new file mode 100644 index 0000000..778428e --- /dev/null +++ b/models/gpt-5.4.json @@ -0,0 +1,26 @@ +{ + "displayName": "GPT-5.4", + "supportsVision": true, + "supportsTemperature": false, + "maxInputTokens": 1050000, + "reasoning": { + "default": "none", + "levels": { + "none": { + "maxInputTokens": 1050000 + }, + "low": { + "maxInputTokens": 1050000 + }, + "medium": { + "maxInputTokens": 1050000 + }, + "high": { + "maxInputTokens": 1050000 + }, + "xhigh": { + "maxInputTokens": 1050000 + } + } + } +} diff --git a/models/o3.json b/models/o3.json deleted file mode 100644 index e0d2ee9..0000000 --- a/models/o3.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "displayName": "o3", - "supportsVision": true, - "supportsTemperature": false, - "maxInputTokens": 200000, - "reasoning": { - "default": "medium", - "levels": { - "low": { - "maxInputTokens": 120000 - }, - "medium": { - "maxInputTokens": 200000 - }, - "high": { - "maxInputTokens": 200000 - } - } - } -} diff --git a/providers/openrouter/openai/gpt-4o.json b/providers/openrouter/openai/gpt-4o.json deleted file mode 100644 index 9a394b9..0000000 --- a/providers/openrouter/openai/gpt-4o.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "displayName": "OpenRouter GPT-4o", - "supportsVision": true, - "supportsTemperature": false, - "maxInputTokens": 200000 -} From b27261174e2ac35e12702a7da1edef4bf3a319a6 Mon Sep 17 00:00:00 2001 From: Alex Kuleshov Date: Mon, 23 Mar 2026 13:17:42 -0400 Subject: [PATCH 3/9] feat(models): remove gpt-4.x configs --- README.md | 2 +- 
models/gpt-4.1-mini.json | 6 ------ models/gpt-4.1-nano.json | 6 ------ models/gpt-4.1.json | 6 ------ 4 files changed, 1 insertion(+), 19 deletions(-) delete mode 100644 models/gpt-4.1-mini.json delete mode 100644 models/gpt-4.1-nano.json delete mode 100644 models/gpt-4.1.json diff --git a/README.md b/README.md index acfbc07..cdd6d24 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,6 @@ Excluded on purpose: Current families included: -- OpenAI GPT versioned models +- OpenAI GPT-5 versioned models - Anthropic Claude current aliases - Gemini current text multimodal models diff --git a/models/gpt-4.1-mini.json b/models/gpt-4.1-mini.json deleted file mode 100644 index f7bd443..0000000 --- a/models/gpt-4.1-mini.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "displayName": "GPT-4.1 mini", - "supportsVision": true, - "supportsTemperature": true, - "maxInputTokens": 1047576 -} diff --git a/models/gpt-4.1-nano.json b/models/gpt-4.1-nano.json deleted file mode 100644 index 1ecb959..0000000 --- a/models/gpt-4.1-nano.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "displayName": "GPT-4.1 nano", - "supportsVision": true, - "supportsTemperature": true, - "maxInputTokens": 1047576 -} diff --git a/models/gpt-4.1.json b/models/gpt-4.1.json deleted file mode 100644 index 326002c..0000000 --- a/models/gpt-4.1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "displayName": "GPT-4.1", - "supportsVision": true, - "supportsTemperature": true, - "maxInputTokens": 1047576 -} From f78201c95955ed4ed7a6a11a89d3827511b4a893 Mon Sep 17 00:00:00 2001 From: Alex Kuleshov Date: Mon, 23 Mar 2026 13:18:30 -0400 Subject: [PATCH 4/9] feat(models): remove dated gemini aliases --- models/gemini-2.5-flash-lite-preview-09-2025.json | 6 ------ models/gemini-2.5-flash-preview-09-2025.json | 6 ------ 2 files changed, 12 deletions(-) delete mode 100644 models/gemini-2.5-flash-lite-preview-09-2025.json delete mode 100644 models/gemini-2.5-flash-preview-09-2025.json diff --git a/models/gemini-2.5-flash-lite-preview-09-2025.json 
b/models/gemini-2.5-flash-lite-preview-09-2025.json deleted file mode 100644 index aa34a24..0000000 --- a/models/gemini-2.5-flash-lite-preview-09-2025.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "displayName": "Gemini 2.5 Flash-Lite Preview", - "supportsVision": true, - "supportsTemperature": true, - "maxInputTokens": 1048576 -} diff --git a/models/gemini-2.5-flash-preview-09-2025.json b/models/gemini-2.5-flash-preview-09-2025.json deleted file mode 100644 index 72f9fd9..0000000 --- a/models/gemini-2.5-flash-preview-09-2025.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "displayName": "Gemini 2.5 Flash Preview", - "supportsVision": true, - "supportsTemperature": true, - "maxInputTokens": 1048576 -} From 374ea1c0e975c09cd9f2588dd3a35458a9f9678c Mon Sep 17 00:00:00 2001 From: Alex Kuleshov Date: Mon, 23 Mar 2026 13:22:10 -0400 Subject: [PATCH 5/9] docs: add supported models table --- README.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/README.md b/README.md index cdd6d24..06a6cc7 100644 --- a/README.md +++ b/README.md @@ -22,3 +22,24 @@ Current families included: - OpenAI GPT-5 versioned models - Anthropic Claude current aliases - Gemini current text multimodal models + +## Supported models + +| Family | Model ID | Display name | Vision | Temperature | Reasoning | +| --- | --- | --- | --- | --- | --- | +| OpenAI | `gpt-5.1` | GPT-5.1 | yes | no | `none`, `low`, `medium`, `high` | +| OpenAI | `gpt-5.2` | GPT-5.2 | yes | no | `none`, `low`, `medium`, `high`, `xhigh` | +| OpenAI | `gpt-5.2-pro` | GPT-5.2 pro | yes | no | `medium`, `high`, `xhigh` | +| OpenAI | `gpt-5.4` | GPT-5.4 | yes | no | `none`, `low`, `medium`, `high`, `xhigh` | +| OpenAI | `gpt-5.4-mini` | GPT-5.4 mini | yes | no | no explicit reasoning map | +| OpenAI | `gpt-5.4-nano` | GPT-5.4 nano | yes | no | no explicit reasoning map | +| OpenAI | `gpt-5.4-pro` | GPT-5.4 pro | yes | no | `medium`, `high`, `xhigh` | +| Anthropic | `claude-opus-4-6` | Claude Opus 4.6 | yes | yes | no 
explicit reasoning map | +| Anthropic | `claude-sonnet-4-6` | Claude Sonnet 4.6 | yes | yes | no explicit reasoning map | +| Anthropic | `claude-haiku-4-5` | Claude Haiku 4.5 | yes | yes | no explicit reasoning map | +| Gemini | `gemini-2.5-pro` | Gemini 2.5 Pro | yes | yes | no explicit reasoning map | +| Gemini | `gemini-2.5-flash` | Gemini 2.5 Flash | yes | yes | no explicit reasoning map | +| Gemini | `gemini-2.5-flash-lite` | Gemini 2.5 Flash-Lite | yes | yes | no explicit reasoning map | +| Gemini | `gemini-3-flash-preview` | Gemini 3 Flash Preview | yes | yes | `minimal`, `low`, `medium`, `high` | +| Gemini | `gemini-3.1-flash-lite-preview` | Gemini 3.1 Flash-Lite Preview | yes | yes | `minimal`, `low`, `medium`, `high` | +| Gemini | `gemini-3.1-pro-preview` | Gemini 3.1 Pro Preview | yes | yes | `low`, `medium`, `high` | From d1956fab37c7eebd496620025eda4fdf73f03b9b Mon Sep 17 00:00:00 2001 From: Alex Kuleshov Date: Mon, 23 Mar 2026 13:23:23 -0400 Subject: [PATCH 6/9] docs: simplify readme summary --- README.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/README.md b/README.md index 06a6cc7..14fc097 100644 --- a/README.md +++ b/README.md @@ -11,12 +11,6 @@ Each JSON file contains full `ModelSettings` without the `provider` field. This repository intentionally contains only current general-purpose text/vision models used in Model Catalog discovery. 
-Excluded on purpose: - -- deprecated models -- legacy `gpt-4o` / `o3` style families -- audio-only, image-only, TTS, live, and other specialized endpoints - Current families included: - OpenAI GPT-5 versioned models From a6b12e641b96efac1fd814e09e51e64ac5376f3a Mon Sep 17 00:00:00 2001 From: Alex Kuleshov Date: Mon, 23 Mar 2026 13:27:17 -0400 Subject: [PATCH 7/9] feat(models): add codex model configs --- README.md | 9 ++++++++- models/gpt-5-codex.json | 23 +++++++++++++++++++++++ models/gpt-5.1-codex-max.json | 23 +++++++++++++++++++++++ models/gpt-5.1-codex-mini.json | 23 +++++++++++++++++++++++ models/gpt-5.1-codex.json | 23 +++++++++++++++++++++++ models/gpt-5.2-codex.json | 23 +++++++++++++++++++++++ models/gpt-5.3-codex-spark.json | 6 ++++++ models/gpt-5.3-codex.json | 23 +++++++++++++++++++++++ 8 files changed, 152 insertions(+), 1 deletion(-) create mode 100644 models/gpt-5-codex.json create mode 100644 models/gpt-5.1-codex-max.json create mode 100644 models/gpt-5.1-codex-mini.json create mode 100644 models/gpt-5.1-codex.json create mode 100644 models/gpt-5.2-codex.json create mode 100644 models/gpt-5.3-codex-spark.json create mode 100644 models/gpt-5.3-codex.json diff --git a/README.md b/README.md index 14fc097..e07dde8 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ This repository intentionally contains only current general-purpose text/vision Current families included: -- OpenAI GPT-5 versioned models +- OpenAI GPT-5 and Codex models - Anthropic Claude current aliases - Gemini current text multimodal models @@ -22,8 +22,15 @@ Current families included: | Family | Model ID | Display name | Vision | Temperature | Reasoning | | --- | --- | --- | --- | --- | --- | | OpenAI | `gpt-5.1` | GPT-5.1 | yes | no | `none`, `low`, `medium`, `high` | +| OpenAI | `gpt-5-codex` | GPT-5-Codex | yes | no | `minimal`, `low`, `medium`, `high` | +| OpenAI | `gpt-5.1-codex` | GPT-5.1 Codex | yes | no | `none`, `low`, `medium`, `high` | +| OpenAI | 
`gpt-5.1-codex-mini` | GPT-5.1 Codex mini | yes | no | `none`, `low`, `medium`, `high` | +| OpenAI | `gpt-5.1-codex-max` | GPT-5.1 Codex Max | yes | no | `none`, `low`, `medium`, `high` | | OpenAI | `gpt-5.2` | GPT-5.2 | yes | no | `none`, `low`, `medium`, `high`, `xhigh` | +| OpenAI | `gpt-5.2-codex` | GPT-5.2-Codex | yes | no | `low`, `medium`, `high`, `xhigh` | | OpenAI | `gpt-5.2-pro` | GPT-5.2 pro | yes | no | `medium`, `high`, `xhigh` | +| OpenAI | `gpt-5.3-codex` | GPT-5.3-Codex | yes | no | `low`, `medium`, `high`, `xhigh` | +| OpenAI | `gpt-5.3-codex-spark` | GPT-5.3-Codex-Spark | no | no | no explicit reasoning map | | OpenAI | `gpt-5.4` | GPT-5.4 | yes | no | `none`, `low`, `medium`, `high`, `xhigh` | | OpenAI | `gpt-5.4-mini` | GPT-5.4 mini | yes | no | no explicit reasoning map | | OpenAI | `gpt-5.4-nano` | GPT-5.4 nano | yes | no | no explicit reasoning map | diff --git a/models/gpt-5-codex.json b/models/gpt-5-codex.json new file mode 100644 index 0000000..894c450 --- /dev/null +++ b/models/gpt-5-codex.json @@ -0,0 +1,23 @@ +{ + "displayName": "GPT-5-Codex", + "supportsVision": true, + "supportsTemperature": false, + "maxInputTokens": 400000, + "reasoning": { + "default": "medium", + "levels": { + "minimal": { + "maxInputTokens": 400000 + }, + "low": { + "maxInputTokens": 400000 + }, + "medium": { + "maxInputTokens": 400000 + }, + "high": { + "maxInputTokens": 400000 + } + } + } +} diff --git a/models/gpt-5.1-codex-max.json b/models/gpt-5.1-codex-max.json new file mode 100644 index 0000000..c73e978 --- /dev/null +++ b/models/gpt-5.1-codex-max.json @@ -0,0 +1,23 @@ +{ + "displayName": "GPT-5.1 Codex Max", + "supportsVision": true, + "supportsTemperature": false, + "maxInputTokens": 400000, + "reasoning": { + "default": "high", + "levels": { + "none": { + "maxInputTokens": 400000 + }, + "low": { + "maxInputTokens": 400000 + }, + "medium": { + "maxInputTokens": 400000 + }, + "high": { + "maxInputTokens": 400000 + } + } + } +} diff --git 
a/models/gpt-5.1-codex-mini.json b/models/gpt-5.1-codex-mini.json new file mode 100644 index 0000000..2973f60 --- /dev/null +++ b/models/gpt-5.1-codex-mini.json @@ -0,0 +1,23 @@ +{ + "displayName": "GPT-5.1 Codex mini", + "supportsVision": true, + "supportsTemperature": false, + "maxInputTokens": 400000, + "reasoning": { + "default": "medium", + "levels": { + "none": { + "maxInputTokens": 400000 + }, + "low": { + "maxInputTokens": 400000 + }, + "medium": { + "maxInputTokens": 400000 + }, + "high": { + "maxInputTokens": 400000 + } + } + } +} diff --git a/models/gpt-5.1-codex.json b/models/gpt-5.1-codex.json new file mode 100644 index 0000000..bece5cd --- /dev/null +++ b/models/gpt-5.1-codex.json @@ -0,0 +1,23 @@ +{ + "displayName": "GPT-5.1 Codex", + "supportsVision": true, + "supportsTemperature": false, + "maxInputTokens": 400000, + "reasoning": { + "default": "medium", + "levels": { + "none": { + "maxInputTokens": 400000 + }, + "low": { + "maxInputTokens": 400000 + }, + "medium": { + "maxInputTokens": 400000 + }, + "high": { + "maxInputTokens": 400000 + } + } + } +} diff --git a/models/gpt-5.2-codex.json b/models/gpt-5.2-codex.json new file mode 100644 index 0000000..c2f3005 --- /dev/null +++ b/models/gpt-5.2-codex.json @@ -0,0 +1,23 @@ +{ + "displayName": "GPT-5.2-Codex", + "supportsVision": true, + "supportsTemperature": false, + "maxInputTokens": 400000, + "reasoning": { + "default": "medium", + "levels": { + "low": { + "maxInputTokens": 400000 + }, + "medium": { + "maxInputTokens": 400000 + }, + "high": { + "maxInputTokens": 400000 + }, + "xhigh": { + "maxInputTokens": 400000 + } + } + } +} diff --git a/models/gpt-5.3-codex-spark.json b/models/gpt-5.3-codex-spark.json new file mode 100644 index 0000000..fb0b1a7 --- /dev/null +++ b/models/gpt-5.3-codex-spark.json @@ -0,0 +1,6 @@ +{ + "displayName": "GPT-5.3-Codex-Spark", + "supportsVision": false, + "supportsTemperature": false, + "maxInputTokens": 128000 +} diff --git a/models/gpt-5.3-codex.json 
b/models/gpt-5.3-codex.json new file mode 100644 index 0000000..631085f --- /dev/null +++ b/models/gpt-5.3-codex.json @@ -0,0 +1,23 @@ +{ + "displayName": "GPT-5.3-Codex", + "supportsVision": true, + "supportsTemperature": false, + "maxInputTokens": 400000, + "reasoning": { + "default": "medium", + "levels": { + "low": { + "maxInputTokens": 400000 + }, + "medium": { + "maxInputTokens": 400000 + }, + "high": { + "maxInputTokens": 400000 + }, + "xhigh": { + "maxInputTokens": 400000 + } + } + } +} From cb881018521f56c726333a86bc2e48a58cc14544 Mon Sep 17 00:00:00 2001 From: Alex Kuleshov Date: Mon, 23 Mar 2026 13:27:26 -0400 Subject: [PATCH 8/9] docs: add model source instructions --- AGENTS.md | 84 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100644 AGENTS.md diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000..004fad3 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,84 @@ +# AGENTS.md for `golemcore-models` + +This repository is a shared registry of provider-agnostic `ModelSettings` JSON files used by GolemCore during model discovery. + +## Repository contract + +- Store shared defaults under `models/<model-id>.json`. +- Store provider-specific overrides under `providers/<provider>/<model-id>.json` only when a provider genuinely needs different defaults. +- JSON files must contain full `ModelSettings` without the `provider` field. +- Prefer stable shared model IDs over dated aliases, snapshots, or temporary rollout IDs. + +## Primary data sources + +Always prefer official provider documentation over third-party catalogs, SDK enums, forum posts, or blog summaries. 
+ +### OpenAI + +Primary sources: + +- `https://developers.openai.com/api/docs/models` +- model-specific pages under `https://platform.openai.com/docs/models/*` +- relevant official guides when model behavior is described there, for example Codex or GPT-5 guides + +Use OpenAI docs to confirm: + +- canonical model ID +- supported modalities +- context window +- max output tokens when available +- reasoning effort support, if explicitly documented + +### Anthropic + +Primary sources: + +- `https://docs.anthropic.com/` +- Claude model overview / model reference pages under official Anthropic docs + +Use Anthropic docs to confirm: + +- canonical model ID or stable alias +- context window +- image support +- current family naming + +### Gemini + +Primary sources: + +- `https://ai.google.dev/gemini-api/docs/models` +- other official Gemini API docs under `https://ai.google.dev/` + +Use Gemini docs to confirm: + +- canonical model ID +- context window +- multimodal support +- stable alias vs dated preview alias + +## Mapping rules + +When converting provider docs into `ModelSettings`: + +- `displayName`: use the public product name shown in provider docs +- `supportsVision`: `true` only when image input is supported +- `supportsTemperature`: set from explicit provider compatibility guidance when documented; otherwise use the most conservative safe value +- `maxInputTokens`: use the published context window +- `reasoning`: include only when the provider explicitly documents supported reasoning levels, or when the setting is intentionally inherited from the base model family and that inheritance is obvious + +## Exclusions + +Do not add: + +- deprecated models +- dated aliases when a stable alias exists +- snapshots unless the repository deliberately decides to pin them +- audio-only, image-only, realtime-only, TTS, embedding, moderation, or other specialized endpoint models unless the catalog explicitly expands scope + +## Validation + +Before committing: + +- run `jq 
empty models/*.json` and, if applicable, `jq empty providers/**/*.json` +- keep `README.md` in sync with the supported models actually present in the repository From 1bb8ce1c454ce7e5a3be6cdb2e3fe37fbdb14528 Mon Sep 17 00:00:00 2001 From: Alex Kuleshov Date: Mon, 23 Mar 2026 13:31:13 -0400 Subject: [PATCH 9/9] chore(github): add commit message guard --- .github/workflows/commit-message-check.yml | 71 ++++++++++++++++++++++ AGENTS.md | 39 ++++++++++++ 2 files changed, 110 insertions(+) create mode 100644 .github/workflows/commit-message-check.yml diff --git a/.github/workflows/commit-message-check.yml b/.github/workflows/commit-message-check.yml new file mode 100644 index 0000000..bd91375 --- /dev/null +++ b/.github/workflows/commit-message-check.yml @@ -0,0 +1,71 @@ +name: Commit Message Check + +on: + pull_request: + types: + - opened + - synchronize + - reopened + - edited + push: + branches-ignore: + - main + +jobs: + conventional-commits: + name: conventional-commits + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Validate commit messages + env: + EVENT_NAME: ${{ github.event_name }} + BEFORE_SHA: ${{ github.event.before }} + AFTER_SHA: ${{ github.sha }} + BASE_SHA: ${{ github.event.pull_request.base.sha }} + HEAD_SHA: ${{ github.event.pull_request.head.sha }} + run: | + set -euo pipefail + + regex='^(feat|fix|refactor|test|docs|chore|perf|style|revert)(\([a-z0-9][a-z0-9._/-]*\))?(!)?: [^ ].*[^.]$' + + if [ "$EVENT_NAME" = "pull_request" ]; then + range="${BASE_SHA}..${HEAD_SHA}" + else + if [ "${BEFORE_SHA}" = "0000000000000000000000000000000000000000" ]; then + range="${AFTER_SHA}" + else + range="${BEFORE_SHA}..${AFTER_SHA}" + fi + fi + + echo "Validating commits in range: ${range}" + + invalid=0 + + while IFS=$'\t' read -r sha subject; do + if [ -z "${sha}" ]; then + continue + fi + + if [[ "${subject}" =~ ^Merge[[:space:]] ]]; then + continue + fi + + if [[ ! 
"${subject}" =~ ${regex} ]]; then + echo "Invalid commit message: ${sha} ${subject}" + invalid=1 + fi + done < <(git log --format='%H%x09%s' "${range}") + + if [ "${invalid}" -ne 0 ]; then + echo "Commit message validation failed. Use Conventional Commits." + exit 1 + fi + + echo "All commit messages passed Conventional Commit validation." diff --git a/AGENTS.md b/AGENTS.md index 004fad3..d00b7b9 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -82,3 +82,42 @@ Before committing: - run `jq empty models/*.json` and, if applicable, `jq empty providers/**/*.json` - keep `README.md` in sync with the supported models actually present in the repository + +## Git workflow + +- Direct pushes to `main` are prohibited. +- All changes must go through a feature branch and Pull Request. +- Keep commits focused and reviewable. + +## Commit messages + +Use Conventional Commits. + +Format: + +`[optional scope]: ` + +Allowed types: + +- `feat` +- `fix` +- `refactor` +- `test` +- `docs` +- `chore` +- `perf` +- `style` +- `revert` + +Rules: + +- use imperative mood +- keep the subject concise +- do not end the subject with a period +- use a scope when it improves clarity, for example `models`, `readme`, `registry`, or `github` + +Examples: + +- `feat(models): add gpt-5.3-codex defaults` +- `docs(readme): update supported models table` +- `chore(github): add commit message workflow`