diff --git a/src/openai_messages_token_helper/model_helper.py b/src/openai_messages_token_helper/model_helper.py
index 3dc8f85..cb6e2e8 100644
--- a/src/openai_messages_token_helper/model_helper.py
+++ b/src/openai_messages_token_helper/model_helper.py
@@ -40,13 +40,17 @@
     "gpt-5-mini": 272000,
     "gpt-5-nano": 272000,
     "gpt-5-chat": 128000,
+    "gpt-5.1": 272000,
+    "gpt-5.1-chat": 111616,
+    "gpt-5.2": 272000,
+    "gpt-5.2-chat": 111616,
 }

 AOAI_2_OAI = {"gpt-35-turbo": "gpt-3.5-turbo", "gpt-35-turbo-16k": "gpt-3.5-turbo-16k", "gpt-4v": "gpt-4-turbo-vision"}

 # Set of reasoning models that cannot have token usage pre-estimated
-REASONING_MODELS = {"gpt-5", "gpt-5-mini", "gpt-5-nano"}
+REASONING_MODELS = {"gpt-5", "gpt-5-mini", "gpt-5-nano", "gpt-5.1", "gpt-5.1-chat", "gpt-5.2"}

 logger = logging.getLogger("openai_messages_token_helper")
diff --git a/tests/test_modelhelper.py b/tests/test_modelhelper.py
index 3b79e54..bec164c 100644
--- a/tests/test_modelhelper.py
+++ b/tests/test_modelhelper.py
@@ -20,6 +20,10 @@ def test_get_token_limit():
     assert get_token_limit("gpt-5-mini") == 272000
     assert get_token_limit("gpt-5-nano") == 272000
     assert get_token_limit("gpt-5-chat") == 128000
+    assert get_token_limit("gpt-5.1") == 272000
+    assert get_token_limit("gpt-5.1-chat") == 111616
+    assert get_token_limit("gpt-5.2") == 272000
+    assert get_token_limit("gpt-5.2-chat") == 111616


 def test_get_token_limit_error():