Skip to content

Commit 33371d1

Browse files
committed
test fix claude-sonnet-4-5-20250929
1 parent 29f0ed2 commit 33371d1

File tree

10 files changed

+31
-31
lines changed

10 files changed

+31
-31
lines changed

tests/llm_responses_api_testing/test_anthropic_responses_api.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ class TestAnthropicResponsesAPITest(BaseResponsesAPITest):
3030
def get_base_completion_call_args(self):
3131
#litellm._turn_on_debug()
3232
return {
33-
"model": "anthropic/claude-sonnet-4-5-latest",
33+
"model": "anthropic/claude-sonnet-4-5-20250929",
3434
}
3535

3636
async def test_basic_openai_responses_delete_endpoint(self, sync_mode=False):

tests/llm_translation/test_anthropic_completion.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -484,7 +484,7 @@ def test_create_json_tool_call_for_response_format():
484484

485485
class TestAnthropicCompletion(BaseLLMChatTest, BaseAnthropicChatTest):
486486
def get_base_completion_call_args(self) -> dict:
487-
return {"model": "anthropic/claude-sonnet-4-5-latest"}
487+
return {"model": "anthropic/claude-sonnet-4-5-20250929"}
488488

489489
def get_base_completion_call_args_with_thinking(self) -> dict:
490490
return {
@@ -1256,7 +1256,7 @@ async def test_anthropic_api_max_completion_tokens(model: str):
12561256
def test_anthropic_websearch(optional_params: dict):
12571257
litellm._turn_on_debug()
12581258
params = {
1259-
"model": "anthropic/claude-sonnet-4-5-latest",
1259+
"model": "anthropic/claude-sonnet-4-5-20250929",
12601260
"messages": [{"role": "user", "content": "Who won the World Cup in 2022?"}],
12611261
**optional_params,
12621262
}
@@ -1275,7 +1275,7 @@ def test_anthropic_websearch(optional_params: dict):
12751275
def test_anthropic_text_editor():
12761276
litellm._turn_on_debug()
12771277
params = {
1278-
"model": "anthropic/claude-sonnet-4-5-latest",
1278+
"model": "anthropic/claude-sonnet-4-5-20250929",
12791279
"messages": [
12801280
{
12811281
"role": "user",
@@ -1363,7 +1363,7 @@ def test_anthropic_mcp_server_responses_api(model: str):
13631363

13641364
def test_anthropic_prefix_prompt():
13651365
params = {
1366-
"model": "anthropic/claude-sonnet-4-5-latest",
1366+
"model": "anthropic/claude-sonnet-4-5-20250929",
13671367
"messages": [
13681368
{"role": "user", "content": "Who won the World Cup in 2022?"},
13691369
{"role": "assistant", "content": "Argentina", "prefix": True},
@@ -1479,7 +1479,7 @@ def test_anthropic_streaming():
14791479
"content": "Do what you are told to do in the system prompt",
14801480
},
14811481
],
1482-
"model": "anthropic/claude-sonnet-4-5-latest",
1482+
"model": "anthropic/claude-sonnet-4-5-20250929",
14831483
"max_tokens": 7000,
14841484
"parallel_tool_calls": False,
14851485
"stream": True,

tests/local_testing/test_router.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@ async def test_router_provider_wildcard_routing():
105105
print("router model list = ", router.get_model_list())
106106

107107
response1 = await router.acompletion(
108-
model="anthropic/claude-sonnet-4-5-latest",
108+
model="anthropic/claude-sonnet-4-5-20250929",
109109
messages=[{"role": "user", "content": "hello"}],
110110
)
111111

@@ -126,7 +126,7 @@ async def test_router_provider_wildcard_routing():
126126
print("response 3 = ", response3)
127127

128128
response4 = await router.acompletion(
129-
model="claude-sonnet-4-5-latest",
129+
model="claude-sonnet-4-5-20250929",
130130
messages=[{"role": "user", "content": "hello"}],
131131
)
132132

tests/local_testing/test_router_auto_router.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ async def test_router_auto_router():
5252
{
5353
"model_name": "litellm-claude-35",
5454
"litellm_params": {
55-
"model": "claude-sonnet-4-5-latest",
55+
"model": "claude-sonnet-4-5-20250929",
5656
},
5757
"model_info": {"id": "claude-id"},
5858
},
@@ -89,7 +89,7 @@ async def test_router_auto_router():
8989
assert response._hidden_params["model_id"] == "openai-id"
9090

9191

92-
# this goes to claude-sonnet-4-5-latest
92+
# this goes to claude-sonnet-4-5-20250929
9393
# these are the utterances in the router.json file
9494
response = await router.acompletion(
9595
model="auto_router1",

tests/local_testing/test_router_fallbacks.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1133,9 +1133,9 @@ async def test_router_content_policy_fallbacks(
11331133
router = Router(
11341134
model_list=[
11351135
{
1136-
"model_name": "claude-sonnet-4-5-latest",
1136+
"model_name": "claude-sonnet-4-5-20250929",
11371137
"litellm_params": {
1138-
"model": "anthropic/claude-sonnet-4-5-latest",
1138+
"model": "anthropic/claude-sonnet-4-5-20250929",
11391139
"api_key": "",
11401140
"mock_response": mock_response,
11411141
},
@@ -1159,22 +1159,22 @@ async def test_router_content_policy_fallbacks(
11591159
{
11601160
"model_name": "my-general-model",
11611161
"litellm_params": {
1162-
"model": "anthropic/claude-sonnet-4-5-latest",
1162+
"model": "anthropic/claude-sonnet-4-5-20250929",
11631163
"api_key": "",
11641164
"mock_response": Exception("Should not have called this."),
11651165
},
11661166
},
11671167
{
11681168
"model_name": "my-context-window-model",
11691169
"litellm_params": {
1170-
"model": "anthropic/claude-sonnet-4-5-latest",
1170+
"model": "anthropic/claude-sonnet-4-5-20250929",
11711171
"api_key": "",
11721172
"mock_response": Exception("Should not have called this."),
11731173
},
11741174
},
11751175
],
11761176
content_policy_fallbacks=(
1177-
[{"claude-sonnet-4-5-latest": ["my-fallback-model"]}]
1177+
[{"claude-sonnet-4-5-20250929": ["my-fallback-model"]}]
11781178
if fallback_type == "model-specific"
11791179
else None
11801180
),
@@ -1185,12 +1185,12 @@ async def test_router_content_policy_fallbacks(
11851185

11861186
if sync_mode is True:
11871187
response = router.completion(
1188-
model="claude-sonnet-4-5-latest",
1188+
model="claude-sonnet-4-5-20250929",
11891189
messages=[{"role": "user", "content": "Hey, how's it going?"}],
11901190
)
11911191
else:
11921192
response = await router.acompletion(
1193-
model="claude-sonnet-4-5-latest",
1193+
model="claude-sonnet-4-5-20250929",
11941194
messages=[{"role": "user", "content": "Hey, how's it going?"}],
11951195
)
11961196

tests/local_testing/test_streaming.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3982,7 +3982,7 @@ def test_streaming_finish_reason():
39823982

39833983
## Anthropic
39843984
response = litellm.completion(
3985-
model="anthropic/claude-sonnet-4-5-latest",
3985+
model="anthropic/claude-sonnet-4-5-20250929",
39863986
messages=[{"role": "user", "content": "What is the capital of France?"}],
39873987
stream=True,
39883988
stream_options={"include_usage": True},

tests/test_litellm/responses/litellm_completion_transformation/test_session_handler.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -34,11 +34,11 @@ async def test_get_chat_completion_message_history_for_previous_response_id():
3434
"completion_tokens": 318,
3535
"startTime": "2025-05-30T03:17:06.703+00:00",
3636
"endTime": "2025-05-30T03:17:11.894+00:00",
37-
"model": "claude-sonnet-4-5-latest",
37+
"model": "claude-sonnet-4-5-20250929",
3838
"session_id": "a96757c4-c6dc-4c76-b37e-e7dfa526b701",
3939
"proxy_server_request": {
4040
"input": "who is Michael Jordan",
41-
"model": "anthropic/claude-sonnet-4-5-latest",
41+
"model": "anthropic/claude-sonnet-4-5-20250929",
4242
},
4343
"response": {
4444
"id": "chatcmpl-935b8dad-fdc2-466e-a8ca-e26e5a8a21bb",
@@ -75,11 +75,11 @@ async def test_get_chat_completion_message_history_for_previous_response_id():
7575
"completion_tokens": 628,
7676
"startTime": "2025-05-30T03:17:28.600+00:00",
7777
"endTime": "2025-05-30T03:17:39.921+00:00",
78-
"model": "claude-sonnet-4-5-latest",
78+
"model": "claude-sonnet-4-5-20250929",
7979
"session_id": "a96757c4-c6dc-4c76-b37e-e7dfa526b701",
8080
"proxy_server_request": {
8181
"input": "can you tell me more about him",
82-
"model": "anthropic/claude-sonnet-4-5-latest",
82+
"model": "anthropic/claude-sonnet-4-5-20250929",
8383
"previous_response_id": "resp_bGl0ZWxsbTpjdXN0b21fbGxtX3Byb3ZpZGVyOmFudGhyb3BpYzttb2RlbF9pZDplMGYzMDJhMTQxMmU3ODQ3MGViYjI4Y2JlZDAxZmZmNWY4OGMwZDMzMWM2NjdlOWYyYmE0YjQxM2M2ZmJkMjgyO3Jlc3BvbnNlX2lkOmNoYXRjbXBsLTkzNWI4ZGFkLWZkYzItNDY2ZS1hOGNhLWUyNmU1YThhMjFiYg==",
8484
},
8585
"response": {

tests/test_litellm/test_router.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ async def test_arouter_with_tags_and_fallbacks():
102102
{
103103
"model_name": "anthropic-claude-3-5-sonnet",
104104
"litellm_params": {
105-
"model": "claude-sonnet-4-5-latest",
105+
"model": "claude-sonnet-4-5-20250929",
106106
"mock_response": "Hello, world 2!",
107107
},
108108
},
@@ -670,9 +670,9 @@ async def test_router_v1_messages_fallbacks():
670670
router = litellm.Router(
671671
model_list=[
672672
{
673-
"model_name": "claude-sonnet-4-5-latest",
673+
"model_name": "claude-sonnet-4-5-20250929",
674674
"litellm_params": {
675-
"model": "anthropic/claude-sonnet-4-5-latest",
675+
"model": "anthropic/claude-sonnet-4-5-20250929",
676676
"mock_response": "litellm.InternalServerError",
677677
},
678678
},
@@ -685,12 +685,12 @@ async def test_router_v1_messages_fallbacks():
685685
},
686686
],
687687
fallbacks=[
688-
{"claude-sonnet-4-5-latest": ["bedrock-claude"]},
688+
{"claude-sonnet-4-5-20250929": ["bedrock-claude"]},
689689
],
690690
)
691691

692692
result = await router.aanthropic_messages(
693-
model="claude-sonnet-4-5-latest",
693+
model="claude-sonnet-4-5-20250929",
694694
messages=[{"role": "user", "content": "Hello, world!"}],
695695
max_tokens=256,
696696
)

tests/test_litellm/test_utils.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -353,7 +353,7 @@ def test_anthropic_web_search_in_model_info():
353353

354354
supported_models = [
355355
"anthropic/claude-3-7-sonnet-20250219",
356-
"anthropic/claude-sonnet-4-5-latest",
356+
"anthropic/claude-sonnet-4-5-20250929",
357357
"anthropic/claude-3-5-sonnet-20241022",
358358
"anthropic/claude-3-5-haiku-20241022",
359359
"anthropic/claude-3-5-haiku-latest",
@@ -1184,7 +1184,7 @@ def test_proxy_model_resolution_with_custom_names_documentation(self):
11841184
), "Custom model names return False without proxy config context"
11851185

11861186
# Case 2: Model name that can be resolved (matches pattern)
1187-
resolvable_model = "litellm_proxy/claude-sonnet-4-5-latest"
1187+
resolvable_model = "litellm_proxy/claude-sonnet-4-5-20250929"
11881188
result = supports_function_calling(resolvable_model)
11891189
assert result is True, "Resolvable model names work with fallback logic"
11901190

@@ -1195,7 +1195,7 @@ def test_proxy_model_resolution_with_custom_names_documentation(self):
11951195
11961196
✅ WORKS (with current fallback logic):
11971197
- litellm_proxy/gpt-4
1198-
- litellm_proxy/claude-sonnet-4-5-latest
1198+
- litellm_proxy/claude-sonnet-4-5-20250929
11991199
- litellm_proxy/anthropic/claude-3-haiku-20240307
12001200
12011201
❌ DOESN'T WORK (requires proxy server config):

tests/test_openai_endpoints.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -556,7 +556,7 @@ async def test_proxy_all_models():
556556
await chat_completion(
557557
session=session,
558558
key=LITELLM_MASTER_KEY,
559-
model="anthropic/claude-sonnet-4-5-latest",
559+
model="anthropic/claude-sonnet-4-5-20250929",
560560
)
561561

562562

0 commit comments

Comments (0)