Commit cec9975

Remove the TogetherAI integration from the tests and use a dummy LLM response for pytest

1 parent de5345c commit cec9975
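
What changed: both test files previously instantiated a Together client and called mistralai/Mistral-7B-Instruct-v0.2 on every test; they now use a dummy LLM that simply fills the recommended prompt template with Template.safe_substitute and returns the filled prompt as the response, so the suite needs only AIMON_API_KEY. A minimal standalone sketch of that pattern (illustrative; the committed fixtures are in the diffs below):

from string import Template

def dummy_llm(prompt_template: Template, system_prompt=None, context=None, user_query=None) -> str:
    # Fill the recommended prompt template; safe_substitute tolerates
    # placeholders in the template that are not supplied here.
    filled_prompt = prompt_template.safe_substitute(
        system_prompt=system_prompt or "",
        context=context or "",
        user_query=user_query or "",
    )
    # Return the filled prompt itself as the "LLM response" -- no network call.
    return filled_prompt

print(dummy_llm(Template("$system_prompt\n$context\nQ: $user_query"), context="docs", user_query="hello"))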

File tree

3 files changed: +5, -144 lines

tests/test_reprompting_cases.py

Lines changed: 2 additions & 22 deletions
@@ -1,37 +1,23 @@
 import os
 import pytest
 from string import Template
-from together import Together
 from aimon.reprompting_api.config import RepromptingConfig
 from aimon.reprompting_api.runner import run_reprompting_pipeline
 
-TOGETHER_API_KEY = os.environ.get("TOGETHER_API_KEY")
 AIMON_API_KEY = os.environ.get("AIMON_API_KEY")
 
-client = Together(api_key=TOGETHER_API_KEY)
-
 # --- Fixtures ---
 
 @pytest.fixture
 def my_llm():
     """Mock LLM function for integration tests. Prints prompts and responses."""
     def _my_llm(recommended_prompt_template: Template, system_prompt, context, user_query) -> str:
-        filled_prompt = recommended_prompt_template.substitute(
+        filled_prompt = recommended_prompt_template.safe_substitute(
             system_prompt=system_prompt or "",
             context=context or "",
             user_query=user_query or ""
         )
-        print("\n==== LLM PROMPT SENT ====", flush=True)
-        print(filled_prompt, flush=True)
-        response = client.chat.completions.create(
-            model="mistralai/Mistral-7B-Instruct-v0.2",
-            messages=[{"role": "user", "content": filled_prompt}],
-            max_tokens=256,
-            temperature=0
-        )
-        print("\n==== LLM RAW RESPONSE ====", flush=True)
-        print(response.choices[0].message.content, flush=True)
-        return response.choices[0].message.content
+        return filled_prompt
     return _my_llm
 
 @pytest.fixture
@@ -104,7 +90,6 @@ def print_result(test_name, result):
 
 # --- Tests ---
 
-@pytest.mark.integration
 def test_low_latency_limit(my_llm, config_low_latency):
     """Test stopping behavior when latency limit is very low (100ms)."""
     result = run_reprompting_pipeline(
@@ -117,7 +102,6 @@ def test_low_latency_limit(my_llm, config_low_latency):
     print_result("Low Latency Limit Test (100ms)", result)
     assert "best_response" in result
 
-@pytest.mark.integration
 def test_latency_limit(my_llm, config_high_latency):
     """Test behavior with a high latency limit and contradictory instructions."""
     result = run_reprompting_pipeline(
@@ -130,7 +114,6 @@ def test_latency_limit(my_llm, config_high_latency):
     print_result("High Latency Limit Test (5000ms)", result)
     assert "best_response" in result
 
-@pytest.mark.integration
 def test_iteration_limit(my_llm, config_iteration_limit):
     """Test behavior when max_iterations is 1."""
     result = run_reprompting_pipeline(
@@ -144,7 +127,6 @@ def test_iteration_limit(my_llm, config_iteration_limit):
     print_result("Iteration Limit Test (no re-prompting, only 1 iteration allowed)", result)
     assert "best_response" in result
 
-@pytest.mark.integration
 def test_empty_context_and_instructions(my_llm, base_config):
     """Ensure pipeline works with no context, instructions, or system prompt."""
     result = run_reprompting_pipeline(
@@ -157,7 +139,6 @@ def test_empty_context_and_instructions(my_llm, base_config):
     print_result("Empty Context & Instructions Test", result)
     assert "best_response" in result
 
-@pytest.mark.integration
 def test_no_telemetry(my_llm, config_without_telemetry):
     """Confirm telemetry and summary are excluded when disabled in config."""
     result = run_reprompting_pipeline(
@@ -171,7 +152,6 @@ def test_no_telemetry(my_llm, config_without_telemetry):
     assert "telemetry" not in result
     assert "summary" not in result
 
-@pytest.mark.integration
 def test_no_system_prompt(my_llm, base_config):
     """Test behavior when system prompt is excluded."""
     result = run_reprompting_pipeline(
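
A note on the substitute -> safe_substitute change in the fixture above: string.Template.substitute raises KeyError if the template contains a placeholder that is not supplied, while safe_substitute fills what it can and leaves unknown placeholders in place, so a recommended template with extra fields cannot crash the dummy LLM. A standalone sketch of the difference (not from the repository):

from string import Template

t = Template("$system_prompt | $user_query | $extra_field")

try:
    # substitute() requires a value for every placeholder in the template.
    t.substitute(system_prompt="sys", user_query="q")
except KeyError as err:
    print("substitute failed:", err)   # KeyError: 'extra_field'

# safe_substitute() fills what it can and leaves the rest untouched.
print(t.safe_substitute(system_prompt="sys", user_query="q"))
# -> "sys | q | $extra_field"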

tests/test_reprompting_failures.py

Lines changed: 3 additions & 17 deletions
@@ -1,30 +1,21 @@
 import os
 import pytest
 from string import Template
-from together import Together
 import aimon
 from aimon.reprompting_api.config import RepromptingConfig
 from aimon.reprompting_api.runner import run_reprompting_pipeline
 
-TOGETHER_API_KEY = os.environ.get("TOGETHER_API_KEY")
 AIMON_API_KEY = os.environ.get("AIMON_API_KEY")
-client = Together(api_key=TOGETHER_API_KEY)
 
 # --- MOCKED LLM FUNCTIONS ---
 def my_llm(prompt_template: Template, system_prompt=None, context=None, user_query=None) -> str:
-    """Simulates a normal working LLM that returns a string response."""
-    filled_prompt = prompt_template.substitute(
+    """Simulates a normal working LLM that returns a string response. Just returns filled_prompt for test"""
+    filled_prompt = prompt_template.safe_substitute(
         system_prompt=system_prompt or "",
         context=context or "",
         user_query=user_query or ""
     )
-    response = client.chat.completions.create(
-        model="mistralai/Mistral-7B-Instruct-v0.2",
-        messages=[{"role": "user", "content": filled_prompt}],
-        max_tokens=256,
-        temperature=0
-    )
-    return response.choices[0].message.content
+    return filled_prompt
 
 def llm_fn_failure(prompt_template: Template, system_prompt=None, context=None, user_query=None) -> str:
     """Simulates an LLM call that fails every time."""
@@ -58,7 +49,6 @@ def get_config_with_invalid_aimon_api_key():
     )
 
 # --- TESTS EXPECTING FAILURES ---
-@pytest.mark.integration
 def test_llm_failure():
     """Should raise RuntimeError when the LLM function always fails."""
     config = get_config()
@@ -71,7 +61,6 @@ def test_llm_failure():
             user_instructions=[]
         )
 
-@pytest.mark.integration
 def test_invalid_llm_fn():
     """Should raise TypeError when LLM function is None."""
     config = get_config()
@@ -84,7 +73,6 @@ def test_invalid_llm_fn():
             user_instructions=[]
         )
 
-@pytest.mark.integration
 def test_invalid_return_value():
     """Should raise TypeError when the LLM returns a non-string value."""
     config = get_config()
@@ -97,7 +85,6 @@ def test_invalid_return_value():
             user_instructions=[]
         )
 
-@pytest.mark.integration
 def test_empty_query():
     """Empty query should raise a ValueError."""
     config = get_config()
@@ -110,7 +97,6 @@ def test_empty_query():
             user_instructions=[]
         )
 
-@pytest.mark.integration
 def test_invalid_api_key():
     """Should fail due to invalid AIMon API key."""
     config = get_config_with_invalid_aimon_api_key()
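
The bodies of the failure-mode mocks sit outside the hunks shown above; in the committed tests they are handed to run_reprompting_pipeline, which is expected to surface the RuntimeError/TypeError. A hedged sketch of how such mocks can be written and asserted directly with pytest.raises (the llm_fn_bad_return name and both function bodies are assumptions, not the committed code):

import pytest
from string import Template

def llm_fn_failure(prompt_template: Template, system_prompt=None, context=None, user_query=None) -> str:
    # Assumed body: simulate an LLM call that always fails.
    raise RuntimeError("simulated LLM failure")

def llm_fn_bad_return(prompt_template: Template, system_prompt=None, context=None, user_query=None):
    # Assumed body: return a non-string so the pipeline's output type check trips.
    return 12345

def test_failure_mock_raises():
    with pytest.raises(RuntimeError):
        llm_fn_failure(Template("$user_query"), user_query="q")

def test_bad_return_mock_is_not_a_string():
    assert not isinstance(llm_fn_bad_return(Template("$user_query")), str)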

tests/test_reprompting_success.py

Lines changed: 0 additions & 105 deletions
This file was deleted.
