Skip to content

Commit b593ce2

Browse files
committed
Adapted to pytest framework
1 parent 13e89a3 commit b593ce2

File tree

4 files changed

+80
-178
lines changed

4 files changed

+80
-178
lines changed

test/common/llmperf/run_inference.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,7 @@ def inference_results():
128128
server_url = config.get("llm_connection", {}).get("server_url", "")
129129
tokenizer_path = config.get("llm_connection", {}).get("tokenizer_path", "")
130130
test_cases = config.get("llmperf_test_cases", [])
131-
timestamp_dir = Path("result_outputs")
131+
timestamp_dir = Path("results")
132132
timestamp_dir.mkdir(parents=True, exist_ok=True)
133133
print(f"[INFO] Created results directory: {timestamp_dir}")
134134

test/common/llmperf/utils/token_benchmark.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -353,8 +353,14 @@ def run_token_benchmark(
353353
elif not results_dir.is_dir():
354354
raise ValueError(f"{results_dir} is not a directory")
355355

356+
llmperf_dir = results_dir / "llmperf"
357+
if not llmperf_dir.exists():
358+
llmperf_dir.mkdir(parents=True)
359+
elif not llmperf_dir.is_dir():
360+
raise ValueError(f"{llmperf_dir} is not a directory")
361+
356362
try:
357-
with open(results_dir / f"{summary_filename}.json", "w") as f:
363+
with open(llmperf_dir / f"{summary_filename}.json", "w") as f:
358364
json.dump(results.to_dict(), f, indent=4, default=str)
359365
except Exception as e:
360366
print(results.to_dict())

test/config.yaml

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,4 +15,19 @@ database:
1515
name: "ucm_pytest"
1616
user: "root"
1717
password: "123456"
18-
charset: "utf8mb4"
18+
charset: "utf8mb4"
19+
20+
# LLM Connection Configuration
21+
llm_connection:
22+
model: "qwen3"
23+
server_url: "http://141.111.32.70:9382"
24+
tokenizer_path: "/home/models/QwQ-32B"
25+
26+
# Performance Test Configuration
27+
llmperf_test_cases:
28+
- mean_input_tokens: 6000
29+
mean_output_tokens: 200
30+
max_num_completed_requests: 16
31+
concurrent_requests: 8
32+
additional_sampling_params: "{}"
33+
hit_rate: 0
Lines changed: 56 additions & 175 deletions
Original file line numberDiff line numberDiff line change
@@ -1,185 +1,66 @@
1-
# tests/test_demo.py
21
import pytest
3-
import allure
2+
from common.config_utils import config_utils as config_instance
43

5-
@pytest.mark.stage(1)
6-
@pytest.mark.feature("mark")
7-
@pytest.mark.platform("gpu")
8-
def test_gpu_smoke():
9-
assert 1 == 1
104

11-
@pytest.mark.stage(1)
12-
@pytest.mark.feature("mark")
13-
def test_regress_accuracy():
14-
assert 2 + 2 <= 5
5+
# ---------------- Fixture Example ----------------
6+
class Calculator:
7+
def __init__(self):
8+
print("[Calculator Initialization]")
9+
pass
10+
11+
def add(self, a, b):
12+
return a + b
13+
14+
def divide(self, a, b):
15+
if b == 0:
16+
raise ZeroDivisionError("Cannot divide by zero")
17+
return a / b
18+
19+
20+
@pytest.fixture(scope="module", name="calc")
21+
def calculator():
22+
return Calculator()
1523

16-
@pytest.mark.stage(1)
17-
@pytest.mark.feature("mark")
18-
@pytest.mark.platform("npu")
19-
def test_performance_accuracy():
20-
assert 2 + 2 <= 5
2124

22-
# Example of new mark
2325
@pytest.mark.feature("mark")
24-
@pytest.mark.reliability("high")
25-
def test_llm_reliability():
26-
assert True
26+
class TestCalculator:
27+
# The calc instance is only initialized on the first call; see the pytest fixtures documentation for more usage
28+
def test_add(self, calc):
29+
assert calc.add(1, 2) == 3
2730

31+
def test_divide(self, calc):
32+
assert calc.divide(6, 2) == 3
2833

29-
# Example of importing configuration file parameters
34+
def test_divide_by_zero(self, calc):
35+
with pytest.raises(ZeroDivisionError):
36+
calc.divide(6, 0)
37+
38+
39+
# ---------------- Write to DB Example ----------------
40+
from common.capture_utils import *
41+
42+
43+
@pytest.mark.feature("capture")  # pytest marks must be the topmost decorators
44+
@export_vars
45+
def test_capture_mix():
46+
"""Mixed single + lists via '_name' + '_data'"""
47+
assert 1 == 1
48+
return {
49+
"_name": "demo",
50+
"_data": {
51+
"length": 10086, # single value
52+
"accuracy": [0.1, 0.2, 0.3], # list
53+
"loss": [0.1, 0.2, 0.3], # list
54+
},
55+
}
56+
57+
58+
# ---------------- Read Config Example ----------------
3059
from common.config_utils import config_utils as config_instance
60+
61+
3162
@pytest.mark.feature("config")
32-
def test_llm_config():
33-
llm_config = config_instance.get_config("llm_connection")
34-
assert llm_config["type"] == "openai"
35-
assert config_instance.get_nested_config("llm_connection.model") == "gpt-3.5-turbo"
36-
assert config_instance.get_nested_config("llm_connection.models", "gpt-3.5-turbo") == "gpt-3.5-turbo"
37-
38-
39-
40-
# Example of using allure
41-
@pytest.mark.feature("allure1")
42-
@allure.feature('test_success')
43-
def test_success():
44-
"""this test succeeds"""
45-
assert True
46-
47-
@allure.feature('test_failure')
48-
@pytest.mark.feature("allure1")
49-
def test_failure():
50-
"""this test fails"""
51-
assert False
52-
53-
@allure.feature('test_skip')
54-
@pytest.mark.feature("allure1")
55-
def test_skip():
56-
"""this test is skipped"""
57-
pytest.skip('for a reason!')
58-
59-
@allure.feature('test_broken')
60-
@pytest.mark.feature("allure1")
61-
def test_broken():
62-
raise Exception('oops')
63-
64-
@pytest.mark.feature("allure2")
65-
@pytest.mark.parametrize('param1', ["Hello", "World"])
66-
@pytest.mark.parametrize('param2', ['Hello', "Hello"])
67-
def test_parametrize_with_two_parameters(param1, param2):
68-
assert param1 == param2
69-
70-
@pytest.mark.feature("allure3")
71-
@allure.description_html("""
72-
<h1>This is HTML description</h1>
73-
<table style="width:100%">
74-
<tr>
75-
<th>Firstname</th>
76-
<th>Lastname</th>
77-
<th>Age</th>
78-
</tr>
79-
<tr align="center">
80-
<td>jade</td>
81-
<td>mr</td>
82-
<td>18</td>
83-
</tr>
84-
<tr align="center">
85-
<td>road</td>
86-
<td>Tester</td>
87-
<td>18</td>
88-
</tr>
89-
</table>
90-
""")
91-
def test_html_description():
92-
assert True
93-
94-
@pytest.mark.feature("allure3")
95-
@allure.description("""Multi-line description""")
96-
def test_description_from_decorator():
97-
assert 42 == int(6 * 7)
98-
99-
@pytest.mark.feature("allure3")
100-
def test_unicode_in_docstring_description():
101-
"""Description can also be below the function"""
102-
assert 42 == int(6 * 7)
103-
104-
@pytest.mark.feature("allure4")
105-
@allure.title("Assert that 2+2=4")
106-
def test_with_a_title():
107-
assert 2 + 2 == 4
108-
109-
@pytest.mark.feature("allure4")
110-
@allure.title("Dynamic title: {param1} + {param2} = {expected}")
111-
@pytest.mark.parametrize('param1,param2,expected', [(2, 2, 4),(1, 2, 5)])
112-
def test_with_parameterized_title(param1, param2, expected):
113-
assert param1 + param2 == expected
114-
115-
@pytest.mark.feature("allure4")
116-
@allure.title("This is a dynamic title that will be replaced")
117-
def test_with_dynamic_title():
118-
assert 2 + 2 == 4
119-
allure.dynamic.title('Test completed, used as title')
120-
121-
122-
@pytest.mark.feature("allure5")
123-
def test_with_steps():
124-
"""Example test case with steps"""
125-
with allure.step("Step 1: Initialize variables"):
126-
a = 2
127-
b = 3
128-
129-
with allure.step("Step 2: Perform addition"):
130-
result = a + b
131-
132-
with allure.step("Step 3: Verify result"):
133-
assert result == 5
134-
135-
import tempfile
136-
import os
137-
@pytest.mark.feature("allure6")
138-
def test_with_attachment():
139-
"""Example test case with attachment"""
140-
# Create some data to attach
141-
data = "This is sample data for attachment\nLine 2\nLine 3"
142-
143-
# Attach text data
144-
allure.attach(data, name="Sample Data", attachment_type=allure.attachment_type.TEXT)
145-
146-
# Create and attach a simple file
147-
with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.txt') as f:
148-
f.write("Sample file content\nFor testing attachment feature")
149-
temp_file_path = f.name
150-
151-
# Attach the file
152-
allure.attach.file(temp_file_path, name="Attached File",
153-
attachment_type=allure.attachment_type.TEXT)
154-
155-
# Clean up temporary file
156-
os.unlink(temp_file_path)
157-
158-
assert True
159-
160-
@pytest.mark.feature("allure7")
161-
def test_mixed_steps_and_attachments():
162-
"""Example test case combining steps and attachments"""
163-
with allure.step("Initialize test data"):
164-
test_data = {"name": "John", "age": 30, "city": "New York"}
165-
166-
with allure.step("Convert data to JSON string"):
167-
import json
168-
json_data = json.dumps(test_data, indent=2)
169-
allure.attach(json_data, name="JSON Data", attachment_type=allure.attachment_type.JSON)
170-
171-
with allure.step("Validate data"):
172-
assert test_data["name"] == "John"
173-
assert test_data["age"] == 30
174-
175-
with allure.step("Create and attach report"):
176-
report_content = f"""
177-
Test Report
178-
===========
179-
Name: {test_data['name']}
180-
Age: {test_data['age']}
181-
City: {test_data['city']}
182-
Status: PASSED
183-
"""
184-
allure.attach(report_content, name="Test Report",
185-
attachment_type=allure.attachment_type.TEXT)
63+
def test_config():
64+
assert (
65+
config_instance.get_nested_config("database.host", "localhost") == "127.0.0.1"
66+
)

0 commit comments

Comments
 (0)