6 changes: 5 additions & 1 deletion app/api/api_v1/endpoints/test_run_executions.py
@@ -63,6 +63,7 @@ def read_test_run_executions(
     search_query: Optional[str] = None,
     skip: int = 0,
     limit: int = 100,
+    sort_order: str = "asc",
 ) -> list[schemas.TestRunExecutionWithStats]:
     """Retrieve test runs, including statistics.

@@ -71,7 +72,9 @@ def read_test_run_executions(
         archived: Get archived test runs, when true will return archived
             test runs only, when false only non-archived test runs are returned.
         skip: Pagination offset.
-        limit: Max number of records to return.
+        limit: Max number of records to return. Set to 0 to return all results.
+        sort_order: Sort order for results. Either "asc" or "desc". Defaults to
+            "asc". Results are sorted by ID.

     Returns:
         List of test runs with execution statistics.
@@ -83,6 +86,7 @@ def read_test_run_executions(
         search_query=search_query,
         skip=skip,
         limit=limit,
+        sort_order=sort_order,
     )
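For context, a client call exercising both new parameters might look like the sketch below. The host and port are hypothetical; in the app the /api/v1 prefix comes from settings.API_V1_STR.

    import requests

    # Hypothetical base URL for a locally running instance.
    BASE = "http://localhost:8000/api/v1"

    # Newest runs first; limit=0 lifts the pagination cap entirely.
    resp = requests.get(
        f"{BASE}/test_run_executions",
        params={"sort_order": "desc", "limit": 0},
    )
    resp.raise_for_status()
    runs = resp.json()

    # With sort_order="desc", IDs should be non-increasing.
    assert all(a["id"] >= b["id"] for a, b in zip(runs, runs[1:]))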
22 changes: 19 additions & 3 deletions app/crud/crud_test_run_execution.py
@@ -64,6 +64,7 @@ def get_multi(
         order_by: Optional[str] = None,
         skip: Optional[int] = 0,
         limit: Optional[int] = 100,
+        sort_order: str = "asc",
     ) -> Sequence[TestRunExecution]:
         query = self.select()

@@ -85,11 +86,24 @@ def get_multi(
             )

         if order_by is None:
-            query = query.order_by(self.model.id)
+            # Default to ordering by id with specified sort order
+            if sort_order == "desc":
+                query = query.order_by(self.model.id.desc())
+            else:
+                query = query.order_by(self.model.id.asc())
         else:
-            query = query.order_by(order_by)
+            # Apply sort_order to the specified order_by column
+            column = getattr(self.model, order_by)
+            if sort_order == "desc":
+                query = query.order_by(column.desc())
+            else:
+                query = query.order_by(column.asc())

-        query = query.offset(skip).limit(limit)
+        query = query.offset(skip)
+
+        # If limit is 0, return all results without limit
+        if limit != 0:
+            query = query.limit(limit)

         return db.scalars(query).all()

Review comment — antonio-amjr (Contributor), Dec 24, 2025, anchored on the `if order_by is None:` line:

    Could the suggestion below replace this whole conditional?
    What do you think?

    Suggested change:
    -        if order_by is None:
    +        column = self.model.id if order_by is None else getattr(self.model, order_by)
    +        if sort_order == "desc":
    +            query = query.order_by(column.desc())
    +        else:
    +            query = query.order_by(column.asc())
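For illustration, folding the reviewer's suggestion into one path would read roughly as below — a sketch, not committed code; apply_ordering is a hypothetical free-standing helper:

    from sqlalchemy import asc, desc

    def apply_ordering(query, model, order_by=None, sort_order="asc"):
        # Fall back to the id column when no explicit order_by is given.
        column = model.id if order_by is None else getattr(model, order_by)
        # Any value other than "desc" is treated as ascending, matching the diff.
        direction = desc if sort_order == "desc" else asc
        return query.order_by(direction(column))

A side benefit either way: the pre-change code passed order_by to query.order_by() as a raw string, whereas resolving the column with getattr fails fast with an AttributeError on an unknown column name.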

@@ -103,13 +117,15 @@ def get_multi_with_stats(
         order_by: Optional[str] = None,
         skip: Optional[int] = 0,
         limit: Optional[int] = 100,
+        sort_order: str = "asc",
     ) -> List[TestRunExecutionWithStats]:
         results = self.get_multi(
             db=db,
             project_id=project_id,
             archived=archived,
             search_query=search_query,
             order_by=order_by,
+            sort_order=sort_order,
             skip=skip,
             limit=limit,
         )
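Taken together, a caller can combine both new parameters through the stats variant — a minimal sketch assuming an open Session db and an existing project, as in the tests below:

    # Newest-first, unpaginated listing for one project (limit=0 lifts the cap).
    runs = crud.test_run_execution.get_multi_with_stats(
        db, project_id=project.id, sort_order="desc", limit=0
    )
    ids = [run.id for run in runs]
    assert ids == sorted(ids, reverse=True)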
35 changes: 35 additions & 0 deletions app/tests/api/api_v1/test_test_run_executions.py
@@ -939,6 +939,41 @@ def test_read_multiple_test_run_executions_with_search_query(
     assert not any(test_run.get("id") == test_run_execution.id for test_run in content)


+def test_read_multiple_test_run_executions_with_limit_zero_returns_all(
+    client: TestClient, db: Session
+) -> None:
+    """Test that limit=0 returns all test run executions."""
+
+    # Create several test executions to ensure we have more than the default limit
+    test_runs = []
+    for _ in range(105):
+        test_run = create_random_test_run_execution(db)
+        test_runs.append(test_run)
+
+    # Ensure changes are committed
+    db.commit()
+
+    # Test with limit=0 to get all results
+    response = client.get(f"{settings.API_V1_STR}/test_run_executions?limit=0")
+    assert response.status_code == HTTPStatus.OK
+    content = response.json()
+    assert isinstance(content, list)
+
+    # Verify that all our created test runs are in the response
+    created_ids = {tr.id for tr in test_runs}
+    response_ids = {tr["id"] for tr in content}
+
+    # All created test runs should be present (and potentially more from other tests)
+    assert created_ids.issubset(
+        response_ids
+    ), f"Created IDs {created_ids} not found in response IDs"
+
+    # Verify at least the 105 created runs are returned
+    assert (
+        len(content) >= 105
+    ), f"Expected at least 105 test runs with limit=0, got {len(content)}"
+
+
 def test_read_test_run_execution(client: TestClient, db: Session) -> None:
     # We generate a random test run for this test.
     # To validate that all test cases are returned in the response,
73 changes: 68 additions & 5 deletions app/tests/conftest.py
@@ -15,10 +15,12 @@
 #
 import asyncio
 import contextlib
+import json
 import sys
 from importlib import import_module
-from typing import AsyncGenerator, Generator
+from typing import Any, AsyncGenerator, Generator
 from unittest import mock
+from unittest.mock import patch

 import pytest
 import pytest_asyncio
@@ -123,13 +125,42 @@ def block_on_serial_marker(request: pytest.FixtureRequest) -> Generator:
     By default, test_script_manager does not discover all test collections including
     unit tests. Make sure we discover all test collections here.
     """

     # Initialize Python tests synchronously for test environment
     try:
-        from test_collections.matter.sdk_tests.support.python_testing import (
-            initialize_python_tests_sync,
-        )
+        # Apply JSON mocking for dynamically generated files to prevent race conditions
+        # Create a mock that returns valid JSON for dynamic files, original for static files
+        original_json_load = json.load
+
+        def mock_json_load(fp: Any) -> dict:
+            """Smart mock that handles dynamic vs static JSON files differently."""
+            filename = getattr(fp, "name", str(fp))
+
+            # Mock the problematic dynamically generated JSON files
+            if any(
+                name in filename
+                for name in [
+                    "python_tests_info.json",
+                    "custom_python_tests_info.json",
+                    "sdk_python_tests_info.json",
+                ]
+            ) and not any(static in filename for static in ["test_python_script"]):
+                return {
+                    "sdk_sha": "mock_sha_for_tests",
+                    "tests": [],  # Empty tests to prevent processing
+                }
+
+            # Use original json.load for static test files and other JSON files
+            return original_json_load(fp)
+
+        # Apply the patch globally during test initialization
+        with patch("json.load", side_effect=mock_json_load):
+            from test_collections.matter.sdk_tests.support.python_testing import (
+                initialize_python_tests_sync,
+            )
+
+            initialize_python_tests_sync()

-        initialize_python_tests_sync()
     except ImportError:
         # Python testing module not available (e.g., DRY_RUN mode)
         pass
@@ -142,6 +173,38 @@ def block_on_serial_marker(request: pytest.FixtureRequest) -> Generator:
 )


+@pytest.fixture(scope="session", autouse=True)
+def mock_json_loading() -> Generator:
+    """Session-scoped fixture to mock JSON loading globally for all tests to prevent
+    race conditions."""
+
+    original_json_load = json.load
+
+    def safe_json_load(fp: Any) -> dict:
+        """Mock json.load to return safe data for dynamic files."""
+        filename = getattr(fp, "name", str(fp))
+
+        # Mock problematic dynamically generated JSON files
+        if any(
+            name in filename
+            for name in [
+                "python_tests_info.json",
+                "custom_python_tests_info.json",
+                "sdk_python_tests_info.json",
+            ]
+        ) and not any(static in filename for static in ["test_python_script"]):
+            return {
+                "sdk_sha": "mock_sha_for_tests",
+                "tests": [],  # Return empty to avoid processing
+            }
+
+        # Use original for everything else
+        return original_json_load(fp)
+
+    with patch("json.load", side_effect=safe_json_load):
+        yield
+
+
 @contextlib.contextmanager
 def use_real_sdk_container() -> Generator:
     """Context manager to temporarily use the real SDKContainer"""
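The fixture leans on one subtlety of unittest.mock.patch: the replacement delegates to the original json.load captured before patching, so there is no recursion, while every module that calls json.load during the session hits the mock. A self-contained sketch of the technique; NamedStringIO and the payload are illustrative stand-ins, and "python_tests_info.json" is one of the dynamic file names matched above:

    import io
    import json
    from unittest.mock import patch

    original_json_load = json.load  # captured before patching, so no recursion

    class NamedStringIO(io.StringIO):
        """In-memory file that carries a .name, like a real file handle."""
        def __init__(self, text: str, name: str) -> None:
            super().__init__(text)
            self.name = name

    def fake_json_load(fp):
        # Serve a canned payload for the "dynamic" file, parse everything else.
        if "python_tests_info.json" in getattr(fp, "name", str(fp)):
            return {"sdk_sha": "mock_sha_for_tests", "tests": []}
        return original_json_load(fp)

    with patch("json.load", side_effect=fake_json_load):
        dynamic = NamedStringIO("not even json", "python_tests_info.json")
        print(json.load(dynamic))                   # -> canned payload; content ignored
        print(json.load(io.StringIO('{"a": 1}')))   # -> {'a': 1}; real parsing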
76 changes: 76 additions & 0 deletions app/tests/crud/test_test_run_execution.py
@@ -809,3 +809,79 @@ def test_import_execution_success_without_test_config() -> None:
     assert imported_test_run.project_id == project_id
     assert imported_test_run.title == test_run_execution_dict.get("title")
     assert imported_test_run.operator_id == operator_id
+
+
+def test_get_test_run_executions_sort_order(db: Session) -> None:
+    """Test that sort_order parameter correctly orders test run executions by id."""
+    project = create_random_project(db, config={})
+
+    # Create multiple test run executions
+    test_runs = []
+    for _ in range(3):
+        test_run = create_random_test_run_execution(db, project_id=project.id)
+        test_runs.append(test_run)
+
+    # Test ascending order (default)
+    test_run_executions_asc = crud.test_run_execution.get_multi_with_stats(
+        db, project_id=project.id, sort_order="asc"
+    )
+
+    # Test descending order
+    test_run_executions_desc = crud.test_run_execution.get_multi_with_stats(
+        db, project_id=project.id, sort_order="desc"
+    )
+
+    # Get the IDs of our created test runs
+    created_ids = [tr.id for tr in test_runs]
+
+    # Filter to only our test runs for verification
+    asc_our_runs = [tre for tre in test_run_executions_asc if tre.id in created_ids]
+    desc_our_runs = [tre for tre in test_run_executions_desc if tre.id in created_ids]
+
+    # Verify ascending order
+    asc_ids = [tr.id for tr in asc_our_runs]
+    assert asc_ids == sorted(created_ids)
+
+    # Verify descending order
+    desc_ids = [tr.id for tr in desc_our_runs]
+    assert desc_ids == sorted(created_ids, reverse=True)
+
+    # Verify the orders are actually different (reversed)
+    assert asc_ids == list(reversed(desc_ids))
+
+    # Verify we have all test runs
+    assert len(asc_ids) == 3
+    assert len(desc_ids) == 3
+
+
+def test_get_test_run_executions_limit_zero_returns_all(db: Session) -> None:
+    """Test that limit=0 returns all test run executions without applying a limit."""
+    project = create_random_project(db, config={})
+
+    # Create several test runs to ensure we have multiple records
+    test_runs = []
+    for _ in range(5):
+        test_run = create_random_test_run_execution(db, project_id=project.id)
+        test_runs.append(test_run)
+
+    db.commit()
+
+    # Test with an explicit small limit (results capped at 2)
+    limited_results = crud.test_run_execution.get_multi_with_stats(
+        db, project_id=project.id, limit=2
+    )
+
+    # Test with limit=0 (should return all for this project)
+    all_results = crud.test_run_execution.get_multi_with_stats(
+        db, project_id=project.id, limit=0
+    )
+
+    # Verify that limit=0 returns more results than the limited query
+    assert len(all_results) > len(limited_results)
+    assert len(limited_results) == 2  # Verify limited query worked
+    assert len(all_results) >= 5  # Should have at least our 5 test runs
+
+    # Verify all our created test runs are in the unlimited results
+    created_ids = {tr.id for tr in test_runs}
+    result_ids = {tr.id for tr in all_results}
+    assert created_ids.issubset(result_ids)