From 3fa220ab9221219ea5ea7bc71b6a5266c9beb676 Mon Sep 17 00:00:00 2001 From: Romulo Quidute Filho Date: Tue, 16 Dec 2025 20:48:26 +0000 Subject: [PATCH 01/13] Add sort_order to GET test_run_execution --- .../api_v1/endpoints/test_run_executions.py | 4 ++ app/crud/crud_test_run_execution.py | 9 +++- .../api/api_v1/test_test_run_executions.py | 1 + app/tests/crud/test_test_run_execution.py | 47 +++++++++++++++++++ 4 files changed, 60 insertions(+), 1 deletion(-) diff --git a/app/api/api_v1/endpoints/test_run_executions.py b/app/api/api_v1/endpoints/test_run_executions.py index d4de30a5..619e626d 100644 --- a/app/api/api_v1/endpoints/test_run_executions.py +++ b/app/api/api_v1/endpoints/test_run_executions.py @@ -63,6 +63,7 @@ def read_test_run_executions( search_query: Optional[str] = None, skip: int = 0, limit: int = 100, + sort_order: str = "asc", ) -> list[schemas.TestRunExecutionWithStats]: """Retrieve test runs, including statistics. @@ -72,6 +73,8 @@ def read_test_run_executions( test runs only, when false only non-archived test runs are returned. skip: Pagination offset. limit: Max number of records to return. + sort_order: Sort order for results. Either "asc" or "desc". Defaults to "asc". + Results are sorted by ID. Returns: List of test runs with execution statistics. 
@@ -83,6 +86,7 @@ def read_test_run_executions( search_query=search_query, skip=skip, limit=limit, + sort_order=sort_order, ) diff --git a/app/crud/crud_test_run_execution.py b/app/crud/crud_test_run_execution.py index 7f0401b3..1a4dc056 100644 --- a/app/crud/crud_test_run_execution.py +++ b/app/crud/crud_test_run_execution.py @@ -62,6 +62,7 @@ def get_multi( archived: Optional[bool] = False, search_query: Optional[str] = None, order_by: Optional[str] = None, + sort_order: Optional[str] = "asc", skip: Optional[int] = 0, limit: Optional[int] = 100, ) -> Sequence[TestRunExecution]: @@ -85,7 +86,11 @@ def get_multi( ) if order_by is None: - query = query.order_by(self.model.id) + # Default to ordering by id with specified sort order + if sort_order == "desc": + query = query.order_by(self.model.id.desc()) + else: + query = query.order_by(self.model.id.asc()) else: query = query.order_by(order_by) @@ -101,6 +106,7 @@ def get_multi_with_stats( archived: Optional[bool] = False, search_query: Optional[str] = None, order_by: Optional[str] = None, + sort_order: Optional[str] = "asc", skip: Optional[int] = 0, limit: Optional[int] = 100, ) -> List[TestRunExecutionWithStats]: @@ -110,6 +116,7 @@ def get_multi_with_stats( archived=archived, search_query=search_query, order_by=order_by, + sort_order=sort_order, skip=skip, limit=limit, ) diff --git a/app/tests/api/api_v1/test_test_run_executions.py b/app/tests/api/api_v1/test_test_run_executions.py index e33aba25..622c7598 100644 --- a/app/tests/api/api_v1/test_test_run_executions.py +++ b/app/tests/api/api_v1/test_test_run_executions.py @@ -1958,3 +1958,4 @@ def test_create_cli_test_run_execution_updates_existing_project_config_only_no_p # PICS should be set to empty PICS object when empty dict is provided assert project_update.pics is not None assert project_update.pics.clusters == {} + \ No newline at end of file diff --git a/app/tests/crud/test_test_run_execution.py b/app/tests/crud/test_test_run_execution.py index 
f0a5c82b..9f632043 100644 --- a/app/tests/crud/test_test_run_execution.py +++ b/app/tests/crud/test_test_run_execution.py @@ -809,3 +809,50 @@ def test_import_execution_success_without_test_config() -> None: assert imported_test_run.project_id == project_id assert imported_test_run.title == test_run_execution_dict.get("title") assert imported_test_run.operator_id == operator_id + + +def test_get_test_run_executions_sort_order(db: Session) -> None: + """Test that sort_order parameter correctly orders test run executions by id.""" + project = create_random_project(db, config={}) + + # Create multiple test run executions + test_runs = [] + for i in range(3): + test_run = create_random_test_run_execution(db, project_id=project.id) + test_runs.append(test_run) + + # Test ascending order (default) + test_run_executions_asc = crud.test_run_execution.get_multi_with_stats( + db, project_id=project.id, sort_order="asc" + ) + + # Test descending order + test_run_executions_desc = crud.test_run_execution.get_multi_with_stats( + db, project_id=project.id, sort_order="desc" + ) + + # Verify we have all test runs + assert len(test_run_executions_asc) >= 3 + assert len(test_run_executions_desc) >= 3 + + # Get the IDs of our created test runs + created_ids = [tr.id for tr in test_runs] + + # Filter to only our test runs for verification + asc_our_runs = [tre for tre in test_run_executions_asc if tre.id in created_ids] + desc_our_runs = [tre for tre in test_run_executions_desc if tre.id in created_ids] + + # Sort by id for verification + asc_our_runs.sort(key=lambda x: x.id) + desc_our_runs.sort(key=lambda x: x.id, reverse=True) + + # Verify ascending order - lowest ID first + assert asc_our_runs[0].id <= asc_our_runs[1].id <= asc_our_runs[2].id + + # Verify descending order - highest ID first + assert desc_our_runs[0].id >= desc_our_runs[1].id >= desc_our_runs[2].id + + # Verify the orders are actually different (reversed) + asc_ids = [tr.id for tr in asc_our_runs] + desc_ids = 
[tr.id for tr in desc_our_runs] + assert asc_ids == list(reversed(desc_ids)) From 5f2a2204133fc999fc9e26d6bfa7967e3f640912 Mon Sep 17 00:00:00 2001 From: Romulo Quidute Filho Date: Wed, 17 Dec 2025 20:45:26 +0000 Subject: [PATCH 02/13] Add support to sort results and retrieve all records from test_run_execution --- .../api_v1/endpoints/test_run_executions.py | 2 +- app/crud/crud_test_run_execution.py | 6 +++- .../api/api_v1/test_test_run_executions.py | 36 ++++++++++++++++++- app/tests/crud/test_test_run_execution.py | 33 +++++++++++++++++ 4 files changed, 74 insertions(+), 3 deletions(-) diff --git a/app/api/api_v1/endpoints/test_run_executions.py b/app/api/api_v1/endpoints/test_run_executions.py index 619e626d..363f1330 100644 --- a/app/api/api_v1/endpoints/test_run_executions.py +++ b/app/api/api_v1/endpoints/test_run_executions.py @@ -72,7 +72,7 @@ def read_test_run_executions( archived: Get archived test runs, when true will return archived test runs only, when false only non-archived test runs are returned. skip: Pagination offset. - limit: Max number of records to return. + limit: Max number of records to return. Set to 0 to return all results. sort_order: Sort order for results. Either "asc" or "desc". Defaults to "asc". Results are sorted by ID.
diff --git a/app/crud/crud_test_run_execution.py b/app/crud/crud_test_run_execution.py index 1a4dc056..720229f8 100644 --- a/app/crud/crud_test_run_execution.py +++ b/app/crud/crud_test_run_execution.py @@ -94,7 +94,11 @@ def get_multi( else: query = query.order_by(order_by) - query = query.offset(skip).limit(limit) + query = query.offset(skip) + + # If limit is 0, return all results without limit + if limit != 0: + query = query.limit(limit) return db.scalars(query).all() diff --git a/app/tests/api/api_v1/test_test_run_executions.py b/app/tests/api/api_v1/test_test_run_executions.py index 622c7598..10e60597 100644 --- a/app/tests/api/api_v1/test_test_run_executions.py +++ b/app/tests/api/api_v1/test_test_run_executions.py @@ -939,6 +939,41 @@ def test_read_multiple_test_run_executions_with_search_query( assert not any(test_run.get("id") == test_run_execution.id for test_run in content) +def test_read_multiple_test_run_executions_with_limit_zero_returns_all( + client: TestClient, db: Session +) -> None: + """Test that limit=0 returns all test run executions.""" + + # Create several test executions to ensure we have more than default limit + test_runs = [] + for i in range(5): + test_run = create_random_test_run_execution(db) + test_runs.append(test_run) + + # Ensure changes are committed + db.commit() + + # Test with limit=0 to get all results + response = client.get(f"{settings.API_V1_STR}/test_run_executions?limit=0") + assert response.status_code == HTTPStatus.OK + content = response.json() + assert isinstance(content, list) + + # Verify that all our created test runs are in the response + created_ids = {tr.id for tr in test_runs} + response_ids = {tr["id"] for tr in content} + + # All created test runs should be present (and potentially more from other tests) + assert created_ids.issubset( + response_ids + ), f"Created IDs {created_ids} not found in response IDs" + + # Verify we got more than the default limit (should be at least our 5 created runs) + assert ( 
+ len(content) >= 5 + ), f"Expected at least 5 test runs with limit=0, got {len(content)}" + + def test_read_test_run_execution(client: TestClient, db: Session) -> None: # We generate a random test run for this test. # To validate that all test cases are returned in the response, @@ -1958,4 +1993,3 @@ def test_create_cli_test_run_execution_updates_existing_project_config_only_no_p # PICS should be set to empty PICS object when empty dict is provided assert project_update.pics is not None assert project_update.pics.clusters == {} - \ No newline at end of file diff --git a/app/tests/crud/test_test_run_execution.py b/app/tests/crud/test_test_run_execution.py index 9f632043..6a8e4262 100644 --- a/app/tests/crud/test_test_run_execution.py +++ b/app/tests/crud/test_test_run_execution.py @@ -856,3 +856,36 @@ def test_get_test_run_executions_sort_order(db: Session) -> None: asc_ids = [tr.id for tr in asc_our_runs] desc_ids = [tr.id for tr in desc_our_runs] assert asc_ids == list(reversed(desc_ids)) + + +def test_get_test_run_executions_limit_zero_returns_all(db: Session) -> None: + """Test that limit=0 returns all test run executions without applying limit.""" + project = create_random_project(db, config={}) + + # Create several test runs to ensure we have multiple records + test_runs = [] + for i in range(5): + test_run = create_random_test_run_execution(db, project_id=project.id) + test_runs.append(test_run) + + db.commit() + + # Test with default limit (should be limited to 2) + limited_results = crud.test_run_execution.get_multi_with_stats( + db, project_id=project.id, limit=2 + ) + + # Test with limit=0 (should return all for this project) + all_results = crud.test_run_execution.get_multi_with_stats( + db, project_id=project.id, limit=0 + ) + + # Verify that limit=0 returns more results than the limited query + assert len(all_results) > len(limited_results) + assert len(limited_results) == 2 # Verify limited query worked + assert len(all_results) >= 5 # Should have at 
least our 5 test runs + + # Verify all our created test runs are in the unlimited results + created_ids = {tr.id for tr in test_runs} + result_ids = {tr.id for tr in all_results} + assert created_ids.issubset(result_ids) From 2c1ff57cb0cab09ca7539f0c62862917373a7a52 Mon Sep 17 00:00:00 2001 From: Romulo Quidute Filho <116586593+rquidute@users.noreply.github.com> Date: Thu, 18 Dec 2025 09:52:37 -0300 Subject: [PATCH 03/13] Update app/tests/crud/test_test_run_execution.py Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- app/tests/crud/test_test_run_execution.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/app/tests/crud/test_test_run_execution.py b/app/tests/crud/test_test_run_execution.py index 6a8e4262..c8847db0 100644 --- a/app/tests/crud/test_test_run_execution.py +++ b/app/tests/crud/test_test_run_execution.py @@ -842,19 +842,15 @@ def test_get_test_run_executions_sort_order(db: Session) -> None: asc_our_runs = [tre for tre in test_run_executions_asc if tre.id in created_ids] desc_our_runs = [tre for tre in test_run_executions_desc if tre.id in created_ids] - # Sort by id for verification - asc_our_runs.sort(key=lambda x: x.id) - desc_our_runs.sort(key=lambda x: x.id, reverse=True) - - # Verify ascending order - lowest ID first - assert asc_our_runs[0].id <= asc_our_runs[1].id <= asc_our_runs[2].id + # Verify ascending order + asc_ids = [tr.id for tr in asc_our_runs] + assert asc_ids == sorted(created_ids) - # Verify descending order - highest ID first - assert desc_our_runs[0].id >= desc_our_runs[1].id >= desc_our_runs[2].id + # Verify descending order + desc_ids = [tr.id for tr in desc_our_runs] + assert desc_ids == sorted(created_ids, reverse=True) # Verify the orders are actually different (reversed) - asc_ids = [tr.id for tr in asc_our_runs] - desc_ids = [tr.id for tr in desc_our_runs] assert asc_ids == list(reversed(desc_ids)) From 
4a9f8f4806441d5812e84d19ea2b4abd2eab703a Mon Sep 17 00:00:00 2001 From: Romulo Quidute Filho <116586593+rquidute@users.noreply.github.com> Date: Thu, 18 Dec 2025 09:55:22 -0300 Subject: [PATCH 04/13] Update app/crud/crud_test_run_execution.py Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- app/crud/crud_test_run_execution.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/crud/crud_test_run_execution.py b/app/crud/crud_test_run_execution.py index 720229f8..ff5782fb 100644 --- a/app/crud/crud_test_run_execution.py +++ b/app/crud/crud_test_run_execution.py @@ -62,7 +62,7 @@ def get_multi( archived: Optional[bool] = False, search_query: Optional[str] = None, order_by: Optional[str] = None, - sort_order: Optional[str] = "asc", + sort_order: str = "asc", skip: Optional[int] = 0, limit: Optional[int] = 100, ) -> Sequence[TestRunExecution]: From 3eee5ea0667fef33e501983e55b8cc3cd70150d0 Mon Sep 17 00:00:00 2001 From: Romulo Quidute Filho <116586593+rquidute@users.noreply.github.com> Date: Thu, 18 Dec 2025 09:55:30 -0300 Subject: [PATCH 05/13] Update app/crud/crud_test_run_execution.py Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- app/crud/crud_test_run_execution.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/crud/crud_test_run_execution.py b/app/crud/crud_test_run_execution.py index ff5782fb..ff810911 100644 --- a/app/crud/crud_test_run_execution.py +++ b/app/crud/crud_test_run_execution.py @@ -110,7 +110,7 @@ def get_multi_with_stats( archived: Optional[bool] = False, search_query: Optional[str] = None, order_by: Optional[str] = None, - sort_order: Optional[str] = "asc", + sort_order: str = "asc", skip: Optional[int] = 0, limit: Optional[int] = 100, ) -> List[TestRunExecutionWithStats]: From b203e0b00c10b4628d7889531da105e50d71c7c8 Mon Sep 17 00:00:00 2001 From: Romulo Quidute Filho Date: Thu, 18 Dec 2025 
13:42:42 +0000 Subject: [PATCH 06/13] Improved logic to load json file, while launching. This avoids unpredictable unit tests failing due to race condition --- app/crud/crud_test_run_execution.py | 4 +- .../models/python_test_parser.py | 68 ++++++++++++++++++- 2 files changed, 68 insertions(+), 4 deletions(-) diff --git a/app/crud/crud_test_run_execution.py b/app/crud/crud_test_run_execution.py index ff810911..9a4258ca 100644 --- a/app/crud/crud_test_run_execution.py +++ b/app/crud/crud_test_run_execution.py @@ -62,9 +62,9 @@ def get_multi( archived: Optional[bool] = False, search_query: Optional[str] = None, order_by: Optional[str] = None, - sort_order: str = "asc", skip: Optional[int] = 0, limit: Optional[int] = 100, + sort_order: str = "asc", ) -> Sequence[TestRunExecution]: query = self.select() @@ -110,9 +110,9 @@ def get_multi_with_stats( archived: Optional[bool] = False, search_query: Optional[str] = None, order_by: Optional[str] = None, - sort_order: str = "asc", skip: Optional[int] = 0, limit: Optional[int] = 100, + sort_order: str = "asc", ) -> List[TestRunExecutionWithStats]: results = self.get_multi( db=db, diff --git a/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py b/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py index 59b9434a..419d67c2 100644 --- a/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py +++ b/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py @@ -16,6 +16,7 @@ import ast import json import re +import time from pathlib import Path from typing import Any, List, Optional, Union @@ -42,6 +43,66 @@ ] +def __load_json_with_retry( + path: Path, max_retries: int = 5, delay: float = 0.2 + ) -> dict: + """Load JSON file with retry logic to handle race conditions during file generation.
+ + Args: + path: Path to the JSON file + max_retries: Maximum number of retry attempts + delay: Delay between retries in seconds + + Returns: + dict: Parsed JSON content + + Raises: + json.JSONDecodeError: If JSON parsing fails after all retries + FileNotFoundError: If file doesn't exist after all retries + """ + last_exception = None + + for attempt in range(max_retries): + try: + with open(path, "r") as json_file: + content = json_file.read() + + # Check for empty or incomplete content + if not content.strip(): + logger.warning( + f"JSON file {path} is empty on attempt {attempt + 1}/{max_retries}" + ) + if attempt < max_retries - 1: + time.sleep(delay) + continue + else: + raise json.JSONDecodeError("File is empty", str(path), 0) + + # Try to parse JSON + return json.loads(content) + + except (json.JSONDecodeError, FileNotFoundError) as e: + last_exception = e + logger.warning( + f"Attempt {attempt + 1}/{max_retries} failed for {path}: {e}" + ) + + # If this is the last attempt, raise the exception + if attempt == max_retries - 1: + raise e + + # Wait before retrying + time.sleep(delay) + + # This should never be reached, but just in case + if last_exception: + raise last_exception + else: + raise RuntimeError( + f"Failed to load JSON from {path} after {max_retries} attempts" + ) + + def parse_python_script(path: Path) -> list[PythonTest]: """Parse a python file into a list of PythonTest models. 
@@ -62,8 +123,11 @@ def parse_python_script(path: Path) -> list[PythonTest]: """ python_tests: list[PythonTest] = [] - with open(path, "r") as json_file: - parsed_scripts = json.load(json_file) + try: + parsed_scripts = __load_json_with_retry(path) + except Exception as e: + logger.error(f"Failed to parse JSON file {path}: {e}") + return python_tests if len(parsed_scripts) == 0: return python_tests From 5e8ff2c58f057b79acaa29c1382762939ce66a16 Mon Sep 17 00:00:00 2001 From: Romulo Quidute Filho Date: Thu, 18 Dec 2025 13:46:19 +0000 Subject: [PATCH 07/13] Flake8 --- .../support/python_testing/models/python_test_parser.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py b/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py index 419d67c2..1b2537f6 100644 --- a/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py +++ b/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py @@ -70,7 +70,8 @@ def __load_json_with_retry( # Check for empty or incomplete content if not content.strip(): logger.warning( - f"JSON file {path} is empty on attempt {attempt + 1}/{max_retries}" + f"JSON file {path} is empty on attempt " + f"{attempt + 1}/{max_retries}" ) if attempt < max_retries - 1: time.sleep(delay) From b97ee01f1b368d3d25622c0e501037e8ac5fc0ef Mon Sep 17 00:00:00 2001 From: Romulo Quidute Filho Date: Mon, 22 Dec 2025 17:25:59 +0000 Subject: [PATCH 08/13] Code review - Mock read python test JSON file --- .../api/api_v1/test_test_run_executions.py | 6 +- app/tests/conftest.py | 76 ++++++++++++++++++- app/tests/crud/test_test_run_execution.py | 8 +- .../models/python_test_parser.py | 68 +---------------- 4 files changed, 81 insertions(+), 77 deletions(-) diff --git a/app/tests/api/api_v1/test_test_run_executions.py b/app/tests/api/api_v1/test_test_run_executions.py index 
10e60597..905d99dc 100644 --- a/app/tests/api/api_v1/test_test_run_executions.py +++ b/app/tests/api/api_v1/test_test_run_executions.py @@ -946,7 +946,7 @@ def test_read_multiple_test_run_executions_with_limit_zero_returns_all( # Create several test executions to ensure we have more than default limit test_runs = [] - for i in range(5): + for i in range(105): test_run = create_random_test_run_execution(db) test_runs.append(test_run) @@ -970,8 +970,8 @@ def test_read_multiple_test_run_executions_with_limit_zero_returns_all( # Verify we got more than the default limit (should be at least our 5 created runs) assert ( - len(content) >= 5 - ), f"Expected at least 5 test runs with limit=0, got {len(content)}" + len(content) >= 105 + ), f"Expected at least 105 test runs with limit=0, got {len(content)}" def test_read_test_run_execution(client: TestClient, db: Session) -> None: diff --git a/app/tests/conftest.py b/app/tests/conftest.py index 078d6cc5..e4c34952 100644 --- a/app/tests/conftest.py +++ b/app/tests/conftest.py @@ -15,10 +15,12 @@ # import asyncio import contextlib +import json import sys from importlib import import_module from typing import AsyncGenerator, Generator from unittest import mock +from unittest.mock import patch import pytest import pytest_asyncio @@ -123,25 +125,91 @@ def block_on_serial_marker(request: pytest.FixtureRequest) -> Generator: By default, test_script_manager does not discover all test collections including unit tests. Make sure we discover all test collections here. 
""" + # Initialize Python tests synchronously for test environment try: - from test_collections.matter.sdk_tests.support.python_testing import ( - initialize_python_tests_sync, - ) + # Apply JSON mocking for dynamically generated files to prevent race conditions + import json + import pytest + from unittest.mock import patch, MagicMock + + # Create a mock that returns valid JSON for dynamic files, original for static files + original_json_load = json.load + + def mock_json_load(fp): + """Smart mock that handles dynamic vs static JSON files differently.""" + filename = getattr(fp, "name", str(fp)) + + # Mock the problematic dynamically generated JSON files + if any( + name in filename + for name in [ + "python_tests_info.json", + "custom_python_tests_info.json", + "sdk_python_tests_info.json", + ] + ) and not any(static in filename for static in ["test_python_script"]): + return { + "sdk_sha": "mock_sha_for_tests", + "tests": [], # Empty tests to prevent processing + } + + # Use original json.load for static test files and other JSON files + return original_json_load(fp) + + # Apply the patch globally during test initialization + with patch("json.load", side_effect=mock_json_load): + from test_collections.matter.sdk_tests.support.python_testing import ( + initialize_python_tests_sync, + ) + + initialize_python_tests_sync() - initialize_python_tests_sync() except ImportError: # Python testing module not available (e.g., DRY_RUN mode) pass except Exception as e: # Log the error but don't fail tests - some tests may not need Python collections print(f"Warning: Failed to initialize Python test collections for tests: {e}") + # Continue anyway - the mock should prevent most issues test_script_manager.test_script_manager.test_collections = discover_test_collections( disabled_collections=[] ) +@pytest.fixture(scope="session", autouse=True) +def mock_json_loading(): + """Session-scoped fixture to mock JSON loading globally for all tests to prevent + race conditions.""" + + 
original_json_load = json.load + + def safe_json_load(fp): + """Mock json.load to return safe data for dynamic files.""" + filename = getattr(fp, "name", str(fp)) + + # Mock problematic dynamically generated JSON files + if any( + name in filename + for name in [ + "python_tests_info.json", + "custom_python_tests_info.json", + "sdk_python_tests_info.json", + ] + ) and not any(static in filename for static in ["test_python_script"]): + return { + "sdk_sha": "mock_sha_for_tests", + "tests": [], # Return empty to avoid processing + } + + # Use original for everything else + return original_json_load(fp) + + with patch("json.load", side_effect=safe_json_load): + yield + + @contextlib.contextmanager def use_real_sdk_container() -> Generator: """Context manager to temporarily use the real SDKContainer""" diff --git a/app/tests/crud/test_test_run_execution.py b/app/tests/crud/test_test_run_execution.py index c8847db0..49b540e7 100644 --- a/app/tests/crud/test_test_run_execution.py +++ b/app/tests/crud/test_test_run_execution.py @@ -831,10 +831,6 @@ def test_get_test_run_executions_sort_order(db: Session) -> None: db, project_id=project.id, sort_order="desc" ) - # Verify we have all test runs - assert len(test_run_executions_asc) >= 3 - assert len(test_run_executions_desc) >= 3 - # Get the IDs of our created test runs created_ids = [tr.id for tr in test_runs] @@ -853,6 +849,10 @@ def test_get_test_run_executions_sort_order(db: Session) -> None: # Verify the orders are actually different (reversed) assert asc_ids == list(reversed(desc_ids)) + # Verify we have all test runs + assert len(asc_ids) == 3 + assert len(desc_ids) == 3 + def test_get_test_run_executions_limit_zero_returns_all(db: Session) -> None: """Test that limit=0 returns all test run executions without applying limit.""" diff --git a/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py b/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py index 
1b2537f6..bae00944 100644 --- a/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py +++ b/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py @@ -43,67 +43,6 @@ ] -def __load_json_with_retry( - path: Path, max_retries: int = 5, delay: float = 0.2 -) -> dict: - """Load JSON file with retry logic to handle race conditions during file generation. - - Args: - path: Path to the JSON file - max_retries: Maximum number of retry attempts - delay: Delay between retries in seconds - - Returns: - dict: Parsed JSON content - - Raises: - json.JSONDecodeError: If JSON parsing fails after all retries - FileNotFoundError: If file doesn't exist after all retries - """ - last_exception = None - - for attempt in range(max_retries): - try: - with open(path, "r") as json_file: - content = json_file.read() - - # Check for empty or incomplete content - if not content.strip(): - logger.warning( - f"JSON file {path} is empty on attempt " - f"{attempt + 1}/{max_retries}" - ) - if attempt < max_retries - 1: - time.sleep(delay) - continue - else: - raise json.JSONDecodeError("File is empty", str(path), 0) - - # Try to parse JSON - return json.loads(content) - - except (json.JSONDecodeError, FileNotFoundError) as e: - last_exception = e - logger.warning( - f"Attempt {attempt + 1}/{max_retries} failed for {path}: {e}" - ) - - # If this is the last attempt, raise the exception - if attempt == max_retries - 1: - raise e - - # Wait before retrying - time.sleep(delay) - - # This should never be reached, but just in case - if last_exception: - raise last_exception - else: - raise RuntimeError( - f"Failed to load JSON from {path} after {max_retries} attempts" - ) - - def parse_python_script(path: Path) -> list[PythonTest]: """Parse a python file into a list of PythonTest models. 
@@ -124,11 +63,8 @@ def parse_python_script(path: Path) -> list[PythonTest]: """ python_tests: list[PythonTest] = [] - try: - parsed_scripts = __load_json_with_retry(path) - except Exception as e: - logger.error(f"Failed to parse JSON file {path}: {e}") - return python_tests + with open(path, "r") as json_file: + parsed_scripts = json.load(json_file) if len(parsed_scripts) == 0: return python_tests From 308939e02b919d9ccd0efc41d77ba4333f516e3b Mon Sep 17 00:00:00 2001 From: Romulo Quidute Filho Date: Mon, 22 Dec 2025 17:28:09 +0000 Subject: [PATCH 09/13] Code review - organize imports and remove useless code --- app/tests/conftest.py | 7 +------ .../support/python_testing/models/python_test_parser.py | 1 - 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/app/tests/conftest.py b/app/tests/conftest.py index e4c34952..7eaa1cdd 100644 --- a/app/tests/conftest.py +++ b/app/tests/conftest.py @@ -20,7 +20,7 @@ from importlib import import_module from typing import AsyncGenerator, Generator from unittest import mock -from unittest.mock import patch +from unittest.mock import MagicMock, patch import pytest import pytest_asyncio @@ -129,10 +129,6 @@ def block_on_serial_marker(request: pytest.FixtureRequest) -> Generator: # Initialize Python tests synchronously for test environment try: # Apply JSON mocking for dynamically generated files to prevent race conditions - import json - import pytest - from unittest.mock import patch, MagicMock - # Create a mock that returns valid JSON for dynamic files, original for static files original_json_load = json.load @@ -171,7 +167,6 @@ def mock_json_load(fp): except Exception as e: # Log the error but don't fail tests - some tests may not need Python collections print(f"Warning: Failed to initialize Python test collections for tests: {e}") - # Continue anyway - the mock should prevent most issues test_script_manager.test_script_manager.test_collections = discover_test_collections( disabled_collections=[] diff --git 
a/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py b/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py index bae00944..59b9434a 100644 --- a/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py +++ b/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py @@ -16,7 +16,6 @@ import ast import json import re -import time from pathlib import Path from typing import Any, List, Optional, Union From 6770d9841ebabfe4785d65a6945f4db515c7db91 Mon Sep 17 00:00:00 2001 From: Romulo Quidute Filho Date: Mon, 22 Dec 2025 17:48:42 +0000 Subject: [PATCH 10/13] Code review - flake8 --- app/tests/api/api_v1/test_test_run_executions.py | 4 ++-- app/tests/conftest.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/app/tests/api/api_v1/test_test_run_executions.py b/app/tests/api/api_v1/test_test_run_executions.py index 905d99dc..e7e5c615 100644 --- a/app/tests/api/api_v1/test_test_run_executions.py +++ b/app/tests/api/api_v1/test_test_run_executions.py @@ -968,9 +968,9 @@ def test_read_multiple_test_run_executions_with_limit_zero_returns_all( response_ids ), f"Created IDs {created_ids} not found in response IDs" - # Verify we got more than the default limit (should be at least our 5 created runs) + # Verify we got exactly 105 test runs assert ( - len(content) >= 105 + len(content) == 105 ), f"Expected at least 105 test runs with limit=0, got {len(content)}" diff --git a/app/tests/conftest.py b/app/tests/conftest.py index 7eaa1cdd..c0c1cfab 100644 --- a/app/tests/conftest.py +++ b/app/tests/conftest.py @@ -20,7 +20,7 @@ from importlib import import_module from typing import AsyncGenerator, Generator from unittest import mock -from unittest.mock import MagicMock, patch +from unittest.mock import patch import pytest import pytest_asyncio @@ -132,7 +132,7 @@ def block_on_serial_marker(request: pytest.FixtureRequest) -> Generator: 
# Create a mock that returns valid JSON for dynamic files, original for static files original_json_load = json.load - def mock_json_load(fp): + def mock_json_load(fp) -> dict: """Smart mock that handles dynamic vs static JSON files differently.""" filename = getattr(fp, "name", str(fp)) @@ -174,13 +174,13 @@ def mock_json_load(fp): @pytest.fixture(scope="session", autouse=True) -def mock_json_loading(): +def mock_json_loading() -> Generator: """Session-scoped fixture to mock JSON loading globally for all tests to prevent race conditions.""" original_json_load = json.load - def safe_json_load(fp): + def safe_json_load(fp) -> dict: """Mock json.load to return safe data for dynamic files.""" filename = getattr(fp, "name", str(fp)) From 19f5d828b4a7cd2b797a935448e099fb36d5ce49 Mon Sep 17 00:00:00 2001 From: Romulo Quidute Filho Date: Mon, 22 Dec 2025 17:51:29 +0000 Subject: [PATCH 11/13] Code review - flake8 --- app/tests/conftest.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/app/tests/conftest.py b/app/tests/conftest.py index c0c1cfab..5c9a46f9 100644 --- a/app/tests/conftest.py +++ b/app/tests/conftest.py @@ -18,7 +18,7 @@ import json import sys from importlib import import_module -from typing import AsyncGenerator, Generator +from typing import Any, AsyncGenerator, Generator from unittest import mock from unittest.mock import patch @@ -132,7 +132,7 @@ def block_on_serial_marker(request: pytest.FixtureRequest) -> Generator: # Create a mock that returns valid JSON for dynamic files, original for static files original_json_load = json.load - def mock_json_load(fp) -> dict: + def mock_json_load(fp: Any) -> dict: """Smart mock that handles dynamic vs static JSON files differently.""" filename = getattr(fp, "name", str(fp)) @@ -180,7 +180,7 @@ def mock_json_loading() -> Generator: original_json_load = json.load - def safe_json_load(fp) -> dict: + def safe_json_load(fp: Any) -> dict: """Mock json.load to return safe data for dynamic files.""" 
filename = getattr(fp, "name", str(fp)) From 9ecf018be181b16b5913750e16eec99de987bbb2 Mon Sep 17 00:00:00 2001 From: Romulo Quidute Filho Date: Mon, 22 Dec 2025 17:59:32 +0000 Subject: [PATCH 12/13] Code review - fix unit test --- app/tests/api/api_v1/test_test_run_executions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/tests/api/api_v1/test_test_run_executions.py b/app/tests/api/api_v1/test_test_run_executions.py index e7e5c615..68a0b8e9 100644 --- a/app/tests/api/api_v1/test_test_run_executions.py +++ b/app/tests/api/api_v1/test_test_run_executions.py @@ -968,9 +968,9 @@ def test_read_multiple_test_run_executions_with_limit_zero_returns_all( response_ids ), f"Created IDs {created_ids} not found in response IDs" - # Verify we got exactly 105 test runs + # Verify at least 105 created runs assert ( - len(content) == 105 + len(content) >= 105 ), f"Expected at least 105 test runs with limit=0, got {len(content)}" From 57a6abc196214c18a81f88ca532b3507c082b6c7 Mon Sep 17 00:00:00 2001 From: Romulo Quidute Filho Date: Tue, 23 Dec 2025 23:04:07 +0000 Subject: [PATCH 13/13] Code review --- app/crud/crud_test_run_execution.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/app/crud/crud_test_run_execution.py b/app/crud/crud_test_run_execution.py index 9a4258ca..f7713842 100644 --- a/app/crud/crud_test_run_execution.py +++ b/app/crud/crud_test_run_execution.py @@ -92,7 +92,12 @@ def get_multi( else: query = query.order_by(self.model.id.asc()) else: - query = query.order_by(order_by) + # Apply sort_order to the specified order_by column + column = getattr(self.model, order_by) + if sort_order == "desc": + query = query.order_by(column.desc()) + else: + query = query.order_by(column.asc()) query = query.offset(skip)