-
Notifications
You must be signed in to change notification settings - Fork 12
Open
Labels
benchmark-failure · integration test failed
Description
Benchmark scenario ID: whittaker
Benchmark scenario definition: https://github.com/ESA-APEx/apex_algorithms/blob/0e03f815f0f63265b2cf46df6314638606387109/algorithm_catalog/vito/whittaker/benchmark_scenarios/whittaker.json
openEO backend: openeofed.dataspace.copernicus.eu
GitHub Actions workflow run: https://github.com/ESA-APEx/apex_algorithms/actions/runs/22625599737
Workflow artifacts: https://github.com/ESA-APEx/apex_algorithms/actions/runs/22625599737#artifacts
Test start: 2026-03-03 13:40:57.888106+00:00
Test duration: 0:08:03.510007
Test outcome: ❌ failed
Last successful test phase: download-reference
Failure in test phase: compare:derived_from-change
Contact Information
| Name | Organization | Contact |
|---|---|---|
| Bram Janssen | VITO | Contact via VITO (VITO Website, GitHub) |
Process Graph
{
"saveresult1": {
"arguments": {
"data": {
"from_node": "whittaker1"
},
"format": "JSON",
"options": {}
},
"process_id": "save_result",
"result": true
},
"whittaker1": {
"arguments": {
"smoothing_lambda": 10000,
"spatial_extent": {
"coordinates": [
[
[
5.170012098271149,
51.25062964728295
],
[
5.17085904378298,
51.24882567194015
],
[
5.17857421368097,
51.2468515482926
],
[
5.178972704726344,
51.24982704376254
],
[
5.170012098271149,
51.25062964728295
]
]
],
"type": "Polygon"
},
"temporal_extent": [
"2022-01-01",
"2022-12-31"
]
},
"namespace": "https://raw.githubusercontent.com/VITObelgium/openeo_algorithm_catalog/refs/heads/main/whittaker/openeo_udp/whittaker.json",
"process_id": "whittaker"
}
}
Error Logs
scenario = BenchmarkScenario(id='whittaker', description='Whittaker smoothing on Sentinel-2 NDVI time series', backend='openeofed...home/runner/work/apex_algorithms/apex_algorithms/algorithm_catalog/vito/whittaker/benchmark_scenarios/whittaker.json'))
connection_factory = <function connection_factory.<locals>.get_connection at 0x7f0de15fa520>
tmp_path = PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_whittaker_0')
track_metric = <function track_metric.<locals>.track at 0x7f0de15fa660>
track_phase = <function track_phase.<locals>.track at 0x7f0de15fa7a0>
upload_assets_on_fail = <function upload_assets_on_fail.<locals>.collect at 0x7f0de15fa840>
request = <FixtureRequest for <Function test_run_benchmark[whittaker]>>
@pytest.mark.parametrize(
"scenario",
[
# Use scenario id as parameterization id to give nicer test names.
pytest.param(uc, id=uc.id)
for uc in get_benchmark_scenarios()
],
)
def test_run_benchmark(
scenario: BenchmarkScenario,
connection_factory,
tmp_path: Path,
track_metric,
track_phase,
upload_assets_on_fail,
request,
):
track_metric("scenario_id", scenario.id)
with track_phase(phase="connect"):
# Check if a backend override has been provided via cli options.
override_backend = request.config.getoption("--override-backend")
backend_filter = request.config.getoption("--backend-filter")
if backend_filter and not re.match(backend_filter, scenario.backend):
# TODO apply filter during scenario retrieval, but seems to be hard to retrieve cli param
pytest.skip(
f"skipping scenario {scenario.id} because backend {scenario.backend} does not match filter {backend_filter!r}"
)
backend = scenario.backend
if override_backend:
_log.info(f"Overriding backend URL with {override_backend!r}")
backend = override_backend
connection: openeo.Connection = connection_factory(url=backend)
report_path = None
with track_phase(phase="create-job"):
# TODO #14 scenario option to use synchronous instead of batch job mode?
job = connection.create_job(
process_graph=scenario.process_graph,
title=f"APEx benchmark {scenario.id}",
additional=scenario.job_options,
)
track_metric("job_id", job.job_id)
if request.config.getoption("--upload-benchmark-report"):
report_path = tmp_path / "benchmark_report.json"
report_path.write_text(json.dumps({
"job_id": job.job_id,
"scenario_id": scenario.id,
"scenario_description": scenario.description,
"scenario_backend": scenario.backend,
"scenario_source": str(scenario.source) if scenario.source else None,
"reference_data": scenario.reference_data,
"reference_options": scenario.reference_options,
}, indent=2))
upload_assets_on_fail(report_path)
with track_phase(phase="run-job"):
# TODO: monitor timing and progress
# TODO: separate "job started" and run phases?
max_minutes = request.config.getoption("--maximum-job-time-in-minutes")
if max_minutes:
def _timeout_handler(signum, frame):
raise TimeoutError(
f"Batch job {job.job_id} exceeded maximum allowed time of {max_minutes} minutes"
)
old_handler = signal.signal(signal.SIGALRM, _timeout_handler)
signal.alarm(max_minutes * 60)
try:
job.start_and_wait()
finally:
if max_minutes:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
with track_phase(phase="collect-metadata"):
collect_metrics_from_job_metadata(job, track_metric=track_metric)
results = job.get_results()
collect_metrics_from_results_metadata(results, track_metric=track_metric)
with track_phase(phase="download-actual"):
# Download actual results
actual_dir = tmp_path / "actual"
paths = results.download_files(target=actual_dir, include_stac_metadata=True)
# Upload assets on failure
upload_assets_on_fail(*paths)
with track_phase(phase="download-reference"):
reference_dir = download_reference_data(
scenario=scenario, reference_dir=tmp_path / "reference"
)
if report_path is not None:
report = json.loads(report_path.read_text())
report["actual_files"] = {
str(p.relative_to(actual_dir)): f"{p.stat().st_size / 1024:.1f} kb"
for p in sorted(actual_dir.rglob("*")) if p.is_file()
}
ref_files = {}
for p in sorted(reference_dir.rglob("*")):
if not p.is_file():
continue
rel = p.relative_to(reference_dir)
size_str = f"{p.stat().st_size / 1024:.1f} kb"
actual_counterpart = actual_dir / rel
if not actual_counterpart.exists():
size_str += " (missing in actual)"
elif actual_counterpart.stat().st_size != p.stat().st_size:
size_str += f" (actual: {actual_counterpart.stat().st_size / 1024:.1f} kb)"
ref_files[str(rel)] = size_str
report["reference_files"] = ref_files
report_path.write_text(json.dumps(report, indent=2))
with track_phase(
phase="compare", describe_exception=analyse_results_comparison_exception
):
# Compare actual results with reference data
> assert_job_results_allclose(
actual=actual_dir,
expected=reference_dir,
tmp_path=tmp_path,
rtol=scenario.reference_options.get("rtol", 1e-3),
atol=scenario.reference_options.get("atol", 1),
pixel_tolerance=scenario.reference_options.get("pixel_tolerance", 1),
)
tests/test_benchmarks.py:146:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
actual = PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_whittaker_0/actual')
expected = PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_whittaker_0/reference')
def assert_job_results_allclose(
actual: Union[BatchJob, JobResults, str, Path],
expected: Union[BatchJob, JobResults, str, Path],
*,
rtol: float = _DEFAULT_RTOL,
atol: float = _DEFAULT_ATOL,
pixel_tolerance: float = _DEFAULT_PIXELTOL,
tmp_path: Optional[Path] = None,
):
"""
Assert that two job results sets are equal (with tolerance).
:param actual: actual job results, provided as :py:class:`~openeo.rest.job.BatchJob` object,
:py:meth:`~openeo.rest.job.JobResults` object or path to directory with downloaded assets.
:param expected: expected job results, provided as :py:class:`~openeo.rest.job.BatchJob` object,
:py:meth:`~openeo.rest.job.JobResults` object or path to directory with downloaded assets.
:param rtol: relative tolerance
:param atol: absolute tolerance
:param pixel_tolerance: maximum fraction of pixels (in percent)
that is allowed to be significantly different (considering ``atol`` and ``rtol``)
:param tmp_path: root temp path to download results if needed.
It's recommended to pass pytest's `tmp_path` fixture here
:raises AssertionError: if not equal within the given tolerance
.. versionadded:: 0.31.0
.. warning::
This function is experimental and subject to change.
"""
issues = _compare_job_results(
actual, expected, rtol=rtol, atol=atol, pixel_tolerance=pixel_tolerance, tmp_path=tmp_path
)
if issues:
> raise AssertionError("\n".join(issues))
E AssertionError: Issues for metadata file 'job-results.json':
E Differing 'derived_from' links (0 common, 146 only in actual, 145 only in expected):
E only in actual: {'S2B_MSIL2A_20221101T105109_N0510_R051_T31UFS_20240802T143807', 'S2A_MSIL2A_20220815T103641_N0510_R008_T31UFS_20240711T145601', 'S2B_MSIL2A_20220929T103729_N0510_R008_T31UFS_20240726T085348', 'S2B_MSIL2A_20221012T104909_N0510_R051_T31UFS_20240801T062603', 'S2B_MSIL2A_20220313T103729_N0510_R008_T31UFS_20240521T093447', 'S2B_MSIL2A_20220525T104619_N0510_R051_T31UFS_20240617T223754', 'S2B_MSIL2A_20220323T103639_N0510_R008_T31UFS_20240522T144634', 'S2B_MSIL2A_20220803T104629_N0510_R051_T31UFS_20240712T060026', 'S2B_MSIL2A_20220105T105339_N0510_R051_T31UFS_20240423T204237', 'S2A_MSIL2A_20220219T105101_N0510_R051_T31UFS_20240530T003359', 'S2B_MSIL2A_20220515T104619_N0510_R051_T31UFS_20240610T215024', 'S2B_MSIL2A_20220422T103619_N0510_R008_T31UFS_20240609T221735', 'S2B_MSIL2A_20220830T103629_N0510_R008_T31UFS_20240705T093805', 'S2A_MSIL2A_20220609T104631_N0510_R051_T31UFS_20240619T185429', 'S2B_MSIL2A_20220112T104319_N0510_R008_T31UFS_20240428T050058', 'S2A_MSIL2A_20221126T105401_N0510_R0...
E only in expected: {'/eodata/Sentinel-2/MSI/L2A_N0500/2022/03/11/S2A_MSIL2A_20220311T104841_N0510_R051_T31UFS_20240523T050849.SAFE', '/eodata/Sentinel-2/MSI/L2A_N0500/2022/07/19/S2A_MSIL2A_20220719T105041_N0510_R051_T31UFS_20240705T161711.SAFE', '/eodata/Sentinel-2/MSI/L2A_N0500/2022/08/30/S2B_MSIL2A_20220830T103629_N0510_R008_T31UFS_20240705T093805.SAFE', '/eodata/Sentinel-2/MSI/L2A_N0500/2022/10/14/S2A_MSIL2A_20221014T104011_N0510_R008_T31UFS_20240727T030255.SAFE', '/eodata/Sentinel-2/MSI/L2A_N0500/2022/03/23/S2B_MSIL2A_20220323T103639_N0510_R008_T31UFS_20240522T144634.SAFE', '/eodata/Sentinel-2/MSI/L2A_N0500/2022/09/24/S2A_MSIL2A_20220924T103801_N0510_R008_T31UFS_20240726T205556.SAFE', '/eodata/Sentinel-2/MSI/L2A_N0500/2022/06/11/S2B_MSIL2A_20220611T103629_N0510_R008_T31UFS_20240629T153630.SAFE', '/eodata/Sentinel-2/MSI/L2A_N0500/2022/01/07/S2A_MSIL2A_20220107T104431_N0510_R008_T31UFS_20240425T110026.SAFE', '/eodata/Sentinel-2/MSI/L2A_N0500/2022/09/04/S2A_MSIL2A_20220904T103641_N0510_R008_T31UFS_20....
/opt/hostedtoolcache/Python/3.12.12/x64/lib/python3.12/site-packages/openeo/testing/results.py:521: AssertionError
----------------------------- Captured stdout call -----------------------------
0:00:00 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': send 'start'
0:00:15 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': queued (progress 0%)
0:00:21 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': queued (progress 0%)
0:00:27 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': queued (progress 0%)
0:00:36 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': queued (progress 0%)
0:00:46 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': queued (progress 0%)
0:00:58 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': queued (progress 0%)
0:01:14 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': running (progress N/A)
0:01:33 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': running (progress N/A)
0:01:57 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': running (progress N/A)
0:02:28 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': running (progress N/A)
0:03:06 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': running (progress N/A)
0:03:53 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': running (progress N/A)
0:04:51 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': running (progress N/A)
0:05:52 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': running (progress N/A)
0:06:53 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': running (progress N/A)
0:07:53 Job 'cdse-j-2603031341014ae38386c2b72ec00d24': finished (progress 100%)
------------------------------ Captured log call -------------------------------
INFO conftest:conftest.py:145 Connecting to 'openeofed.dataspace.copernicus.eu'
INFO openeo.config:config.py:193 Loaded openEO client config from sources: []
INFO conftest:conftest.py:158 Checking for auth_env_var='OPENEO_AUTH_CLIENT_CREDENTIALS_CDSEFED' to drive auth against url='openeofed.dataspace.copernicus.eu'.
INFO conftest:conftest.py:162 Extracted provider_id='CDSE' client_id='openeo-apex-benchmarks-service-account' from auth_env_var='OPENEO_AUTH_CLIENT_CREDENTIALS_CDSEFED'
INFO openeo.rest.connection:connection.py:302 Found OIDC providers: ['CDSE']
INFO openeo.rest.auth.oidc:oidc.py:404 Doing 'client_credentials' token request 'https://identity.dataspace.copernicus.eu/auth/realms/CDSE/protocol/openid-connect/token' with post data fields ['grant_type', 'client_id', 'client_secret', 'scope'] (client_id 'openeo-apex-benchmarks-service-account')
INFO openeo.rest.connection:connection.py:401 Obtained tokens: ['access_token', 'id_token']
INFO openeo.rest.job:job.py:436 Downloading Job result asset 'timeseries.json' from https://s3.waw3-1.openeo.v1.dataspace.copernicus.eu/openeo-data-prod-waw4-1/batch_jobs/j-2603031341014ae38386c2b72ec00d24/timeseries.json?X-Proxy-Head-As-Get=true&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=08907695e8c642739ead47b903269fc3%2F20260303%2Fwaw4-1%2Fs3%2Faws4_request&X-Amz-Date=20260303T134857Z&X-Amz-Expires=86400&X-Amz-SignedHeaders=host&X-Amz-Security-Token=eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlX2FybiI6ImFybjpvcGVuZW93czppYW06Ojpyb2xlL29wZW5lby1kYXRhLXByb2Qtd2F3NC0xLXdvcmtzcGFjZSIsImluaXRpYWxfaXNzdWVyIjoib3BlbmVvLnByb2Qud2F3My0xLm9wZW5lby1pbnQudjEuZGF0YXNwYWNlLmNvcGVybmljdXMuZXUiLCJodHRwczovL2F3cy5hbWF6b24uY29tL3RhZ3MiOnsicHJpbmNpcGFsX3RhZ3MiOnsiam9iX2lkIjpbImotMjYwMzAzMTM0MTAxNGFlMzgzODZjMmI3MmVjMDBkMjQiXSwidXNlcl9pZCI6WyI2YTc3ZmNkMS05YzA4LTQ2ZTktYjg3NS01NGZiOTk5YWIyMDAiXX0sInRyYW5zaXRpdmVfdGFnX2tleXMiOlsidXNlcl9pZCIsImpvYl9pZCJdfSwiaXNzIjoic3RzLndhdzMtMS5vcGVuZW8udjEuZGF0YXNwYWNlLmNvcGVybmljdXMuZXUiLCJzdWIiOiJvcGVuZW8tZHJpdmVyIiwiZXhwIjoxNzcyNTg4OTM3LCJuYmYiOjE3NzI1NDU3MzcsImlhdCI6MTc3MjU0NTczNywianRpIjoiY2ViNWMxMTgtYWFlMS00MTRhLTg3MGEtZmZlOTMwN2E2MWMwIiwiYWNjZXNzX2tleV9pZCI6IjA4OTA3Njk1ZThjNjQyNzM5ZWFkNDdiOTAzMjY5ZmMzIn0.gCja5YQVNxggOz7TR3CEI1GL_TYu67EkS9mBqg7lRmsXh_QNAmlT5tIkLu6HY4aZCi0qcYBZitVwGOH8Gx0HU1EVtFwvpHNvdAZHuJgt_YfWIjiJ91HG1VrZgjONuP9VQo6VCY-o8YMsskJsU8ILVmTBenP6UBeKMiDaen7A1Xe5M8hpLLd6sPwKbWzQsiErs5l2NJBUarSHL6KXQSh7ClBVYf3Hs_l6OIDwrMFdpq8eWGSB1jxaSczPe7vDOrrqHem9ItwpX4-4LrTJVcpiHkqEktms82EdxIY8zsGaMaos_8NuLHBDv9PFwSe4v9z_Rzin2X_gjAJ3TnXxBNAdhQ&X-Amz-Signature=3339cf9962f5a41fcea5ae616d38ee91b7e346f5a73dbc2dbb378ae2bf396e9c to /home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_whittaker_0/actual/timeseries.json
INFO apex_algorithm_qa_tools.scenarios:util.py:345 Downloading reference data for scenario.id='whittaker' to reference_dir=PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_whittaker_0/reference'): start 2026-03-03 13:48:59.813864
INFO apex_algorithm_qa_tools.scenarios:util.py:345 Downloading source='https://s3.waw3-1.cloudferro.com/swift/v1/apex-examples/whittaker/timeseries.json' to path=PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_whittaker_0/reference/timeseries.json'): start 2026-03-03 13:48:59.814109
INFO apex_algorithm_qa_tools.scenarios:util.py:351 Downloading source='https://s3.waw3-1.cloudferro.com/swift/v1/apex-examples/whittaker/timeseries.json' to path=PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_whittaker_0/reference/timeseries.json'): end 2026-03-03 13:49:00.500065, elapsed 0:00:00.685956
INFO apex_algorithm_qa_tools.scenarios:util.py:345 Downloading source='https://s3.waw3-1.cloudferro.com/swift/v1/apex-examples/whittaker/job-results.json' to path=PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_whittaker_0/reference/job-results.json'): start 2026-03-03 13:49:00.500405
INFO apex_algorithm_qa_tools.scenarios:util.py:351 Downloading source='https://s3.waw3-1.cloudferro.com/swift/v1/apex-examples/whittaker/job-results.json' to path=PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_whittaker_0/reference/job-results.json'): end 2026-03-03 13:49:01.395244, elapsed 0:00:00.894839
INFO apex_algorithm_qa_tools.scenarios:util.py:351 Downloading reference data for scenario.id='whittaker' to reference_dir=PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_whittaker_0/reference'): end 2026-03-03 13:49:01.395390, elapsed 0:00:01.581526
INFO openeo.testing.results:results.py:422 Comparing job results: PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_whittaker_0/actual') vs PosixPath('/home/runner/work/apex_algorithms/apex_algorithms/qa/benchmarks/tmp_path_root/test_run_benchmark_whittaker_0/reference')
WARNING openeo.testing.results:results.py:456 Unhandled job result asset 'timeseries.json'
Reactions are currently unavailable
Metadata
Metadata
Assignees
Labels
benchmark-failure · integration test failed