diff --git a/dandi/bids_validator_deno/__init__.py b/dandi/bids_validator_deno/__init__.py
new file mode 100644
index 000000000..f7dce26a3
--- /dev/null
+++ b/dandi/bids_validator_deno/__init__.py
@@ -0,0 +1,9 @@
+"""Package providing an interface to the deno-compiled BIDS validator"""
+
+from ._validator import bids_validate, get_version
+
+__all__ = ["bids_validate", "get_version"]
+
+
+def __dir__() -> list[str]:
+    return list(__all__)  # return a copy of `__all__` to avoid modifying the original
diff --git a/dandi/bids_validator_deno/_models.py b/dandi/bids_validator_deno/_models.py
new file mode 100644
index 000000000..28f407682
--- /dev/null
+++ b/dandi/bids_validator_deno/_models.py
@@ -0,0 +1,78 @@
+# This file holds the models used to interface with the deno-compiled BIDS validator
+# with the `--json` option. The defined entities in this file share the same names and
+# structure as those defined in
+# https://github.com/bids-standard/bids-validator/blob/main/src/types/validation-result.ts
+# and
+# https://github.com/bids-standard/bids-validator/blob/main/src/issues/datasetIssues.ts
+# The only exception to that rule is that the `ValidationResult` type in the
+# BIDS validator source is named `BidsValidationResult` in this file.
+ +from __future__ import annotations + +from enum import auto +from typing import Any, Literal, Optional, Union + +from pydantic import BaseModel, ConfigDict + +from dandi.utils import StrEnum + + +class BidsValidationResult(BaseModel): + issues: DatasetIssues + summary: SummaryOutput + derivativesSummary: Optional[dict[str, BidsValidationResult]] = None + + model_config = ConfigDict(strict=True) + + +class DatasetIssues(BaseModel): + issues: list[Issue] + codeMessages: dict[str, str] + + model_config = ConfigDict(strict=True) + + +class Issue(BaseModel): + code: str + subCode: Optional[str] = None + severity: Optional[Severity] = None + location: Optional[str] = None + issueMessage: Optional[str] = None + suggestion: Optional[str] = None + affects: Optional[list[str]] = None + rule: Optional[str] = None + line: Optional[int] = None + character: Optional[int] = None + + model_config = ConfigDict(strict=True) + + +class Severity(StrEnum): + warning = auto() + error = auto() + ignore = auto() + + +class SummaryOutput(BaseModel): + sessions: list[str] + subjects: list[str] + subjectMetadata: list[SubjectMetadata] + tasks: list[str] + modalities: list[str] + secondaryModalities: list[str] + totalFiles: int + size: int + dataProcessed: bool + pet: dict[str, Any] + dataTypes: list[str] + schemaVersion: str + + model_config = ConfigDict(strict=True) + + +class SubjectMetadata(BaseModel): + participantId: str + age: Union[int, Literal["89+"], None] = None + sex: Optional[str] = None + + model_config = ConfigDict(strict=True) diff --git a/dandi/bids_validator_deno/_validator.py b/dandi/bids_validator_deno/_validator.py new file mode 100644 index 000000000..71cc69b14 --- /dev/null +++ b/dandi/bids_validator_deno/_validator.py @@ -0,0 +1,449 @@ +# This file provides definitions to do BIDS validation through the deno-compiled BIDS +# validator, https://pypi.org/project/bids-validator-deno/. 
+ +from importlib.metadata import version +import json +from pathlib import Path +import re +from subprocess import CompletedProcess, TimeoutExpired, run +from tempfile import TemporaryDirectory +from typing import Optional + +from pydantic import DirectoryPath, validate_call + +from dandi.utils import find_parent_directory_containing +from dandi.validate_types import ( + Origin, + OriginType, + Scope, + Severity, + Standard, + ValidationResult, + Validator, +) + +from ._models import BidsValidationResult, Issue +from ._models import Severity as BidsSeverity + +DISTRIBUTION_NAME = CMD = "bids-validator-deno" +TIMEOUT = 600.0 # 10 minutes, in seconds + +# ANSI SGR (Select Graphic Rendition) pattern +_ANSI_SGR_PATTERN = re.compile(r"\x1b\[[0-9;]*m") + +# Map from BIDS validator severity levels to Dandi severity levels +# Note: BidsSeverity.ignore level is not mapped. Issues with this severity level will +# be ignored in the harmonization process. +_SEVERITY_MAP = { + BidsSeverity.warning: Severity.HINT, + BidsSeverity.error: Severity.ERROR, +} + + +class ValidatorError(Exception): + """ + Exception raised when the deno-compiled BIDS validator fails in execution, + and the failure is not an indication of the presence of validation errors. + """ + + def __init__( + self, + cmd: list[str], + returncode: int, + stdout: str, + stderr: str, + outfile_content: Optional[str] = None, + ): + """ + Parameters + ---------- + cmd : list[str] + The command that was executed as the execution of BIDS validator + returncode : int + The return code of the execution + stdout : str + The standard output of the execution + stderr : str + The standard error of the execution + outfile_content : Optional[str] + The content of the output file produced by the execution + (if any). This is `None` if the output file was not produced. 
+ """ + # Pass a human-readable message up to the base Exception + super().__init__("Execution of the deno-compiled BIDS validator failed") + self.cmd = cmd + self.returncode = returncode + self.stdout = stdout + self.stderr = stderr + self.outfile_content = outfile_content + + def __str__(self): + base_msg = super().__str__() # the message passed in __init__ + return ( + f"{base_msg}\n" + f"Command: `{' '.join(self.cmd)}`\n" + f"Return code: {self.returncode}\n" + f"Stdout:\n{self.stdout}\n" + f"Stderr:\n{self.stderr}" + + ( + f"\nOutfile content:\n{self.outfile_content}" + if self.outfile_content is not None + else "" + ) + ) + + +def strip_sgr(text: str) -> str: + """ + Strip ANSI SGR (Select Graphic Rendition) sequences from a string. + + For example, this can be used to remove color encoding sequences from terminal + outputs. + """ + return _ANSI_SGR_PATTERN.sub("", text) + + +def _invoke_validator(args: list[str]) -> CompletedProcess: + """ + Invoke the deno compiled BIDS validator + + Parameters + ---------- + args : list[str] + An ordered list of options and arguments to pass to the validator + + Returns + ------- + CompletedProcess + An object representing the result of invoking the deno-compiled BIDS validator + + Raises + ------ + RuntimeError + If the deno-compiled BIDS validator times out in `TIMEOUT` seconds + + Notes + ----- + - The text captured in `stdout` and `stderr` can be color encoded + """ + try: + result = run( + args=[CMD, *args], + capture_output=True, + timeout=TIMEOUT, + text=True, + ) + except TimeoutExpired as e: + raise RuntimeError( + f"The `{' '.join(e.cmd)}` command timed out after {e.timeout} " f"seconds" + ) from e + + return result + + +def bids_validate( + dir_: DirectoryPath, + config: Optional[dict] = None, + ignore_nifti_headers: bool = False, + recursive: bool = False, +) -> list[ValidationResult]: + """ + Validate a file directory as a BIDS dataset with the deno-compiled BIDS validator + + Parameters + ---------- + dir_ : 
DirectoryPath + The path to the directory to validate + config : Optional[dict] + The configuration to use in the validation. This specifies a JSON configuration + file to be provided through the `--config` option when invoking the underlying + deno-compiled BIDS validator. If `None`, the deno-compiled BIDS validator will + be invoked without the `--config` option. + ignore_nifti_headers : bool + If `True`, disregard NIfTI header content during validation + recursive : bool + If `True`, validate datasets found in derivatives directories in addition to + root dataset + + Returns + ------- + list[ValidationResult] + A list of `ValidationResult` objects in which each object represents an issue + in the validation result produced by the deno-compiled BIDS validator + """ + + try: + bv_result = _bids_validate(dir_, config, ignore_nifti_headers, recursive) + except ValidatorError as e: + return [ + ValidationResult( + id="BIDS.VALIDATOR_ERROR", + origin=( + Origin( + type=OriginType.INTERNAL, + validator=Validator.bids_validator_deno, + validator_version=get_version(), + ) + ), + scope=Scope.DATASET, + origin_result=e, + dandiset_path=find_parent_directory_containing("dandiset.yaml", dir_), + dataset_path=dir_, + message="Deno-compiled BIDS validator failed in execution", + path=dir_, + ) + ] + + return _harmonize(bv_result, dir_) + + +@validate_call +def _bids_validate( + dir_: DirectoryPath, + config: Optional[dict] = None, + ignore_nifti_headers: bool = False, + recursive: bool = False, +) -> BidsValidationResult: + """ + Validate a file directory as a BIDS dataset with the deno-compiled BIDS validator + + Parameters + ---------- + dir_ : DirectoryPath + The path to the directory to validate + config : Optional[dict] + The configuration to use in the validation. This specifies a JSON configuration + file to be provided through the `--config` option when invoking the underlying + deno-compiled BIDS validator. 
If `None`, the deno-compiled BIDS validator will + be invoked without the `--config` option. + ignore_nifti_headers : bool + If `True`, disregard NIfTI header content during validation + recursive : bool + If `True`, validate datasets found in derivatives directories in addition to + root dataset + + Returns + ------- + BidsValidationResult + The result of the validation using the deno-compiled BIDS validator with + the `--json` option. + + Raises + ------ + ValidatorError + If the deno-compiled BIDS validator fails in execution, and the failure is + not an indication of the presence of validation errors. + pydantic.ValidationError + If any of the parameters can't be validated by Pydantic according to their type + annotation. E.g., If `dir_` is not pointing to a valid directory or not provided + at all + """ + config_fname = "config.json" + out_fname = "out.json" + + # Conditional options + conditional_ops = [] + + if ignore_nifti_headers: + conditional_ops.append("--ignoreNiftiHeaders") + if recursive: + conditional_ops.append("--recursive") + + with TemporaryDirectory() as tmp_dir: + if config is not None: + # Write the config to a file in the temporary directory + configfile_path = Path(tmp_dir) / config_fname + configfile_path.write_text(json.dumps(config)) + + # Add the `--config` option + conditional_ops.extend(["--config", str(configfile_path)]) + + outfile_path = Path(tmp_dir) / out_fname + result = _invoke_validator( + ["--json", "--outfile", str(outfile_path), *conditional_ops, str(dir_)] + ) + # Read the validation result from the outfile if it exists + outfile_content = outfile_path.read_text() if outfile_path.exists() else None + + # The condition of this statement may need to change in the future. 
+ # See https://github.com/bids-standard/bids-validator/issues/191 for details + if result.returncode not in range(0, 2) or result.stderr != "": + raise ValidatorError( + result.args, + result.returncode, + result.stdout, + result.stderr, + outfile_content, + ) + + assert ( + outfile_content is not None + ), "`outfile_content` should not be None when validation is successful" + + # Parse the content, in JSON format, of the outfile + return BidsValidationResult.model_validate_json(outfile_content, strict=True) + + +def get_version() -> str: + """ + Return the version of the deno-compiled BIDS validator + + Returns + ------- + str + The version of the deno-compiled BIDS validator + """ + return version(DISTRIBUTION_NAME) + + +def _harmonize( + bv_result: BidsValidationResult, ds_path: Path +) -> list[ValidationResult]: + """ + Harmonize a `BidsValidationResult` object into a list of `ValidationResult` objects + + Parameters + ---------- + bv_result : BidsValidationResult + The `BidsValidationResult` object representing the result of the validation + using the deno-compiled BIDS validator + ds_path : Path + The path to the dataset that has been validated to produce the `bv_result` + object + + Returns + ------- + list[ValidationResult] + A list of `ValidationResult` objects in which each object represents an issue + in the validation result. 
+ """ + # Ensure the path is absolute and in canonical form + ds_path = ds_path.resolve() + + issues = bv_result.issues.issues + code_messages = bv_result.issues.codeMessages + schema_version = bv_result.summary.schemaVersion + dandiset_path = find_parent_directory_containing("dandiset.yaml", ds_path) + + origin = Origin( + type=OriginType.VALIDATION, + validator=Validator.bids_validator_deno, + validator_version=get_version(), + standard=Standard.BIDS, + # `BIDSVersion` is unavailable through the BIDS validator; see + # https://github.com/bids-standard/bids-validator/issues/10#issuecomment-2848121538 + # for details + # standard_version=, + standard_schema_version=schema_version, + ) + + results: list[ValidationResult] = [] + for issue in issues: + severity = issue.severity + if severity is BidsSeverity.ignore: + # Ignore issues with severity "ignore" + # TODO: If we want to include these issues, we will have to add a new value + # to the Severity enum. + continue + + # The absolute path to the file or directory that the issue is related to + issue_path = _get_path(issue, ds_path) + + results.append( + ValidationResult( + id=f"BIDS.{issue.code}", + origin=origin, + scope=_get_scope(issue_path), + # Store only the issue, not entire bv_result with more context + origin_result=issue, + severity=_SEVERITY_MAP[severity] if severity else None, + dandiset_path=dandiset_path, + dataset_path=ds_path, + message=_get_msg(issue, code_messages), + path=issue_path, + ) + ) + + return results + + +def _get_scope(issue_path: Optional[Path]) -> Scope: + """ + Return the scope of the issue + + Parameters + ---------- + issue_path : Optional[Path] + The path to the file or directory that the issue is related to. `None` if there + is no a file or directory is related to the issue. + + Returns + ------- + Scope + The scope of the issue. If `issue_path` is `None`, the scope is set to + `Scope.DATASET`. 
+ """ + if issue_path is None: + return Scope.DATASET + + if issue_path.is_file() or issue_path.is_symlink(): + return Scope.FILE + if issue_path.is_dir(): + return Scope.FOLDER + + return Scope.DATASET + + +def _get_msg(issue: Issue, code_messages: dict[str, str]) -> Optional[str]: + """ + Produce a human-readable message from an issue in a validation result produced by + the deno-compiled BIDS validator. + + Parameters + ---------- + issue : Issue + The issue to produce a message from + code_messages : dict[str, str] + A dictionary mapping issue codes to human-readable messages given as part of + the validation result + + Returns + ------- + Optional[str] + The human-readable message (or `None` if such a message can't be produced) + """ + coded_msg = code_messages.get(issue.code, "") + sub_code_msg = f"subCode: {issue.subCode}" if issue.subCode else "" + issue_msg = f"issueMessage: {issue.issueMessage}" if issue.issueMessage else "" + + msg = "\n".join(filter(None, [coded_msg, sub_code_msg, issue_msg])) + + # Return `None` if a non-empty message cannot be produced + return msg if msg else None + + +def _get_path(issue: Issue, ds_path: Path) -> Optional[Path]: + """ + Given an issue from the validation result of the deno-compiled BIDS validator, + produce the absolute path to the file or directory that the issue is related to. 
+
+    Parameters
+    ----------
+    issue : Issue
+        The issue to produce a path for
+    ds_path : Path
+        The path to the dataset that has been validated to produce the validation result
+
+    Returns
+    -------
+    Optional[Path]
+        The absolute path to the file or directory that the issue is related to
+        or `None` if there is no file or directory that the issue is related to
+    """
+    if issue.location is None:
+        return None
+
+    # Ensure the path is absolute and in canonical form
+    ds_path = ds_path.resolve()
+
+    return ds_path.joinpath(issue.location.lstrip("/"))
diff --git a/dandi/cli/tests/test_cmd_validate.py b/dandi/cli/tests/test_cmd_validate.py
index d108cf8b4..2d7737f0f 100644
--- a/dandi/cli/tests/test_cmd_validate.py
+++ b/dandi/cli/tests/test_cmd_validate.py
@@ -1,11 +1,9 @@
-import json
 from pathlib import Path
 
 from click.testing import CliRunner
 import pytest
 
 from ..cmd_validate import _process_issues, validate
-from ...tests.fixtures import BIDS_ERROR_TESTDATA_SELECTION
 from ...validate_types import (
     Origin,
     OriginType,
@@ -16,17 +14,37 @@
 )
 
 
-@pytest.mark.parametrize("dataset", BIDS_ERROR_TESTDATA_SELECTION)
-def test_validate_bids_error(bids_error_examples: Path, dataset: str) -> None:
-    broken_dataset = bids_error_examples / dataset
-    with (broken_dataset / ".ERRORS.json").open() as f:
-        expected_errors = json.load(f)
-    r = CliRunner().invoke(validate, [str(broken_dataset)])
-    # Does it break?
+@pytest.mark.parametrize( + "ds_name, expected_err_location", + [ + ("invalid_asl003", "sub-Sub1/perf/sub-Sub1_headshape.jpg"), + ("invalid_pet001", "sub-01/ses-01/anat/sub-02_ses-01_T1w.json"), + ], +) +def test_validate_bids_error( + ds_name: str, + expected_err_location: str, + bids_error_examples: Path, + monkeypatch: pytest.MonkeyPatch, +) -> None: + """ + Test validating a selection of datasets at + https://github.com/bids-standard/bids-error-examples + """ + from dandi.files import bids + from dandi.tests.test_bids_validator_deno.test_validator import mock_bids_validate + + monkeypatch.setattr(bids, "bids_validate", mock_bids_validate) + + broken_dataset = bids_error_examples / ds_name + + r = CliRunner().invoke(validate, ["--min-severity", "ERROR", str(broken_dataset)]) + + # Assert there are errors assert r.exit_code == 1 - # Does it detect all errors? - for key in expected_errors: - assert key in r.output + + # Assert that there is at least one error from the expected location + assert str(Path(expected_err_location)) in r.output def test_validate_severity(organized_nwb_dir3: Path) -> None: diff --git a/dandi/files/bids.py b/dandi/files/bids.py index 2235d8411..a441e5999 100644 --- a/dandi/files/bids.py +++ b/dandi/files/bids.py @@ -9,6 +9,8 @@ from dandischema.models import BareAsset +from dandi.bids_validator_deno import bids_validate + from .bases import GenericAsset, LocalFileAsset, NWBAsset from .zarr import ZarrAsset from ..consts import ZARR_MIME_TYPE @@ -32,19 +34,21 @@ class BIDSDatasetDescriptionAsset(LocalFileAsset): #: A list of all other assets in the dataset dataset_files: list[BIDSAsset] = field(default_factory=list) - #: A list of validation error messages pertaining to the dataset as a - #: whole, populated by `_validate()` + #: A list of all the validation results pertaining to the containing dataset + #: as a BIDS dataset populated by `_validate()` _dataset_errors: list[ValidationResult] | None = None #: A list of validation error 
messages for individual assets in the #: dataset, keyed by `bids_path` properties; populated by `_validate()` - _asset_errors: dict[str, list[ValidationResult]] | None = None + _asset_errors: defaultdict[str, list[ValidationResult]] | None = None #: Asset metadata for individual assets in the dataset, keyed by - #: `bids_path` properties; populated by `_validate()` - _asset_metadata: dict[str, BareAsset] | None = None + #: `bids_path` properties; populated by `_get_metadata()` + _asset_metadata: defaultdict[str, BareAsset] | None = None - #: Version of BIDS used for the validation; + #: Version of BIDS used in the validation + #: (not necessarily the same as the value of the `"BIDSVersion"` field in the + #: represented `dataset_description.json` file); #: populated by `_validate()` #: In future this might be removed and the information included in the #: BareAsset via dandischema. @@ -61,51 +65,78 @@ def bids_root(self) -> Path: """ return self.filepath.parent - def _validate(self) -> None: + @property + def bids_version(self) -> str | None: + """ + The version of BIDS used for in validation + + Note + ---- + This value is not necessarily the same as the value of the `"BIDSVersion"` + field in the represented `dataset_description.json` file. 
+ """ + self._validate() + return self._bids_version + + def _get_metadata(self) -> None: + """ + Get metadata for all assets in the dataset + + This populates `self._asset_metadata` + """ with self._lock: - if self._dataset_errors is None: + if self._asset_metadata is None: # Import here to avoid circular import from dandi.validate import validate_bids + # === Validate the dataset using bidsschematools === + # This is done to obtain the metadata for each asset in the dataset results = validate_bids(self.bids_root) - self._dataset_errors: list[ValidationResult] = [] - self._asset_errors: dict[str, list[ValidationResult]] = defaultdict( - list - ) # Don't apply eta-reduction to the lambda, as mypy needs to be # assured that defaultdict's argument takes no parameters. self._asset_metadata = defaultdict( lambda: BareAsset.model_construct() # type: ignore[call-arg] ) for result in results: - if result.id in BIDS_ASSET_ERRORS: - assert result.path - bids_path = result.path.relative_to(self.bids_root).as_posix() - self._asset_errors[bids_path].append(result) - elif result.id in BIDS_DATASET_ERRORS: - self._dataset_errors.append(result) - elif result.id == "BIDS.MATCH": + if result.id == "BIDS.MATCH": assert result.path bids_path = result.path.relative_to(self.bids_root).as_posix() assert result.metadata is not None self._asset_metadata[bids_path] = prepare_metadata( result.metadata ) - self._bids_version = result.origin.standard_version + + def _validate(self) -> None: + with self._lock: + if self._dataset_errors is None: + + # Obtain BIDS validation results of the entire dataset through the + # deno-compiled BIDS validator + self._dataset_errors = bids_validate(self.bids_root) + + # Categorized validation results related to individual assets by the + # path of the asset in the BIDS dataset + self._asset_errors = defaultdict(list) + for result in self._dataset_errors: + if result.path is not None: + self._asset_errors[ + result.path.relative_to(self.bids_root).as_posix() 
+ ].append(result) + + # Obtain BIDS standard version from one of the validation results + if self._dataset_errors: + bids_version = self._dataset_errors[0].origin.standard_version + self._bids_version = bids_version def get_asset_errors(self, asset: BIDSAsset) -> list[ValidationResult]: """:meta private:""" self._validate() - errors: list[ValidationResult] = [] - if self._dataset_errors: - errors.extend(self._dataset_errors) assert self._asset_errors is not None - errors.extend(self._asset_errors[asset.bids_path]) - return errors + return self._asset_errors[asset.bids_path].copy() def get_asset_metadata(self, asset: BIDSAsset) -> BareAsset: """:meta private:""" - self._validate() + self._get_metadata() assert self._asset_metadata is not None return self._asset_metadata[asset.bids_path] @@ -114,14 +145,12 @@ def get_validation_errors( schema_version: str | None = None, devel_debug: bool = False, ) -> list[ValidationResult]: + """ + Return all validation results for the containing dataset per the BIDS standard + """ self._validate() assert self._dataset_errors is not None - if self._asset_errors is not None: - return self._dataset_errors + [ - i for j in self._asset_errors.values() for i in j - ] - else: - return self._dataset_errors + return self._dataset_errors.copy() # get_metadata(): inherit use of default metadata from LocalFileAsset @@ -182,10 +211,8 @@ def get_metadata( metadata.path = self.path return metadata - def get_validation_bids_version(self) -> str: - self.bids_dataset_description._validate() - assert self.bids_dataset_description._bids_version is not None - return self.bids_dataset_description._bids_version + def get_validation_bids_version(self) -> str | None: + return self.bids_dataset_description.bids_version class NWBBIDSAsset(BIDSAsset, NWBAsset): diff --git a/dandi/tests/test_bids_validator_deno/test_validator.py b/dandi/tests/test_bids_validator_deno/test_validator.py new file mode 100644 index 000000000..e784b01b1 --- /dev/null +++ 
b/dandi/tests/test_bids_validator_deno/test_validator.py @@ -0,0 +1,638 @@ +from pathlib import Path +from subprocess import CompletedProcess, TimeoutExpired +from typing import Any, Optional +from unittest.mock import ANY, patch + +import pytest + +from dandi.bids_validator_deno._models import BidsValidationResult, DatasetIssues, Issue +from dandi.bids_validator_deno._models import Severity as BidsSeverity +from dandi.bids_validator_deno._models import SummaryOutput + +# Adjust the import as needed for your package structure +# noinspection PyProtectedMember +from dandi.bids_validator_deno._validator import ( + CMD, + TIMEOUT, + ValidatorError, + _bids_validate, + _get_msg, + _get_path, + _get_scope, + _invoke_validator, + bids_validate, + get_version, + strip_sgr, +) +from dandi.consts import dandiset_metadata_file +from dandi.tests.fixtures import BIDS_TESTDATA_SELECTION +from dandi.validate_types import ( + OriginType, + Scope, + Severity, + ValidationResult, + Validator, +) + +# Config to use for validating selected examples in +# https://github.com/bids-standard/bids-examples and +# https://github.com/bids-standard/bids-error-examples +CONFIG_FOR_EXAMPLES = { + "ignore": [ + # Raw Data Files in the examples are empty + {"code": "EMPTY_FILE"}, + # Ignore any error regarding the dandiset metadata file added + # through the `bids_examples` fixture + {"location": f"/{dandiset_metadata_file}"}, + ] +} + + +def mock_bids_validate(*args: Any, **kwargs: Any) -> list[ValidationResult]: + """ + Mock `bids_validate` to validate the examples in + # https://github.com/bids-standard/bids-examples and + # https://github.com/bids-standard/bids-error-examples. 
These example datasets + contains empty NIFTI files + """ + kwargs["config"] = CONFIG_FOR_EXAMPLES + kwargs["ignore_nifti_headers"] = True + return bids_validate(*args, **kwargs) + + +@pytest.mark.parametrize( + "outfile_content, expected_outfile_content_rep", + [ + (None, ""), + ("", "\nOutfile content:\n"), + ("Some content", "\nOutfile content:\nSome content"), + ], +) +def test_validator_error( + outfile_content: Optional[str], expected_outfile_content_rep: str +) -> None: + """ + Test the ValidatorError exception class. + """ + cmd = ["bids-validator-deno", "--json"] + returncode = 1 + stdout = "Some output" + stderr = "Some error" + + error = ValidatorError(cmd, returncode, stdout, stderr, outfile_content) + + assert error.cmd == cmd + assert error.returncode == returncode + assert error.stdout == stdout + assert error.stderr == stderr + assert error.outfile_content == outfile_content + + # Check the string representation + expected_str = ( + "Execution of the deno-compiled BIDS validator failed\n" + f"Command: `bids-validator-deno --json`\n" + f"Return code: {returncode}\n" + f"Stdout:\n{stdout}\n" + f"Stderr:\n{stderr}" + ) + expected_outfile_content_rep + assert str(error) == expected_str + + +@pytest.mark.parametrize( + "text, expected_output", + [ + ( + "\x1b[1mbids-validator\x1b[22m \x1b[94m2.0.4-dev\x1b[39m\n", + "bids-validator 2.0.4-dev\n", + ), + ( + "\x1b[31mRed text\x1b[0m", + "Red text", + ), + ( + "\x1b[1;32mBold, green text\x1b[0m", + "Bold, green text", + ), + ( + "\x1b[1;4;35mBold, underlined, magenta text\x1b[0m", + "Bold, underlined, magenta text", + ), + ( + "\x1b[93mBright yellow text\x1b[0m", + "Bright yellow text", + ), + ], +) +def test_strip_sgr(text: str, expected_output: str) -> None: + """ + Test the strip_sgr function to ensure it removes ANSI SGR sequences. 
+ """ + assert strip_sgr(text) == expected_output + + +class TestInvokeValidator: + @pytest.mark.parametrize( + # `None` as the value for `expected_in_stdout` or `expected_in_stderr` means + # stdout or stderr are empty strings, respectively. + "args, expected_returncode, expected_in_stdout, expected_in_stderr", + [ + pytest.param( + [], + 2, + "--help", + "Missing argument(s)", + id="no option or argument", + ), + pytest.param(["--help"], 0, "--help", None, id="--help option"), + pytest.param( + ["--version"], 0, "bids-validator", None, id="--version option" + ), + ], + ) + def test_real( + self, + args: list[str], + expected_returncode: int, + expected_in_stdout: Optional[str], + expected_in_stderr: Optional[str], + ) -> None: + """ + Call the real bids-validator-deno command for given args. + These tests validate the return code and partial output. + """ + result = _invoke_validator(args) + + expected_args = [CMD, *args] + assert result.args == expected_args + + assert result.returncode == expected_returncode + + if expected_in_stdout is not None: + assert expected_in_stdout in result.stdout + else: + assert result.stdout == "" + + if expected_in_stderr is not None: + assert expected_in_stderr in result.stderr + else: + assert result.stderr == "" + + def test_timeout(self): + """ + Test that a TimeoutExpired from `subprocess.run` raises a RuntimeError. + """ + with patch("dandi.bids_validator_deno._validator.run") as mock_run: + mock_run.side_effect = TimeoutExpired(cmd=[CMD], timeout=TIMEOUT) + with pytest.raises(RuntimeError, match="timed out after"): + _invoke_validator([]) + + +class TestBidsValidate: + def test_empty_dir(self, tmp_path): + """ + Test validating an empty directory. The validator should return validation + results representing validation errors. 
+ """ + results = bids_validate(tmp_path) + assert type(results) is list + assert len(results) > 0 + assert all(isinstance(result, ValidationResult) for result in results) + + @pytest.mark.parametrize("ds_name", BIDS_TESTDATA_SELECTION) + def test_validate_bids_examples(self, ds_name: str, bids_examples: Path) -> None: + """ + Test validating a selection of datasets at + https://github.com/bids-standard/bids-examples + """ + ds_path = bids_examples / ds_name + results = bids_validate( + ds_path, config=CONFIG_FOR_EXAMPLES, ignore_nifti_headers=True + ) + + # Assert that no result are of severity `ERROR` or above + assert all(r.severity is None or r.severity < Severity.ERROR for r in results) + + @pytest.mark.parametrize( + "ds_name, expected_err_location", + [ + ("invalid_asl003", "sub-Sub1/perf/sub-Sub1_headshape.jpg"), + ("invalid_pet001", "sub-01/ses-01/anat/sub-02_ses-01_T1w.json"), + ], + ) + def test_validate_bids_error_examples( + self, ds_name: str, expected_err_location: str, bids_error_examples: Path + ) -> None: + """ + Test validating a selection of datasets at + https://github.com/bids-standard/bids-error-examples + """ + ds_path = bids_error_examples / ds_name + results = bids_validate( + ds_path, config=CONFIG_FOR_EXAMPLES, ignore_nifti_headers=True + ) + + # All results with severity `ERROR` or above + err_results = list( + r + for r in results + if r.severity is not None and r.severity >= Severity.ERROR + ) + + assert len(err_results) >= 1 # Assert there must be an error + + # Assert all the errors are from the expected location + # as documented in the `.ERRORS.json` of respective datasets + for r in err_results: + assert r.path is not None + assert r.dataset_path is not None + + err_location = r.path.relative_to(r.dataset_path).as_posix() + assert err_location == expected_err_location + + def test_validator_success(self, tmp_path): + """ + Test the case where the deno-compiled BIDS validator succeeds in execution + """ + with patch( + 
"dandi.bids_validator_deno._validator._bids_validate" + ) as mock_validate: + mock_validate.return_value = BidsValidationResult( + issues=DatasetIssues( + issues=[Issue(code="code1")], codeMessages={"code1": "message1"} + ), + summary=SummaryOutput( + sessions=[], + subjects=[], + subjectMetadata=[], + tasks=[], + modalities=[], + secondaryModalities=[], + totalFiles=0, + size=0, + dataProcessed=False, + pet={}, + dataTypes=[], + schemaVersion="1.0.0", + ), + ) + results = bids_validate(tmp_path) + + assert len(results) == 1 # Corresponding only one issue + + result = results[0] + assert result.id == "BIDS.code1" + assert result.origin.type == OriginType.VALIDATION + assert result.origin.validator == Validator.bids_validator_deno + + def test_validator_failure(self, tmp_path): + """ + Test the case where the deno-compiled BIDS validator fails in execution, and + the failure is not an indication of the presence of validation errors. + """ + validator_error = ValidatorError( + cmd=[CMD, "--json", str(tmp_path)], + returncode=2, + stdout="Some output", + stderr="Some error", + outfile_content="Some content", + ) + with patch( + "dandi.bids_validator_deno._validator._bids_validate", + side_effect=validator_error, + ): + results = bids_validate(tmp_path) + + assert len(results) == 1 + result = results[0] + + assert result.id == "BIDS.VALIDATOR_ERROR" + assert result.origin.type == OriginType.INTERNAL + assert result.origin.validator == Validator.bids_validator_deno + assert result.scope == Scope.DATASET + assert result.origin_result is validator_error + assert result.dandiset_path is None + assert result.dataset_path == tmp_path + assert result.message == "Deno-compiled BIDS validator failed in execution" + assert result.path == tmp_path + + +# noinspection PyPep8Naming +class Test_BidsValidate: + @pytest.mark.parametrize( + "config, ignore_nifti_headers, recursive, expected_conditional_ops", + [ + (None, False, False, []), + (None, True, False, 
["--ignoreNiftiHeaders"]), + (None, False, True, ["--recursive"]), + (None, True, True, ["--ignoreNiftiHeaders", "--recursive"]), + ( + { + "ignore": [ + {"code": "JSON_KEY_RECOMMENDED", "location": "/T1w.json"} + ], + "warning": [], + "error": [{"code": "NO_AUTHORS"}], + }, + True, + True, + ["--ignoreNiftiHeaders", "--recursive", "--config", ANY], + ), + ], + ) + def test_invoke_validator_args( + self, + config: Optional[dict], + ignore_nifti_headers: bool, + recursive: bool, + expected_conditional_ops: list[str], + tmp_path: Path, + ) -> None: + """ + Spy on the `_invoke_validator()` call inside `_bids_validate()` and verify + the arguments passed + """ + + # `ANY` is used as the third argument because we only know that the argument + # ends with "out.json" but not the full path. + expected_args = ( + ["--json", "--outfile", ANY] + expected_conditional_ops + [str(tmp_path)] + ) + + with patch( + "dandi.bids_validator_deno._validator._invoke_validator", + wraps=_invoke_validator, # <--- "Spy" on the original function + ) as mock_invoke: + + _bids_validate( + dir_=tmp_path, + config=config, + ignore_nifti_headers=ignore_nifti_headers, + recursive=recursive, + ) + + # Ensure _invoke_validator was called at exactly once + mock_invoke.assert_called_once() + + # The real call arguments are in mock_invoke.call_args + # e.g., ( (args,), {} ) + actual_args = mock_invoke.call_args.args[0] + + assert actual_args == expected_args + + # Check that the third argument ends with "out.json" + assert actual_args[2].endswith("out.json") + + if config is not None: + # Ensure the second to the last argument is the path to the config file + assert actual_args[-2].endswith("config.json") + + def test_validate_empty_dir(self, tmp_path): + """ + Test the case where an empty directory is validated + """ + result = _bids_validate(tmp_path) + assert isinstance(result, BidsValidationResult) + + @pytest.mark.parametrize("ds_name", BIDS_TESTDATA_SELECTION) + def 
test_validate_bids_examples(self, ds_name: str, bids_examples: Path) -> None: + """ + Test validating a selection of datasets at + https://github.com/bids-standard/bids-examples + """ + ds_path = bids_examples / ds_name + result = _bids_validate( + ds_path, config=CONFIG_FOR_EXAMPLES, ignore_nifti_headers=True + ) + + assert isinstance(result, BidsValidationResult) + + # Assert that no issue are of severity "error" + assert all(i.severity is not BidsSeverity.error for i in result.issues.issues) + + @pytest.mark.parametrize( + "ds_name, expected_err_location", + [ + ("invalid_asl003", "/sub-Sub1/perf/sub-Sub1_headshape.jpg"), + ("invalid_pet001", "/sub-01/ses-01/anat/sub-02_ses-01_T1w.json"), + ], + ) + def test_validate_bids_error_examples( + self, ds_name: str, expected_err_location: str, bids_error_examples: Path + ) -> None: + """ + Test validating a selection of datasets at + https://github.com/bids-standard/bids-error-examples + """ + ds_path = bids_error_examples / ds_name + result = _bids_validate( + ds_path, config=CONFIG_FOR_EXAMPLES, ignore_nifti_headers=True + ) + + assert isinstance(result, BidsValidationResult) + + err_issues = list( + i for i in result.issues.issues if i.severity is BidsSeverity.error + ) + + assert len(err_issues) >= 1 # Assert there must be an error + + # Assert all the errors are from the expected location + # as documented in the `.ERRORS.json` of respective datasets + assert all(i.location == expected_err_location for i in err_issues) + + @pytest.mark.parametrize( + "exit_code, stderr", + [ + (-42, ""), + (-1, ""), + (2, ""), + (2, "Some error"), + (100, ""), + (0, "Some other error"), + (1, "Errr!"), + ], + ) + @pytest.mark.parametrize("stdout", ["", "Some output", "Some other output"]) + def test_execution_error(self, exit_code, stdout, stderr, tmp_path): + """ + Test the cases where the deno-compiled BIDS validator fails not due to + the input directory being an invalid BIDS dataset but due to some other error + in the 
execution of the validator. + """ + cmd = [CMD, "--json", str(tmp_path)] + with patch( + "dandi.bids_validator_deno._validator._invoke_validator" + ) as mock_invoke: + # Simulate a CompletedProcess + mock_invoke.return_value = CompletedProcess( + args=cmd, + returncode=exit_code, + stdout=stdout, + stderr=stderr, + ) + # We expect a `ValidatorError` + with pytest.raises(ValidatorError) as excinfo: + _bids_validate(tmp_path) + + e = excinfo.value + + assert e.cmd == cmd + assert e.returncode == exit_code + assert e.stdout == stdout + assert e.stderr == stderr + + +def test_get_version(): + """ + Test the `get_version()` function + """ + version = get_version() + assert "." in version + + +@pytest.fixture +def folder_path(tmp_path: Path) -> Path: + folder = tmp_path / "test_folder" + folder.mkdir() + return folder + + +@pytest.fixture +def file_path(tmp_path: Path) -> Path: + file = tmp_path / "test_file.txt" + file.touch() + return file + + +@pytest.fixture +def symlink_path(tmp_path: Path, file_path: Path) -> Path: + symlink = tmp_path / "test_symlink" + symlink.symlink_to(file_path) + return symlink + + +@pytest.fixture +def non_existent_path(tmp_path: Path) -> Path: + non_existent = tmp_path / "non_existent_file.txt" + return non_existent + + +@pytest.mark.parametrize( + "path_fixture, expected_scope", + [ + (None, Scope.DATASET), # Passing None => `_get_scope(None)` => DATASET + ("file_path", Scope.FILE), # File => FILE + ("symlink_path", Scope.FILE), # Symlink => FILE + ("folder_path", Scope.FOLDER), # Folder => FOLDER + ("non_existent_path", Scope.DATASET), # Non-existent path => DATASET + ], +) +def test_get_scope(path_fixture, expected_scope, request): + """ + Test the `_get_scope()` function + """ + if path_fixture is None: + issue_path = None + else: + issue_path = request.getfixturevalue(path_fixture) + + result = _get_scope(issue_path) + assert result == expected_scope + + +@pytest.mark.parametrize( + "issue_code, issue_sub_code, issue_issue_message, 
code_messages, expected_result", + [ + ("code1", None, None, {}, None), + ("code1", None, None, {"code1": "message1"}, "message1"), + ( + "code1", + "sub code 1", + None, + {"code1": "message1"}, + "message1\nsubCode: sub code 1", + ), + ( + "code1", + "sub code 1", + "issue msg 1", + {"code1": "message1"}, + "message1\nsubCode: sub code 1\nissueMessage: issue msg 1", + ), + ( + "code1", + None, + "issue msg 1", + {"code1": "message1"}, + "message1\nissueMessage: issue msg 1", + ), + ( + "code1", + "sub code 1", + "issue msg 1", + {"code2": "message2"}, + "subCode: sub code 1\nissueMessage: issue msg 1", + ), + ( + "code1", + None, + "issue msg 1", + {"code2": "message2"}, + "issueMessage: issue msg 1", + ), + ], +) +def test_get_msg( + issue_code: str, + issue_sub_code: Optional[str], + issue_issue_message: Optional[str], + code_messages: dict[str, str], + expected_result: Optional[str], +) -> None: + """ + Test the `_get_msg()` function + """ + issue = Issue( + code=issue_code, + subCode=issue_sub_code, + issueMessage=issue_issue_message, + ) + + result = _get_msg(issue, code_messages) + + assert result == expected_result + + +@pytest.mark.parametrize( + "issue_location, expected_tail", + [ + (None, None), + ("a/b/c", "a/b/c"), + ("/a/b/c", "a/b/c"), + ("///a/b/c", "a/b/c"), + ("/a/b/c.json", "a/b/c.json"), + ], +) +def test_get_path( + issue_location: Optional[str], expected_tail: Optional[str], tmp_path: Path +) -> None: + """ + Test the `_get_path()` function + + Parameters: + issue_location: str or None + The value of the `location` attribute of the `Issue` object. + expected_tail: str or None + The expected tail of the resulting path, the portion of the path that extends + after the dataset path. `None` means `None` is expected to be the result. 
+ """ + ds_path = tmp_path + + issue = Issue(code="DUMMY", location=issue_location) + + result = _get_path(issue, ds_path) + + if expected_tail is not None: + assert result == ds_path.resolve().joinpath(expected_tail) + else: + assert result is None diff --git a/dandi/tests/test_upload.py b/dandi/tests/test_upload.py index 497310e48..2cda5f4ac 100644 --- a/dandi/tests/test_upload.py +++ b/dandi/tests/test_upload.py @@ -11,6 +11,8 @@ from pytest_mock import MockerFixture import zarr +from dandi.tests.test_bids_validator_deno.test_validator import mock_bids_validate + from .fixtures import SampleDandiset, sweep_embargo from .test_helpers import assert_dirtrees_eq from ..consts import ZARR_MIME_TYPE, EmbargoStatus, dandiset_metadata_file @@ -217,8 +219,16 @@ def test_upload_bids_validation_ignore( def test_upload_bids_metadata( - mocker: MockerFixture, bids_dandiset: SampleDandiset + bids_dandiset: SampleDandiset, monkeypatch: pytest.MonkeyPatch ) -> None: + """ + Test the uploading of metadata of a dataset at + https://github.com/bids-standard/bids-examples + """ + from dandi.files import bids + + monkeypatch.setattr(bids, "bids_validate", mock_bids_validate) + bids_dandiset.upload(existing=UploadExisting.FORCE) dandiset = bids_dandiset.dandiset # Automatically check all files, heuristic should remain very BIDS-stable @@ -231,7 +241,19 @@ def test_upload_bids_metadata( assert metadata.wasAttributedTo[0].identifier == "Sub1" -def test_upload_bids(mocker: MockerFixture, bids_dandiset: SampleDandiset) -> None: +def test_upload_bids( + mocker: MockerFixture, + bids_dandiset: SampleDandiset, + monkeypatch: pytest.MonkeyPatch, +) -> None: + """ + Test the uploading of a dataset at + https://github.com/bids-standard/bids-examples + """ + from dandi.files import bids + + monkeypatch.setattr(bids, "bids_validate", mock_bids_validate) + iter_upload_spy = mocker.spy(LocalFileAsset, "iter_upload") bids_dandiset.upload(existing=UploadExisting.FORCE) # Check whether upload was run 
@@ -301,7 +323,17 @@ def test_upload_zarr(new_dandiset: SampleDandiset) -> None: # identical to above, but different scenaior/fixture and path. TODO: avoid duplication -def test_upload_bids_zarr(bids_zarr_dandiset: SampleDandiset) -> None: +def test_upload_bids_zarr( + bids_zarr_dandiset: SampleDandiset, monkeypatch: pytest.MonkeyPatch +) -> None: + """ + Test the uploading of a dataset based on one of the datasets at + https://github.com/bids-standard/bids-examples + """ + from dandi.files import bids + + monkeypatch.setattr(bids, "bids_validate", mock_bids_validate) + bids_zarr_dandiset.upload() assets = list(bids_zarr_dandiset.dandiset.get_assets()) assert len(assets) > 10 # it is a bigish dataset diff --git a/dandi/tests/test_validate.py b/dandi/tests/test_validate.py index f852039b1..5b240eff8 100644 --- a/dandi/tests/test_validate.py +++ b/dandi/tests/test_validate.py @@ -1,9 +1,10 @@ import json from pathlib import Path +from typing import Any import pytest -from .fixtures import BIDS_ERROR_TESTDATA_SELECTION, BIDS_TESTDATA_SELECTION +from .fixtures import BIDS_TESTDATA_SELECTION from .. import __version__ from ..consts import dandiset_metadata_file from ..validate import validate @@ -61,11 +62,63 @@ def test_validate_just_dandiset_yaml(tmp_path: Path) -> None: @pytest.mark.parametrize("dataset", BIDS_TESTDATA_SELECTION) -def test_validate_bids(bids_examples: Path, tmp_path: Path, dataset: str) -> None: +def test_validate_bids( + bids_examples: Path, tmp_path: Path, dataset: str, monkeypatch: pytest.MonkeyPatch +) -> None: + """ + Test validating a selection of datasets at + https://github.com/bids-standard/bids-examples + """ + from dandi.files import bids + + def mock_bids_validate(*args: Any, **kwargs: Any) -> list[ValidationResult]: + """ + Mock `bids_validate` to validate the examples in + # https://github.com/bids-standard/bids-examples. 
These example datasets
+        contain empty NIFTI files
+
+        Note
+        -----
+        Unlike other mock functions for `bids_validate`, this one doesn't
+        configure the validator to ignore the dandiset metadata file. Thus,
+        an error regarding the `dandiset.yaml` file is to be expected.
- selected_dataset = bids_error_examples / dataset - validation_result = list(validate(selected_dataset)) - with (selected_dataset / ".ERRORS.json").open() as f: - expected_errors = json.load(f) +@pytest.mark.parametrize( + "ds_name, expected_err_location", + [ + ("invalid_asl003", "sub-Sub1/perf/sub-Sub1_headshape.jpg"), + ("invalid_pet001", "sub-01/ses-01/anat/sub-02_ses-01_T1w.json"), + ], +) +def test_validate_bids_errors( + ds_name: str, + expected_err_location: str, + bids_error_examples: Path, + monkeypatch: pytest.MonkeyPatch, +) -> None: + """ + Test validating a selection of datasets at + https://github.com/bids-standard/bids-error-examples + """ + from dandi.files import bids + from dandi.tests.test_bids_validator_deno.test_validator import mock_bids_validate - # We know that these datasets contain errors. - assert len(validation_result) > 0 + monkeypatch.setattr(bids, "bids_validate", mock_bids_validate) - # But are they the right errors? - for i in validation_result: - if i.id == "BIDS.MATCH": - continue - error_id = i.id - if i.path is not None: - assert i.dataset_path is not None - relative_error_path = i.path.relative_to(i.dataset_path).as_posix() - assert ( - relative_error_path - in expected_errors[error_id.lstrip("BIDS.")]["scope"] - ) - else: - assert i.id.lstrip("BIDS.") in expected_errors.keys() + ds_path = bids_error_examples / ds_name + + results = list(validate(ds_path)) + + # All results with severity `ERROR` or above + err_results = list( + r for r in results if r.severity is not None and r.severity >= Severity.ERROR + ) + + assert len(err_results) >= 1 # Assert there must be an error + + # Assert all the errors are from the expected location + # as documented in the `.ERRORS.json` of respective datasets + for r in err_results: + assert r.path is not None + assert r.dataset_path is not None + + err_location = r.path.relative_to(r.dataset_path).as_posix() + assert err_location == expected_err_location diff --git a/dandi/validate.py 
b/dandi/validate.py index 3b4dae26f..e67dfb383 100644 --- a/dandi/validate.py +++ b/dandi/validate.py @@ -186,6 +186,22 @@ def validate( ): r_id = id(r) if r_id not in df_result_ids: + # If the error is about the dandiset metadata file, modify + # the message in the validation to give the context of DANDI + if ( + r.path is not None + and r.dataset_path is not None + and r.path.relative_to(r.dataset_path).as_posix() + == dandiset_metadata_file + ): + r.message = ( + f"The dandiset metadata file, `{dandiset_metadata_file}`, " + f"is not a part of BIDS specification. Please include a " + f"`.bidsignore` file with specification to ignore the " + f"metadata file in your dataset. For more details, see " + f"https://github.com/bids-standard/bids-specification/" + f"issues/131#issuecomment-461060166." + ) df_results.append(r) df_result_ids.add(r_id) yield r diff --git a/dandi/validate_types.py b/dandi/validate_types.py index dad8d5553..2abe01780 100644 --- a/dandi/validate_types.py +++ b/dandi/validate_types.py @@ -75,6 +75,12 @@ class Origin(BaseModel): standard_version: str | None = None """Version of the standard""" + standard_schema_version: str | None = None + """ + Version of the schema used in defining or implementing the standard, + such as BIDS schema version in BIDS + """ + # Some commonly used `Origin` instances ORIGIN_VALIDATION_DANDI = Origin( diff --git a/setup.cfg b/setup.cfg index b799d2952..12a0c8ba0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -31,6 +31,7 @@ project_urls = python_requires = >=3.9 install_requires = bidsschematools ~= 1.0 + bids-validator-deno # 8.2.0: https://github.com/pallets/click/issues/2911 click >= 7.1, !=8.2.0 click-didyoumean