Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion .devcontainer/S-CORE/requirements.in
Original file line number Diff line number Diff line change
Expand Up @@ -9,4 +9,3 @@ pip-tools==7.5.2
colorama>=0.4
exceptiongroup>=1
tomli>=1

27 changes: 14 additions & 13 deletions .devcontainer/S-CORE/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -114,15 +114,15 @@ click==8.3.1 \
colorama==0.4.6 \
--hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \
--hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6
# via -r .devcontainer/S-CORE/requirements.in
# via -r requirements.in
docutils==0.21.2 \
--hash=sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f \
--hash=sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2
# via sphinx
exceptiongroup==1.3.1 \
--hash=sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219 \
--hash=sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598
# via -r .devcontainer/S-CORE/requirements.in
# via -r requirements.in
idna==3.10 \
--hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \
--hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3
Expand Down Expand Up @@ -217,10 +217,11 @@ packaging==25.0 \
# build
# pytest
# sphinx
# wheel
pip-tools==7.5.2 \
--hash=sha256:2d64d72da6a044da1110257d333960563d7a4743637e8617dd2610ae7b82d60f \
--hash=sha256:2fe16db727bbe5bf28765aeb581e792e61be51fc275545ef6725374ad720a1ce
# via -r .devcontainer/S-CORE/requirements.in
# via -r requirements.in
pluggy==1.6.0 \
--hash=sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3 \
--hash=sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746
Expand All @@ -240,7 +241,7 @@ pyproject-hooks==1.2.0 \
pytest==8.4.1 \
--hash=sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7 \
--hash=sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c
# via -r .devcontainer/S-CORE/requirements.in
# via -r requirements.in
pyyaml==6.0.3 \
--hash=sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c \
--hash=sha256:0150219816b6a1fa26fb4699fb7daa9caf09eb1999f3b70fb6e786805e80375a \
Expand Down Expand Up @@ -315,7 +316,7 @@ pyyaml==6.0.3 \
--hash=sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6 \
--hash=sha256:fa160448684b4e94d80416c0fa4aac48967a969efe22931448d853ada8baf926 \
--hash=sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0
# via -r .devcontainer/S-CORE/requirements.in
# via -r requirements.in
referencing==0.36.2 \
--hash=sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa \
--hash=sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0
Expand Down Expand Up @@ -504,7 +505,7 @@ sphinx==8.2.3 \
--hash=sha256:398ad29dee7f63a75888314e9424d40f52ce5a6a87ae88e7071e80af296ec348 \
--hash=sha256:4405915165f13521d875a8c29c8970800a0141c14cc5416a38feca4ea5d9b9c3
# via
# -r .devcontainer/S-CORE/requirements.in
# -r requirements.in
# sphinx-data-viewer
# sphinx-design
# sphinx-needs
Expand All @@ -517,11 +518,11 @@ sphinx-data-viewer==0.1.5 \
sphinx-design==0.6.1 \
--hash=sha256:b11f37db1a802a183d61b159d9a202314d4d2fe29c163437001324fe2f19549c \
--hash=sha256:b44eea3719386d04d765c1a8257caca2b3e6f8421d7b3a5e742c0fd45f84e632
# via -r .devcontainer/S-CORE/requirements.in
# via -r requirements.in
sphinx-needs==5.1.0 \
--hash=sha256:23a0ca1dfe733a0a58e884b59ce53a8b63a530f0ac87ae5ab0d40f05f853fbe7 \
--hash=sha256:7adf3763478e91171146918d8af4a22aa0fc062a73856f1ebeb6822a62cbe215
# via -r .devcontainer/S-CORE/requirements.in
# via -r requirements.in
sphinxcontrib-applehelp==2.0.0 \
--hash=sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1 \
--hash=sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5
Expand All @@ -544,7 +545,7 @@ sphinxcontrib-jsmath==1.0.1 \
# via sphinx
sphinxcontrib-plantuml==0.31 \
--hash=sha256:fd74752f8ea070e641c3f8a402fccfa1d4a4056e0967b56033d2a76282d9f956
# via -r .devcontainer/S-CORE/requirements.in
# via -r requirements.in
sphinxcontrib-qthelp==2.0.0 \
--hash=sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab \
--hash=sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb
Expand Down Expand Up @@ -601,7 +602,7 @@ tomli==2.4.0 \
--hash=sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132 \
--hash=sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa \
--hash=sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087
# via -r .devcontainer/S-CORE/requirements.in
# via -r requirements.in
typing-extensions==4.15.0 \
--hash=sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466 \
--hash=sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548
Expand All @@ -612,9 +613,9 @@ urllib3==2.6.3 \
--hash=sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed \
--hash=sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4
# via requests
wheel==0.45.1 \
--hash=sha256:661e1abd9198507b1409a20c02106d9670b2576e916d58f520316666abca6729 \
--hash=sha256:708e7481cc80179af0e556bbf0cc00b8444c7321e2700b8d8580231d13017248
wheel==0.46.2 \
--hash=sha256:33ae60725d69eaa249bc1982e739943c23b34b58d51f1cb6253453773aca6e65 \
--hash=sha256:3d79e48fde9847618a5a181f3cc35764c349c752e2fe911e65fa17faab9809b0
# via pip-tools

# WARNING: The following packages were not pinned, but pip requires them to be
Expand Down
2 changes: 1 addition & 1 deletion .dotstop.dot
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# This file is automatically generated by dotstop and should not be edited manually.
# Generated using trustable 2025.10.22.
# Generated using trustable 2025.9.16.

digraph G {
"TT-CHANGES" [sha="9e0557f5781fb6abefc197529df3f14dfa28cec2d7470629bdd11bcf00e3a18e"];
Expand Down
1 change: 1 addition & 0 deletions MODULE.bazel
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
module(
name = "nlohmann_json",
compatibility_level = 1,
    version = "3.11.3",
)

# bazel_dep(name = "rules_cc", version = "0.0.17")
Expand Down
6 changes: 3 additions & 3 deletions TSF/trustable/assertions/TA-BEHAVIOURS_CONTEXT.md
Original file line number Diff line number Diff line change
Expand Up @@ -78,11 +78,11 @@ and that the resulting system and tests are validated by appropriate strategies.
- How confident can we be that this list covers all critical requirements?
- **Answer**: We are very confident that this list covers all critical requirements.
- How comprehensive is the list of tests?
- **Answer**: Currently, the branch coverage is 93.865% and the line coverage is 99.186%, cf. JLS-27. Therefore, we deem the list of tests to be very comprehensive.
- **Answer**: Currently, branch coverage is 93.865% and line coverage is 99.186% (JLS-27). Deviation from 100% branch coverage is expected for this kind of template-heavy library due to known factors such as tooling artifacts, defensive/unreachable paths (e.g., LCOV-excluded branches), and configuration-dependent code paths. Coverage is continuously monitored in CI and reviewed in pull requests, and maintainers have documented concrete coverage-artifact cases (e.g., https://github.com/nlohmann/json/pull/4595). In addition to raw percentages, we use statement-level traceability from Expectations JLEX-01/02 to concrete CI tests (JLS-74). Therefore, we deem the test set very comprehensive for the intended scope.
- Is every Expectation covered by at least one implemented test?
- **Answer**: Yes, both of the Expectations are covered by at least one implemented test.
- **Answer**: Yes, both Expectations (JLEX-01, JLEX-02) are covered by a broad set of implemented tests across their supporting statements (WFJ-, PJD-, NJF-, NPF-, TIJ-). Evidence is assessed through Trustable traceability and SME reviews (JLS-74), with direct CI-test links provided where available. In addition, completeness is assessed against the upstream basic_json API (JLS-72) with evidence of tests of a comprehensive set of arguments (JLS-31).
- Are there any Expectations where we believe more coverage would help?
- **Answer**: No, the coverage is already on a high level and no further gains are expected by further increasing the coverage.
- **Answer**: No additional top-level Expectation is currently missing. Extra tests would mainly increase confidence for already identified boundary cases (especially around WFJ-06), rather than reveal a gap in the Expectation set.
- How do dependencies affect Expectations, and are their properties verifiable?
- **Answer**: The nlohmann/json library does not have any external dependencies apart from the testing pipeline, so there are no dependencies that could possibly affect the Expectations.
- Are input analysis findings from components, tools, and data considered in relation to Expectations?
Expand Down
6 changes: 5 additions & 1 deletion TSF/trustable/assertions/TA-INDICATORS_CONTEXT.md
Original file line number Diff line number Diff line change
Expand Up @@ -103,4 +103,8 @@ monitoring mechanisms have been implemented to collect the required data.
- **Answer**: Yes, the CI data from the AWIs is useful to prevent regressions in the tested behaviour of the library and possible issues introduced due to a large number of open PRs from entering protected branches.
- Are indicators from code, component, tool, or data inspections taken into
consideration?
- **Answer**: Yes, all types of indicator are taken into consideration.
- **Answer**: All indicator types are considered in scope and in the analysis rationale, with different levels of implementation in this repository.
For code inspections, we use CI-derived code evidence (JLS-54) and related CI analysis evidence.
For component inspections, no separate component-inspection AWI is implemented in this repository context because the library has no external components (JLS-34), and component-related runtime/system monitoring is therefore expected at integration level (AOU-19).
For tool inspections, tools are explicitly assessed (JLS-50), and a monitored CI/process indicator is implemented via the PR-count gate (JLS-55).
For data inspections, indicator data is collected and traceable for the implemented CI AWIs (JLS-54 and JLS-55), while continuous production/runtime monitoring is not implemented in this repository and is delegated to the integrator (AOU-09 and AOU-19).
6 changes: 3 additions & 3 deletions TSF/trustable/assertions/TA-MISBEHAVIOURS_CONTEXT.md
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ established and reusable solutions.
**Suggested evidence**

- List of identified Misbehaviours
- **Answer**: See JLS-11.
- **Answer**: A list of Misbehaviours was identified through STPA risk analysis (risk_analysis.md). Upstream bug tracking (JLS-11 and nlohmann_misbehaviours_comments.md) is used as complementary empirical evidence to confirm coverage and update the list where needed.
- List of Expectations for mitigations addressing identified Misbehaviours
- **Answer**: Mitigation expectations are expressed implicitly through (a) documented Quality assurance (https://json.nlohmann.me/community/quality_assurance) requirements and (b) concrete mitigation mechanisms captured by existing Statements: JLS-02 (fuzzing), JLS-31 (static analysis), JLS-25 (review/security policy), JLS-24 (defined failure mode via exceptions), and WFJ-06 (input validation via accept()).
- Risk analysis
Expand Down Expand Up @@ -127,9 +127,9 @@ considered against the list of Expectations.
- Can we identify some new misbehaviours, right now?
- **Answer**: No, currently no new misbehaviours can be identified.
- Is every misbehaviour represented by at least one fault induction test?
- **Answer**: The expected behaviour of nlohmann/json is described by JLS-24 and its substatements in the trustable graph. For a random subset of 10 of these substatements (TIJ-01.1, TIJ-01.2, TIJ-05.1, TIJ-05.3, NPF-01.2, NPF-01.3, NPF-07.2, TIJ-02.4, TIJ-02.5, TIJ-02.2) we checked whether possible misbehaviours (like wrong inputs) are tested using fault induction tests. For all of these 10 substatements, at least one fault induction test is performed. We therefore conclude that most misbehaviours are represented by at least one fault induction test.
- **Answer**: The expected behaviour of nlohmann/json is described by JLS-24 and its substatements in the trustable graph. For every substatement at least one fault induction test is performed. Thus, every misbehaviour is represented by at least one fault induction test.
- Are fault inductions used to demonstrate that tests which usually pass can and do fail appropriately?
- **Answer**: Yes. The project uses several forms of fault induction (malformed JSON, invalid API usage, simulated allocation failures, and fuzzing). Dedicated tests assert that these induced faults cause the library to fail in a well‑defined, expected way (e.g. by throwing specific exceptions). CI then confirms that these failure‑expecting tests keep behaving as specified. See JLS-76.
- **Answer**: The project uses several forms of fault induction on the library-behaviour level (malformed JSON, invalid API usage, simulated allocation failures, and fuzzing). Dedicated tests assert that these induced faults cause the library to fail in a well‑defined, expected way (e.g. by throwing specific exceptions). While the test framework itself is not tested by fault induction, there are some statements where positive and negative tests actively show expected failure behaviour of the library under induced faulty input, and pass with well-formed data (e.g., TIJ-04.1, evidence: nst's JSONTestSuite). CI then confirms that these "failure‑expecting" tests keep behaving as specified (JLS-76). Given the very comprehensive general fault induction testing employed, we regard the instances of explicitly failing tests that usually pass as representative evidence, rather than requiring a one-to-one match to every graph leaf.
- Are all the fault induction results actually collected?
- **Answer**: Partially. For Unit / regression tests: their results are only captured as normal test pass/fail status and CI logs; there is no separate, persistent database of all induced faults and outcomes in the repository. For Fuzz tests (OSS‑Fuzz): the fuzzing infrastructure stores crashing inputs, logs, and statistics on the OSS‑Fuzz side, not in the nlohmann/json repo itself. See JLS-76 for further information.
- Are the results evaluated?
Expand Down
Loading