From 2cf5283c961778e828c396d2e22fdaad62c0d1d0 Mon Sep 17 00:00:00 2001 From: jithinraj <7850727+jithinraj@users.noreply.github.com> Date: Sun, 8 Mar 2026 09:56:14 +0530 Subject: [PATCH 1/3] feat: stable release gates and integration evidence Wire all 4 DD-90 stable-only gates in run-gates.sh with real implementations (perf-benchmarks, ssrf-suite, fuzz-suite, integration-evidence). Add integration evidence infrastructure: - Structured JSON source with immutable pointers (commit SHAs, test file paths, spec refs) - JSON Schema (2020-12) validated via Ajv - Node validator with schema, pointer, and parity checks - Markdown generated from JSON with drift detection - 18 validator tests Add release verification and security posture docs. Add public-artifact linter and PR metadata CI workflow. Add test:property canonical script (11 files). --- .github/workflows/pr-metadata-lint.yml | 54 +++ docs/VERIFY-RELEASE.md | 105 +++++ docs/adoption/confirmations.md | 30 ++ docs/adoption/integration-evidence.json | 56 +++ docs/adoption/integration-evidence.md | 46 ++ .../adoption/integration-evidence.schema.json | 114 +++++ docs/maintainers/SECURITY-POSTURE.md | 76 ++++ docs/specs/TEST_VECTORS.md | 2 +- package.json | 3 +- packages/adapters/eat/README.md | 30 ++ scripts/check-planning-leak.sh | 77 ---- scripts/check-public-artifacts.mjs | 163 ++++++++ scripts/guard.sh | 2 +- scripts/release/run-gates.sh | 21 +- .../release/validate-adoption-evidence.mjs | 393 ++++++++++++++++++ .../adoption-evidence-validator.test.ts | 226 ++++++++++ vitest.config.ts | 1 + 17 files changed, 1311 insertions(+), 88 deletions(-) create mode 100644 .github/workflows/pr-metadata-lint.yml create mode 100644 docs/VERIFY-RELEASE.md create mode 100644 docs/adoption/confirmations.md create mode 100644 docs/adoption/integration-evidence.json create mode 100644 docs/adoption/integration-evidence.md create mode 100644 docs/adoption/integration-evidence.schema.json create mode 100644 
docs/maintainers/SECURITY-POSTURE.md create mode 100644 packages/adapters/eat/README.md delete mode 100755 scripts/check-planning-leak.sh create mode 100644 scripts/check-public-artifacts.mjs create mode 100644 scripts/release/validate-adoption-evidence.mjs create mode 100644 tests/release/adoption-evidence-validator.test.ts diff --git a/.github/workflows/pr-metadata-lint.yml b/.github/workflows/pr-metadata-lint.yml new file mode 100644 index 000000000..00711a358 --- /dev/null +++ b/.github/workflows/pr-metadata-lint.yml @@ -0,0 +1,54 @@ +name: PR Metadata Lint + +on: + pull_request: + types: [opened, edited, synchronize] + +jobs: + lint-metadata: + name: Check PR title and body + runs-on: ubuntu-latest + timeout-minutes: 2 + + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Lint PR metadata + env: + PR_TITLE: ${{ github.event.pull_request.title }} + PR_BODY: ${{ github.event.pull_request.body }} + run: | + FAILED=0 + + # Write PR content to temp files for safe grep + echo "$PR_TITLE" > /tmp/pr-title.txt + echo "$PR_BODY" > /tmp/pr-body.txt + + # Check for reference path leaks + if grep -iE 'reference/.*_LOCAL\.' /tmp/pr-title.txt /tmp/pr-body.txt 2>/dev/null; then + echo "::error::PR metadata contains reference to local-only files" + FAILED=1 + fi + + # Check for x403 typo + if grep -q 'x403' /tmp/pr-title.txt /tmp/pr-body.txt 2>/dev/null; then + echo "::error::PR metadata contains x403 typo (should be x402)" + FAILED=1 + fi + + # Check for internal planning markers + if grep -iE '(NEXT_STEPS|ROADMAP_LOCAL|STRATEGY_LOCAL|SCOPE_LEDGER|MASTER_PLAN_LOCAL)' /tmp/pr-title.txt /tmp/pr-body.txt 2>/dev/null; then + echo "::error::PR metadata contains internal planning markers" + FAILED=1 + fi + + rm -f /tmp/pr-title.txt /tmp/pr-body.txt + + if [ "$FAILED" -eq 1 ]; then + echo "" + echo "PR metadata lint failed. Remove non-technical language from PR title/body." 
+ exit 1 + fi + + echo "PR metadata lint: OK" diff --git a/docs/VERIFY-RELEASE.md b/docs/VERIFY-RELEASE.md new file mode 100644 index 000000000..b53bc766b --- /dev/null +++ b/docs/VERIFY-RELEASE.md @@ -0,0 +1,105 @@ +# Verifying a PEAC Release + +This document describes independent methods for verifying the integrity and provenance of a PEAC Protocol release. Each method addresses a different layer of the supply chain. + +## 1. Gate Report (Authoritative Evidence) + +Each release includes a machine-generated gate report that records every quality gate result. For stable releases, this includes all DD-90 gates. + +```bash +# Generate the authoritative gate report (writes JSON artifact) +bash scripts/release/run-gates.sh --target stable --write-release-artifacts + +# Dry-run only (no artifacts, no release claim) +bash scripts/release/run-gates.sh --target stable +``` + +The `--write-release-artifacts` flag is the authoritative gate path. Without it, the script is a local dry-run that never claims "ready to tag." The JSON artifact at `docs/releases/<version>-gate-report.json` includes: timestamp, commit SHA, Node version, runner metadata, publish-manifest hash, conformance-fixtures hash, and individual gate pass/fail status with duration. + +## 2. npm Provenance + +PEAC packages published via GitHub Actions OIDC include npm provenance attestations. This cryptographically links each package version to the specific workflow run that produced it. + +**Current state (v0.12.0-preview.2):** 9 of 28 publishable packages are configured for OIDC trusted publishing. The remaining 19 are pending migration via `npm trust` CLI (tracked in PR 6a). All packages published through the CI workflow receive `--provenance` attestations regardless of OIDC status.
+ +```bash +# Verify provenance for published packages +# Use a temp project to avoid workspace interference: +mkdir /tmp/peac-verify && cd /tmp/peac-verify +npm init -y +npm install @peac/protocol@next +npm audit signatures +cd - && rm -rf /tmp/peac-verify +``` + +The provenance attestation confirms: + +- The package was built from the declared source repository +- The build ran in a GitHub Actions environment +- No human had direct access to the npm publish token + +**Note:** `npm audit signatures` must be run in a project that has installed the packages. Running it with a bare package name does not work. + +## 3. Conformance Matrix + +The conformance matrix traces every normative requirement (BCP 14 statements) to test coverage. + +```bash +# Verify conformance tooling +node scripts/conformance/validate-schemas.mjs +node scripts/conformance/verify-registry-drift.mjs +node scripts/conformance/generate-inventory.mjs --check +``` + +Artifacts: + +- `specs/conformance/requirement-ids.json`: machine-readable requirement registry (146 IDs) +- `docs/specs/CONFORMANCE-MATRIX.md`: generated coverage matrix +- `specs/conformance/fixtures/inventory.json`: fixture inventory with requirement mappings + +## 4. API Surface Snapshots + +Public API exports are snapshot-locked. Any unreviewed change to the public API surface causes the gate to fail. + +```bash +# Verify API surface matches committed snapshots +bash scripts/release/api-surface-lock.sh +``` + +Snapshots are stored in `scripts/release/api-snapshots/` and cover the primary packages (`kernel`, `schema`, `crypto`, `protocol`, `control`, `mcp-server`, `middleware-core`, `middleware-express`, `sdk-js`). + +## 5. Pack-Install Smoke + +Representative packages are packed into tarballs, installed in isolated temp directories, and verified for ESM import, CJS require, TypeScript types resolution, and CLI bin execution. 
+ +```bash +# Run the pack-install smoke test +bash scripts/release/pack-install-smoke.sh +``` + +This catches packaging errors that unit tests cannot detect: missing files in the `files` array, broken exports maps, missing bin entries, and CJS/ESM resolution failures. + +## 6. Attestations and SBOM (Pending) + +The following verification methods are planned but not yet implemented: + +- **Sigstore attestations:** Per-package Sigstore attestation bundles (pending PR 6a: OIDC migration) +- **SBOM generation:** CycloneDX or SPDX SBOM for each published package (pending tooling evaluation) +- **Checksum manifest:** SHA-256 checksums for all published tarballs (pending release automation) + +These will be added as part of the publisher-trust work tracked in the stable release plan. + +## Verification Checklist + +For a stable release, all of these should pass: + +```bash +# Full authoritative gate suite +bash scripts/release/run-gates.sh --target stable --write-release-artifacts + +# Individual checks +bash scripts/release/api-surface-lock.sh +bash scripts/release/pack-install-smoke.sh +node scripts/conformance/verify-registry-drift.mjs +node scripts/conformance/generate-inventory.mjs --check +``` diff --git a/docs/adoption/confirmations.md b/docs/adoption/confirmations.md new file mode 100644 index 000000000..3a6f9e8a0 --- /dev/null +++ b/docs/adoption/confirmations.md @@ -0,0 +1,30 @@ +# External Integration Confirmations + +> **Purpose:** DD-90 stable gate requires at least one external confirmation meeting the quality bar below. +> **Gate:** `run-gates.sh --target stable` checks this file has >= 1 valid entry. + +## Quality Bar (6 Required Fields) + +Each confirmation entry MUST include all 6 fields: + +1. **Team/Project:** Name of the team or project +2. **Integration Surface:** Which PEAC integration point is used (e.g., MCP receipts, A2A metadata carrier, HTTP header) +3. 
**Integration Impact:** One sentence describing what this integration enables or replaces +4. **Date:** ISO 8601 date of confirmation +5. **Public Link:** URL to public evidence (PR, blog post, repo, or issue); "private" if under NDA (requires maintainer attestation) +6. **Contact Role:** Role of the confirming contact (e.g., "Engineering Lead", "CTO"); no PII + +## Confirmations + + + + +_No external confirmations recorded._ diff --git a/docs/adoption/integration-evidence.json b/docs/adoption/integration-evidence.json new file mode 100644 index 000000000..26447eee9 --- /dev/null +++ b/docs/adoption/integration-evidence.json @@ -0,0 +1,56 @@ +{ + "$schema": "./integration-evidence.schema.json", + "$comment": "Canonical source for DD-90 integration evidence. docs/adoption/integration-evidence.md is generated from this file.", + "version": "1.0.0", + "integrations": [ + { + "ecosystem": "MCP", + "full_name": "Model Context Protocol", + "pr": 472, + "pr_commit": "9e5c5dea", + "dd90_gate": true, + "surface": "peac_issue tool produces Wire 0.2 receipts; peac_verify tool verifies them", + "evidence": "Round-trip issuance and verification via MCP tool calls", + "wire_version": "0.2", + "test_files": [ + "packages/mcp-server/tests/handlers/issue.test.ts", + "packages/mcp-server/tests/handlers/verify.test.ts" + ], + "spec_refs": ["docs/specs/EVIDENCE-CARRIER-CONTRACT.md"] + }, + { + "ecosystem": "A2A", + "full_name": "Agent-to-Agent Protocol", + "pr": 473, + "pr_commit": "56fd7047", + "dd90_gate": true, + "surface": "Wire 0.2 receipts carried in A2A metadata[extensionURI] per Evidence Carrier Contract", + "evidence": "Round-trip through A2A metadata carrier (issue, embed, extract, verify)", + "wire_version": "0.2", + "test_files": ["tests/integration/a2a/wire02-roundtrip.test.ts"], + "spec_refs": ["docs/specs/EVIDENCE-CARRIER-CONTRACT.md"] + }, + { + "ecosystem": "EAT", + "full_name": "Entity Attestation Token", + "pr": 474, + "pr_commit": "f20e0f61", + "dd90_gate": false, 
+ "dd_reference": "DD-154", + "surface": "COSE_Sign1 (RFC 9052) identity adapter; maps EAT claims to PEAC actor binding", + "evidence": "Passport-style identity input; does not produce Wire 0.2 receipts in the EAT ecosystem", + "wire_version": null, + "rationale": "EAT is an identity input surface. It enriches PEAC receipts with external attestations but does not constitute a distinct ecosystem producing Wire 0.2 evidence.", + "test_files": [ + "packages/adapters/eat/tests/passport.test.ts", + "packages/adapters/eat/tests/claim-mapper.test.ts" + ], + "spec_refs": ["docs/specs/EVIDENCE-CARRIER-CONTRACT.md"] + } + ], + "classification_rules": [ + "An integration counts toward DD-90 if it produces or consumes Wire 0.2 receipts (interaction-record+jwt) in a distinct protocol ecosystem.", + "Identity adapters, claim mappers, and format converters that feed into PEAC but do not themselves produce receipts are classified under their own DDs.", + "Do not inflate the ecosystem count by reclassifying adapters as integrations." + ] +} diff --git a/docs/adoption/integration-evidence.md b/docs/adoption/integration-evidence.md new file mode 100644 index 000000000..766685cf3 --- /dev/null +++ b/docs/adoption/integration-evidence.md @@ -0,0 +1,46 @@ +# Integration Evidence Catalog + +> **Purpose:** Documents which ecosystem integrations count toward DD-90 gates and which do not. +> **Rule:** Only integrations that produce or consume Wire 0.2 receipts in a distinct ecosystem count. +> **Source:** Generated from `docs/adoption/integration-evidence.json`. Do not edit manually. 
+ +## DD-90 Ecosystem Integrations (Count: 2) + +### MCP (Model Context Protocol) + +- **PR:** #472 (commit `9e5c5dea`) +- **Surface:** peac_issue tool produces Wire 0.2 receipts; peac_verify tool verifies them +- **Evidence:** Round-trip issuance and verification via MCP tool calls +- **Wire version:** Wire 0.2 +- **DD-90 gate:** YES (distinct ecosystem with Wire 0.2 production) +- **Test files:** `packages/mcp-server/tests/handlers/issue.test.ts`, `packages/mcp-server/tests/handlers/verify.test.ts` +- **Spec refs:** `docs/specs/EVIDENCE-CARRIER-CONTRACT.md` + +### A2A (Agent-to-Agent Protocol) + +- **PR:** #473 (commit `56fd7047`) +- **Surface:** Wire 0.2 receipts carried in A2A metadata[extensionURI] per Evidence Carrier Contract +- **Evidence:** Round-trip through A2A metadata carrier (issue, embed, extract, verify) +- **Wire version:** Wire 0.2 +- **DD-90 gate:** YES (distinct ecosystem with Wire 0.2 production) +- **Test files:** `tests/integration/a2a/wire02-roundtrip.test.ts` +- **Spec refs:** `docs/specs/EVIDENCE-CARRIER-CONTRACT.md` + +## Non-DD-90 Integrations (Correctly Classified) + +### EAT (Entity Attestation Token) + +- **PR:** #474 (commit `f20e0f61`) +- **Surface:** COSE_Sign1 (RFC 9052) identity adapter; maps EAT claims to PEAC actor binding +- **Evidence:** Passport-style identity input; does not produce Wire 0.2 receipts in the EAT ecosystem +- **Wire version:** N/A (identity input, not receipt output) +- **DD-90 gate:** NO (DD-154) +- **Rationale:** EAT is an identity input surface. It enriches PEAC receipts with external attestations but does not constitute a distinct ecosystem producing Wire 0.2 evidence. +- **Test files:** `packages/adapters/eat/tests/passport.test.ts`, `packages/adapters/eat/tests/claim-mapper.test.ts` +- **Spec refs:** `docs/specs/EVIDENCE-CARRIER-CONTRACT.md` + +## Classification Rules + +1. 
An integration counts toward DD-90 if it produces or consumes Wire 0.2 receipts (interaction-record+jwt) in a distinct protocol ecosystem. +2. Identity adapters, claim mappers, and format converters that feed into PEAC but do not themselves produce receipts are classified under their own DDs. +3. Do not inflate the ecosystem count by reclassifying adapters as integrations. diff --git a/docs/adoption/integration-evidence.schema.json b/docs/adoption/integration-evidence.schema.json new file mode 100644 index 000000000..88751a1db --- /dev/null +++ b/docs/adoption/integration-evidence.schema.json @@ -0,0 +1,114 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "integration-evidence.schema.json", + "title": "PEAC Integration Evidence", + "description": "Schema for docs/adoption/integration-evidence.json. Validated in CI by the adoption-evidence gate.", + "type": "object", + "required": ["version", "integrations", "classification_rules"], + "additionalProperties": false, + "properties": { + "$schema": { + "type": "string" + }, + "$comment": { + "type": "string" + }, + "version": { + "type": "string", + "pattern": "^\\d+\\.\\d+\\.\\d+$" + }, + "integrations": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/$defs/Integration" + } + }, + "classification_rules": { + "type": "array", + "minItems": 1, + "items": { + "type": "string", + "minLength": 1 + } + } + }, + "$defs": { + "Integration": { + "type": "object", + "required": [ + "ecosystem", + "full_name", + "pr", + "pr_commit", + "dd90_gate", + "surface", + "evidence", + "wire_version", + "test_files", + "spec_refs" + ], + "additionalProperties": false, + "properties": { + "ecosystem": { + "type": "string", + "minLength": 1 + }, + "full_name": { + "type": "string", + "minLength": 1 + }, + "pr": { + "type": "integer", + "minimum": 1 + }, + "pr_commit": { + "type": "string", + "pattern": "^[0-9a-f]{8,40}$", + "description": "Short or full commit SHA (hex, 8-40 chars)" + }, + 
"dd90_gate": { + "type": "boolean" + }, + "dd_reference": { + "type": "string", + "pattern": "^DD-\\d+$", + "description": "For non-DD-90 integrations, the DD that tracks this work" + }, + "surface": { + "type": "string", + "minLength": 1 + }, + "evidence": { + "type": "string", + "minLength": 1 + }, + "wire_version": { + "type": ["string", "null"], + "description": "Wire format version (e.g. '0.2') or null for non-receipt integrations" + }, + "rationale": { + "type": "string", + "minLength": 1, + "description": "Required for non-DD-90 integrations to explain classification" + }, + "test_files": { + "type": "array", + "minItems": 1, + "items": { + "type": "string", + "minLength": 1 + } + }, + "spec_refs": { + "type": "array", + "minItems": 1, + "items": { + "type": "string", + "minLength": 1 + } + } + } + } + } +} diff --git a/docs/maintainers/SECURITY-POSTURE.md b/docs/maintainers/SECURITY-POSTURE.md new file mode 100644 index 000000000..960fef4b5 --- /dev/null +++ b/docs/maintainers/SECURITY-POSTURE.md @@ -0,0 +1,76 @@ +# Maintainer Security Posture + +This document records the security controls applied to the PEAC Protocol npm organization and GitHub repository. It distinguishes between controls that are verified today and controls that are in transition. + +## npm Organization + +### Authentication + +- **2FA:** Required for all org members (enforced at org level) +- **Session policy:** npm sessions expire after 30 days + +### Package Publishing + +- **Publish workflow:** `.github/workflows/publish.yml` is the sole publish path; no manual `npm publish` +- **Provenance attestations:** Generated automatically on every publish via `--provenance` flag +- **Publish manifest:** `scripts/publish-manifest.json` is the single source of truth for publishable packages (28 packages) + +### OIDC Trusted Publishing (In Transition) + +OIDC trusted publishing eliminates long-lived npm tokens by using GitHub Actions OIDC for authentication. 
+ +| State | Count | Details | +| --------------------- | -------- | ------------------------------------------------------- | +| **Configured** | 9 of 28 | Using OIDC trusted publishing today | +| **Pending migration** | 19 of 28 | Will be migrated via `npm trust` CLI (tracked in PR 6a) | + +**Target state:** All 28 publishable packages use OIDC trusted publishing with no long-lived npm tokens. + +**Migration command:** + +```bash +bash scripts/setup-trusted-publishing.sh +``` + +This requires npm CLI >= 11.5.1, an active npm session with 2FA, and org admin or package owner role. + +## GitHub Repository + +### Branch Protection + +- **main branch:** Protected. Requires PR review, status checks, and linear history +- **Force push:** Disabled on main +- **Admin bypass:** Disabled + +### CI Security + +- **CodeQL:** Security-extended analysis on every PR and weekly schedule +- **Dependency review:** `.github/workflows/dependency-review.yml` blocks PRs with critical vulnerabilities +- **SHA-pinned actions:** All GitHub Actions are pinned to full commit SHAs (not tags) +- **Minimal permissions:** Workflows use least-privilege `permissions` blocks + +### Secrets Management + +- **GitHub token:** Uses default `GITHUB_TOKEN` with minimal scope +- **No third-party secret services:** All secrets managed via GitHub native secrets +- **npm tokens:** 9 packages use OIDC (no token); 19 packages use scoped automation tokens (pending OIDC migration) + +## Verification + +Consumers can verify the current security posture: + +```bash +# Verify npm provenance (use temp project) +mkdir /tmp/peac-verify && cd /tmp/peac-verify +npm init -y && npm install @peac/protocol@next +npm audit signatures +cd - && rm -rf /tmp/peac-verify + +# Check OIDC migration status +node -p "const m=require('./scripts/publish-manifest.json'); console.log('Configured:', 28 - (m.pendingTrustedPublishing?.length || 0), 'Pending:', m.pendingTrustedPublishing?.length || 0)" + +# Run full authoritative gate suite 
+bash scripts/release/run-gates.sh --target stable --write-release-artifacts +``` + +See `docs/VERIFY-RELEASE.md` for the full verification guide. diff --git a/docs/specs/TEST_VECTORS.md b/docs/specs/TEST_VECTORS.md index 9f99aff4a..d1b07faf7 100644 --- a/docs/specs/TEST_VECTORS.md +++ b/docs/specs/TEST_VECTORS.md @@ -140,7 +140,7 @@ tests/vectors/ **Purpose**: - Shows HTTP 402 as enforcement method (not baked into core) -- Demonstrates x402 rail with Base/USDC (primary GTM path) +- Demonstrates x402 rail with Base/USDC (HTTP 402 payment flow) - Shows how control requirement applies to enforcement.method=="http-402" --- diff --git a/package.json b/package.json index 45d4b91aa..57bce8b78 100644 --- a/package.json +++ b/package.json @@ -66,7 +66,8 @@ "gate": "bash scripts/gate.sh", "gate:fast": "bash scripts/gate.sh --fast", "evidence-pack": "node scripts/build-submission-pack.mjs", - "release:gate:0.11.3": "bash scripts/release-gate-0.11.3.sh" + "release:gate:0.11.3": "bash scripts/release-gate-0.11.3.sh", + "test:property": "vitest run packages/schema/__tests__/json.property.test.ts packages/schema/__tests__/workflow.property.test.ts packages/schema/__tests__/constraints.property.test.ts packages/schema/__tests__/constraints.fuzz.test.ts packages/schema/__tests__/wire02-claims.property.test.ts packages/crypto/__tests__/jws.property.test.ts packages/protocol/__tests__/jti-collision.property.test.ts packages/protocol/__tests__/policy-binding.property.test.ts packages/protocol/__tests__/strictness.property.test.ts packages/protocol/__tests__/verify-local.property.test.ts packages/net/node/tests/jcs-property.test.ts" }, "devDependencies": { "@eslint/js": "^10.0.1", diff --git a/packages/adapters/eat/README.md b/packages/adapters/eat/README.md new file mode 100644 index 000000000..065f4a85b --- /dev/null +++ b/packages/adapters/eat/README.md @@ -0,0 +1,30 @@ +# @peac/adapter-eat + +EAT (Entity Attestation Token, RFC 9711) passport decoder and PEAC claim mapper. 
+ +## Features + +- Decodes COSE_Sign1 tokens (RFC 9052) with Ed25519 signature verification +- Privacy-first claim mapping: SHA-256 hashes all values by default +- 64 KB size limit enforced before CBOR decode +- Only EdDSA (alg: -8) is supported + +## Usage + +```typescript +import { decodeEatPassport, mapEatClaims } from '@peac/adapter-eat'; + +const result = await decodeEatPassport(coseBytes, publicKey); +if (result.verified) { + const mapped = await mapEatClaims(result.claims); + // mapped.values: Map with sha256:hex hashed values +} +``` + +## Layer + +Layer 4 adapter. Depends on `@peac/kernel`, `@peac/schema`, `@peac/crypto`. + +## License + +Apache-2.0 diff --git a/scripts/check-planning-leak.sh b/scripts/check-planning-leak.sh deleted file mode 100755 index 8439733db..000000000 --- a/scripts/check-planning-leak.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/bash -# check-planning-leak.sh -# -# Verifies that planning/strategic content hasn't leaked into tracked files. -# Planning docs live in reference/ (gitignored) and must not appear in code, -# commits, or published documentation. -# -# Run: bash scripts/check-planning-leak.sh - -set -euo pipefail - -echo "=== Planning Leak Check ===" - -FAILED=0 - -# 1. Check for references to local-only reference files in tracked code -echo "Checking for reference/ path leaks in source..." -LEAKS=$(git grep -l 'reference/.*_LOCAL\.' -- '*.ts' '*.js' '*.json' '*.sh' '*.yml' '*.yaml' 2>/dev/null | grep -v 'CLAUDE.md' | grep -v 'scripts/check-planning-leak.sh' | grep -v '.gitignore' || true) -if [ -n "$LEAKS" ]; then - echo " FAIL: Found reference/*_LOCAL.* paths in tracked files:" - echo "$LEAKS" | sed 's/^/ /' - FAILED=1 -else - echo " OK: No reference path leaks" -fi - -# 2. Check for strategic/planning keywords in code and docs -echo "Checking for strategic content in tracked files..." 
-STRATEGIC_PATTERNS='(competitive advantage|market position|business strategy|revenue model|pricing strategy|monetization plan)' -STRATEGIC_HITS=$(git grep -liE "$STRATEGIC_PATTERNS" -- '*.ts' '*.js' '*.md' '*.json' 2>/dev/null | grep -v 'CLAUDE.md' | grep -v 'node_modules' | grep -v 'scripts/check-planning-leak.sh' || true) -if [ -n "$STRATEGIC_HITS" ]; then - echo " FAIL: Found strategic content in tracked files:" - echo "$STRATEGIC_HITS" | sed 's/^/ /' - FAILED=1 -else - echo " OK: No strategic content leaks" -fi - -# 3. Check for internal planning markers in source code -echo "Checking for internal planning markers..." -PLANNING_PATTERNS='(NEXT_STEPS|ROADMAP_LOCAL|STRATEGY_LOCAL|SCOPE_LEDGER|MASTER_PLAN_LOCAL)' -PLANNING_HITS=$(git grep -liE "$PLANNING_PATTERNS" -- '*.ts' '*.js' 2>/dev/null | grep -v 'node_modules' || true) -if [ -n "$PLANNING_HITS" ]; then - echo " FAIL: Found internal planning markers in source code:" - echo "$PLANNING_HITS" | sed 's/^/ /' - FAILED=1 -else - echo " OK: No planning markers in source" -fi - -# 4. Check for planning artifacts in packages (build outputs in source) -echo "Checking for build artifacts in source directories..." -BUILD_ARTIFACTS=$(git ls-files 'packages/*/src/*.d.ts' 'packages/*/src/*.d.ts.map' 'packages/*/src/*.js.map' 2>/dev/null || true) -if [ -n "$BUILD_ARTIFACTS" ]; then - echo " WARN: Build artifacts tracked in source directories:" - echo "$BUILD_ARTIFACTS" | sed 's/^/ /' -fi - -# 5. Check for x403 typo (should be x402) -echo "Checking for x403 typo..." 
-X403_HITS=$(git grep -l 'x403' -- '*.ts' '*.js' '*.md' '*.json' 2>/dev/null | grep -v 'node_modules' | grep -v 'scripts/check-planning-leak.sh' | grep -v 'CHANGELOG.md' || true) -if [ -n "$X403_HITS" ]; then - echo " FAIL: Found 'x403' typo (should be 'x402') in tracked files:" - echo "$X403_HITS" | sed 's/^/ /' - FAILED=1 -else - echo " OK: No x403 typos" -fi - -if [ "$FAILED" -eq 1 ]; then - echo "" - echo "FAIL: Planning leak check failed" - exit 1 -fi - -echo "" -echo "=== Planning Leak Check PASSED ===" diff --git a/scripts/check-public-artifacts.mjs b/scripts/check-public-artifacts.mjs new file mode 100644 index 000000000..f6d022e18 --- /dev/null +++ b/scripts/check-public-artifacts.mjs @@ -0,0 +1,163 @@ +#!/usr/bin/env node +/** + * Public Artifact Linter + * + * Checks text content (commit messages, PR body files, staged files) + * for non-technical language that should not appear in public artifacts. + * + * Usage: + * node scripts/check-public-artifacts.mjs # check staged files + commit msg + * node scripts/check-public-artifacts.mjs --body-file .tmp/pr.md # check a PR body file + * node scripts/check-public-artifacts.mjs --text "some text" # check inline text + * + * Pattern sources: + * 1. Built-in patterns (objective checks, always active) + * 2. reference/guard-denylist.txt (local-only, if present) + * + * Exit codes: + * 0 No violations found + * 1 One or more violations found + */ + +import { readFileSync, existsSync } from 'node:fs'; +import { resolve, dirname } from 'node:path'; +import { fileURLToPath } from 'node:url'; +import { execSync } from 'node:child_process'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const REPO_ROOT = resolve(__dirname, '..'); +const DENYLIST_PATH = resolve(REPO_ROOT, 'reference/guard-denylist.txt'); + +// Built-in patterns: objective checks that are safe to have in a tracked file. +// These are standard OSS hygiene patterns, not strategy-revealing. 
+const BUILTIN_PATTERNS = [ + /reference\/.*_LOCAL\./i, + /x403/, +]; + +/** + * Load additional patterns from the local-only denylist file. + * Returns empty array if file doesn't exist (e.g., in CI). + */ +function loadDenylistPatterns() { + if (!existsSync(DENYLIST_PATH)) { + return []; + } + const content = readFileSync(DENYLIST_PATH, 'utf-8'); + const patterns = []; + for (const line of content.split('\n')) { + const trimmed = line.trim(); + if (!trimmed || trimmed.startsWith('#')) continue; + try { + patterns.push(new RegExp(trimmed, 'i')); + } catch { + // Skip invalid regex lines + } + } + return patterns; +} + +/** + * Check text against all patterns. Returns array of violations. + */ +function checkText(text, source, patterns) { + const violations = []; + const lines = text.split('\n'); + for (let i = 0; i < lines.length; i++) { + for (const pattern of patterns) { + if (pattern.test(lines[i])) { + violations.push({ + source, + line: i + 1, + pattern: pattern.source, + text: lines[i].trim().slice(0, 120), + }); + } + } + } + return violations; +} + +/** + * Get staged file contents (only text files). 
+ */ +function getStagedFiles() { + try { + const output = execSync('git diff --cached --name-only --diff-filter=ACM', { + encoding: 'utf-8', + cwd: REPO_ROOT, + }); + return output + .split('\n') + .filter(Boolean) + .filter((f) => /\.(ts|js|mjs|cjs|md|json|yml|yaml|sh)$/.test(f)); + } catch { + return []; + } +} + +function main() { + const args = process.argv.slice(2); + const allPatterns = [...BUILTIN_PATTERNS, ...loadDenylistPatterns()]; + const allViolations = []; + + // Check --body-file argument + const bodyFileIdx = args.indexOf('--body-file'); + if (bodyFileIdx !== -1 && args[bodyFileIdx + 1]) { + const bodyFile = resolve(args[bodyFileIdx + 1]); + if (existsSync(bodyFile)) { + const content = readFileSync(bodyFile, 'utf-8'); + allViolations.push(...checkText(content, `body-file: ${args[bodyFileIdx + 1]}`, allPatterns)); + } + } + + // Check --text argument + const textIdx = args.indexOf('--text'); + if (textIdx !== -1 && args[textIdx + 1]) { + allViolations.push(...checkText(args[textIdx + 1], 'inline text', allPatterns)); + } + + // Check commit message if .git/COMMIT_EDITMSG exists + const commitMsgPath = resolve(REPO_ROOT, '.git/COMMIT_EDITMSG'); + if (existsSync(commitMsgPath) && !args.includes('--no-commit-msg')) { + const commitMsg = readFileSync(commitMsgPath, 'utf-8'); + allViolations.push(...checkText(commitMsg, 'commit message', allPatterns)); + } + + // Check staged files (unless --no-staged) + if (!args.includes('--no-staged')) { + const stagedFiles = getStagedFiles(); + for (const file of stagedFiles) { + const fullPath = resolve(REPO_ROOT, file); + if (!existsSync(fullPath)) continue; + // Skip this script and the denylist + if (file === 'scripts/check-public-artifacts.mjs') continue; + if (file.startsWith('reference/')) continue; + try { + const content = readFileSync(fullPath, 'utf-8'); + allViolations.push(...checkText(content, file, allPatterns)); + } catch { + // Skip binary or unreadable files + } + } + } + + if (allViolations.length 
=== 0) { + if (allPatterns.length <= BUILTIN_PATTERNS.length) { + console.log('Public artifact check: OK (built-in patterns only; local denylist not found)'); + } else { + console.log('Public artifact check: OK'); + } + process.exit(0); + } + + console.error(`Public artifact check: ${allViolations.length} violation(s) found\n`); + for (const v of allViolations) { + console.error(` ${v.source}:${v.line}: matched /${v.pattern}/i`); + console.error(` ${v.text}`); + } + console.error('\nFix: remove non-technical language from public artifacts.'); + process.exit(1); +} + +main(); diff --git a/scripts/guard.sh b/scripts/guard.sh index 2550b8e8e..3d8eaf689 100755 --- a/scripts/guard.sh +++ b/scripts/guard.sh @@ -138,7 +138,7 @@ echo "== forbid npm invocations ==" # net-node test-pack-install (tests published package in clean npm project), # capture-core test-exports (tests consumer exports resolution), # publish workflow (npm install for OIDC), docs/release (npm publish docs), publish-manifest (description) -NPM_ALLOW='^(IMPLEMENTATION_STATUS\.md|README\.md|packages/.*/README\.md|(docs/)?RELEASING\.md|CHANGELOG\.md|docs/ROADMAP\.md|docs/maintainers/(RELEASING|NPM_PUBLISH|RELEASE-INTEGRITY).*\.md|docs/guides/edge/|docs/release/|scripts/(guard\.sh|pack-smoke\.mjs|pack-.*\.sh|otel-smoke\.sh|check-readme-consistency\.sh|publish-manifest\.json|setup-trusted-publishing\.sh|release/pack-install-smoke\.sh)|packages/net/node/scripts/test-pack-install\.mjs|packages/capture/core/scripts/test-exports\.mjs|\.github/workflows/(publish|promote-latest|publish-mcp-registry)\.yml|integrator-kits/|surfaces/distribution/|llms\.txt|examples/hello-world/)' 
+NPM_ALLOW='^(IMPLEMENTATION_STATUS\.md|README\.md|packages/.*/README\.md|(docs/)?RELEASING\.md|CHANGELOG\.md|docs/ROADMAP\.md|docs/maintainers/(RELEASING|NPM_PUBLISH|RELEASE-INTEGRITY|SECURITY-POSTURE).*\.md|docs/(VERIFY-RELEASE|guides/edge/|release/)|scripts/(guard\.sh|pack-smoke\.mjs|pack-.*\.sh|otel-smoke\.sh|check-readme-consistency\.sh|publish-manifest\.json|setup-trusted-publishing\.sh|release/pack-install-smoke\.sh)|packages/net/node/scripts/test-pack-install\.mjs|packages/capture/core/scripts/test-exports\.mjs|\.github/workflows/(publish|promote-latest|publish-mcp-registry)\.yml|integrator-kits/|surfaces/distribution/|llms\.txt|examples/hello-world/)' if gg_wb n '\bnpm (run|ci|install|pack|publish)\b' '(^|[^[:alnum:]_])npm (run|ci|install|pack|publish)([^[:alnum:]_]|$)' -- ':!node_modules' ':!archive/**' | grep -vE "$NPM_ALLOW" | grep .; then bad=1 else diff --git a/scripts/release/run-gates.sh b/scripts/release/run-gates.sh index d8611d173..c00ca1424 100755 --- a/scripts/release/run-gates.sh +++ b/scripts/release/run-gates.sh @@ -141,7 +141,11 @@ run_gate "test" pnpm test echo "" echo "--- Guards ---" run_gate "guard" bash scripts/guard.sh -run_gate "planning-leak" bash scripts/check-planning-leak.sh +if [ -f scripts/check-planning-leak.sh ]; then + run_gate "planning-leak" bash scripts/check-planning-leak.sh +else + echo " [planning-leak] SKIP (local-only script not present)" +fi run_gate "format" pnpm format:check # --- Architecture --- @@ -249,13 +253,14 @@ if [[ "$TARGET" == "stable" ]]; then # Implemented gates (PR 5: security hardening) run_gate "ssrf-suite" pnpm exec vitest run packages/net/node/tests/ssrf-expansion.test.ts tests/security/no-fetch-audit.test.ts --reporter=dot - # These stubs hard-fail until real implementations land in later PRs. 
- for stub_gate in "adoption-evidence" "fuzz-suite"; do - TOTAL=$((TOTAL + 1)) - echo " [$stub_gate] FAIL (not implemented: DD-90 requires implementation before stable release)" - FAILED=$((FAILED + 1)) - append_gate "$stub_gate" "failed" 0 - done + # Implemented gates (PR 3: property/fuzz tests) + run_gate "fuzz-suite" pnpm run test:property + + # Implemented gate (PR 8: adoption evidence) + # Uses Node validator that enforces the 6-field quality bar structurally, + # validates ISO dates and URL formats, and reads ecosystem count from + # docs/adoption/integration-evidence.json (not markdown prose). + run_gate "adoption-evidence" node scripts/release/validate-adoption-evidence.mjs fi # --- Summary --- diff --git a/scripts/release/validate-adoption-evidence.mjs b/scripts/release/validate-adoption-evidence.mjs new file mode 100644 index 000000000..e0eb874a5 --- /dev/null +++ b/scripts/release/validate-adoption-evidence.mjs @@ -0,0 +1,393 @@ +#!/usr/bin/env node +/** + * Adoption Evidence Validator (DD-90) + * + * Validates: + * 1. Integration evidence: reads docs/adoption/integration-evidence.json, + * validates against JSON Schema, checks >= 2 DD-90 ecosystems, + * verifies immutable pointers (test_files exist, spec_refs exist, + * pr_commit is valid hex SHA) + * 2. External confirmations: parses docs/adoption/confirmations.md, + * enforces the 6-field quality bar per entry + * 3. 
Markdown parity: verifies integration-evidence.md matches JSON + * (run with --generate to regenerate the Markdown) + * + * Exit codes: + * 0 All checks pass + * 1 One or more checks failed + */ + +import { readFileSync, writeFileSync, existsSync } from 'node:fs'; +import { resolve, dirname } from 'node:path'; +import { fileURLToPath } from 'node:url'; +import Ajv from 'ajv/dist/2020.js'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const REPO_ROOT = resolve(__dirname, '..', '..'); + +const EVIDENCE_JSON = resolve(REPO_ROOT, 'docs/adoption/integration-evidence.json'); +const EVIDENCE_SCHEMA = resolve(REPO_ROOT, 'docs/adoption/integration-evidence.schema.json'); +const EVIDENCE_MD = resolve(REPO_ROOT, 'docs/adoption/integration-evidence.md'); +const CONFIRMATIONS_MD = resolve(REPO_ROOT, 'docs/adoption/confirmations.md'); + +const REQUIRED_ECOSYSTEMS = 2; +const REQUIRED_CONFIRMATIONS = 1; + +const REQUIRED_FIELDS = [ + 'Team/Project', + 'Integration Surface', + 'Integration Impact', + 'Date', + 'Public Link', + 'Contact Role', +]; + +const ISO_DATE_RE = /^\d{4}-\d{2}-\d{2}$/; +const URL_RE = /^https?:\/\/.+/; +const SHA_RE = /^[0-9a-f]{8,40}$/; + +// --------------------------------------------------------------------------- +// Integration evidence (structured JSON + schema + pointer checks) +// --------------------------------------------------------------------------- + +function validateIntegrationEvidence() { + const errors = []; + + if (!existsSync(EVIDENCE_JSON)) { + return { ok: false, errors: [`Missing ${EVIDENCE_JSON}`], data: null }; + } + + let data; + try { + data = JSON.parse(readFileSync(EVIDENCE_JSON, 'utf-8')); + } catch (err) { + return { + ok: false, + errors: [`Invalid JSON in ${EVIDENCE_JSON}: ${err.message}`], + data: null, + }; + } + + // Schema validation + if (existsSync(EVIDENCE_SCHEMA)) { + const schema = JSON.parse(readFileSync(EVIDENCE_SCHEMA, 'utf-8')); + const ajv = new Ajv({ allErrors: true }); + const validate = 
ajv.compile(schema); + if (!validate(data)) { + for (const err of validate.errors) { + errors.push(`Schema: ${err.instancePath || '/'} ${err.message}`); + } + } + } else { + errors.push(`Missing schema file: ${EVIDENCE_SCHEMA}`); + } + + if (!Array.isArray(data.integrations)) { + return { + ok: false, + errors: [...errors, 'integrations must be an array'], + data, + }; + } + + const dd90 = data.integrations.filter((i) => i.dd90_gate === true); + + if (dd90.length < REQUIRED_ECOSYSTEMS) { + errors.push(`Need >= ${REQUIRED_ECOSYSTEMS} DD-90 ecosystems, found ${dd90.length}`); + } + + // Immutable pointer checks + for (const integration of data.integrations) { + const name = integration.ecosystem || '(unnamed)'; + + // pr_commit must be valid hex SHA + if (integration.pr_commit && !SHA_RE.test(integration.pr_commit)) { + errors.push( + `${name}: pr_commit "${integration.pr_commit}" is not a valid hex SHA (8-40 chars)` + ); + } + + // test_files must exist on disk + if (Array.isArray(integration.test_files)) { + for (const tf of integration.test_files) { + const fullPath = resolve(REPO_ROOT, tf); + if (!existsSync(fullPath)) { + errors.push(`${name}: test_file not found: ${tf}`); + } + } + } + + // spec_refs must exist on disk + if (Array.isArray(integration.spec_refs)) { + for (const sr of integration.spec_refs) { + const fullPath = resolve(REPO_ROOT, sr); + if (!existsSync(fullPath)) { + errors.push(`${name}: spec_ref not found: ${sr}`); + } + } + } + + // evidence text must be non-empty + if (typeof integration.evidence === 'string' && integration.evidence.trim().length === 0) { + errors.push(`${name}: evidence text is empty`); + } + + // non-DD-90 integrations should have rationale + if (integration.dd90_gate === false && !integration.rationale) { + errors.push(`${name}: non-DD-90 integration should include rationale for classification`); + } + } + + return { + ok: errors.length === 0, + ecosystemCount: dd90.length, + totalIntegrations: data.integrations.length, + 
errors, + data, + }; +} + +// --------------------------------------------------------------------------- +// Markdown generation from JSON +// --------------------------------------------------------------------------- + +function generateMarkdown(data) { + const dd90 = data.integrations.filter((i) => i.dd90_gate === true); + const nonDd90 = data.integrations.filter((i) => i.dd90_gate !== true); + + const lines = []; + lines.push('# Integration Evidence Catalog'); + lines.push(''); + lines.push( + '> **Purpose:** Documents which ecosystem integrations count toward DD-90 gates and which do not.' + ); + lines.push( + '> **Rule:** Only integrations that produce or consume Wire 0.2 receipts in a distinct ecosystem count.' + ); + lines.push( + '> **Source:** Generated from `docs/adoption/integration-evidence.json`. Do not edit manually.' + ); + lines.push(''); + lines.push(`## DD-90 Ecosystem Integrations (Count: ${dd90.length})`); + lines.push(''); + + for (const i of dd90) { + lines.push(`### ${i.ecosystem} (${i.full_name})`); + lines.push(''); + lines.push(`- **PR:** #${i.pr} (commit \`${i.pr_commit}\`)`); + lines.push(`- **Surface:** ${i.surface}`); + lines.push(`- **Evidence:** ${i.evidence}`); + lines.push(`- **Wire version:** Wire ${i.wire_version}`); + lines.push('- **DD-90 gate:** YES (distinct ecosystem with Wire 0.2 production)'); + lines.push(`- **Test files:** ${i.test_files.map((f) => '`' + f + '`').join(', ')}`); + lines.push(`- **Spec refs:** ${i.spec_refs.map((f) => '`' + f + '`').join(', ')}`); + lines.push(''); + } + + if (nonDd90.length > 0) { + lines.push('## Non-DD-90 Integrations (Correctly Classified)'); + lines.push(''); + + for (const i of nonDd90) { + lines.push(`### ${i.ecosystem} (${i.full_name})`); + lines.push(''); + lines.push(`- **PR:** #${i.pr} (commit \`${i.pr_commit}\`)`); + lines.push(`- **Surface:** ${i.surface}`); + lines.push(`- **Evidence:** ${i.evidence}`); + lines.push( + `- **Wire version:** ${i.wire_version ? 
'Wire ' + i.wire_version : 'N/A (identity input, not receipt output)'}` + ); + lines.push(`- **DD-90 gate:** NO${i.dd_reference ? ` (${i.dd_reference})` : ''}`); + if (i.rationale) { + lines.push(`- **Rationale:** ${i.rationale}`); + } + lines.push(`- **Test files:** ${i.test_files.map((f) => '`' + f + '`').join(', ')}`); + lines.push(`- **Spec refs:** ${i.spec_refs.map((f) => '`' + f + '`').join(', ')}`); + lines.push(''); + } + } + + lines.push('## Classification Rules'); + lines.push(''); + for (let idx = 0; idx < data.classification_rules.length; idx++) { + lines.push(`${idx + 1}. ${data.classification_rules[idx]}`); + } + lines.push(''); + + return lines.join('\n'); +} + +function checkMarkdownParity(data) { + const expected = generateMarkdown(data); + + if (!existsSync(EVIDENCE_MD)) { + return { ok: false, error: `Missing ${EVIDENCE_MD}. Run with --generate to create it.` }; + } + + const actual = readFileSync(EVIDENCE_MD, 'utf-8'); + if (actual !== expected) { + return { + ok: false, + error: + 'integration-evidence.md is out of sync with integration-evidence.json. 
Run: node scripts/release/validate-adoption-evidence.mjs --generate',
+    };
+  }
+
+  return { ok: true };
+}
+
+// ---------------------------------------------------------------------------
+// External confirmations (structured markdown)
+// ---------------------------------------------------------------------------
+
+function parseConfirmations() {
+  if (!existsSync(CONFIRMATIONS_MD)) {
+    return { ok: false, entries: [], errors: [`Missing ${CONFIRMATIONS_MD}`] };
+  }
+
+  const content = readFileSync(CONFIRMATIONS_MD, 'utf-8');
+  const lines = content.split('\n');
+
+  const entries = [];
+  const errors = [];
+  let inComment = false;
+  let currentEntry = null;
+
+  for (let i = 0; i < lines.length; i++) {
+    const line = lines[i];
+
+    // Skip HTML comment blocks
+    if (line.includes('<!--')) {
+      inComment = true;
+    }
+    if (line.includes('-->')) {
+      inComment = false;
+      continue;
+    }
+    if (inComment) continue;
+
+    // Detect entry heading
+    if (line.startsWith('### ')) {
+      if (currentEntry) {
+        entries.push(currentEntry);
+      }
+      currentEntry = {
+        name: line.slice(4).trim(),
+        line: i + 1,
+        fields: {},
+      };
+      continue;
+    }
+
+    // Detect field within entry
+    if (currentEntry) {
+      const fieldMatch = line.match(/^-\s+\*\*(.+?):\*\*\s*(.+)/);
+      if (fieldMatch) {
+        currentEntry.fields[fieldMatch[1]] = fieldMatch[2].trim();
+      }
+    }
+  }
+
+  if (currentEntry) {
+    entries.push(currentEntry);
+  }
+
+  // Skip the placeholder line
+  const realEntries = entries.filter(
+    (e) => e.name !== '_No external confirmations recorded._'
+  );
+
+  // Validate each entry against the 6-field quality bar
+  for (const entry of realEntries) {
+    for (const field of REQUIRED_FIELDS) {
+      if (!entry.fields[field]) {
+        errors.push(`"${entry.name}" (line ${entry.line}): missing required field "${field}"`);
+      }
+    }
+
+    // Validate Date format
+    const date = entry.fields['Date'];
+    if (date && !ISO_DATE_RE.test(date)) {
+      errors.push(
+        `"${entry.name}" (line ${entry.line}): Date must be ISO 8601 (YYYY-MM-DD), got "${date}"`
+      );
+    }
+
+    // Validate Public Link
+    
const link = entry.fields['Public Link']; + if (link && link !== 'private' && !URL_RE.test(link)) { + errors.push( + `"${entry.name}" (line ${entry.line}): Public Link must be a URL (https://...) or "private", got "${link}"` + ); + } + } + + return { + ok: realEntries.length >= REQUIRED_CONFIRMATIONS && errors.length === 0, + entries: realEntries, + errors, + }; +} + +// --------------------------------------------------------------------------- +// Main +// --------------------------------------------------------------------------- + +function main() { + const args = process.argv.slice(2); + const generateMode = args.includes('--generate'); + let failed = false; + + // 1. Integration evidence + const evidence = validateIntegrationEvidence(); + if (evidence.ok) { + console.log( + `Integration evidence: ${evidence.ecosystemCount} DD-90 ecosystems, ${evidence.totalIntegrations} total integrations` + ); + } else { + console.error('Integration evidence FAILED:'); + for (const err of evidence.errors) { + console.error(` ${err}`); + } + failed = true; + } + + // 2. Markdown generation or parity check + if (evidence.data) { + if (generateMode) { + const md = generateMarkdown(evidence.data); + writeFileSync(EVIDENCE_MD, md, 'utf-8'); + console.log(`Generated ${EVIDENCE_MD}`); + } else { + const parity = checkMarkdownParity(evidence.data); + if (parity.ok) { + console.log('Markdown parity: OK'); + } else { + console.error(`Markdown parity FAILED: ${parity.error}`); + failed = true; + } + } + } + + // 3. External confirmations + const confirmations = parseConfirmations(); + if (confirmations.entries.length === 0) { + console.error(`External confirmations: 0 valid entries (need >= ${REQUIRED_CONFIRMATIONS})`); + console.error( + ' Add at least one entry to docs/adoption/confirmations.md meeting the 6-field quality bar.' 
+ ); + failed = true; + } else if (confirmations.errors.length > 0) { + console.error( + `External confirmations: ${confirmations.entries.length} entries, ${confirmations.errors.length} validation errors:` + ); + for (const err of confirmations.errors) { + console.error(` ${err}`); + } + failed = true; + } else { + console.log(`External confirmations: ${confirmations.entries.length} valid entries`); + } + + process.exit(failed ? 1 : 0); +} + +main(); diff --git a/tests/release/adoption-evidence-validator.test.ts b/tests/release/adoption-evidence-validator.test.ts new file mode 100644 index 000000000..d68b4a5c1 --- /dev/null +++ b/tests/release/adoption-evidence-validator.test.ts @@ -0,0 +1,226 @@ +/** + * Tests for scripts/release/validate-adoption-evidence.mjs + * + * Exercises the validator against synthetic fixtures to verify: + * - Schema validation (missing fields, bad types) + * - Immutable pointer checks (bad SHA, missing files) + * - Confirmation parsing (6-field quality bar, date/URL format) + * - Markdown parity detection + * - Edge cases (empty JSON, malformed JSON, comment stripping) + */ +import { describe, it, expect, beforeEach } from 'vitest'; +import { execFileSync } from 'node:child_process'; +import { join } from 'node:path'; + +const REPO_ROOT = join(import.meta.dirname, '..', '..'); +const VALIDATOR = join(REPO_ROOT, 'scripts/release/validate-adoption-evidence.mjs'); + +// Minimal valid integration-evidence.json that passes schema + pointer checks +// Uses files known to exist in the repo +function makeValidEvidence() { + return { + $schema: './integration-evidence.schema.json', + $comment: 'test fixture', + version: '1.0.0', + integrations: [ + { + ecosystem: 'TestA', + full_name: 'Test Protocol A', + pr: 1, + pr_commit: 'abcdef01', + dd90_gate: true, + surface: 'test surface A', + evidence: 'round-trip test A', + wire_version: '0.2', + test_files: ['package.json'], + spec_refs: ['README.md'], + }, + { + ecosystem: 'TestB', + full_name: 'Test 
Protocol B', + pr: 2, + pr_commit: 'abcdef02', + dd90_gate: true, + surface: 'test surface B', + evidence: 'round-trip test B', + wire_version: '0.2', + test_files: ['package.json'], + spec_refs: ['README.md'], + }, + ], + classification_rules: ['Rule 1'], + }; +} + +/** + * Run the validator with overridden paths via env-driven temp dirs. + * Since the validator hardcodes paths relative to REPO_ROOT, we run it + * directly and inspect stdout/stderr/exit code. + */ +function runValidator(args: string[] = []): { stdout: string; stderr: string; exitCode: number } { + try { + const stdout = execFileSync('node', [VALIDATOR, ...args], { + encoding: 'utf-8', + timeout: 15_000, + cwd: REPO_ROOT, + }); + return { stdout, stderr: '', exitCode: 0 }; + } catch (err: any) { + return { + stdout: err.stdout || '', + stderr: err.stderr || '', + exitCode: err.status ?? 1, + }; + } +} + +describe('validate-adoption-evidence.mjs (live repo)', () => { + it('runs against real repo files without crashing', () => { + const result = runValidator(); + // Will fail on confirmations (expected) but should not crash + const combined = result.stdout + result.stderr; + expect(combined).toContain('Integration evidence'); + // The exit code is 1 because no external confirmations exist yet + expect(result.exitCode).toBe(1); + }); + + it('reports correct DD-90 ecosystem count from real JSON', () => { + const result = runValidator(); + const combined = result.stdout + result.stderr; + expect(combined).toContain('2 DD-90 ecosystems'); + expect(combined).toContain('3 total integrations'); + }); + + it('passes markdown parity check on real files', () => { + const result = runValidator(); + const combined = result.stdout + result.stderr; + expect(combined).toContain('Markdown parity: OK'); + }); + + it('correctly identifies external confirmation blocker', () => { + const result = runValidator(); + const combined = result.stdout + result.stderr; + expect(combined).toContain('0 valid entries'); + 
expect(combined).toContain('6-field quality bar'); + }); + + it('generates markdown without error', () => { + const result = runValidator(['--generate']); + const combined = result.stdout + result.stderr; + expect(combined).toContain('Generated'); + // Still fails on confirmations, which is expected + expect(combined).toContain('0 valid entries'); + }); +}); + +describe('validate-adoption-evidence.mjs schema validation', () => { + it('validates real integration-evidence.json against schema', () => { + // The real validator run should not report schema errors + const result = runValidator(); + const combined = result.stdout + result.stderr; + expect(combined).not.toContain('Schema:'); + }); +}); + +describe('adoption evidence JSON schema', () => { + // These tests validate the schema itself using Ajv directly + let Ajv: any; + let schema: any; + let validate: any; + + beforeEach(async () => { + const ajvModule = await import('ajv/dist/2020.js'); + Ajv = ajvModule.default; + const { readFileSync } = await import('node:fs'); + schema = JSON.parse( + readFileSync(join(REPO_ROOT, 'docs/adoption/integration-evidence.schema.json'), 'utf-8') + ); + const ajv = new Ajv({ allErrors: true }); + validate = ajv.compile(schema); + }); + + it('accepts valid evidence', () => { + const data = makeValidEvidence(); + expect(validate(data)).toBe(true); + }); + + it('rejects missing ecosystem', () => { + const data = makeValidEvidence(); + delete (data.integrations[0] as any).ecosystem; + expect(validate(data)).toBe(false); + expect( + validate.errors.some( + (e: any) => e.message?.includes('ecosystem') || e.params?.missingProperty === 'ecosystem' + ) + ).toBe(true); + }); + + it('rejects invalid pr_commit (not hex)', () => { + const data = makeValidEvidence(); + data.integrations[0].pr_commit = 'ZZZZZZZZ'; + expect(validate(data)).toBe(false); + }); + + it('rejects pr_commit too short', () => { + const data = makeValidEvidence(); + data.integrations[0].pr_commit = 'abc'; + 
expect(validate(data)).toBe(false); + }); + + it('rejects empty test_files', () => { + const data = makeValidEvidence(); + data.integrations[0].test_files = []; + expect(validate(data)).toBe(false); + }); + + it('rejects missing integrations array', () => { + const data = { version: '1.0.0', classification_rules: ['r1'] }; + expect(validate(data)).toBe(false); + }); + + it('rejects non-integer pr', () => { + const data = makeValidEvidence(); + (data.integrations[0] as any).pr = 'not-a-number'; + expect(validate(data)).toBe(false); + }); + + it('rejects non-boolean dd90_gate', () => { + const data = makeValidEvidence(); + (data.integrations[0] as any).dd90_gate = 'yes'; + expect(validate(data)).toBe(false); + }); + + it('accepts null wire_version for non-receipt integrations', () => { + const data = makeValidEvidence(); + data.integrations[0].wire_version = null; + data.integrations[0].dd90_gate = false; + (data.integrations[0] as any).rationale = 'Test rationale'; + expect(validate(data)).toBe(true); + }); + + it('rejects additional properties on integration', () => { + const data = makeValidEvidence(); + (data.integrations[0] as any).unknown_field = 'bad'; + expect(validate(data)).toBe(false); + }); +}); + +describe('confirmation markdown parsing', () => { + // These test the validator's live parsing by inspecting its output + // when run against the real repo (which has 0 confirmations) + + it('reports missing required fields for malformed entries', () => { + // The current repo has 0 entries, so we verify the validator + // output describes what is needed + const result = runValidator(); + const combined = result.stdout + result.stderr; + expect(combined).toContain('6-field quality bar'); + }); + + it('handles the placeholder line correctly', () => { + // The placeholder "No confirmations yet" should not count as an entry + const result = runValidator(); + const combined = result.stdout + result.stderr; + expect(combined).toContain('0 valid entries'); + }); +}); 
diff --git a/vitest.config.ts b/vitest.config.ts index 2a3653e1e..be3126686 100644 --- a/vitest.config.ts +++ b/vitest.config.ts @@ -84,6 +84,7 @@ export default defineConfig({ 'tests/distribution/**/*.test.ts', 'tests/integration/**/*.test.ts', 'tests/security/**/*.test.ts', + 'tests/release/**/*.test.ts', ], // Timeout for tests testTimeout: 10000, From bd489a60fa7f8e41686d49a614da0e1a08d5d0f2 Mon Sep 17 00:00:00 2001 From: jithinraj <7850727+jithinraj@users.noreply.github.com> Date: Sun, 8 Mar 2026 10:17:20 +0530 Subject: [PATCH 2/3] chore: public artifact hygiene guards and PR template Add commit-msg hook reading local denylist for non-technical language. Add hidden Unicode and MEMORY.md checks to public-artifact linter. Update PR metadata CI workflow to generic-only checks (no code checkout). Update PR template with technical-only sections. Make pre-commit planning-leak check conditional (local-only script). --- .githooks/commit-msg | 64 ++++++++++++++++++++++++++ .githooks/pre-commit | 7 ++- .github/pull_request_template.md | 47 +++++++------------ .github/workflows/pr-metadata-lint.yml | 17 ++++--- scripts/check-public-artifacts.mjs | 2 + 5 files changed, 98 insertions(+), 39 deletions(-) create mode 100755 .githooks/commit-msg diff --git a/.githooks/commit-msg b/.githooks/commit-msg new file mode 100755 index 000000000..32f97dfa9 --- /dev/null +++ b/.githooks/commit-msg @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +# .githooks/commit-msg +# Repo-managed commit-msg hook: checks commit message against local denylist. +# Install: bash scripts/setup-hooks.sh (or: git config core.hooksPath .githooks) +# +# Reads patterns from reference/guard-denylist.txt (local-only, gitignored). +# Also checks for objective issues (internal markers, hidden Unicode). +# +# Bypass: PEAC_SKIP_COMMIT_MSG=1 git commit -m "..." 
+ +set -euo pipefail + +COMMIT_MSG_FILE="$1" + +if [ "${PEAC_SKIP_COMMIT_MSG:-}" = "1" ]; then + echo "[commit-msg] BYPASSED (PEAC_SKIP_COMMIT_MSG=1)" + echo "[commit-msg] WARNING: Commit message was not checked for non-technical language." + exit 0 +fi + +REPO_ROOT="$(git rev-parse --show-toplevel)" +DENYLIST="$REPO_ROOT/reference/guard-denylist.txt" +FAILED=0 + +# Built-in checks (always active, safe for tracked script) +if grep -iE 'reference/.*_LOCAL\.' "$COMMIT_MSG_FILE" >/dev/null 2>&1; then + echo "[commit-msg] FAIL: Commit message references local-only files" + FAILED=1 +fi + +if grep -q 'x403' "$COMMIT_MSG_FILE" >/dev/null 2>&1; then + echo "[commit-msg] FAIL: Commit message contains x403 typo" + FAILED=1 +fi + +if grep -iE '(NEXT_STEPS|ROADMAP_LOCAL|STRATEGY_LOCAL|SCOPE_LEDGER|MASTER_PLAN_LOCAL)' "$COMMIT_MSG_FILE" >/dev/null 2>&1; then + echo "[commit-msg] FAIL: Commit message contains internal planning markers" + FAILED=1 +fi + +# Local denylist check (if denylist exists) +if [ -f "$DENYLIST" ]; then + while IFS= read -r line || [ -n "$line" ]; do + # Skip comments and blank lines + trimmed=$(echo "$line" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') + [ -z "$trimmed" ] && continue + echo "$trimmed" | grep -q '^#' && continue + + if grep -iE "$trimmed" "$COMMIT_MSG_FILE" >/dev/null 2>&1; then + echo "[commit-msg] FAIL: Commit message matches denylist pattern: $trimmed" + FAILED=1 + fi + done < "$DENYLIST" +fi + +if [ "$FAILED" -eq 1 ]; then + echo "" + echo "[commit-msg] Commit message contains non-technical language." + echo "[commit-msg] Rewrite the message to be technical and implementation-focused." + echo "[commit-msg] Bypass: PEAC_SKIP_COMMIT_MSG=1 git commit ..." 
+ exit 1 +fi + +echo "[commit-msg] OK" diff --git a/.githooks/pre-commit b/.githooks/pre-commit index 336b2eaa7..80aeaffe2 100755 --- a/.githooks/pre-commit +++ b/.githooks/pre-commit @@ -30,7 +30,10 @@ if git diff --cached --name-only | grep -q 'specs/kernel/errors.json'; then echo "[pre-commit] errors.generated.ts updated and staged" fi -echo "[pre-commit] Checking for planning leaks..." -bash scripts/check-planning-leak.sh +# Planning leak check (local-only script; skipped on fresh clones) +if [ -f scripts/check-planning-leak.sh ]; then + echo "[pre-commit] Checking for planning leaks..." + bash scripts/check-planning-leak.sh +fi echo "[pre-commit] OK" diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index da573a532..6a0333f80 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,19 +1,10 @@ ## Summary - + -## Type of Change +## Scope -- [ ] Bug fix (non-breaking change that fixes an issue) -- [ ] New feature (non-breaking change that adds functionality) -- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) -- [ ] Documentation update -- [ ] Refactoring (no functional changes) -- [ ] CI/Build changes - -## Related Issues - - + ## Changes @@ -23,28 +14,24 @@ - - -## Testing +## Test plan - + -- [ ] Unit tests pass (`pnpm test`) -- [ ] Lint passes (`pnpm run lint`) -- [ ] Type check passes (`pnpm run typecheck`) -- [ ] Manual testing performed +- [ ] `pnpm test` passes +- [ ] `pnpm lint` passes +- [ ] `pnpm typecheck:core` passes +- [ ] `bash scripts/guard.sh` passes ## Checklist -- [ ] My code follows the project's coding standards -- [ ] I have updated documentation as needed -- [ ] I have added tests for new functionality -- [ ] All existing tests pass -- [ ] My commit messages follow conventional commits format -- [ ] I have checked for breaking changes and documented them - -## Breaking Changes - - +- [ ] Code follows project coding standards 
+- [ ] Documentation updated as needed +- [ ] Tests added for new functionality +- [ ] Commit messages use conventional format +- [ ] PR title is technical and under 70 chars +- [ ] PR body contains only technical content (no internal planning, sequencing, or process language) -## Additional Notes +## Follow-ups - + diff --git a/.github/workflows/pr-metadata-lint.yml b/.github/workflows/pr-metadata-lint.yml index 00711a358..170856099 100644 --- a/.github/workflows/pr-metadata-lint.yml +++ b/.github/workflows/pr-metadata-lint.yml @@ -11,9 +11,6 @@ jobs: timeout-minutes: 2 steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Lint PR metadata env: PR_TITLE: ${{ github.event.pull_request.title }} @@ -22,10 +19,10 @@ jobs: FAILED=0 # Write PR content to temp files for safe grep - echo "$PR_TITLE" > /tmp/pr-title.txt - echo "$PR_BODY" > /tmp/pr-body.txt + printf '%s' "$PR_TITLE" > /tmp/pr-title.txt + printf '%s' "$PR_BODY" > /tmp/pr-body.txt - # Check for reference path leaks + # Check for reference path leaks (local-only file references) if grep -iE 'reference/.*_LOCAL\.' 
/tmp/pr-title.txt /tmp/pr-body.txt 2>/dev/null; then echo "::error::PR metadata contains reference to local-only files" FAILED=1 @@ -38,11 +35,17 @@ jobs: fi # Check for internal planning markers - if grep -iE '(NEXT_STEPS|ROADMAP_LOCAL|STRATEGY_LOCAL|SCOPE_LEDGER|MASTER_PLAN_LOCAL)' /tmp/pr-title.txt /tmp/pr-body.txt 2>/dev/null; then + if grep -iE '(NEXT_STEPS|ROADMAP_LOCAL|STRATEGY_LOCAL|SCOPE_LEDGER|MASTER_PLAN_LOCAL|MEMORY\.md)' /tmp/pr-title.txt /tmp/pr-body.txt 2>/dev/null; then echo "::error::PR metadata contains internal planning markers" FAILED=1 fi + # Check for hidden/bidirectional Unicode + if grep -P '[\x{200B}-\x{200F}\x{2028}-\x{202F}\x{2060}-\x{206F}\x{FEFF}]' /tmp/pr-title.txt /tmp/pr-body.txt 2>/dev/null; then + echo "::error::PR metadata contains hidden or bidirectional Unicode characters" + FAILED=1 + fi + rm -f /tmp/pr-title.txt /tmp/pr-body.txt if [ "$FAILED" -eq 1 ]; then diff --git a/scripts/check-public-artifacts.mjs b/scripts/check-public-artifacts.mjs index f6d022e18..6a96f760e 100644 --- a/scripts/check-public-artifacts.mjs +++ b/scripts/check-public-artifacts.mjs @@ -33,6 +33,8 @@ const DENYLIST_PATH = resolve(REPO_ROOT, 'reference/guard-denylist.txt'); const BUILTIN_PATTERNS = [ /reference\/.*_LOCAL\./i, /x403/, + /[\u200B-\u200F\u2028-\u202F\u2060-\u206F\uFEFF]/, // hidden/bidi Unicode + /MEMORY\.md/, ]; /** From c52e5bf404e0bdfc952d214b63535f78dc7a173e Mon Sep 17 00:00:00 2001 From: jithinraj <7850727+jithinraj@users.noreply.github.com> Date: Sun, 8 Mar 2026 10:41:47 +0530 Subject: [PATCH 3/3] fix: add hygiene scripts to guard exclusion list Guard scripts that check for typos must exclude other scripts that also check for the same patterns: commit-msg hook, public-artifact linter, and PR metadata CI workflow. 
--- scripts/guard.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/guard.sh b/scripts/guard.sh index 3d8eaf689..98564e2c5 100755 --- a/scripts/guard.sh +++ b/scripts/guard.sh @@ -245,7 +245,7 @@ fi echo "== forbid x403 typo (must be x402) ==" # x403 is a common typo for x402; catch it before it leaks into code or docs -if git grep -n 'x403' -- ':!node_modules' ':!archive/**' ':!scripts/guard.sh' ':!scripts/check-planning-leak.sh' ':!CHANGELOG.md' | grep .; then +if git grep -n 'x403' -- ':!node_modules' ':!archive/**' ':!scripts/guard.sh' ':!scripts/check-planning-leak.sh' ':!scripts/check-public-artifacts.mjs' ':!.github/workflows/pr-metadata-lint.yml' ':!.githooks/commit-msg' ':!CHANGELOG.md' | grep .; then echo "FAIL: Found 'x403' -- did you mean 'x402'?" bad=1 else