diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index ebd1bee..b44f7fd 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -69,6 +69,12 @@ jobs: - name: Build release binary (cross) if: matrix.cross run: | + # aarch64-unknown-linux-musl produces static binaries by default, but + # loongarch64 and riscv64 musl targets require explicit flags + export CROSS_LOONGARCH64_UNKNOWN_LINUX_MUSL_LINKER=loongarch64-unknown-linux-musl-gcc + export CROSS_RISCV64GC_UNKNOWN_LINUX_MUSL_LINKER=riscv64-unknown-linux-musl-gcc + export CROSS_LOONGARCH64_UNKNOWN_LINUX_MUSL_RUSTFLAGS='-C target-feature=+crt-static' + export CROSS_RISCV64GC_UNKNOWN_LINUX_MUSL_RUSTFLAGS='-C target-feature=+crt-static' cross build --release -p liyi-cli --target ${{ matrix.target }} ls -alF target/${{ matrix.target }}/release/ @@ -78,7 +84,7 @@ jobs: strip target/${{ matrix.target }}/release/liyi else # cross-compiled binaries may not be strippable by host strip - ${{ matrix.arch }}-linux-musl-strip target/${{ matrix.target }}/release/liyi 2>/dev/null || true + ${{ matrix.arch }}-unknown-linux-musl-strip target/${{ matrix.target }}/release/liyi 2>/dev/null || true fi - name: Package binary diff --git a/.github/workflows/release.yml.liyi.jsonc b/.github/workflows/release.yml.liyi.jsonc index 09f3ed5..70958b2 100644 --- a/.github/workflows/release.yml.liyi.jsonc +++ b/.github/workflows/release.yml.liyi.jsonc @@ -33,10 +33,10 @@ "intent": "Build one release tarball per configured musl target and publish each tarball as an artifact for the downstream release job. 
The job must choose native or cross compilation per matrix entry, produce the `liyi` CLI binary from package `liyi-cli`, and package the binary together with licenses and README.", "source_span": [ 17, - 96 + 102 ], "tree_path": "key.jobs::key.build", - "source_hash": "sha256:71f4a0f6d299fbf42ee8ba8eb3da5a4c46a157deb550e9c7cced6e9f55e4e537", + "source_hash": "sha256:ec4f57c4102358a0f0d1aeca3c15cb616e07bd1899103060109b8b6d0eb848f7", "source_anchor": " build:" }, { @@ -90,58 +90,58 @@ { "item": "build::build-cross-binary", "reviewed": true, - "intent": "For cross targets, build package `liyi-cli` in release mode through `cross` for the selected target triple. The post-build directory listing must verify that the expected target release directory was populated before later packaging steps run.", + "intent": "For cross targets, build package `liyi-cli` in release mode through `cross` for the selected target triple. Must set explicit linker and `crt-static` rustflags environment variables for loongarch64 and riscv64 musl targets, which do not produce static binaries by default. The post-build directory listing must verify that the expected target release directory was populated before later packaging steps run.", "source_span": [ - 69, - 73 + 71, + 79 ], "tree_path": "key.jobs::key.build::key.steps[6]::key.run", - "source_hash": "sha256:f0516755de82a5ca1b26380cc0157a046918dc7a99005c5e2a8954ecfef9084d", - "source_anchor": " - name: Build release binary (cross)" + "source_hash": "sha256:3c849bccad993cf25ddb56402d6ad2e135c8d203656e7ccfad9f509bc69b76b3", + "source_anchor": " run: |" }, { "item": "build::strip-binary", "reviewed": true, "intent": "Attempt to strip the produced `liyi` binary after compilation to reduce release artifact size. 
Native builds must use the host `strip`; cross builds must try the target-specific musl strip tool but tolerate its absence so packaging can still proceed.", "source_span": [ - 75, - 82 + 82, + 88 ], "tree_path": "key.jobs::key.build::key.steps[7]::key.run", - "source_hash": "sha256:3e21374960929ccbaed4d37fc5b285dc0b4b77cf5c75bddfda431871b20374d1", - "source_anchor": " - name: Strip binary" + "source_hash": "sha256:aa870f6b6cb901f17480e9d6936ef6d18a18b543d1083ba0a6987543e42308a0", + "source_anchor": " run: |" }, { "item": "build::package-binary", "reviewed": true, "intent": "Assemble the release tarball for the current target by copying the built `liyi` binary, the repository license files, and `README.md` into `dist/`, then creating `liyi--.tar.gz` in the workspace root. The tarball name must stay stable because the upload and release steps consume that exact filename pattern.", "source_span": [ - 84, - 90 + 91, + 96 ], "tree_path": "key.jobs::key.build::key.steps[8]::key.run", - "source_hash": "sha256:3e07ffc6ef6e4854235d2668749ade3536be6eece60e8c0f592244f701e506d5", - "source_anchor": " - name: Package binary" + "source_hash": "sha256:79785e2a57cf8c8df91bef86bb8d7804f12d5ae25e6a353a3267460caed0dd54", + "source_anchor": " run: |" }, { "item": "build::upload-artifact", "reviewed": true, "intent": "Upload the packaged tarball as a GitHub Actions artifact named `dist-artifact-`. 
The artifact naming convention must remain consistent with the release job's download pattern so every target tarball is collected for publication.", "source_span": [ - 92, - 96 + 100, + 102 ], "tree_path": "key.jobs::key.build::key.steps[9]::key.with", - "source_hash": "sha256:b53367295c966328cd1ed89adb45574227d5a1b28a929e9442f7fd7e86d994a5", - "source_anchor": " - name: Upload artifact" + "source_hash": "sha256:a3cb5812e6a5f176e8d803c906ea3f0118fd29add0e6b1c29bd46728df82c3b9", + "source_anchor": " with:" }, { "item": "jobs::release", "reviewed": true, "intent": "Wait for all build-matrix artifacts, gather the packaged tarballs into one directory, and publish them on the GitHub release corresponding to the pushed tag. The job must not try to publish before the build job succeeds.", "source_span": [ - 98, - 123 + 104, + 129 ], "tree_path": "key.jobs::key.release", "source_hash": "sha256:1c505f0c2dda23075de3f2b6265714e95dad26cf4b7840c3ce3fe070bc540eda", @@ -152,9 +152,10 @@ "reviewed": true, "intent": "Download every target tarball artifact emitted by the build matrix into the local `artifacts/` directory. The `dist-artifact-*` pattern and `merge-multiple: true` setting must gather all per-target uploads into one flat directory so the release step can attach them with a single glob.", "source_span": [ - 105, - 110 + 111, + 116 ], + "tree_path": "key.jobs::key.release::key.steps[1]::key.with", "source_hash": "sha256:5cb97468bcd3e1686454133b658486b7359407a75637bd46c408dc622c757ea8", "source_anchor": " - name: Download all artifacts" }, @@ -163,9 +164,10 @@ "reviewed": true, "intent": "Create or update the GitHub release only when the event is a tag push, then attach every `artifacts/*.tar.gz` asset, keep the release non-draft and non-prerelease, generate release notes, and authenticate with `GITHUB_TOKEN`. 
The explicit `if` guard must prevent accidental release publication from any future non-tag trigger added to this workflow.", "source_span": [ - 114, - 123 + 120, + 129 ], + "tree_path": "key.jobs::key.release::key.steps[3]::key.with", "source_hash": "sha256:216ea36572f4dc28e5641bc4cc64b64e769140ce0517f9cd84a7483b914891c4", "source_anchor": " - name: Create Release" } diff --git a/crates/liyi-cli/src/cli.rs b/crates/liyi-cli/src/cli.rs index 9f9a42b..d7a6065 100644 --- a/crates/liyi-cli/src/cli.rs +++ b/crates/liyi-cli/src/cli.rs @@ -113,5 +113,17 @@ pub enum Commands { /// Filter to a specific item by name #[arg(long)] item: Option, + + /// Only show unreviewed items + #[arg(long, conflicts_with_all = ["stale_only", "req_only"])] + unreviewed_only: bool, + + /// Only show stale-reviewed items + #[arg(long, conflicts_with_all = ["unreviewed_only", "req_only"])] + stale_only: bool, + + /// Only show requirement-changed items + #[arg(long, conflicts_with_all = ["unreviewed_only", "stale_only"])] + req_only: bool, }, } diff --git a/crates/liyi-cli/src/cli.rs.liyi.jsonc b/crates/liyi-cli/src/cli.rs.liyi.jsonc index 471830b..3609e70 100644 --- a/crates/liyi-cli/src/cli.rs.liyi.jsonc +++ b/crates/liyi-cli/src/cli.rs.liyi.jsonc @@ -30,13 +30,13 @@ { "item": "Commands", "reviewed": true, - "intent": "Enumerate all CLI subcommands (Check, Migrate, Init, Approve) with their full set of flags and arguments, providing defaults and mutual constraints (e.g. --level filters diagnostic output). Init includes --no-discover to skip tree-sitter item discovery and --trivial-threshold to configure the line-count cutoff for suggested trivial items.", + "intent": "Enumerate all CLI subcommands (Check, Migrate, Init, Approve) with their full set of flags and arguments, providing defaults and mutual constraints. Init includes --no-discover and --trivial-threshold. 
Approve includes mutually exclusive kind filters: --unreviewed-only, --stale-only, --req-only.", "source_span": [ 28, - 117 + 129 ], "tree_path": "enum.Commands", - "source_hash": "sha256:bc8e82f094939279c1359d7f376e0b199fbb8a535468e6d27777188532b68689", + "source_hash": "sha256:ee8fd6ce38ecdf1267174eff76d48e2d7958bf89a109ee817909a645481190b0", "source_anchor": "pub enum Commands {" } ] diff --git a/crates/liyi-cli/src/main.rs b/crates/liyi-cli/src/main.rs index b32179d..306586b 100644 --- a/crates/liyi-cli/src/main.rs +++ b/crates/liyi-cli/src/main.rs @@ -67,6 +67,8 @@ fn main() { process::exit(exit_code as i32); } + let github_actions = env::var("GITHUB_ACTIONS").is_ok_and(|v| v == "true"); + // Print summary first for immediate visibility let summary = liyi::diagnostics::format_summary(&diagnostics); println!("{summary}\n"); @@ -95,7 +97,14 @@ fn main() { continue; } } - println!("{}", d.display_with_root(&repo_root)); + if github_actions { + println!( + "{}", + liyi::diagnostics::format_github_actions(d, &repo_root) + ); + } else { + println!("{}", d.display_with_root(&repo_root)); + } } process::exit(exit_code as i32); @@ -167,6 +176,9 @@ fn main() { yes, dry_run, item, + unreviewed_only, + stale_only, + req_only, } => { let targets = if paths.is_empty() { vec![env::current_dir().unwrap_or_default()] @@ -174,18 +186,31 @@ fn main() { paths }; + let kind_filter = if unreviewed_only { + liyi::approve::ApproveFilter::UnreviewedOnly + } else if stale_only { + liyi::approve::ApproveFilter::StaleOnly + } else if req_only { + liyi::approve::ApproveFilter::ReqOnly + } else { + liyi::approve::ApproveFilter::All + }; + let is_interactive = !yes && is_tty(); if is_interactive { // Collect candidates, run TUI, apply decisions. 
- let candidates = - match liyi::approve::collect_approval_candidates(&targets, item.as_deref()) { - Ok(c) => c, - Err(e) => { - eprintln!("Error: {e}"); - process::exit(2); - } - }; + let candidates = match liyi::approve::collect_approval_candidates( + &targets, + item.as_deref(), + kind_filter, + ) { + Ok(c) => c, + Err(e) => { + eprintln!("Error: {e}"); + process::exit(2); + } + }; if candidates.is_empty() { println!("nothing to approve"); @@ -216,14 +241,17 @@ fn main() { } } else { // Batch mode: collect + auto-approve all. - let candidates = - match liyi::approve::collect_approval_candidates(&targets, item.as_deref()) { - Ok(c) => c, - Err(e) => { - eprintln!("Error: {e}"); - process::exit(2); - } - }; + let candidates = match liyi::approve::collect_approval_candidates( + &targets, + item.as_deref(), + kind_filter, + ) { + Ok(c) => c, + Err(e) => { + eprintln!("Error: {e}"); + process::exit(2); + } + }; let decisions = vec![liyi::approve::Decision::Yes; candidates.len()]; match liyi::approve::apply_approval_decisions(&candidates, &decisions, dry_run) { diff --git a/crates/liyi-cli/src/main.rs.liyi.jsonc b/crates/liyi-cli/src/main.rs.liyi.jsonc index ea6b2e0..c21ae9c 100644 --- a/crates/liyi-cli/src/main.rs.liyi.jsonc +++ b/crates/liyi-cli/src/main.rs.liyi.jsonc @@ -21,10 +21,10 @@ "intent": "=doc", "source_span": [ 26, - 246 + 274 ], "tree_path": "fn.main", - "source_hash": "sha256:600b1ce9423150de057f3c12c610117e6f0fcdf5eb372f87192d947b7075dc83", + "source_hash": "sha256:46a500d70b9f6a301936362a77822a29214be9bfefa26d3d2abe1691a7e772b8", "source_anchor": "fn main() {" }, { @@ -32,8 +32,8 @@ "reviewed": true, "intent": "=doc", "source_span": [ - 252, - 254 + 280, + 282 ], "tree_path": "fn.is_tty", "source_hash": "sha256:021dc096ffc5aa501d5fe90e8fd1835c4ce784a7de59434995a9be221f97e40f", diff --git a/crates/liyi-cli/src/tui_approve.rs b/crates/liyi-cli/src/tui_approve.rs index 0ba0676..7e5c4b0 100644 --- a/crates/liyi-cli/src/tui_approve.rs +++ 
b/crates/liyi-cli/src/tui_approve.rs @@ -16,7 +16,7 @@ use similar::{ChangeTag, TextDiff}; use syntect::highlighting::{self, ThemeSet}; use syntect::parsing::SyntaxSet; -use liyi::approve::{ApprovalCandidate, Decision}; +use liyi::approve::{ApprovalCandidate, CandidateKind, Decision}; /// Shared syntax-highlighting resources, initialised once per TUI session. struct Highlighter { @@ -129,6 +129,50 @@ fn edit_intent_in_editor(candidate: &ApprovalCandidate) -> Option { content.push_str(&format!("# {line}\n")); } } + + // StaleReviewed: show source diff so the reviewer can see what changed. + if candidate.kind == CandidateKind::StaleReviewed + && let (Some(old), Some(new)) = (&candidate.prev_source, &candidate.current_source) + { + content.push_str("#\n# Source diff (old → new):\n"); + for change in TextDiff::from_lines(old.as_str(), new.as_str()).iter_all_changes() { + let prefix = match change.tag() { + ChangeTag::Delete => "-", + ChangeTag::Insert => "+", + ChangeTag::Equal => " ", + }; + content.push_str(&format!("# {prefix} {change}")); + if !change.as_str().unwrap_or_default().ends_with('\n') { + content.push('\n'); + } + } + } + + // ReqChanged: show requirement diff context. 
+ if let CandidateKind::ReqChanged { requirement } = &candidate.kind { + content.push_str(&format!("#\n# Requirement \"{requirement}\" changed:\n")); + match ( + &candidate.old_requirement_text, + &candidate.new_requirement_text, + ) { + (Some(old), Some(new)) => { + for change in TextDiff::from_lines(old.as_str(), new.as_str()).iter_all_changes() { + let prefix = match change.tag() { + ChangeTag::Delete => "-", + ChangeTag::Insert => "+", + ChangeTag::Equal => " ", + }; + content.push_str(&format!("# {prefix} {change}")); + if !change.as_str().unwrap_or_default().ends_with('\n') { + content.push('\n'); + } + } + } + _ => { + content.push_str("# (requirement diff unavailable)\n"); + } + } + } content.push_str("#\n# Lines starting with '#' are ignored.\n"); content.push_str("# An empty result (after stripping comments) cancels the edit.\n"); @@ -277,17 +321,30 @@ fn draw_header( current: usize, total: usize, ) { + let kind_label = match &candidate.kind { + CandidateKind::Unreviewed => "UNREVIEWED", + CandidateKind::StaleReviewed => "STALE — source changed since last review", + CandidateKind::ReqChanged { requirement } => { + // We'll format this below since we need dynamic content. + &format!("REQ CHANGED — {requirement}") + } + }; let title = format!( - " Item {current}/{total} │ {} │ {}:{}-{} ", + " [{kind_label}] Item {current}/{total} │ {} │ {}:{}-{} ", candidate.source_display, candidate.item_name, candidate.source_span[0], candidate.source_span[1], ); + let border_color = match &candidate.kind { + CandidateKind::Unreviewed => Color::Cyan, + CandidateKind::StaleReviewed => Color::Yellow, + CandidateKind::ReqChanged { .. 
} => Color::Magenta, + }; let block = Block::default() .title(title) .borders(Borders::ALL) - .border_style(Style::default().fg(Color::Cyan)); + .border_style(Style::default().fg(border_color)); f.render_widget(block, area); } @@ -354,6 +411,16 @@ fn push_span_lines<'a>(spans: Vec>, lines: &mut Vec>) { } fn draw_intent(f: &mut ratatui::Frame, area: Rect, candidate: &ApprovalCandidate) { + match &candidate.kind { + CandidateKind::Unreviewed => draw_intent_unreviewed(f, area, candidate), + CandidateKind::StaleReviewed => draw_intent_stale_reviewed(f, area, candidate), + CandidateKind::ReqChanged { requirement } => { + draw_intent_req_changed(f, area, candidate, requirement) + } + } +} + +fn draw_intent_unreviewed(f: &mut ratatui::Frame, area: Rect, candidate: &ApprovalCandidate) { let current_text = if candidate.intent == "=doc" { "(intent delegated to source docstring)".to_string() } else { @@ -448,6 +515,164 @@ fn draw_intent(f: &mut ratatui::Frame, area: Rect, candidate: &ApprovalCandidate } } +fn draw_intent_stale_reviewed(f: &mut ratatui::Frame, area: Rect, candidate: &ApprovalCandidate) { + let mut lines: Vec = Vec::new(); + + // Show intent first so the human knows what's supposed to hold. + lines.push(Line::from(Span::styled( + "▼ Current intent:", + Style::default().fg(Color::DarkGray).bold(), + ))); + let intent_text = if candidate.intent == "=doc" { + " (intent delegated to source docstring)" + } else { + &candidate.intent + }; + for l in intent_text.lines() { + lines.push(Line::from(format!(" {l}"))); + } + + lines.push(Line::from("")); + + // Show source diff if available. 
+ match (&candidate.prev_source, &candidate.current_source) { + (Some(old), Some(new)) if old != new => { + lines.push(Line::from(Span::styled( + "▼ Source diff (old → new):", + Style::default().fg(Color::DarkGray).bold(), + ))); + + let prev_spans = diff_spans( + old, + new, + ChangeTag::Delete, + Style::default().fg(Color::Red), + Style::default() + .fg(Color::White) + .bg(Color::Red) + .add_modifier(Modifier::BOLD), + ); + push_span_lines(prev_spans, &mut lines); + + lines.push(Line::from("")); + + let cur_spans = diff_spans( + old, + new, + ChangeTag::Insert, + Style::default().fg(Color::Green), + Style::default() + .fg(Color::White) + .bg(Color::Green) + .add_modifier(Modifier::BOLD), + ); + push_span_lines(cur_spans, &mut lines); + } + (None, Some(_)) => { + lines.push(Line::from(Span::styled( + " (old source unavailable from git history)", + Style::default().fg(Color::DarkGray), + ))); + } + _ => { + lines.push(Line::from(Span::styled( + " (source diff unavailable)", + Style::default().fg(Color::DarkGray), + ))); + } + } + + let paragraph = Paragraph::new(Text::from(lines)) + .block( + Block::default() + .title(" Stale Review — does intent still hold? ") + .borders(Borders::ALL) + .border_style(Style::default().fg(Color::Yellow)), + ) + .wrap(Wrap { trim: false }); + f.render_widget(paragraph, area); +} + +fn draw_intent_req_changed( + f: &mut ratatui::Frame, + area: Rect, + candidate: &ApprovalCandidate, + requirement: &str, +) { + let mut lines: Vec = Vec::new(); + + // Show requirement diff. 
+ lines.push(Line::from(Span::styled( + format!("▼ Requirement \"{requirement}\" changed:"), + Style::default().fg(Color::DarkGray).bold(), + ))); + + match ( + &candidate.old_requirement_text, + &candidate.new_requirement_text, + ) { + (Some(old), Some(new)) if old != new => { + let prev_spans = diff_spans( + old, + new, + ChangeTag::Delete, + Style::default().fg(Color::Red), + Style::default() + .fg(Color::White) + .bg(Color::Red) + .add_modifier(Modifier::BOLD), + ); + push_span_lines(prev_spans, &mut lines); + + lines.push(Line::from("")); + + let cur_spans = diff_spans( + old, + new, + ChangeTag::Insert, + Style::default().fg(Color::Green), + Style::default() + .fg(Color::White) + .bg(Color::Green) + .add_modifier(Modifier::BOLD), + ); + push_span_lines(cur_spans, &mut lines); + } + _ => { + lines.push(Line::from(Span::styled( + " (requirement diff unavailable)", + Style::default().fg(Color::DarkGray), + ))); + } + } + + lines.push(Line::from("")); + + // Show current intent. + lines.push(Line::from(Span::styled( + "▼ Item intent (does it still hold?):", + Style::default().fg(Color::DarkGray).bold(), + ))); + let intent_text = if candidate.intent == "=doc" { + " (intent delegated to source docstring)" + } else { + &candidate.intent + }; + for l in intent_text.lines() { + lines.push(Line::from(format!(" {l}"))); + } + + let paragraph = Paragraph::new(Text::from(lines)) + .block( + Block::default() + .title(format!(" Req Changed — {requirement} ")) + .borders(Borders::ALL) + .border_style(Style::default().fg(Color::Magenta)), + ) + .wrap(Wrap { trim: false }); + f.render_widget(paragraph, area); +} + /// Convert a syntect `Color` to a ratatui `Color`. 
fn to_ratatui_color(c: highlighting::Color) -> Color { Color::Rgb(c.r, c.g, c.b) diff --git a/crates/liyi-cli/src/tui_approve.rs.liyi.jsonc b/crates/liyi-cli/src/tui_approve.rs.liyi.jsonc index 5816cf2..6759a45 100644 --- a/crates/liyi-cli/src/tui_approve.rs.liyi.jsonc +++ b/crates/liyi-cli/src/tui_approve.rs.liyi.jsonc @@ -32,8 +32,8 @@ "reviewed": true, "intent": "Enter raw mode and alternate screen on stderr, run the main event loop (draw frame, read key, dispatch action), then restore terminal state. Returns a Vec parallel to candidates. Must cleanly restore terminal even on early quit. Keys: y=approve, n=reject, e=edit (leaves TUI for $EDITOR, approves with edited intent or cancels on empty result), s/Enter=skip, a=approve-all-remaining, b/Left=go-back, Right=go-forward, j/Down and k/Up=scroll source, PgUp/PgDn=page-scroll source, q/Esc=quit. The e key suspends raw mode and alternate screen, spawns the editor, then re-enters both, rebuilding the terminal.", "source_span": [ - 169, - 247 + 213, + 291 ], "tree_path": "fn.run_tui", "source_hash": "sha256:b5d2b469ff3c37403da5902d5187f4c6bd2675abe2eefb7cde0875b052d5d6e2", @@ -44,25 +44,73 @@ "reviewed": true, "intent": "Compose the full-screen layout into five vertical regions (header, intent pane, source pane, progress gauge, keybinding footer) and delegate rendering to the per-region draw functions, passing the shared Highlighter to draw_source.", "source_span": [ - 249, - 271 + 293, + 315 ], "tree_path": "fn.draw", "source_hash": "sha256:2d5de9dfd08ce60b1df40e5488a9e527055025afe98fee47c3b057b79b404ad4", "source_anchor": "fn draw(f: &mut ratatui::Frame, app: &ApproveTui) {" }, + { + "item": "draw_header", + "reviewed": true, + "intent": "Render the header bar with kind-aware label (UNREVIEWED, STALE, REQ CHANGED) and color-coded border (Cyan, Yellow, Magenta). 
Shows item counter, source file, item name, and span.", + "source_span": [ + 317, + 349 + ], + "tree_path": "fn.draw_header", + "source_hash": "sha256:5d189111b9d224042b60c3055c0b85055a05c720967032505c5512933a73f3d6", + "source_anchor": "fn draw_header(" + }, { "item": "draw_intent", "reviewed": true, - "intent": "Render the intent pane with one of three modes based on prev_intent: (1) 'Intent (new)' — no prior approval, show current intent in white; (2) 'Intent (unchanged from last approval)' — previous matches current, show with reassuring title; (3) 'Intent (changed)' — show previous and current intents with word-level diff highlighting via diff_spans: removed words in white-on-red, inserted words in white-on-green, unchanged text in base red/green. Uses push_span_lines for newline-aware rendering. Handles =doc sentinel for all branches.", + "intent": "Dispatch to the appropriate intent rendering function based on CandidateKind: draw_intent_unreviewed for Unreviewed, draw_intent_stale_reviewed for StaleReviewed, draw_intent_req_changed for ReqChanged.", "source_span": [ - 356, - 449 + 413, + 421 ], "tree_path": "fn.draw_intent", - "source_hash": "sha256:cda79be63c0fb3e2ff96301ed56e017cf585386242d4bee29dbddbc3ec5cb691", + "source_hash": "sha256:51a3b7ed71933dbe8d8190671c3e7e3321c79031ebe58e03c38014626f1df685", "source_anchor": "fn draw_intent(f: &mut ratatui::Frame, area: Rect, candidate: &ApprovalCandidate) {" }, + { + "item": "draw_intent_unreviewed", + "reviewed": true, + "intent": "Render the intent pane for unreviewed candidates with one of three modes based on prev_intent: (1) 'Intent (new)' — no prior approval, show current intent in white; (2) 'Intent (unchanged from last approval)' — previous matches current; (3) 'Intent (changed)' — show word-level diff between previous and current intents using diff_spans. 
Handles =doc sentinel.", + "source_span": [ + 423, + 516 + ], + "tree_path": "fn.draw_intent_unreviewed", + "source_hash": "sha256:9f46731e07b322db1a63654ee4f39ed541eb395354b51b5edc88d635948d01a6", + "source_anchor": "fn draw_intent_unreviewed(f: &mut ratatui::Frame, area: Rect, candidate: &ApprovalCandidate) {" + }, + { + "item": "draw_intent_stale_reviewed", + "reviewed": true, + "intent": "Render the stale-reviewed intent pane showing the current intent text first, then a unified source diff (old → new) if both prev_source and current_source are available. Uses diff_spans for red/green word-level highlighting. Falls back to a message when old source is unavailable from git history. Yellow border.", + "source_span": [ + 518, + 594 + ], + "tree_path": "fn.draw_intent_stale_reviewed", + "source_hash": "sha256:602c82bc7625de14fc37d3f0208f56d894e3c1a75fa53bb96edb21ec487c95d5", + "source_anchor": "fn draw_intent_stale_reviewed(f: &mut ratatui::Frame, area: Rect, candidate: &ApprovalCandidate) {" + }, + { + "item": "draw_intent_req_changed", + "reviewed": true, + "intent": "Render the req-changed intent pane showing requirement name, requirement text diff (old → new) if available, and the item's current intent. Uses diff_spans for word-level highlighting. Falls back to message when requirement diff is unavailable. Magenta border.", + "source_span": [ + 596, + 674 + ], + "tree_path": "fn.draw_intent_req_changed", + "source_hash": "sha256:d2f77dea7ef812504a6cf214260e15ca62374f3e26572bfbd972b8a43f210a1a", + "source_anchor": "fn draw_intent_req_changed(" + }, { "item": "go_forward", "reviewed": true, @@ -80,8 +128,8 @@ "reviewed": true, "intent": "Return true if the source file is small enough for syntax highlighting (at most MAX_HIGHLIGHT_LINES lines). 
Files exceeding this threshold fall back to plain text to avoid UI lag.", "source_span": [ - 466, - 468 + 691, + 693 ], "tree_path": "fn.file_highlight_enabled", "source_hash": "sha256:cfb2581067ccece845cd39e6edb2eacb5ee99fa8d2ffe3f1c8fbab7e96c3d159", @@ -92,8 +140,8 @@ "reviewed": true, "intent": "Return true if a single line is short enough for syntax highlighting (at most MAX_LINE_LEN bytes). Longer lines fall back to plain text to prevent the regex engine from stalling.", "source_span": [ - 472, - 474 + 697, + 699 ], "tree_path": "fn.line_highlight_enabled", "source_hash": "sha256:fda70f9cb32977604fcf8ba20bf16348d61c4447e270d30ddaba859eaa8eba8d", @@ -104,8 +152,8 @@ "reviewed": true, "intent": "Render the source code pane with syntect-based syntax highlighting, gated by file_highlight_enabled and line_highlight_enabled. Resolves a syntax definition from the source file extension (falling back to plain text), runs HighlightLines over each eligible line, and maps syntect styles to ratatui Span styles. Span lines receive a subtle background highlight (SPAN_BG) to distinguish them from surrounding file content. Prepends a line-number gutter. Supports vertical scrolling.", "source_span": [ - 476, - 570 + 701, + 795 ], "tree_path": "fn.draw_source", "source_hash": "sha256:ddd044516e2725481f30bb2f70008933076c589ef4b5cc21d567de3ae279e321", @@ -114,13 +162,13 @@ { "item": "edit_intent_in_editor", "reviewed": true, - "intent": "Open $VISUAL / $EDITOR / vi on a tempfile pre-populated with the current intent text followed by comment lines containing the item name, source location, and previous intent (if any). Comment lines start with '#' and are stripped on read-back. Returns Some(edited_text) if non-empty after stripping, None if empty or editor exited with error. 
Deletes the tempfile after reading.", + "intent": "Open $VISUAL / $EDITOR / vi on a tempfile pre-populated with the current intent text followed by comment lines containing the item name, source location, previous intent (if any), source diff for StaleReviewed candidates, and requirement diff for ReqChanged candidates. Comment lines start with '#' and are stripped on read-back. Returns Some(edited_text) if non-empty after stripping, None if empty or editor exited with error. Deletes the tempfile after reading.", "source_span": [ 101, - 165 + 209 ], "tree_path": "fn.edit_intent_in_editor", - "source_hash": "sha256:ccd97544ce0adbee4e805e4e81d23b1b1fa7a6c21f34b50f439f6e437b2c927d", + "source_hash": "sha256:3161a08c08cb27ab70cd19585125b27cfc7bc2cf3ce5512d015b0e9605db413b", "source_anchor": "fn edit_intent_in_editor(candidate: &ApprovalCandidate) -> Option {" }, { @@ -128,8 +176,8 @@ "reviewed": true, "intent": "Produce styled ratatui Spans for one side of a word-level diff between old and new text. Uses similar::TextDiff with Patience algorithm. Equal segments get base_style; segments matching the selected side (Delete for previous, Insert for current) get highlight_style; the other side's changes are omitted.", "source_span": [ - 301, - 326 + 358, + 383 ], "tree_path": "fn.diff_spans", "source_hash": "sha256:2dd68fc75281f00363cb33ef6931f6330e51e37e554a819450ea2e22badd5d90", @@ -140,8 +188,8 @@ "reviewed": true, "intent": "Convert a vec of styled Spans into indented display Lines, splitting on embedded newlines. 
Each logical line is prefixed with two-space indentation.", "source_span": [ - 330, - 353 + 387, + 410 ], "source_hash": "sha256:9a84687e73c1fd72b0f754e1cc2cba35cdb4a04791b6825cc1a0ffa9de1ffea7", "source_anchor": "fn push_span_lines<'a>(spans: Vec>, lines: &mut Vec>) {" @@ -151,8 +199,8 @@ "reviewed": true, "intent": "Render the keybinding footer line showing all available keys: y (approve), n (reject), e (edit), s/Enter (skip), a (approve all), b/arrows (navigation), j/k (scroll), q/Esc (quit).", "source_span": [ - 588, - 614 + 813, + 839 ], "tree_path": "fn.draw_keys", "source_hash": "sha256:32ce63aeded8af2046caa055e103f2ea680bba3d22013ece6b58caf4f7db4a3f", diff --git a/crates/liyi/src/approve.rs b/crates/liyi/src/approve.rs index cb7e260..db2c502 100644 --- a/crates/liyi/src/approve.rs +++ b/crates/liyi/src/approve.rs @@ -1,10 +1,12 @@ +use std::collections::HashMap; use std::fs; use std::io; use std::path::{Path, PathBuf}; -use crate::discovery::{find_repo_root, resolve_sidecar_targets}; +use crate::discovery::{discover, find_repo_root, resolve_sidecar_targets}; use crate::git::{git_log_revisions, git_show}; use crate::hashing::hash_span; +use crate::markers::{SourceMarker, requirement_spans, scan_markers}; use crate::sidecar::{Spec, parse_sidecar, write_sidecar}; /// Result of an approve operation on a single sidecar. @@ -50,6 +52,33 @@ pub enum Decision { Edit(String), } +/// What kind of review this candidate requires. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum CandidateKind { + /// Agent-inferred intent; no human confirmation yet. + Unreviewed, + /// Reviewed item whose source code changed (source_hash mismatch). + StaleReviewed, + /// Reviewed item whose related requirement text changed. + ReqChanged { + /// Name of the requirement that changed. + requirement: String, + }, +} + +/// Filter for which candidate kinds to collect. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum ApproveFilter { + /// All candidate kinds. 
+ All, + /// Only unreviewed items. + UnreviewedOnly, + /// Only stale-reviewed items. + StaleOnly, + /// Only requirement-changed items. + ReqOnly, +} + /// A single item pending approval, with all context needed for display. #[derive(Debug)] pub struct ApprovalCandidate { @@ -73,6 +102,18 @@ pub struct ApprovalCandidate { pub span_len: usize, /// Previously approved intent text from Git history, if available. pub prev_intent: Option, + /// What kind of review this candidate requires. + pub kind: CandidateKind, + /// Previous source text at the span (for StaleReviewed diffs). + pub prev_source: Option, + /// Current source text at the span (for StaleReviewed diffs). + pub current_source: Option, + /// Requirement name that changed (for ReqChanged display). + pub changed_requirement: Option, + /// Old requirement text (for ReqChanged diff). + pub old_requirement_text: Option, + /// New requirement text (for ReqChanged diff). + pub new_requirement_text: Option, } /// Look up the previously approved intent for an item by walking Git @@ -116,10 +157,242 @@ fn lookup_prev_intent( None } -/// Collect all unreviewed items as approval candidates. +/// Look up the source text at the time when `source_hash` was last valid, +/// by walking Git history on the source file. +/// +/// Uses a two-pass strategy: +/// 1. **Fast path:** walk source history with the current span. Works when +/// the span hasn't been shifted by `check --fix`. +/// 2. **Slow path:** walk sidecar history to recover the pre-shift span, +/// then read the source at that revision. Handles the case where +/// `check --fix` shifted the span after the function moved. +/// +/// Returns the source lines at the span from the historical version, +/// or `None` if git history is unavailable or no matching commit is found. 
+fn lookup_prev_source( + repo_root: Option<&Path>, + source_path: &Path, + sidecar_path: &Path, + item_name: &str, + tree_path: &str, + source_hash: &str, + span: [usize; 2], +) -> Option { + let root = repo_root?; + let source_rel = source_path.strip_prefix(root).ok()?.to_str()?; + + // Fast path: try source history with the current span. + let source_revisions = git_log_revisions(root, source_rel, 20); + for rev in &source_revisions { + let content = match git_show(root, source_rel, rev) { + Some(c) => c, + None => continue, + }; + if let Ok((hash, _)) = hash_span(&content, span) + && hash == source_hash + { + let lines: Vec<&str> = content.lines().collect(); + let start = span[0].saturating_sub(1); + let end = span[1].min(lines.len()); + return Some(lines[start..end].join("\n")); + } + } + + // Slow path: walk sidecar history to find the old span (before check --fix + // shifted it), then read the source at that revision. + let sidecar_rel = sidecar_path.strip_prefix(root).ok()?.to_str()?; + let sidecar_revisions = git_log_revisions(root, sidecar_rel, 20); + for rev in &sidecar_revisions { + let sidecar_content = match git_show(root, sidecar_rel, rev) { + Some(c) => c, + None => continue, + }; + let sidecar = match parse_sidecar(&sidecar_content) { + Ok(s) => s, + Err(_) => continue, + }; + for spec in &sidecar.specs { + if let Spec::Item(item) = spec { + let matched = if !tree_path.is_empty() && !item.tree_path.is_empty() { + item.tree_path == tree_path + } else { + item.item == item_name + }; + if !matched { + continue; + } + let old_span = item.source_span; + if old_span == span { + continue; // same span — already tried in fast path + } + // Read source at this sidecar revision with the old span. 
+ let source_content = match git_show(root, source_rel, rev) { + Some(c) => c, + None => continue, + }; + if let Ok((hash, _)) = hash_span(&source_content, old_span) + && hash == source_hash + { + let lines: Vec<&str> = source_content.lines().collect(); + let start = old_span[0].saturating_sub(1); + let end = old_span[1].min(lines.len()); + return Some(lines[start..end].join("\n")); + } + } + } + } + + None +} + +/// Info about a requirement discovered from the global sidecar scan. +struct RequirementInfo { + source_path: PathBuf, + sidecar_path: PathBuf, + source_span: [usize; 2], + current_hash: String, + current_text: String, +} + +/// Build a global requirement registry by scanning all sidecars in the repo. +/// +/// For each requirement spec, reads the source file, computes a fresh hash +/// from the marker-delimited span (falling back to the sidecar span), and +/// stores the current text. This mirrors the pass-1 logic in `check.rs` +/// but is self-contained. +fn build_requirement_registry(repo_root: &Path) -> HashMap { + let disc = discover(repo_root, &[]); + let mut registry = HashMap::new(); + + for entry in &disc.sidecars { + let sc_content = match fs::read_to_string(&entry.sidecar_path) { + Ok(c) => c, + Err(_) => continue, + }; + let sidecar = match parse_sidecar(&sc_content) { + Ok(s) => s, + Err(_) => continue, + }; + + let source_content = fs::read_to_string(&entry.source_path).unwrap_or_default(); + let markers = scan_markers(&source_content); + let spans = requirement_spans(&markers); + let all_lines: Vec<&str> = source_content.lines().collect(); + + for spec in &sidecar.specs { + if let Spec::Requirement(req) = spec { + // Prefer marker-delimited span (current truth) over sidecar span. 
+ let current_span = spans + .get(&req.requirement) + .copied() + .unwrap_or(req.source_span); + + if let Ok((hash, _)) = hash_span(&source_content, current_span) { + let start = current_span[0].saturating_sub(1); + let end = current_span[1].min(all_lines.len()); + let text = all_lines[start..end].join("\n"); + + registry.insert( + req.requirement.clone(), + RequirementInfo { + source_path: entry.source_path.clone(), + sidecar_path: entry.sidecar_path.clone(), + source_span: current_span, + current_hash: hash, + current_text: text, + }, + ); + } + } + } + } + + registry +} + +/// Look up the old text of a requirement by walking Git history. +/// +/// Uses the same two-pass strategy as `lookup_prev_source`: fast path on +/// the source history with the current span, slow path via sidecar history +/// to recover a pre-shift span. +fn lookup_prev_requirement_text( + repo_root: &Path, + req_info: &RequirementInfo, + stored_hash: &str, + req_name: &str, +) -> Option { + let source_rel = req_info + .source_path + .strip_prefix(repo_root) + .ok()? + .to_str()?; + + // Fast path: try source history with the current span. + let source_revisions = git_log_revisions(repo_root, source_rel, 20); + for rev in &source_revisions { + let content = match git_show(repo_root, source_rel, rev) { + Some(c) => c, + None => continue, + }; + if let Ok((hash, _)) = hash_span(&content, req_info.source_span) + && hash == stored_hash + { + let lines: Vec<&str> = content.lines().collect(); + let start = req_info.source_span[0].saturating_sub(1); + let end = req_info.source_span[1].min(lines.len()); + return Some(lines[start..end].join("\n")); + } + } + + // Slow path: walk sidecar history to find old span. + let sidecar_rel = req_info + .sidecar_path + .strip_prefix(repo_root) + .ok()? 
+ .to_str()?; + let sidecar_revisions = git_log_revisions(repo_root, sidecar_rel, 20); + for rev in &sidecar_revisions { + let sidecar_content = match git_show(repo_root, sidecar_rel, rev) { + Some(c) => c, + None => continue, + }; + let sidecar = match parse_sidecar(&sidecar_content) { + Ok(s) => s, + Err(_) => continue, + }; + for spec in &sidecar.specs { + if let Spec::Requirement(req) = spec + && req.requirement == req_name + && req.source_span != req_info.source_span + { + let source_content = match git_show(repo_root, source_rel, rev) { + Some(c) => c, + None => continue, + }; + if let Ok((hash, _)) = hash_span(&source_content, req.source_span) + && hash == stored_hash + { + let lines: Vec<&str> = source_content.lines().collect(); + let start = req.source_span[0].saturating_sub(1); + let end = req.source_span[1].min(lines.len()); + return Some(lines[start..end].join("\n")); + } + } + } + } + + None +} + +/// Collect approval candidates matching the given filter. +/// +/// When `kind_filter` is `All`, collects unreviewed, stale-reviewed, and +/// req-changed items (in that order). Specific filters restrict to one kind. +// @liyi:related stale-reviewed-demands-human-judgment pub fn collect_approval_candidates( paths: &[PathBuf], item_filter: Option<&str>, + kind_filter: ApproveFilter, ) -> Result, ApproveError> { let targets = resolve_sidecar_targets(paths).map_err(ApproveError::Parse)?; if targets.is_empty() { @@ -129,7 +402,28 @@ pub fn collect_approval_candidates( // Try to locate the repo root for Git history lookups. 
let repo_root = targets.first().and_then(|p| find_repo_root(p)); - let mut candidates = Vec::new(); + let collect_unreviewed = + kind_filter == ApproveFilter::All || kind_filter == ApproveFilter::UnreviewedOnly; + let collect_stale = + kind_filter == ApproveFilter::All || kind_filter == ApproveFilter::StaleOnly; + let collect_req = kind_filter == ApproveFilter::All || kind_filter == ApproveFilter::ReqOnly; + + let mut unreviewed_candidates = Vec::new(); + let mut stale_candidates = Vec::new(); + let mut req_candidates = Vec::new(); + + // Build global requirement registry for ReqChanged detection. + // Requirements can live in any sidecar, so we scan the whole repo. + let req_registry: HashMap = if collect_req { + if let Some(ref root) = repo_root { + build_requirement_registry(root) + } else { + HashMap::new() + } + } else { + HashMap::new() + }; + for sidecar_path in &targets { let sc_content = fs::read_to_string(sidecar_path)?; let sidecar = parse_sidecar(&sc_content).map_err(ApproveError::Parse)?; @@ -137,6 +431,14 @@ pub fn collect_approval_candidates( let source_path = source_path_from_sidecar(sidecar_path)?; let source_content = fs::read_to_string(&source_path).unwrap_or_default(); let all_lines: Vec<&str> = source_content.lines().collect(); + let source_markers = scan_markers(&source_content); + + // Build source_lines once per file (shared across candidates). 
+ let source_lines: Vec<(usize, String)> = all_lines + .iter() + .enumerate() + .map(|(i, line)| (i + 1, line.to_string())) + .collect(); for (spec_index, spec) in sidecar.specs.iter().enumerate() { // @liyi:related approve-never-approves-requirements @@ -146,44 +448,131 @@ pub fn collect_approval_candidates( { continue; } - if item.reviewed { - continue; - } let span_start = item.source_span[0].saturating_sub(1); let span_end = item.source_span[1].min(all_lines.len()); - - let source_lines: Vec<(usize, String)> = all_lines - .iter() - .enumerate() - .map(|(i, line)| (i + 1, line.to_string())) - .collect(); - let span_offset = span_start; let span_len = span_end.saturating_sub(span_start); - let prev_intent = lookup_prev_intent( - repo_root.as_deref(), - sidecar_path, - &item.item, - &item.tree_path, - ); - - candidates.push(ApprovalCandidate { - sidecar_path: sidecar_path.clone(), - source_display: sidecar.source.clone(), - spec_index, - item_name: item.item.clone(), - intent: item.intent.clone(), - source_span: item.source_span, - source_lines, - span_offset, - span_len, - prev_intent, + // Determine if this item is effectively reviewed. + let has_source_intent = source_markers.iter().any(|m| { + matches!(m, SourceMarker::Intent { line, .. 
} + if *line >= item.source_span[0] && *line <= item.source_span[1]) }); + let effectively_reviewed = item.reviewed || has_source_intent; + + if !effectively_reviewed && collect_unreviewed { + let prev_intent = lookup_prev_intent( + repo_root.as_deref(), + sidecar_path, + &item.item, + &item.tree_path, + ); + + unreviewed_candidates.push(ApprovalCandidate { + sidecar_path: sidecar_path.clone(), + source_display: sidecar.source.clone(), + spec_index, + item_name: item.item.clone(), + intent: item.intent.clone(), + source_span: item.source_span, + source_lines: source_lines.clone(), + span_offset, + span_len, + prev_intent, + kind: CandidateKind::Unreviewed, + prev_source: None, + current_source: None, + changed_requirement: None, + old_requirement_text: None, + new_requirement_text: None, + }); + } + + if effectively_reviewed + && collect_stale + && let Some(stored_hash) = &item.source_hash + && let Ok((current_hash, _)) = hash_span(&source_content, item.source_span) + && *stored_hash != current_hash + { + // Extract current and previous source at the span. + let cur_src = all_lines[span_start..span_end.min(all_lines.len())].join("\n"); + let prev_src = lookup_prev_source( + repo_root.as_deref(), + &source_path, + sidecar_path, + &item.item, + &item.tree_path, + stored_hash, + item.source_span, + ); + + stale_candidates.push(ApprovalCandidate { + sidecar_path: sidecar_path.clone(), + source_display: sidecar.source.clone(), + spec_index, + item_name: item.item.clone(), + intent: item.intent.clone(), + source_span: item.source_span, + source_lines: source_lines.clone(), + span_offset, + span_len, + prev_intent: None, + kind: CandidateKind::StaleReviewed, + prev_source: prev_src, + current_source: Some(cur_src), + changed_requirement: None, + old_requirement_text: None, + new_requirement_text: None, + }); + } + + // ReqChanged: check related edges against global requirement registry. 
+ if effectively_reviewed + && collect_req + && let Some(related) = &item.related + { + for (req_name, stored_hash) in related { + if let Some(stored_h) = stored_hash + && let Some(req_info) = req_registry.get(req_name) + && *stored_h != req_info.current_hash + { + let old_text = repo_root.as_deref().and_then(|root| { + lookup_prev_requirement_text(root, req_info, stored_h, req_name) + }); + + req_candidates.push(ApprovalCandidate { + sidecar_path: sidecar_path.clone(), + source_display: sidecar.source.clone(), + spec_index, + item_name: item.item.clone(), + intent: item.intent.clone(), + source_span: item.source_span, + source_lines: source_lines.clone(), + span_offset, + span_len, + prev_intent: None, + kind: CandidateKind::ReqChanged { + requirement: req_name.clone(), + }, + prev_source: None, + current_source: None, + changed_requirement: Some(req_name.clone()), + old_requirement_text: old_text, + new_requirement_text: Some(req_info.current_text.clone()), + }); + } + } + } } } } + + // Ordering: unreviewed first, then stale-reviewed by file, then req-changed. + let mut candidates = Vec::new(); + candidates.append(&mut unreviewed_candidates); + candidates.append(&mut stale_candidates); + candidates.append(&mut req_candidates); Ok(candidates) } @@ -194,20 +583,19 @@ pub fn collect_approval_candidates( // @liyi:related reviewed-semantics // @liyi:related reqchanged-orthogonal-to-reviewed // @liyi:related reqchanged-demands-human-judgment +// @liyi:related stale-reviewed-demands-human-judgment pub fn apply_approval_decisions( candidates: &[ApprovalCandidate], decisions: &[Decision], dry_run: bool, ) -> Result, ApproveError> { - use std::collections::HashMap; - - // Group decisions by sidecar path. - let mut per_sidecar: HashMap<&Path, Vec<(usize, &Decision)>> = HashMap::new(); + // Group decisions by sidecar path, carrying the candidate kind. 
+ let mut per_sidecar: HashMap<&Path, Vec<(usize, &Decision, &CandidateKind)>> = HashMap::new(); for (candidate, decision) in candidates.iter().zip(decisions.iter()) { per_sidecar .entry(candidate.sidecar_path.as_path()) .or_default() - .push((candidate.spec_index, decision)); + .push((candidate.spec_index, decision, &candidate.kind)); } let mut results = Vec::new(); @@ -222,16 +610,31 @@ pub fn apply_approval_decisions( let mut rejected = 0usize; let mut modified = false; - for &(spec_index, ref decision) in item_decisions { + for &(spec_index, ref decision, ref kind) in item_decisions { if let Some(Spec::Item(item)) = sidecar.specs.get_mut(spec_index) { match decision { Decision::Yes => { if !dry_run { - item.reviewed = true; - if let Ok((hash, anchor)) = hash_span(&source_content, item.source_span) - { - item.source_hash = Some(hash); - item.source_anchor = Some(anchor); + match kind { + CandidateKind::Unreviewed | CandidateKind::StaleReviewed => { + // Set reviewed and rehash. + item.reviewed = true; + if let Ok((hash, anchor)) = + hash_span(&source_content, item.source_span) + { + item.source_hash = Some(hash); + item.source_anchor = Some(anchor); + } + } + CandidateKind::ReqChanged { requirement } => { + // Refresh the related edge hash only. + // Do not touch reviewed, intent, or source_hash. + if let Some(related) = &mut item.related { + // Set to null so `liyi check --fix` fills + // in the current requirement hash. 
+ related.insert(requirement.clone(), None); + } + } } modified = true; } @@ -240,11 +643,29 @@ pub fn apply_approval_decisions( Decision::Edit(new_intent) => { if !dry_run { item.intent = new_intent.clone(); - item.reviewed = true; - if let Ok((hash, anchor)) = hash_span(&source_content, item.source_span) - { - item.source_hash = Some(hash); - item.source_anchor = Some(anchor); + match kind { + CandidateKind::Unreviewed | CandidateKind::StaleReviewed => { + item.reviewed = true; + if let Ok((hash, anchor)) = + hash_span(&source_content, item.source_span) + { + item.source_hash = Some(hash); + item.source_anchor = Some(anchor); + } + } + CandidateKind::ReqChanged { requirement } => { + // Update intent and refresh the related edge. + item.reviewed = true; + if let Ok((hash, anchor)) = + hash_span(&source_content, item.source_span) + { + item.source_hash = Some(hash); + item.source_anchor = Some(anchor); + } + if let Some(related) = &mut item.related { + related.insert(requirement.clone(), None); + } + } } modified = true; } @@ -252,7 +673,17 @@ pub fn apply_approval_decisions( } Decision::No => { if !dry_run { - item.reviewed = false; + match kind { + CandidateKind::Unreviewed => { + // Explicitly mark as not reviewed. + item.reviewed = false; + modified = true; + } + CandidateKind::StaleReviewed | CandidateKind::ReqChanged { .. } => { + // Leave unchanged — item stays stale/req-changed + // as a todo marker. 
+ } + } } rejected += 1; } diff --git a/crates/liyi/src/approve.rs.liyi.jsonc b/crates/liyi/src/approve.rs.liyi.jsonc index c23a5d7..ed16675 100644 --- a/crates/liyi/src/approve.rs.liyi.jsonc +++ b/crates/liyi/src/approve.rs.liyi.jsonc @@ -8,8 +8,8 @@ "reviewed": true, "intent": "Data structure returned per sidecar file reporting how many items were approved, skipped, and rejected during the approve operation.", "source_span": [ - 12, - 17 + 14, + 19 ], "tree_path": "struct.ApproveResult", "source_hash": "sha256:3913960707101bc0724d6635156fa7d8cf607b34d8e21400cb445969b1a862a6", @@ -20,8 +20,8 @@ "reviewed": true, "intent": "Error type for approve operations with variants for I/O errors, parse failures, and no-targets-found. Must implement Display and From.", "source_span": [ - 21, - 25 + 23, + 27 ], "tree_path": "enum.ApproveError", "source_hash": "sha256:79c540f91ede3129e555d35665d456f2311ce1523f309d9e2fb0c8a25b8fafd5", @@ -32,23 +32,47 @@ "reviewed": true, "intent": "Public enum representing a user's approval decision: Yes (approve and accept inferred intent), No (reject), Skip (defer), or Edit(String) (approve with human-overridden intent text). Used by both interactive TUI and batch workflows.", "source_span": [ - 45, - 51 + 47, + 53 ], "tree_path": "enum.Decision", "source_hash": "sha256:4d2e7d98844f6022c103a3acc72977b14f29c16d236c40f55b68f635c98e6dd6", "source_anchor": "pub enum Decision {" }, + { + "item": "CandidateKind", + "reviewed": true, + "intent": "Discriminant for the kind of review a candidate requires: Unreviewed (agent-inferred, no human confirmation), StaleReviewed (reviewed but source_hash mismatches), or ReqChanged (reviewed but a related requirement's text changed). 
Must derive PartialEq for filter comparisons.", + "source_span": [ + 57, + 67 + ], + "tree_path": "enum.CandidateKind", + "source_hash": "sha256:ffae12d4b8ae60efe28c99f78d9d21314820326857874ac4e8c6e170fab511a5", + "source_anchor": "pub enum CandidateKind {" + }, + { + "item": "ApproveFilter", + "reviewed": true, + "intent": "Filter enum controlling which candidate kinds to collect: All, UnreviewedOnly, StaleOnly, or ReqOnly. Must derive Copy for cheap passing.", + "source_span": [ + 71, + 80 + ], + "tree_path": "enum.ApproveFilter", + "source_hash": "sha256:e0416c012b44e7e1eed460f1ff77b450515c1ec97c53c60fc4f2ee9e40a29d34", + "source_anchor": "pub enum ApproveFilter {" + }, { "item": "ApprovalCandidate", "reviewed": true, - "intent": "Structured representation of a single unreviewed item with all context needed for display: sidecar path, source filename, spec index, item name, intent text, source span, all source file lines, span_offset (index into source_lines where the reviewed span begins), span_len (number of span lines), and prev_intent (the previously approved intent from Git history, if any). Decouples data gathering from presentation.", + "intent": "Structured representation of an approval candidate (unreviewed, stale-reviewed, or req-changed) with all context needed for TUI display: sidecar/source identity, spec index, item name, intent text, source span/lines, span offset/len, prev_intent from Git history, kind discriminant, previous and current source text (for StaleReviewed diffs), and old/new requirement text (for ReqChanged diffs). 
Decouples data gathering from presentation.", "source_span": [ - 55, - 76 + 84, + 117 ], "tree_path": "struct.ApprovalCandidate", - "source_hash": "sha256:0650288d78027156ced022abc60b6d992e9f4c212664451dcb36302d61075702", + "source_hash": "sha256:55fef1dab9c0d9667c43f82583cefc9b88cc675f89229db78381fb2610643c4b", "source_anchor": "pub struct ApprovalCandidate {" }, { @@ -56,43 +80,93 @@ "reviewed": true, "intent": "Walk Git history (up to 20 commits via git_log_revisions) to find the most recent commit where the item had reviewed=true in the sidecar. For each revision, retrieves the sidecar via git_show, parses it, and matches by tree_path (preferred) or item name. Returns Some(intent) from the first matching reviewed item. Returns None if repo_root is None, the path is not repo-relative, or no reviewed version exists in history.", "source_span": [ - 83, - 117 + 124, + 158 ], "tree_path": "fn.lookup_prev_intent", "source_hash": "sha256:7e641a17f160781026ef2afe2999343e9b5b60d6609ba13fb1348ceaded0fd13", "source_anchor": "fn lookup_prev_intent(" }, + { + "item": "lookup_prev_source", + "reviewed": true, + "intent": "Recover the source text at the time source_hash was valid, using a two-pass Git history strategy. Fast path: walk source file history with the current span (works pre-check-fix). Slow path: walk sidecar history to find the pre-shift span, then read source at that revision. Returns None when git history is unavailable. 
Handles the case where check --fix shifted the span after the function moved.", + "source_span": [ + 172, + 246 + ], + "tree_path": "fn.lookup_prev_source", + "source_hash": "sha256:4d98e7d46b43481f995217ca79ca4650ddd7be274d49e13b344126d3c43811e6", + "source_anchor": "fn lookup_prev_source(" + }, + { + "item": "RequirementInfo", + "reviewed": true, + "intent": "=trivial", + "source_span": [ + 249, + 255 + ], + "tree_path": "struct.RequirementInfo", + "source_hash": "sha256:c98ea3c19354724d40bde85dd2dfbd412db7cc3d75f7d8f0fda87dbfe1f8d78b", + "source_anchor": "struct RequirementInfo {" + }, + { + "item": "build_requirement_registry", + "reviewed": true, + "intent": "Build a global requirement registry by scanning all sidecars in the repo via discover(). For each RequirementSpec, reads the source file, computes a fresh hash from marker-delimited spans (falling back to sidecar span), and stores current text. Returns a HashMap mapping requirement name to RequirementInfo.", + "source_span": [ + 263, + 311 + ], + "tree_path": "fn.build_requirement_registry", + "source_hash": "sha256:3b5ddc2d04c2c579aa25282f7ac504230a7c8080831c9e91c667054f94c05811", + "source_anchor": "fn build_requirement_registry(repo_root: &Path) -> HashMap {" + }, + { + "item": "lookup_prev_requirement_text", + "reviewed": true, + "intent": "Recover the old text of a requirement by walking Git history, using the same two-pass strategy as lookup_prev_source (fast path with current span, slow path via sidecar history for pre-shift spans). 
Returns None when no matching revision is found.", + "source_span": [ + 318, + 385 + ], + "tree_path": "fn.lookup_prev_requirement_text", + "source_hash": "sha256:87f30988a8c03b2e06b2850486cb2643370c3eb4669e39f737584030c0ed50aa", + "source_anchor": "fn lookup_prev_requirement_text(" + }, { "item": "collect_approval_candidates", "reviewed": true, - "intent": "Resolve sidecar targets from paths, then collect all unreviewed items as ApprovalCandidate structs with the full source file for display. Derives source path by stripping .liyi.jsonc suffix. Applies item name filter if specified. Populates span_offset and span_len so the TUI can distinguish the reviewed span. Queries Git history via lookup_prev_intent to populate prev_intent for each candidate. Returns error if no targets found.", + "intent": "Resolve sidecar targets from paths, then collect approval candidates of all three kinds (unreviewed, stale-reviewed, req-changed) filtered by kind_filter and item_filter. Builds a global requirement registry for cross-sidecar ReqChanged detection. For unreviewed: gathers items without reviewed=true or @liyi:intent. For stale-reviewed: detects source_hash mismatches and recovers old source via lookup_prev_source. For req-changed: compares related edge hashes against freshly computed requirement hashes and populates old/new requirement text. Orders output: unreviewed first, then stale, then req-changed. 
Returns error if no targets found.", "source_span": [ - 120, - 188 + 392, + 577 ], "tree_path": "fn.collect_approval_candidates", - "source_hash": "sha256:e2543ec1fc4d235c152d5e280a25dad2f6e657d0a8f5361b656e391fd2f253fe", + "source_hash": "sha256:b4e074761641e415b1540030fa11961558e878febdcf1f86a2c8278499abd547", "source_anchor": "pub fn collect_approval_candidates(", "related": { - "approve-never-approves-requirements": "sha256:c2b107468dd999e0802bd648e619c863f6769835468fe187cc49d3255168994b" + "approve-never-approves-requirements": "sha256:c2b107468dd999e0802bd648e619c863f6769835468fe187cc49d3255168994b", + "stale-reviewed-demands-human-judgment": "sha256:0158af62360b393b12a0a34ed912c5fa7f75d313e1994536345e70910887d97a" } }, { "item": "apply_approval_decisions", "reviewed": true, - "intent": "Apply a parallel slice of Decision values to the candidates, grouped by sidecar file. For Yes: set reviewed=true and fill hashes. For Edit(text): update intent text, set reviewed=true, and fill hashes. For No: set reviewed=false. For Skip: no mutation. Write back modified sidecars unless dry_run. Returns per-sidecar ApproveResult.", + "intent": "Apply a parallel slice of Decision values to the candidates, grouped by sidecar file. Kind-aware behavior: For Unreviewed/StaleReviewed Yes: set reviewed=true and rehash. For ReqChanged Yes: refresh the related edge hash only (set to null for tool fill-in), do not touch reviewed/intent/source_hash. For Edit: update intent and rehash (all kinds); for ReqChanged also refresh the related edge. For No on Unreviewed: set reviewed=false. For No on StaleReviewed/ReqChanged: leave unchanged as todo marker. For Skip: no mutation. Write back modified sidecars unless dry_run. 
Returns per-sidecar ApproveResult.", "source_span": [ - 197, - 279 + 587, + 710 ], "tree_path": "fn.apply_approval_decisions", - "source_hash": "sha256:bfbe20536ff44961025d4090626a5fc5bf3adbd58dc77a81622aac93f39106c2", + "source_hash": "sha256:c398d3de6beb83aeadeb437ce0a4a3f2bcd1187f36d0ea8413a9bbc6918edfc3", "source_anchor": "pub fn apply_approval_decisions(", "related": { "reqchanged-demands-human-judgment": "sha256:30ba3f1eea4b3e68d6c5b3926c643cd41d753636d9420329934d9dd43ae14de5", "reqchanged-orthogonal-to-reviewed": "sha256:5c85639dd24c2cecdeb7c466e414319fdd40451d0abb896e495fcd13d63a78d8", - "reviewed-semantics": "sha256:eabc16cfe83d7ed5b52779d4771a04f9ba6fee37c24e4493f2577d907afbd9e0" + "reviewed-semantics": "sha256:eabc16cfe83d7ed5b52779d4771a04f9ba6fee37c24e4493f2577d907afbd9e0", + "stale-reviewed-demands-human-judgment": "sha256:0158af62360b393b12a0a34ed912c5fa7f75d313e1994536345e70910887d97a" } }, { @@ -100,8 +174,8 @@ "reviewed": true, "intent": "Derive the source file path from a sidecar path by stripping the .liyi.jsonc suffix. 
Must reject paths that do not end with .liyi.jsonc.", "source_span": [ - 282, - 293 + 713, + 724 ], "tree_path": "fn.source_path_from_sidecar", "source_hash": "sha256:490ac4b91d5418e093b852a804fb0c330b1642cd99af3035d3a43fb35429bfab", diff --git a/crates/liyi/src/check.rs b/crates/liyi/src/check.rs index 12867b6..2e208b4 100644 --- a/crates/liyi/src/check.rs +++ b/crates/liyi/src/check.rs @@ -1,4 +1,4 @@ -use std::collections::{BTreeMap, HashMap}; +use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs; use std::path::{Path, PathBuf}; @@ -10,7 +10,7 @@ use crate::hashing::{SpanError, hash_span}; use crate::markers::{SourceMarker, requirement_spans, scan_markers}; use crate::schema::validate_version; use crate::shift::{ShiftResult, detect_shift}; -use crate::sidecar::{Spec, parse_sidecar, write_sidecar}; +use crate::sidecar::{ItemSpec, RequirementSpec, Spec, parse_sidecar, write_sidecar}; use crate::tree_path::{ compute_tree_path, detect_language, resolve_tree_path, resolve_tree_path_sibling_scan, }; @@ -50,7 +50,78 @@ pub fn run_check( let mut diagnostics: Vec = Vec::new(); // Surface discovery warnings as diagnostics. - for w in &disc.warnings { + emit_ambiguous_sidecar_warnings(root, &disc.warnings, &mut diagnostics); + + // Shared source-content cache (avoids re-reading the same file). 
+ let mut source_cache: HashMap = HashMap::new(); + + // ------------------------------------------------------------------ + // Pass 1 — Requirement discovery (project-global) + // ------------------------------------------------------------------ + let mut requirements = + discover_requirements(&disc.all_files, &mut source_cache, &mut diagnostics); + compute_requirement_hashes(&disc.all_files, &mut source_cache, &mut requirements); + let source_related_refs = collect_source_related_refs(&disc.all_files, &mut source_cache); + let (requirements_with_sidecar, requirements_referenced, req_dep_graph) = + enrich_requirements_from_sidecars(&disc.sidecars, &mut requirements); + + // Detect cycles in the requirement dependency graph using DFS. + let cycles = detect_requirement_cycles(&req_dep_graph); + + // ------------------------------------------------------------------ + // Pass 2 — Item / requirement checking (scoped to discovered sidecars) + // ------------------------------------------------------------------ + for entry in &disc.sidecars { + check_sidecar( + entry, + &mut diagnostics, + &mut source_cache, + &requirements, + root, + fix, + dry_run, + ); + } + + // ------------------------------------------------------------------ + // Post-pass diagnostics + // ------------------------------------------------------------------ + emit_untracked_requirements( + &requirements, + &requirements_with_sidecar, + &mut source_cache, + &mut diagnostics, + ); + emit_unreferenced_requirements( + &requirements_with_sidecar, + &requirements_referenced, + &source_related_refs, + &requirements, + &mut diagnostics, + ); + emit_cycle_diagnostics(&cycles, &requirements, &mut diagnostics); + + // Sort by file path, then by item/requirement name. 
+ diagnostics.sort_by(|a, b| { + a.file + .cmp(&b.file) + .then_with(|| a.item_or_req.cmp(&b.item_or_req)) + }); + + let exit_code = compute_exit_code(&diagnostics, flags); + (diagnostics, exit_code) +} + +// --------------------------------------------------------------------------- +// run_check helpers +// --------------------------------------------------------------------------- + +fn emit_ambiguous_sidecar_warnings( + root: &Path, + warnings: &[String], + diagnostics: &mut Vec, +) { + for w in warnings { diagnostics.push(Diagnostic { file: root.to_path_buf(), item_or_req: String::new(), @@ -65,20 +136,22 @@ pub fn run_check( span_start: None, annotation_line: None, requirement_text: None, + intent: None, }); } +} - // Shared source-content cache (avoids re-reading the same file). - let mut source_cache: HashMap = HashMap::new(); - - // ------------------------------------------------------------------ - // Pass 1 — Requirement discovery (project-global) - // ------------------------------------------------------------------ +/// Pass 1a: scan all source files for `@liyi:requirement` markers and build +/// the initial requirements map (hashes filled later). +fn discover_requirements( + all_files: &[PathBuf], + source_cache: &mut HashMap, + diagnostics: &mut Vec, +) -> HashMap { let mut requirements: HashMap = HashMap::new(); - for file_path in &disc.all_files { - let content = read_cached(&mut source_cache, file_path); - let content = match content { + for file_path in all_files { + let content = match read_cached(source_cache, file_path) { Some(c) => c, None => continue, }; @@ -87,7 +160,6 @@ pub fn run_check( for m in &markers { if let SourceMarker::Requirement { name, line } = m { if let Some(existing) = requirements.get(name) { - // Duplicate requirement name — emit error for both sites. 
diagnostics.push(Diagnostic { file: file_path.clone(), item_or_req: name.clone(), @@ -103,6 +175,7 @@ pub fn run_check( span_start: None, annotation_line: None, requirement_text: None, + intent: None, }); } else { requirements.insert( @@ -110,8 +183,8 @@ pub fn run_check( RequirementRecord { file: file_path.clone(), line: *line, - hash: None, // filled from sidecar below - computed_hash: None, // filled from source below + hash: None, + computed_hash: None, }, ); } @@ -119,11 +192,18 @@ pub fn run_check( } } - // Compute fresh hashes for requirement blocks from source markers so - // that downstream related-edge checks can detect cascading staleness - // in a single `liyi check` run, without needing `--fix` first. - for file_path in &disc.all_files { - let content = match read_cached(&mut source_cache, file_path) { + requirements +} + +/// Pass 1b: compute fresh hashes for requirement blocks from source so that +/// downstream related-edge checks detect cascading staleness in one pass. +fn compute_requirement_hashes( + all_files: &[PathBuf], + source_cache: &mut HashMap, + requirements: &mut HashMap, +) { + for file_path in all_files { + let content = match read_cached(source_cache, file_path) { Some(c) => c, None => continue, }; @@ -137,37 +217,45 @@ pub fn run_check( } } } +} - // Collect related markers (`\x40liyi:related`) from all source files so that - // source-level references count toward requirement coverage even - // when the sidecar `related` edge has not been written yet. - let mut source_related_refs: std::collections::HashSet = - std::collections::HashSet::new(); - for file_path in &disc.all_files { - let content = match read_cached(&mut source_cache, file_path) { +/// Pass 1c: collect `@liyi:related` marker names from all source files so that +/// source-level references count toward requirement coverage. 
+fn collect_source_related_refs( + all_files: &[PathBuf], + source_cache: &mut HashMap, +) -> HashSet { + let mut refs: HashSet = HashSet::new(); + for file_path in all_files { + let content = match read_cached(source_cache, file_path) { Some(c) => c, None => continue, }; for m in scan_markers(&content) { if let SourceMarker::Related { name, .. } = m { - source_related_refs.insert(name); + refs.insert(name); } } } + refs +} - // Enrich requirement records with hashes from any existing sidecars. - // Also track which requirements have a Spec::Requirement sidecar entry - // and which requirement names are referenced by any Spec::Item via `related`. - // Build a requirement dependency graph for cycle detection: - // edge A → B means: a sidecar defines requirement A AND contains an - // item that references requirement B. - let mut requirements_with_sidecar: std::collections::HashSet = - std::collections::HashSet::new(); - let mut requirements_referenced: std::collections::HashSet = - std::collections::HashSet::new(); +/// Pass 1d: enrich requirement records with hashes from existing sidecars, +/// track which requirements have sidecar entries and which are referenced +/// by items, and build the dependency graph for cycle detection. +fn enrich_requirements_from_sidecars( + sidecars: &[SidecarEntry], + requirements: &mut HashMap, +) -> ( + HashSet, + HashSet, + HashMap>, +) { + let mut requirements_with_sidecar: HashSet = HashSet::new(); + let mut requirements_referenced: HashSet = HashSet::new(); let mut req_dep_graph: HashMap> = HashMap::new(); - for entry in &disc.sidecars { + for entry in sidecars { let sc_content = match fs::read_to_string(&entry.sidecar_path) { Ok(c) => c, Err(_) => continue, @@ -177,8 +265,6 @@ pub fn run_check( Err(_) => continue, }; - // Collect requirements defined in this sidecar and requirements - // referenced by items in this sidecar. 
let mut defined_in_sidecar: Vec = Vec::new(); let mut referenced_in_sidecar: Vec = Vec::new(); @@ -204,7 +290,6 @@ pub fn run_check( } } - // Build graph edges: defined req → referenced reqs in same sidecar. for def in &defined_in_sidecar { for reff in &referenced_in_sidecar { if def != reff { @@ -217,36 +302,25 @@ pub fn run_check( } } - // Detect cycles in the requirement dependency graph using DFS. - let cycles = detect_requirement_cycles(&req_dep_graph); - - // ------------------------------------------------------------------ - // Pass 2 — Item / requirement checking (scoped to discovered sidecars) - // ------------------------------------------------------------------ - for entry in &disc.sidecars { - check_sidecar( - entry, - &mut diagnostics, - &mut source_cache, - &requirements, - root, - fix, - dry_run, - ); - } - - // ------------------------------------------------------------------ - // Post-pass diagnostics - // ------------------------------------------------------------------ + ( + requirements_with_sidecar, + requirements_referenced, + req_dep_graph, + ) +} - // Untracked: requirements found in source markers but absent from any sidecar. - for (name, rec) in &requirements { +/// Post-pass: emit `Untracked` for requirements found in source but absent +/// from any sidecar. +fn emit_untracked_requirements( + requirements: &HashMap, + requirements_with_sidecar: &HashSet, + source_cache: &mut HashMap, + diagnostics: &mut Vec, +) { + const MAX_REQ_TEXT_CHARS: usize = 4096; + for (name, rec) in requirements { if !requirements_with_sidecar.contains(name) { - // Extract requirement block text for prompt mode. - // Capped at MAX_REQ_TEXT_CHARS to bound untrusted content in - // --prompt output. See docs/prompt-mode-design.md §Security. 
- const MAX_REQ_TEXT_CHARS: usize = 4096; - let req_text = read_cached(&mut source_cache, &rec.file).and_then(|content| { + let req_text = read_cached(source_cache, &rec.file).and_then(|content| { let markers = scan_markers(&content); let spans = requirement_spans(&markers); spans.get(name).map(|span| { @@ -279,13 +353,22 @@ pub fn run_check( span_start: None, annotation_line: Some(rec.line), requirement_text: req_text, + intent: None, }); } } +} - // ReqNoRelated: requirements with sidecar entries that no item references - // (neither via sidecar `related` edges nor via source related markers). - for name in &requirements_with_sidecar { +/// Post-pass: emit `ReqNoRelated` for requirements with sidecar entries that +/// no item references. +fn emit_unreferenced_requirements( + requirements_with_sidecar: &HashSet, + requirements_referenced: &HashSet, + source_related_refs: &HashSet, + requirements: &HashMap, + diagnostics: &mut Vec, +) { + for name in requirements_with_sidecar { if !requirements_referenced.contains(name) && !source_related_refs.contains(name) && let Some(rec) = requirements.get(name) @@ -301,14 +384,20 @@ pub fn run_check( span_start: None, annotation_line: Some(rec.line), requirement_text: None, + intent: None, }); } } +} - // RequirementCycle: circular dependencies among requirements. - for cycle in &cycles { +/// Post-pass: emit `RequirementCycle` errors for circular dependencies. +fn emit_cycle_diagnostics( + cycles: &[Vec], + requirements: &HashMap, + diagnostics: &mut Vec, +) { + for cycle in cycles { let cycle_display = cycle.join(" → "); - // Report from the first requirement in the cycle. let first = &cycle[0]; let file = requirements .get(first) @@ -327,18 +416,9 @@ pub fn run_check( span_start: None, annotation_line: None, requirement_text: None, + intent: None, }); } - - // Sort by file path, then by item/requirement name. 
- diagnostics.sort_by(|a, b| { - a.file - .cmp(&b.file) - .then_with(|| a.item_or_req.cmp(&b.item_or_req)) - }); - - let exit_code = compute_exit_code(&diagnostics, flags); - (diagnostics, exit_code) } // --------------------------------------------------------------------------- @@ -381,6 +461,7 @@ fn check_sidecar( span_start: None, annotation_line: None, requirement_text: None, + intent: None, }); return; } @@ -400,6 +481,7 @@ fn check_sidecar( span_start: None, annotation_line: None, requirement_text: None, + intent: None, }); return; } @@ -420,6 +502,7 @@ fn check_sidecar( span_start: None, annotation_line: None, requirement_text: None, + intent: None, }); return; } @@ -443,6 +526,7 @@ fn check_sidecar( span_start: Some(item.source_span[0]), annotation_line: None, requirement_text: None, + intent: Some(item.intent.clone()), }); } if let Some(ref related) = item.related { @@ -463,6 +547,7 @@ fn check_sidecar( span_start: Some(item.source_span[0]), annotation_line: None, requirement_text: None, + intent: Some(item.intent.clone()), }); } } @@ -483,6 +568,7 @@ fn check_sidecar( span_start: Some(req.source_span[0]), annotation_line: None, requirement_text: None, + intent: None, }); } } @@ -502,6 +588,7 @@ fn check_sidecar( span_start: None, annotation_line: None, requirement_text: None, + intent: None, }); return; } @@ -522,755 +609,52 @@ fn check_sidecar( Spec::Item(item) => { let label = item.item.clone(); - // a. 
Hash the span - match hash_span(&source_content, item.source_span) { - Ok((computed_hash, _computed_anchor)) => { - let is_current = item.source_hash.as_ref() == Some(&computed_hash); - - if is_current { - // CURRENT - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::Current, - severity: Severity::Info, - message: "hash matches".into(), - fix_hint: None, - fixed: false, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } else if item.source_hash.is_none() { - // No hash yet — try tree_path recovery - // before falling back to hashing at the - // existing (possibly stale) span. - let lang = detect_language(&entry.source_path); - let recovered_span = if !item.tree_path.is_empty() - && let Some(l) = lang - && let Some(tp_span) = - resolve_tree_path(&source_content, &item.tree_path, l) - { - Some(tp_span) - } else { - None - }; - - if fix { - if let Some(tp_span) = recovered_span { - item.source_span = tp_span; - } - if let Ok((fix_hash, fix_anchor)) = - hash_span(&source_content, item.source_span) - { - item.source_hash = Some(fix_hash); - item.source_anchor = Some(fix_anchor); - } - if let Some(l) = lang { - let canonical = - compute_tree_path(&source_content, item.source_span, l); - if !canonical.is_empty() { - item.tree_path = canonical; - } - } - // Source changed since last review — clear - // reviewed so a human re-confirms intent. 
- item.reviewed = false; - modified = true; - } - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::Stale, - severity: Severity::Warning, - message: "missing source_hash".into(), - fix_hint: Some("liyi check --fix".into()), - fixed: fix, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } else { - // Hash mismatch — try tree_path first, then shift - let lang = detect_language(&entry.source_path); - - // Try tree_path-based recovery, tracking why it - // may not be available for diagnostic clarity. - let (tree_path_recovered, tree_path_note) = if item.tree_path.is_empty() - { - (None, "no tree_path set") - } else if lang.is_none() { - (None, "no grammar for source language") - } else { - let resolved = resolve_tree_path( - &source_content, - &item.tree_path, - lang.unwrap(), - ); - if resolved.is_some() { - (resolved, "") - } else { - (None, "tree_path resolution failed") - } - }; - - if let Some(new_span) = tree_path_recovered { - // tree_path resolved — check whether the - // content at new_span is unchanged (pure - // shift) or also changed (semantic drift). - let old_span = item.source_span; - let old_hash = item.source_hash.as_ref().unwrap(); - let content_unchanged = hash_span(&source_content, new_span) - .map(|(h, _)| h == *old_hash) - .unwrap_or(false); - - if new_span != old_span && content_unchanged { - // Pure shift — content intact, only - // position changed. Safe to auto-fix. 
- let delta = new_span[0] as i64 - old_span[0] as i64; - if fix { - item.source_span = new_span; - if let Ok((h, a)) = hash_span(&source_content, new_span) { - item.source_hash = Some(h); - item.source_anchor = Some(a); - } - if let Some(l) = lang { - let canonical = - compute_tree_path(&source_content, new_span, l); - if !canonical.is_empty() { - item.tree_path = canonical; - } - } - modified = true; - } - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::Shifted { - from: old_span, - to: new_span, - }, - severity: Severity::Warning, - message: format!( - "tree_path resolved, span shifted by {delta:+} → [{}, {}]", - new_span[0], new_span[1] - ), - fix_hint: Some("liyi check --fix".into()), - fixed: fix, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } else if let Some(l) = lang - && let Some(sibling) = resolve_tree_path_sibling_scan( - &source_content, - &item.tree_path, - l, - old_hash, - ) - { - // Hash-based sibling scan found the - // element at a different array index — - // treat as a pure shift. 
- let delta = sibling.span[0] as i64 - old_span[0] as i64; - if fix { - item.source_span = sibling.span; - item.tree_path = sibling.updated_tree_path; - if let Ok((h, a)) = hash_span(&source_content, sibling.span) - { - item.source_hash = Some(h); - item.source_anchor = Some(a); - } - modified = true; - } - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::Shifted { - from: old_span, - to: sibling.span, - }, - severity: Severity::Warning, - message: format!( - "sibling scan matched, span shifted by {delta:+} → [{}, {}]", - sibling.span[0], sibling.span[1] - ), - fix_hint: Some("liyi check --fix".into()), - fixed: fix, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } else { - // For reviewed specs (sidecar or - // @liyi:intent in source): update span - // but do NOT rehash — the stale hash - // signals that intent review is needed. - // For unreviewed specs: rehash is safe - // since no human judgment is at stake. - let effectively_reviewed = item.reviewed - || source_markers.iter().any(|m| { - matches!(m, SourceMarker::Intent { line, .. } if *line >= new_span[0] && *line <= new_span[1]) - }); - if fix { - if new_span != old_span { - item.source_span = new_span; - if let Some(l) = lang { - let canonical = - compute_tree_path(&source_content, new_span, l); - if !canonical.is_empty() { - item.tree_path = canonical; - } - } - } - if !effectively_reviewed { - // Unreviewed: rehash at - // current location. 
- if let Ok((h, a)) = hash_span(&source_content, new_span) - { - item.source_hash = Some(h); - item.source_anchor = Some(a); - } - } - modified = true; - } - if effectively_reviewed { - let msg = if new_span != old_span { - format!( - "source changed and shifted → [{}, {}] (tree_path resolved, not auto-rehashed — reviewed)", - new_span[0], new_span[1] - ) - } else { - "source changed at tree_path location (not auto-rehashed — reviewed)".into() - }; - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::Stale, - severity: Severity::Warning, - message: msg, - fix_hint: None, - fixed: false, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } else { - let msg = if new_span != old_span { - format!( - "source changed and shifted → [{}, {}] (tree_path resolved, auto-rehashed — unreviewed)", - new_span[0], new_span[1] - ) - } else { - "source changed at tree_path location (auto-rehashed — unreviewed)".into() - }; - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::Stale, - severity: Severity::Warning, - message: msg, - fix_hint: Some("liyi check --fix".into()), - fixed: fix, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } - } - } else if let Some(l) = lang - && let Some(sibling) = resolve_tree_path_sibling_scan( - &source_content, - &item.tree_path, - l, - item.source_hash.as_ref().unwrap(), - ) - { - // tree_path resolution failed (e.g., index - // out of bounds after array shrank) but - // sibling scan found the element elsewhere. 
- let old_span = item.source_span; - let delta = sibling.span[0] as i64 - old_span[0] as i64; - if fix { - item.source_span = sibling.span; - item.tree_path = sibling.updated_tree_path; - if let Ok((h, a)) = hash_span(&source_content, sibling.span) { - item.source_hash = Some(h); - item.source_anchor = Some(a); - } - modified = true; - } - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::Shifted { - from: old_span, - to: sibling.span, - }, - severity: Severity::Warning, - message: format!( - "sibling scan matched, span shifted by {delta:+} → [{}, {}]", - sibling.span[0], sibling.span[1] - ), - fix_hint: Some("liyi check --fix".into()), - fixed: fix, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } else { - // Fallback to shift heuristic - let expected = item.source_hash.as_ref().unwrap(); - match detect_shift(&source_content, item.source_span, expected) { - ShiftResult::Shifted { delta, new_span } => { - let old_span = item.source_span; - if fix { - item.source_span = new_span; - // Recompute hash/anchor at new span - if let Ok((h, a)) = hash_span(&source_content, new_span) - { - item.source_hash = Some(h); - item.source_anchor = Some(a); - } - let lang = detect_language(&entry.source_path); - if let Some(l) = lang { - let canonical = - compute_tree_path(&source_content, new_span, l); - if !canonical.is_empty() { - item.tree_path = canonical; - } - } - modified = true; - } - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::Shifted { - from: old_span, - to: new_span, - }, - severity: Severity::Warning, - message: format!( - "span shifted by {delta:+} → [{}, {}]", - new_span[0], new_span[1] - ), - fix_hint: Some("liyi check --fix".into()), - fixed: fix, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } - ShiftResult::Stale => 
{ - let detail = if tree_path_note.is_empty() { - "hash mismatch, could not relocate".to_string() - } else { - format!( - "hash mismatch, could not relocate ({tree_path_note})" - ) - }; - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::Stale, - severity: Severity::Warning, - message: detail, - fix_hint: None, - fixed: false, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } - } - } - } - } - Err(SpanError::PastEof { end, total }) => { - // Try tree-path recovery before giving up - let lang = detect_language(&entry.source_path); - let (recovered, tp_note) = if item.tree_path.is_empty() { - (None, "no tree_path set") - } else if lang.is_none() { - (None, "no grammar for source language") - } else { - let r = - resolve_tree_path(&source_content, &item.tree_path, lang.unwrap()); - if r.is_some() { - (r, "") - } else { - (None, "tree_path resolution failed") - } - }; - - if let Some(new_span) = recovered { - // PastEof means old hash is unreliable (can't - // hash a span past the file end). Check - // whether the content at the resolved span - // matches the *recorded* hash to distinguish - // pure shift from semantic drift. 
- let old_span = item.source_span; - let content_unchanged = item - .source_hash - .as_ref() - .and_then(|old_h| { - hash_span(&source_content, new_span) - .ok() - .map(|(h, _)| h == *old_h) - }) - .unwrap_or(false); - - if content_unchanged { - let delta = new_span[0] as i64 - old_span[0] as i64; - if fix { - item.source_span = new_span; - if let Ok((h, a)) = hash_span(&source_content, new_span) { - item.source_hash = Some(h); - item.source_anchor = Some(a); - } - if let Some(l) = lang { - let canonical = - compute_tree_path(&source_content, new_span, l); - if !canonical.is_empty() { - item.tree_path = canonical; - } - } - modified = true; - } - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::Shifted { - from: old_span, - to: new_span, - }, - severity: Severity::Warning, - message: format!( - "span past EOF (end {end} > {total}), tree_path resolved, shifted by {delta:+} → [{}, {}]", - new_span[0], new_span[1] - ), - fix_hint: Some("liyi check --fix".into()), - fixed: fix, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } else { - // Content also changed — relocate span. - // For reviewed specs: leave hash stale. - // For unreviewed: rehash at new location. - let effectively_reviewed = item.reviewed - || source_markers.iter().any(|m| { - matches!(m, SourceMarker::Intent { line, .. 
} if *line >= new_span[0] && *line <= new_span[1]) - }); - if fix { - item.source_span = new_span; - if let Some(l) = lang { - let canonical = - compute_tree_path(&source_content, new_span, l); - if !canonical.is_empty() { - item.tree_path = canonical; - } - } - if !effectively_reviewed - && let Ok((h, a)) = hash_span(&source_content, new_span) - { - item.source_hash = Some(h); - item.source_anchor = Some(a); - } - modified = true; - } - if effectively_reviewed { - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::Stale, - severity: Severity::Warning, - message: format!( - "span past EOF (end {end} > {total}), tree_path resolved to [{}, {}] but content also changed (not auto-rehashed — reviewed)", - new_span[0], new_span[1] - ), - fix_hint: None, - fixed: false, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } else { - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::Stale, - severity: Severity::Warning, - message: format!( - "span past EOF (end {end} > {total}), tree_path resolved to [{}, {}] (auto-rehashed — unreviewed)", - new_span[0], new_span[1] - ), - fix_hint: Some("liyi check --fix".into()), - fixed: fix, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } - } - } else { - let detail = if tp_note.is_empty() { - format!("span end {end} exceeds file length {total}") - } else { - format!("span end {end} exceeds file length {total} ({tp_note})") - }; - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::SpanPastEof { - span: item.source_span, - file_lines: total, - }, - severity: Severity::Error, - message: detail, - fix_hint: Some("liyi check --fix".into()), - fixed: false, - span_start: Some(item.source_span[0]), - annotation_line: None, - 
requirement_text: None, - }); - } - } - Err(SpanError::Inverted { .. } | SpanError::Empty) => { - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::InvalidSpan { - span: item.source_span, - }, - severity: Severity::Error, - message: format!( - "invalid span [{}, {}]", - item.source_span[0], item.source_span[1] - ), - fix_hint: None, - fixed: false, - span_start: None, - annotation_line: None, - requirement_text: None, - }); - } + // a. Hash the span (with tree_path recovery, shift detection) + if check_item_hash( + &entry.source_path, + &label, + item, + &source_content, + &source_markers, + fix, + diagnostics, + ) { + modified = true; } // b. Review status - let reviewed_in_sidecar = item.reviewed; - let has_intent_marker = source_markers.iter().any(|m| { - if let SourceMarker::Intent { line, .. } = m { - *line >= item.source_span[0] && *line <= item.source_span[1] - } else { - false - } - }); - if !reviewed_in_sidecar && !has_intent_marker { - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::Unreviewed, - severity: Severity::Warning, - message: "not reviewed".into(), - fix_hint: None, - fixed: false, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } - - // c. 
Trivial / ignore markers within or immediately before span - let span_start = item.source_span[0]; - let span_end = item.source_span[1]; - for m in &source_markers { - match m { - SourceMarker::Trivial { line } - if *line >= span_start.saturating_sub(1) && *line <= span_end => - { - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::Trivial, - severity: Severity::Info, - message: "marked \x40liyi:trivial".into(), - fix_hint: None, - fixed: false, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } - SourceMarker::Ignore { line, .. } - if *line >= span_start.saturating_sub(1) && *line <= span_end => - { - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::Ignored, - severity: Severity::Info, - message: "marked \x40liyi:ignore".into(), - fix_hint: None, - fixed: false, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } - _ => {} - } - } - - // c2. 
Sidecar "=trivial" sentinel - if item.intent == "=trivial" { - // Conflict: @liyi:nontrivial in source vs =trivial in sidecar - let has_nontrivial = source_markers.iter().any(|m| { - matches!(m, SourceMarker::Nontrivial { line } - if *line >= span_start.saturating_sub(1) && *line <= span_end) - }); - if has_nontrivial { - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::ConflictingTriviality, - severity: Severity::Error, - message: "\x40liyi:nontrivial in source conflicts with \"=trivial\" in sidecar".into(), - fix_hint: None, - fixed: false, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } else { - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::Trivial, - severity: Severity::Info, - message: "intent \"=trivial\"".into(), - fix_hint: None, - fixed: false, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } - } - - // d. Related requirements - if let Some(ref related) = item.related { - for (req_name, stored_hash) in related { - match requirements.get(req_name) { - None => { - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::UnknownRequirement { - name: req_name.clone(), - }, - severity: Severity::Error, - message: format!( - "related requirement \"{req_name}\" not found" - ), - fix_hint: None, - fixed: false, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } - Some(rec) => { - // Compare the item's stored related hash - // against the requirement's *current* source - // hash (computed_hash). This surfaces - // cascading staleness in one pass — even - // before `--fix` updates the requirement's - // sidecar hash. Fall back to the sidecar - // hash when computed_hash is unavailable. 
- let current_req_hash = - rec.computed_hash.as_ref().or(rec.hash.as_ref()); - if let (Some(sh), Some(rh)) = - (stored_hash.as_ref(), current_req_hash) - && sh != rh - { - // @liyi:related reqchanged-orthogonal-to-reviewed - // @liyi:related reqchanged-demands-human-judgment - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::ReqChanged { - requirement: req_name.clone(), - }, - severity: Severity::Warning, - message: format!("requirement \"{req_name}\" has changed"), - fix_hint: None, - fixed: false, - span_start: Some(item.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } - } - } - } - } - - // e. Source related markers missing from sidecar - let span_start = item.source_span[0]; - let span_end = item.source_span[1]; - for m in &source_markers { - if let SourceMarker::Related { name, line } = m { - // Include doc-comment lines immediately before the span - if *line >= span_start.saturating_sub(5) && *line <= span_end { - let has_edge = - item.related.as_ref().is_some_and(|r| r.contains_key(name)); - if !has_edge { - if fix { - let related = item.related.get_or_insert_with(BTreeMap::new); - let hash_val = - requirements.get(name).and_then(|rec| rec.hash.clone()); - related.insert(name.clone(), hash_val); - modified = true; - } - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label.clone(), - kind: DiagnosticKind::MissingRelatedEdge { - name: name.clone(), - }, - severity: Severity::Error, - message: format!( - "source has \x40liyi:related \"{name}\" but sidecar is missing the related edge" - ), - fix_hint: None, - fixed: fix, - span_start: Some(item.source_span[0]), - annotation_line: Some(*line), - requirement_text: None, - }); - } - } - } - } - - // f. 
Fill in null hashes on existing related edges - if fix && let Some(ref mut related) = item.related { - for (req_name, hash_val) in related.iter_mut() { - if hash_val.is_none() - && let Some(rec) = requirements.get(req_name) - && let Some(ref h) = rec.hash - { - *hash_val = Some(h.clone()); - modified = true; - } - } + check_review_status( + &entry.source_path, + &label, + item, + &source_markers, + diagnostics, + ); + + // c. Trivial / ignore markers and sidecar =trivial sentinel + check_trivial_ignore( + &entry.source_path, + &label, + item, + &source_markers, + diagnostics, + ); + + // d. Related requirements + source related sync + null hash fill + let related_modified = check_related_edges( + &entry.source_path, + &label, + item, + &source_markers, + requirements, + fix, + diagnostics, + ); + if related_modified { + modified = true; } } Spec::Requirement(req) => { - let label = req.requirement.clone(); - // Try marker-based span recovery first: if the file has // @liyi:end-requirement markers, use those for span. 
if let Some(&marker_span) = marker_span_map.get(&req.requirement) @@ -1282,181 +666,18 @@ fn check_sidecar( } } - match hash_span(&source_content, req.source_span) { - Ok((computed_hash, computed_anchor)) => { - let is_current = req.source_hash.as_ref() == Some(&computed_hash); - - if is_current { - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label, - kind: DiagnosticKind::Current, - severity: Severity::Info, - message: "requirement hash matches".into(), - fix_hint: None, - fixed: false, - span_start: Some(req.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } else { - if fix { - req.source_hash = Some(computed_hash); - req.source_anchor = Some(computed_anchor); - modified = true; - } - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label, - kind: DiagnosticKind::Stale, - severity: Severity::Warning, - message: "requirement hash mismatch or missing".into(), - fix_hint: None, - fixed: fix, - span_start: Some(req.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } - } - Err(SpanError::PastEof { end, total }) => { - // Try tree-path recovery before giving up - let lang = detect_language(&entry.source_path); - let (recovered, tp_note) = if req.tree_path.is_empty() { - (None, "no tree_path set") - } else if lang.is_none() { - (None, "no grammar for source language") - } else { - let r = - resolve_tree_path(&source_content, &req.tree_path, lang.unwrap()); - if r.is_some() { - (r, "") - } else { - (None, "tree_path resolution failed") - } - }; - - if let Some(new_span) = recovered { - let old_span = req.source_span; - let content_unchanged = req - .source_hash - .as_ref() - .and_then(|old_h| { - hash_span(&source_content, new_span) - .ok() - .map(|(h, _)| h == *old_h) - }) - .unwrap_or(false); - - if content_unchanged { - let delta = new_span[0] as i64 - old_span[0] as i64; - if fix { - req.source_span = new_span; - if let Ok((h, a)) = 
hash_span(&source_content, new_span) { - req.source_hash = Some(h); - req.source_anchor = Some(a); - } - if let Some(l) = lang { - let canonical = - compute_tree_path(&source_content, new_span, l); - if !canonical.is_empty() { - req.tree_path = canonical; - } - } - modified = true; - } - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label, - kind: DiagnosticKind::Shifted { - from: old_span, - to: new_span, - }, - severity: Severity::Warning, - message: format!( - "span past EOF (end {end} > {total}), tree_path resolved, shifted by {delta:+} → [{}, {}]", - new_span[0], new_span[1] - ), - fix_hint: Some("liyi check --fix".into()), - fixed: fix, - span_start: Some(req.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } else { - if fix { - req.source_span = new_span; - if let Some(l) = lang { - let canonical = - compute_tree_path(&source_content, new_span, l); - if !canonical.is_empty() { - req.tree_path = canonical; - } - } - modified = true; - } - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label, - kind: DiagnosticKind::Stale, - severity: Severity::Warning, - message: format!( - "span past EOF (end {end} > {total}), tree_path resolved to [{}, {}] but content also changed (not auto-rehashed)", - new_span[0], new_span[1] - ), - fix_hint: None, - fixed: false, - span_start: Some(req.source_span[0]), - annotation_line: None, - requirement_text: None, - }); - } - } else { - let detail = if tp_note.is_empty() { - format!("span end {end} exceeds file length {total}") - } else { - format!("span end {end} exceeds file length {total} ({tp_note})") - }; - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label, - kind: DiagnosticKind::SpanPastEof { - span: req.source_span, - file_lines: total, - }, - severity: Severity::Error, - message: detail, - fix_hint: Some("liyi check --fix".into()), - fixed: false, - span_start: Some(req.source_span[0]), - 
annotation_line: None, - requirement_text: None, - }); - } - } - Err(SpanError::Inverted { .. } | SpanError::Empty) => { - diagnostics.push(Diagnostic { - file: entry.source_path.clone(), - item_or_req: label, - kind: DiagnosticKind::InvalidSpan { - span: req.source_span, - }, - severity: Severity::Error, - message: format!( - "invalid span [{}, {}]", - req.source_span[0], req.source_span[1] - ), - fix_hint: None, - fixed: false, - span_start: None, - annotation_line: None, - requirement_text: None, - }); - } - } - } - } - } + if check_requirement_hash( + &entry.source_path, + req, + &source_content, + fix, + diagnostics, + ) { + modified = true; + } + } + } + } // Strip _hints when --fix is active (hints are transient scaffold aids). // @liyi:related hints-are-ephemeral @@ -1478,6 +699,1057 @@ fn check_sidecar( } } +// --------------------------------------------------------------------------- +// check_sidecar helpers — requirement hash check +// --------------------------------------------------------------------------- + +/// Check a requirement spec's hash freshness with tree_path recovery on +/// PastEof. Returns true if `--fix` modified the spec. 
+fn check_requirement_hash( + file: &Path, + req: &mut RequirementSpec, + source_content: &str, + fix: bool, + diagnostics: &mut Vec, +) -> bool { + let mut modified = false; + let label = req.requirement.clone(); + + match hash_span(source_content, req.source_span) { + Ok((computed_hash, computed_anchor)) => { + let is_current = req.source_hash.as_ref() == Some(&computed_hash); + + if is_current { + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label, + kind: DiagnosticKind::Current, + severity: Severity::Info, + message: "requirement hash matches".into(), + fix_hint: None, + fixed: false, + span_start: Some(req.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: None, + }); + } else { + if fix { + req.source_hash = Some(computed_hash); + req.source_anchor = Some(computed_anchor); + modified = true; + } + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label, + kind: DiagnosticKind::Stale, + severity: Severity::Warning, + message: "requirement hash mismatch or missing".into(), + fix_hint: None, + fixed: fix, + span_start: Some(req.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: None, + }); + } + } + Err(SpanError::PastEof { end, total }) => { + let lang = detect_language(file); + let (recovered, tp_note) = if req.tree_path.is_empty() { + (None, "no tree_path set") + } else if lang.is_none() { + (None, "no grammar for source language") + } else { + let r = resolve_tree_path(source_content, &req.tree_path, lang.unwrap()); + if r.is_some() { + (r, "") + } else { + (None, "tree_path resolution failed") + } + }; + + if let Some(new_span) = recovered { + let old_span = req.source_span; + let content_unchanged = req + .source_hash + .as_ref() + .and_then(|old_h| { + hash_span(source_content, new_span) + .ok() + .map(|(h, _)| h == *old_h) + }) + .unwrap_or(false); + + if content_unchanged { + let delta = new_span[0] as i64 - old_span[0] as i64; + if fix { + 
req.source_span = new_span; + if let Ok((h, a)) = hash_span(source_content, new_span) { + req.source_hash = Some(h); + req.source_anchor = Some(a); + } + if let Some(l) = lang { + let canonical = compute_tree_path(source_content, new_span, l); + if !canonical.is_empty() { + req.tree_path = canonical; + } + } + modified = true; + } + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label, + kind: DiagnosticKind::Shifted { + from: old_span, + to: new_span, + }, + severity: Severity::Warning, + message: format!( + "span past EOF (end {end} > {total}), tree_path resolved, shifted by {delta:+} → [{}, {}]", + new_span[0], new_span[1] + ), + fix_hint: Some("liyi check --fix".into()), + fixed: fix, + span_start: Some(req.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: None, + }); + } else { + if fix { + req.source_span = new_span; + if let Some(l) = lang { + let canonical = compute_tree_path(source_content, new_span, l); + if !canonical.is_empty() { + req.tree_path = canonical; + } + } + modified = true; + } + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label, + kind: DiagnosticKind::Stale, + severity: Severity::Warning, + message: format!( + "span past EOF (end {end} > {total}), tree_path resolved to [{}, {}] but content also changed (not auto-rehashed)", + new_span[0], new_span[1] + ), + fix_hint: None, + fixed: false, + span_start: Some(req.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: None, + }); + } + } else { + let detail = if tp_note.is_empty() { + format!("span end {end} exceeds file length {total}") + } else { + format!("span end {end} exceeds file length {total} ({tp_note})") + }; + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label, + kind: DiagnosticKind::SpanPastEof { + span: req.source_span, + file_lines: total, + }, + severity: Severity::Error, + message: detail, + fix_hint: Some("liyi check --fix".into()), + fixed: 
false, + span_start: Some(req.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: None, + }); + } + } + Err(SpanError::Inverted { .. } | SpanError::Empty) => { + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label, + kind: DiagnosticKind::InvalidSpan { + span: req.source_span, + }, + severity: Severity::Error, + message: format!( + "invalid span [{}, {}]", + req.source_span[0], req.source_span[1] + ), + fix_hint: None, + fixed: false, + span_start: None, + annotation_line: None, + requirement_text: None, + intent: None, + }); + } + } + + modified +} + +// --------------------------------------------------------------------------- +// check_sidecar helpers — item hash checking +// --------------------------------------------------------------------------- + +/// Hash-check an item spec: verify `source_hash`, attempt tree_path recovery, +/// sibling scan, and shift heuristic when stale. Returns `true` when the spec +/// was modified (and the sidecar must be rewritten). 
+#[allow(clippy::too_many_arguments)] +fn check_item_hash( + file: &Path, + label: &str, + item: &mut ItemSpec, + source_content: &str, + source_markers: &[SourceMarker], + fix: bool, + diagnostics: &mut Vec, +) -> bool { + let mut modified = false; + + match hash_span(source_content, item.source_span) { + Ok((computed_hash, _computed_anchor)) => { + let is_current = item.source_hash.as_ref() == Some(&computed_hash); + + if is_current { + // CURRENT + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::Current, + severity: Severity::Info, + message: "hash matches".into(), + fix_hint: None, + fixed: false, + span_start: Some(item.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } else if item.source_hash.is_none() { + modified |= + handle_missing_hash(file, label, item, source_content, fix, diagnostics); + } else { + modified |= handle_hash_mismatch( + file, + label, + item, + source_content, + source_markers, + fix, + diagnostics, + ); + } + } + Err(SpanError::PastEof { end, total }) => { + modified |= handle_past_eof( + file, + label, + item, + source_content, + source_markers, + end, + total, + fix, + diagnostics, + ); + } + Err(SpanError::Inverted { .. } | SpanError::Empty) => { + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::InvalidSpan { + span: item.source_span, + }, + severity: Severity::Error, + message: format!( + "invalid span [{}, {}]", + item.source_span[0], item.source_span[1] + ), + fix_hint: None, + fixed: false, + span_start: None, + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } + } + + modified +} + +/// Handle an item with no `source_hash` — try tree_path recovery, then hash. 
+fn handle_missing_hash( + file: &Path, + label: &str, + item: &mut ItemSpec, + source_content: &str, + fix: bool, + diagnostics: &mut Vec, +) -> bool { + let mut modified = false; + let lang = detect_language(file); + let recovered_span = if !item.tree_path.is_empty() + && let Some(l) = lang + && let Some(tp_span) = resolve_tree_path(source_content, &item.tree_path, l) + { + Some(tp_span) + } else { + None + }; + + if fix { + if let Some(tp_span) = recovered_span { + item.source_span = tp_span; + } + if let Ok((fix_hash, fix_anchor)) = hash_span(source_content, item.source_span) { + item.source_hash = Some(fix_hash); + item.source_anchor = Some(fix_anchor); + } + if let Some(l) = lang { + let canonical = compute_tree_path(source_content, item.source_span, l); + if !canonical.is_empty() { + item.tree_path = canonical; + } + } + // Source changed since last review — clear reviewed so a human re-confirms intent. + item.reviewed = false; + modified = true; + } + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::Stale, + severity: Severity::Warning, + message: "missing source_hash".into(), + fix_hint: Some("liyi check --fix".into()), + fixed: fix, + span_start: Some(item.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + modified +} + +/// Handle a hash mismatch: try tree_path → sibling scan → shift heuristic. +#[allow(clippy::too_many_arguments)] +fn handle_hash_mismatch( + file: &Path, + label: &str, + item: &mut ItemSpec, + source_content: &str, + source_markers: &[SourceMarker], + fix: bool, + diagnostics: &mut Vec, +) -> bool { + let mut modified = false; + let lang = detect_language(file); + + // Try tree_path-based recovery, tracking why it may not be available. 
+ let (tree_path_recovered, tree_path_note) = if item.tree_path.is_empty() { + (None, "no tree_path set") + } else if lang.is_none() { + (None, "no grammar for source language") + } else { + let resolved = resolve_tree_path(source_content, &item.tree_path, lang.unwrap()); + if resolved.is_some() { + (resolved, "") + } else { + (None, "tree_path resolution failed") + } + }; + + if let Some(new_span) = tree_path_recovered { + modified |= handle_tree_path_resolved( + file, + label, + item, + source_content, + source_markers, + new_span, + lang, + fix, + diagnostics, + ); + } else if let Some(l) = lang + && let Some(sibling) = resolve_tree_path_sibling_scan( + source_content, + &item.tree_path, + l, + item.source_hash.as_ref().unwrap(), + ) + { + // tree_path resolution failed but sibling scan found the element. + let old_span = item.source_span; + let delta = sibling.span[0] as i64 - old_span[0] as i64; + if fix { + item.source_span = sibling.span; + item.tree_path = sibling.updated_tree_path; + if let Ok((h, a)) = hash_span(source_content, sibling.span) { + item.source_hash = Some(h); + item.source_anchor = Some(a); + } + modified = true; + } + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::Shifted { + from: old_span, + to: sibling.span, + }, + severity: Severity::Warning, + message: format!( + "sibling scan matched, span shifted by {delta:+} → [{}, {}]", + sibling.span[0], sibling.span[1] + ), + fix_hint: Some("liyi check --fix".into()), + fixed: fix, + span_start: Some(item.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } else { + // Fallback to shift heuristic + let expected = item.source_hash.as_ref().unwrap(); + match detect_shift(source_content, item.source_span, expected) { + ShiftResult::Shifted { delta, new_span } => { + let old_span = item.source_span; + if fix { + item.source_span = new_span; + if let Ok((h, a)) = 
hash_span(source_content, new_span) { + item.source_hash = Some(h); + item.source_anchor = Some(a); + } + if let Some(l) = lang { + let canonical = compute_tree_path(source_content, new_span, l); + if !canonical.is_empty() { + item.tree_path = canonical; + } + } + modified = true; + } + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::Shifted { + from: old_span, + to: new_span, + }, + severity: Severity::Warning, + message: format!( + "span shifted by {delta:+} → [{}, {}]", + new_span[0], new_span[1] + ), + fix_hint: Some("liyi check --fix".into()), + fixed: fix, + span_start: Some(item.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } + ShiftResult::Stale => { + let detail = if tree_path_note.is_empty() { + "hash mismatch, could not relocate".to_string() + } else { + format!("hash mismatch, could not relocate ({tree_path_note})") + }; + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::Stale, + severity: Severity::Warning, + message: detail, + fix_hint: None, + fixed: false, + span_start: Some(item.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } + } + } + modified +} + +/// Handle tree_path-resolved span: pure shift vs content drift vs sibling. 
+#[allow(clippy::too_many_arguments)] +fn handle_tree_path_resolved( + file: &Path, + label: &str, + item: &mut ItemSpec, + source_content: &str, + source_markers: &[SourceMarker], + new_span: [usize; 2], + lang: Option, + fix: bool, + diagnostics: &mut Vec, +) -> bool { + let mut modified = false; + let old_span = item.source_span; + let old_hash = item.source_hash.as_ref().unwrap(); + let content_unchanged = hash_span(source_content, new_span) + .map(|(h, _)| h == *old_hash) + .unwrap_or(false); + + if new_span != old_span && content_unchanged { + // Pure shift — content intact, only position changed. + let delta = new_span[0] as i64 - old_span[0] as i64; + if fix { + item.source_span = new_span; + if let Ok((h, a)) = hash_span(source_content, new_span) { + item.source_hash = Some(h); + item.source_anchor = Some(a); + } + if let Some(l) = lang { + let canonical = compute_tree_path(source_content, new_span, l); + if !canonical.is_empty() { + item.tree_path = canonical; + } + } + modified = true; + } + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::Shifted { + from: old_span, + to: new_span, + }, + severity: Severity::Warning, + message: format!( + "tree_path resolved, span shifted by {delta:+} → [{}, {}]", + new_span[0], new_span[1] + ), + fix_hint: Some("liyi check --fix".into()), + fixed: fix, + span_start: Some(item.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } else if let Some(l) = lang + && let Some(sibling) = + resolve_tree_path_sibling_scan(source_content, &item.tree_path, l, old_hash) + { + // Hash-based sibling scan found the element at a different array index. 
+ let delta = sibling.span[0] as i64 - old_span[0] as i64; + if fix { + item.source_span = sibling.span; + item.tree_path = sibling.updated_tree_path; + if let Ok((h, a)) = hash_span(source_content, sibling.span) { + item.source_hash = Some(h); + item.source_anchor = Some(a); + } + modified = true; + } + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::Shifted { + from: old_span, + to: sibling.span, + }, + severity: Severity::Warning, + message: format!( + "sibling scan matched, span shifted by {delta:+} → [{}, {}]", + sibling.span[0], sibling.span[1] + ), + fix_hint: Some("liyi check --fix".into()), + fixed: fix, + span_start: Some(item.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } else { + // Content at tree_path location has changed. + // For reviewed specs (`@liyi:intent` in source): update span but do NOT + // rehash — the stale hash signals that intent review is needed. + // For unreviewed specs: rehash is safe. + let effectively_reviewed = item.reviewed + || source_markers.iter().any(|m| { + matches!(m, SourceMarker::Intent { line, .. 
} if *line >= new_span[0] && *line <= new_span[1]) + }); + if fix { + if new_span != old_span { + item.source_span = new_span; + if let Some(l) = lang { + let canonical = compute_tree_path(source_content, new_span, l); + if !canonical.is_empty() { + item.tree_path = canonical; + } + } + } + if !effectively_reviewed && let Ok((h, a)) = hash_span(source_content, new_span) { + item.source_hash = Some(h); + item.source_anchor = Some(a); + } + modified = true; + } + if effectively_reviewed { + let msg = if new_span != old_span { + format!( + "source changed and shifted → [{}, {}] (tree_path resolved, not auto-rehashed — reviewed)", + new_span[0], new_span[1] + ) + } else { + "source changed at tree_path location (not auto-rehashed — reviewed)".into() + }; + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::Stale, + severity: Severity::Warning, + message: msg, + fix_hint: None, + fixed: false, + span_start: Some(item.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } else { + let msg = if new_span != old_span { + format!( + "source changed and shifted → [{}, {}] (tree_path resolved, auto-rehashed — unreviewed)", + new_span[0], new_span[1] + ) + } else { + "source changed at tree_path location (auto-rehashed — unreviewed)".into() + }; + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::Stale, + severity: Severity::Warning, + message: msg, + fix_hint: Some("liyi check --fix".into()), + fixed: fix, + span_start: Some(item.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } + } + modified +} + +/// Handle `SpanError::PastEof`: try tree_path recovery, distinguish pure shift +/// from content drift, emit appropriate diagnostics. 
+#[allow(clippy::too_many_arguments)] +fn handle_past_eof( + file: &Path, + label: &str, + item: &mut ItemSpec, + source_content: &str, + source_markers: &[SourceMarker], + end: usize, + total: usize, + fix: bool, + diagnostics: &mut Vec, +) -> bool { + let mut modified = false; + let lang = detect_language(file); + let (recovered, tp_note) = if item.tree_path.is_empty() { + (None, "no tree_path set") + } else if lang.is_none() { + (None, "no grammar for source language") + } else { + let r = resolve_tree_path(source_content, &item.tree_path, lang.unwrap()); + if r.is_some() { + (r, "") + } else { + (None, "tree_path resolution failed") + } + }; + + if let Some(new_span) = recovered { + let old_span = item.source_span; + let content_unchanged = item + .source_hash + .as_ref() + .and_then(|old_h| { + hash_span(source_content, new_span) + .ok() + .map(|(h, _)| h == *old_h) + }) + .unwrap_or(false); + + if content_unchanged { + let delta = new_span[0] as i64 - old_span[0] as i64; + if fix { + item.source_span = new_span; + if let Ok((h, a)) = hash_span(source_content, new_span) { + item.source_hash = Some(h); + item.source_anchor = Some(a); + } + if let Some(l) = lang { + let canonical = compute_tree_path(source_content, new_span, l); + if !canonical.is_empty() { + item.tree_path = canonical; + } + } + modified = true; + } + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::Shifted { + from: old_span, + to: new_span, + }, + severity: Severity::Warning, + message: format!( + "span past EOF (end {end} > {total}), tree_path resolved, shifted by {delta:+} → [{}, {}]", + new_span[0], new_span[1] + ), + fix_hint: Some("liyi check --fix".into()), + fixed: fix, + span_start: Some(item.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } else { + // Content also changed — relocate span. 
+ let effectively_reviewed = item.reviewed + || source_markers.iter().any(|m| { + matches!(m, SourceMarker::Intent { line, .. } if *line >= new_span[0] && *line <= new_span[1]) + }); + if fix { + item.source_span = new_span; + if let Some(l) = lang { + let canonical = compute_tree_path(source_content, new_span, l); + if !canonical.is_empty() { + item.tree_path = canonical; + } + } + if !effectively_reviewed && let Ok((h, a)) = hash_span(source_content, new_span) { + item.source_hash = Some(h); + item.source_anchor = Some(a); + } + modified = true; + } + if effectively_reviewed { + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::Stale, + severity: Severity::Warning, + message: format!( + "span past EOF (end {end} > {total}), tree_path resolved to [{}, {}] but content also changed (not auto-rehashed — reviewed)", + new_span[0], new_span[1] + ), + fix_hint: None, + fixed: false, + span_start: Some(item.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } else { + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::Stale, + severity: Severity::Warning, + message: format!( + "span past EOF (end {end} > {total}), tree_path resolved to [{}, {}] (auto-rehashed — unreviewed)", + new_span[0], new_span[1] + ), + fix_hint: Some("liyi check --fix".into()), + fixed: fix, + span_start: Some(item.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } + } + } else { + let detail = if tp_note.is_empty() { + format!("span end {end} exceeds file length {total}") + } else { + format!("span end {end} exceeds file length {total} ({tp_note})") + }; + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::SpanPastEof { + span: item.source_span, + file_lines: total, + }, + 
severity: Severity::Error, + message: detail, + fix_hint: Some("liyi check --fix".into()), + fixed: false, + span_start: Some(item.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } + modified +} + +// --------------------------------------------------------------------------- +// check_sidecar helpers — item sub-checks +// --------------------------------------------------------------------------- + +/// Check review status: emit `Unreviewed` if neither `reviewed: true` in the +/// sidecar nor `@liyi:intent` in source within the item's span. +fn check_review_status( + file: &Path, + label: &str, + item: &ItemSpec, + source_markers: &[SourceMarker], + diagnostics: &mut Vec, +) { + let reviewed_in_sidecar = item.reviewed; + let has_intent_marker = source_markers.iter().any(|m| { + if let SourceMarker::Intent { line, .. } = m { + *line >= item.source_span[0] && *line <= item.source_span[1] + } else { + false + } + }); + if !reviewed_in_sidecar && !has_intent_marker { + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::Unreviewed, + severity: Severity::Warning, + message: "not reviewed".into(), + fix_hint: None, + fixed: false, + span_start: Some(item.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } +} + +/// Check trivial/ignore source markers and the sidecar `=trivial` sentinel. 
+fn check_trivial_ignore( + file: &Path, + label: &str, + item: &ItemSpec, + source_markers: &[SourceMarker], + diagnostics: &mut Vec, +) { + let span_start = item.source_span[0]; + let span_end = item.source_span[1]; + + for m in source_markers { + match m { + SourceMarker::Trivial { line } + if *line >= span_start.saturating_sub(1) && *line <= span_end => + { + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::Trivial, + severity: Severity::Info, + message: "marked \x40liyi:trivial".into(), + fix_hint: None, + fixed: false, + span_start: Some(span_start), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } + SourceMarker::Ignore { line, .. } + if *line >= span_start.saturating_sub(1) && *line <= span_end => + { + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::Ignored, + severity: Severity::Info, + message: "marked \x40liyi:ignore".into(), + fix_hint: None, + fixed: false, + span_start: Some(span_start), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } + _ => {} + } + } + + // Sidecar "=trivial" sentinel + if item.intent == "=trivial" { + let has_nontrivial = source_markers.iter().any(|m| { + matches!(m, SourceMarker::Nontrivial { line } + if *line >= span_start.saturating_sub(1) && *line <= span_end) + }); + if has_nontrivial { + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::ConflictingTriviality, + severity: Severity::Error, + message: "\x40liyi:nontrivial in source conflicts with \"=trivial\" in sidecar" + .into(), + fix_hint: None, + fixed: false, + span_start: Some(span_start), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } else { + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: 
label.to_string(), + kind: DiagnosticKind::Trivial, + severity: Severity::Info, + message: "intent \"=trivial\"".into(), + fix_hint: None, + fixed: false, + span_start: Some(span_start), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } + } +} + +/// Check related-requirement edges, sync source `@liyi:related` markers into +/// sidecar, and fill null hashes on existing edges. Returns true if fixups +/// modified the item. +fn check_related_edges( + file: &Path, + label: &str, + item: &mut ItemSpec, + source_markers: &[SourceMarker], + requirements: &HashMap, + fix: bool, + diagnostics: &mut Vec, +) -> bool { + let mut modified = false; + + // Validate existing related edges + if let Some(ref related) = item.related { + for (req_name, stored_hash) in related { + match requirements.get(req_name) { + None => { + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::UnknownRequirement { + name: req_name.clone(), + }, + severity: Severity::Error, + message: format!("related requirement \"{req_name}\" not found"), + fix_hint: None, + fixed: false, + span_start: Some(item.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } + Some(rec) => { + // Compare against computed_hash (fresh from source) for + // cascading staleness detection; fall back to sidecar hash. 
+ let current_req_hash = rec.computed_hash.as_ref().or(rec.hash.as_ref()); + if let (Some(sh), Some(rh)) = (stored_hash.as_ref(), current_req_hash) + && sh != rh + { + // @liyi:related reqchanged-orthogonal-to-reviewed + // @liyi:related reqchanged-demands-human-judgment + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::ReqChanged { + requirement: req_name.clone(), + }, + severity: Severity::Warning, + message: format!("requirement \"{req_name}\" has changed"), + fix_hint: None, + fixed: false, + span_start: Some(item.source_span[0]), + annotation_line: None, + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } + } + } + } + } + + // Source `@liyi:related` markers missing from sidecar + let span_start = item.source_span[0]; + let span_end = item.source_span[1]; + for m in source_markers { + if let SourceMarker::Related { name, line } = m { + // Include doc-comment lines immediately before the span + if *line >= span_start.saturating_sub(5) && *line <= span_end { + let has_edge = item.related.as_ref().is_some_and(|r| r.contains_key(name)); + if !has_edge { + if fix { + let related = item.related.get_or_insert_with(BTreeMap::new); + let hash_val = requirements.get(name).and_then(|rec| rec.hash.clone()); + related.insert(name.clone(), hash_val); + modified = true; + } + diagnostics.push(Diagnostic { + file: file.to_path_buf(), + item_or_req: label.to_string(), + kind: DiagnosticKind::MissingRelatedEdge { + name: name.clone(), + }, + severity: Severity::Error, + message: format!( + "source has \x40liyi:related \"{name}\" but sidecar is missing the related edge" + ), + fix_hint: None, + fixed: fix, + span_start: Some(span_start), + annotation_line: Some(*line), + requirement_text: None, + intent: Some(item.intent.clone()), + }); + } + } + } + } + + // Fill null hashes on existing related edges + if fix && let Some(ref mut related) = item.related { + for (req_name, hash_val) in 
related.iter_mut() { + if hash_val.is_none() + && let Some(rec) = requirements.get(req_name) + && let Some(ref h) = rec.hash + { + *hash_val = Some(h.clone()); + modified = true; + } + } + } + + modified +} + // --------------------------------------------------------------------------- // Helpers // --------------------------------------------------------------------------- diff --git a/crates/liyi/src/check.rs.liyi.jsonc b/crates/liyi/src/check.rs.liyi.jsonc index d236b49..d08e58b 100644 --- a/crates/liyi/src/check.rs.liyi.jsonc +++ b/crates/liyi/src/check.rs.liyi.jsonc @@ -18,13 +18,13 @@ { "item": "run_check", "reviewed": true, - "intent": "Execute the two-pass check: pass 1 discovers all @liyi:requirement and @liyi:related markers project-wide, enriches them with hashes from existing sidecars, and computes fresh hashes from current source spans so that downstream related-edge staleness is detected in a single run without requiring --fix first; pass 2 validates each sidecar's items and requirements (hash freshness, review status, related-requirement integrity using computed_hash, trivial/ignore markers, MissingRelatedEdge coverage). When --fix is set, shifted spans, missing hashes, and missing related edges are corrected in place and their diagnostics are marked fixed. Return sorted diagnostics and the appropriate exit code based on CheckFlags.", + "intent": "Orchestrate the two-pass check by delegating to helpers: pass 1 calls discover_requirements, compute_requirement_hashes, collect_source_related_refs, enrich_requirements_from_sidecars to build the global requirement registry; pass 2 calls check_sidecar for each entry. Post-pass calls emit_untracked_requirements, emit_unreferenced_requirements, detect_requirement_cycles, and emit_cycle_diagnostics. 
Return sorted diagnostics and the appropriate exit code based on CheckFlags.", "source_span": [ 42, - 342 + 113 ], "tree_path": "fn.run_check", - "source_hash": "sha256:0e7bb66ac62c7f2104955a2706d12bb4bb724376a4e65d8d772486fd725f73f9", + "source_hash": "sha256:eb0b33db4b23fd3aff3d72bd2074f199a2851c9a70e9f785118cd1554055d7f2", "source_anchor": "pub fn run_check(", "related": { "cycle-detection": "sha256:e8af30a5974fcccf038c0d07eb98c279b57c9671dad032123424aa041d1262ec", @@ -32,16 +32,112 @@ "requirement-name-uniqueness": "sha256:fc329b7eb4016b20e9baa89b14470f2953aa6bad5311aa5d4867702994d94f32" } }, + { + "item": "emit_ambiguous_sidecar_warnings", + "reviewed": true, + "intent": "Convert discovery warning strings into AmbiguousSidecar diagnostics.", + "source_span": [ + 119, + 142 + ], + "tree_path": "fn.emit_ambiguous_sidecar_warnings", + "source_hash": "sha256:455a2dcba2cc18e683f10bfe99645ba444e883bef6815a392fb7ab87348de072", + "source_anchor": "fn emit_ambiguous_sidecar_warnings(" + }, + { + "item": "discover_requirements", + "reviewed": true, + "intent": "=doc", + "source_span": [ + 146, + 196 + ], + "tree_path": "fn.discover_requirements", + "source_hash": "sha256:3bd591e5b058e23ff286ce8a0cc6fe6e1d54f103210a8819d056040c67c57a40", + "source_anchor": "fn discover_requirements(" + }, + { + "item": "compute_requirement_hashes", + "reviewed": true, + "intent": "=doc", + "source_span": [ + 200, + 220 + ], + "tree_path": "fn.compute_requirement_hashes", + "source_hash": "sha256:a6776f9aa98c59c5f85f1a63f896c49e6a9db52c0c851b3701b8bbdecbb94734", + "source_anchor": "fn compute_requirement_hashes(" + }, + { + "item": "collect_source_related_refs", + "reviewed": true, + "intent": "=doc", + "source_span": [ + 224, + 241 + ], + "tree_path": "fn.collect_source_related_refs", + "source_hash": "sha256:f664ab1dd3f4d52d90bed14d9d670e13a0b9a50396e6162e5b82f76cf3cf2636", + "source_anchor": "fn collect_source_related_refs(" + }, + { + "item": "enrich_requirements_from_sidecars", + 
"reviewed": true, + "intent": "=doc", + "source_span": [ + 246, + 310 + ], + "tree_path": "fn.enrich_requirements_from_sidecars", + "source_hash": "sha256:81fbdbe3165852809ddce49dfd7bb304944eb5b1a901d548745deca7b63d6d03", + "source_anchor": "fn enrich_requirements_from_sidecars(" + }, + { + "item": "emit_untracked_requirements", + "reviewed": true, + "intent": "=doc", + "source_span": [ + 314, + 360 + ], + "tree_path": "fn.emit_untracked_requirements", + "source_hash": "sha256:4530fe9e2fa03f0adfbef50a87b1dee61b3c822d3de0c74aa50b6102b46dbe2d", + "source_anchor": "fn emit_untracked_requirements(" + }, + { + "item": "emit_unreferenced_requirements", + "reviewed": true, + "intent": "=doc", + "source_span": [ + 364, + 391 + ], + "tree_path": "fn.emit_unreferenced_requirements", + "source_hash": "sha256:d0891b49f5cc3e0081d87578cc71d2f82c6f3ac7ef6310b119eb1d7f44310d44", + "source_anchor": "fn emit_unreferenced_requirements(" + }, + { + "item": "emit_cycle_diagnostics", + "reviewed": true, + "intent": "=doc", + "source_span": [ + 394, + 422 + ], + "tree_path": "fn.emit_cycle_diagnostics", + "source_hash": "sha256:7d16344ccd0a1863a20f37ea1c2dff370c697110d78f31b59e9f589a289e48aa", + "source_anchor": "fn emit_cycle_diagnostics(" + }, { "item": "check_sidecar", "reviewed": true, - "intent": "For a single sidecar entry: parse the sidecar, validate its version, validate source_hash format (sha256: regex) on all item and requirement hashes including related edges, verify the source file exists, then for each spec check hash freshness (with shift detection, tree_path computation, hash-based sibling scan fallback, PastEof tree_path recovery, and --fix support), review status (sidecar reviewed flag or @liyi:intent marker), trivial/ignore source markers and sidecar =trivial sentinel (emitting Trivial info or ConflictingTriviality error when @liyi:nontrivial conflicts), and related-requirement edges (comparing against computed_hash to detect cascading staleness in one pass; including 
hash backfill: when --fix is active, fill null hash values in related edges from the requirements map, and insert new edges with the requirement's current hash). For requirements: marker-based span recovery from @liyi:requirement/@liyi:end-requirement blocks before hashing; PastEof tree_path recovery with pure-shift vs content-changed distinction. Each diagnostic carries a fixed flag set to true when --fix resolves it (pure shifts, missing hashes, missing related edges) or false for unfixable issues (structural errors). On semantic drift (tree_path resolves but content changed): for reviewed specs, update span but leave hash stale so the spec is flagged for intent review; for unreviewed specs, also rehash at the new location since no human judgment is at stake. When hash mismatches after tree_path resolution, try hash-based sibling scan before treating as drift or falling back to shift heuristic. When --fix fills a missing source_hash, reviewed is reset to false so a human must re-confirm intent. When --fix is active, strip _hints from all item specs (hints are transient scaffold aids). Write the sidecar back if --fix produced modifications.", + "intent": "For a single sidecar entry: parse the sidecar, validate its version and source_hash format (sha256: regex), verify the source file exists, then for each spec delegate to check_item_hash, check_review_status, check_trivial_ignore, check_related_edges (for items) or check_requirement_hash (for requirements, with marker-based span recovery first). When --fix is active, strip _hints from all item specs. 
Write the sidecar back if --fix produced modifications.", "source_span": [ - 351, - 1479 + 431, + 700 ], "tree_path": "fn.check_sidecar", - "source_hash": "sha256:dd5826e2c97fff3e280ffd3247a6c4aeed96ce61e8a0c37c67df610a945b201c", + "source_hash": "sha256:7073aadcbb2212d01fa60615ecada03f86acc6382aaafc6fa4b462b3cc9471fb", "source_anchor": "fn check_sidecar(", "related": { "fix-never-modifies-human-fields": "sha256:2101e1597493adb25b7743b3b2f983f672f967e2e24ae18ef07ece70ccfb76bb", @@ -52,13 +148,151 @@ "reviewed-semantics": "sha256:eabc16cfe83d7ed5b52779d4771a04f9ba6fee37c24e4493f2577d907afbd9e0" } }, + { + "item": "check_requirement_hash", + "reviewed": true, + "intent": "=doc", + "source_span": [ + 708, + 894 + ], + "tree_path": "fn.check_requirement_hash", + "source_hash": "sha256:33b049164f163dcc79e1de3f27f216fffd3c93b7fbb1773914bef667584d44c8", + "source_anchor": "fn check_requirement_hash(" + }, + { + "item": "check_item_hash", + "reviewed": true, + "intent": "=doc", + "source_span": [ + 904, + 985 + ], + "tree_path": "fn.check_item_hash", + "source_hash": "sha256:d292e9a449db185ae7a0b9aa6b1e541a6ab02001b553a1f947eefc846e5c4859", + "source_anchor": "fn check_item_hash(" + }, + { + "item": "handle_missing_hash", + "reviewed": true, + "intent": "=doc", + "source_span": [ + 988, + 1039 + ], + "tree_path": "fn.handle_missing_hash", + "source_hash": "sha256:0b84f4da4912fb4daf1d45a6a0f093eff81cbdce22bc38247f74c6f1c0eeb2a5", + "source_anchor": "fn handle_missing_hash(" + }, + { + "item": "handle_hash_mismatch", + "reviewed": true, + "intent": "=doc", + "source_span": [ + 1043, + 1183 + ], + "tree_path": "fn.handle_hash_mismatch", + "source_hash": "sha256:322f1c68b92739e5e546672ddda40c7a886327fb4dac984b4f2ccd0ae751da5c", + "source_anchor": "fn handle_hash_mismatch(", + "related": { + "fix-semantic-drift-protection": "sha256:7c7bcb74ddf40462f9162ada24feeffa60e080adae3f4a69d37dde5fb23ac81b" + } + }, + { + "item": "handle_tree_path_resolved", + "reviewed": true, + 
"intent": "=doc", + "source_span": [ + 1187, + 1347 + ], + "tree_path": "fn.handle_tree_path_resolved", + "source_hash": "sha256:be18c43e0b7b16a0fe7481556498c2455c6cd3a7430139d40c106a65cf47a9d6", + "source_anchor": "fn handle_tree_path_resolved(", + "related": { + "fix-semantic-drift-protection": "sha256:7c7bcb74ddf40462f9162ada24feeffa60e080adae3f4a69d37dde5fb23ac81b", + "reviewed-semantics": "sha256:eabc16cfe83d7ed5b52779d4771a04f9ba6fee37c24e4493f2577d907afbd9e0" + } + }, + { + "item": "handle_past_eof", + "reviewed": true, + "intent": "=doc", + "source_span": [ + 1352, + 1505 + ], + "tree_path": "fn.handle_past_eof", + "source_hash": "sha256:545b3d996cf4c3623067dc4269e984235c8b1d235376fdeb023ef985b597de64", + "source_anchor": "fn handle_past_eof(", + "related": { + "fix-semantic-drift-protection": "sha256:7c7bcb74ddf40462f9162ada24feeffa60e080adae3f4a69d37dde5fb23ac81b", + "reviewed-semantics": "sha256:eabc16cfe83d7ed5b52779d4771a04f9ba6fee37c24e4493f2577d907afbd9e0" + } + }, + { + "item": "check_review_status", + "reviewed": true, + "intent": "=doc", + "source_span": [ + 1513, + 1543 + ], + "tree_path": "fn.check_review_status", + "source_hash": "sha256:62cd6e7a08df4bb831f0f4c860cb6cbaf41ec1483e53ed0bbb36894e93bf1eb0", + "source_anchor": "fn check_review_status(", + "related": { + "reviewed-semantics": "sha256:eabc16cfe83d7ed5b52779d4771a04f9ba6fee37c24e4493f2577d907afbd9e0" + } + }, + { + "item": "check_trivial_ignore", + "reviewed": true, + "intent": "Emit Trivial or Ignored info diagnostics for @liyi:trivial and @liyi:ignore markers within the item's span. 
For the sidecar =trivial sentinel, emit Trivial info unless @liyi:nontrivial is present in the span, in which case emit ConflictingTriviality error.", + "source_span": [ + 1546, + 1633 + ], + "tree_path": "fn.check_trivial_ignore", + "source_hash": "sha256:434e82e6c548233c1113313ec013030b349da1f922c4f5e8b9a5c5f2a6f4feb5", + "source_anchor": "fn check_trivial_ignore(" + }, + { + "item": "check_related_edges", + "reviewed": true, + "intent": "Validate existing related edges (emit UnknownRequirement or ReqChanged using computed_hash for cascading staleness), sync source @liyi:related markers into sidecar as MissingRelatedEdge (auto-fix inserts edge with current hash), and fill null hashes on existing edges when --fix is active. Return true if the item was modified.", + "source_span": [ + 1638, + 1751 + ], + "tree_path": "fn.check_related_edges", + "source_hash": "sha256:66f6a7ee8df06a18e9a4e29e19851b775ee70717bc5b01a179afc8738a00aa12", + "source_anchor": "fn check_related_edges(", + "related": { + "reqchanged-demands-human-judgment": "sha256:30ba3f1eea4b3e68d6c5b3926c643cd41d753636d9420329934d9dd43ae14de5", + "reqchanged-orthogonal-to-reviewed": "sha256:5c85639dd24c2cecdeb7c466e414319fdd40451d0abb896e495fcd13d63a78d8" + } + }, + { + "item": "detect_requirement_cycles", + "reviewed": true, + "intent": "=doc", + "source_span": [ + 1761, + 1813 + ], + "tree_path": "fn.detect_requirement_cycles", + "source_hash": "sha256:c7124f27dcaade014657e1b571274470d11511922c508ca2a3836d7df20e808d", + "source_anchor": "fn detect_requirement_cycles(graph: &HashMap<String, Vec<String>>) -> Vec<Vec<String>> {" + }, { "item": "read_cached", "reviewed": true, "intent": "Read a file's contents with caching: return the cached string if already loaded, otherwise read from disk, store in the cache, and return. 
Return None on I/O failure.", "source_span": [ - 1544, - 1555 + 1816, + 1827 ], "tree_path": "fn.read_cached", "source_hash": "sha256:c5e98a29f80d683b44e3c883feff6cc1eee817296cfa669df07d5c7175deb94f", diff --git a/crates/liyi/src/diagnostics.rs b/crates/liyi/src/diagnostics.rs index 8682b80..825565b 100644 --- a/crates/liyi/src/diagnostics.rs +++ b/crates/liyi/src/diagnostics.rs @@ -50,6 +50,8 @@ pub struct Diagnostic { pub annotation_line: Option, /// Full text of a requirement block, for `--prompt` output. pub requirement_text: Option, + /// Intent text from the sidecar spec, for CI annotation output. + pub intent: Option, } impl Diagnostic { @@ -249,3 +251,55 @@ pub fn format_summary(diagnostics: &[Diagnostic]) -> String { parts.join(", ") } } + +/// Format a diagnostic as a GitHub Actions workflow command. +/// +/// Emits `::notice`, `::warning`, or `::error` with `file`, `line`, and +/// `title` parameters so that annotations appear inline in PR diffs. +pub fn format_github_actions(d: &Diagnostic, root: &std::path::Path) -> String { + let level = match d.severity { + Severity::Info => "notice", + Severity::Warning => "warning", + Severity::Error => "error", + }; + let rel = d.file.strip_prefix(root).unwrap_or(&d.file); + let file = rel.display(); + + // Escape special characters per GitHub Actions workflow command spec: + // https://github.com/actions/toolkit/blob/main/packages/core/src/command.ts + let escape = |s: &str| { + s.replace('%', "%25") + .replace('\r', "%0D") + .replace('\n', "%0A") + }; + let message = escape(&d.message); + + // Append intent text to the message when available and non-sentinel. 
+ let message = match &d.intent { + Some(intent) if intent != "=doc" && intent != "=trivial" => { + format!("{message}%0AIntent: {}", escape(intent)) + } + _ => message, + }; + + let title = if d.item_or_req.is_empty() { + "立意".to_string() + } else { + let icon = if d.fixed { + "✓" + } else { + Diagnostic::icon(&d.kind, d.severity) + }; + format!("立意 {} {}", icon, d.item_or_req) + }; + + let display_line = d.span_start.or(d.annotation_line); + match display_line { + Some(line) => { + format!("::{level} file={file},line={line},title={title}::{message}") + } + None => { + format!("::{level} file={file},title={title}::{message}") + } + } +} diff --git a/crates/liyi/src/diagnostics.rs.liyi.jsonc b/crates/liyi/src/diagnostics.rs.liyi.jsonc index 0028764..f06ae2c 100644 --- a/crates/liyi/src/diagnostics.rs.liyi.jsonc +++ b/crates/liyi/src/diagnostics.rs.liyi.jsonc @@ -30,13 +30,13 @@ { "item": "Diagnostic", "reviewed": true, - "intent": "Bundle a single diagnostic report with the file path, item or requirement name, diagnostic kind, severity, a human-readable message, an optional fix hint, a fixed flag indicating whether --fix resolved this diagnostic, an optional span_start for file:line display, an optional annotation_line for --prompt output, and an optional requirement_text carrying the full requirement block text.", + "intent": "Bundle a single diagnostic report with the file path, item or requirement name, diagnostic kind, severity, a human-readable message, an optional fix hint, a fixed flag indicating whether --fix resolved this diagnostic, an optional span_start for file:line display, an optional annotation_line for --prompt output, an optional requirement_text carrying the full requirement block text, and an optional intent carrying the sidecar intent text for CI annotation output.", "source_span": [ 37, - 53 + 55 ], "tree_path": "struct.Diagnostic", - "source_hash": "sha256:e322921d0a64ea81ce561fda31905e4fa31926f770d921448274aef5835d970c", + "source_hash": 
"sha256:f729285bd25967eae5b2e8e38683beda296a493847af86c45aff9d69673b85fb", "source_anchor": "pub struct Diagnostic {" }, { @@ -44,8 +44,8 @@ "reviewed": true, "intent": "Format a diagnostic for terminal output. display_with_root shows a repo-relative file path with optional :line suffix (from span_start or annotation_line); for fixed diagnostics uses '✓ fixed' icon and suppresses fix hints, otherwise selects an icon via the icon helper (✓/·/↕/⚠/✗ by severity×kind), formats as 'file:line: item: icon message', and appends a 'fix: …' line when a fix_hint is present.", "source_span": [ - 55, - 97 + 57, + 99 ], "tree_path": "impl.Diagnostic", "source_hash": "sha256:77b03739b7ba64ee8b743a75eadbd3edc66aac342a86c0a1818453d16a600b36", @@ -56,8 +56,8 @@ "reviewed": true, "intent": "Hold the four boolean policy flags (fail_on_stale, fail_on_unreviewed, fail_on_req_changed, fail_on_untracked) that control which diagnostic kinds escalate the exit code to CheckFailure.", "source_span": [ - 122, - 127 + 124, + 129 ], "tree_path": "struct.CheckFlags", "source_hash": "sha256:7db1a400401611d2d83f2c98a9258920cd81a040e2237804b1a46e5530411fa9", @@ -68,8 +68,8 @@ "reviewed": true, "intent": "Define the three possible exit codes: Clean (0) for no actionable findings, CheckFailure (1) for policy violations, InternalError (2) for parse or version errors.", "source_span": [ - 133, - 137 + 135, + 139 ], "tree_path": "enum.LiyiExitCode", "source_hash": "sha256:065e327cb7a385b508af79995f55c2ab18bb0fcb3aeda9c5cf9d2cd0368401c5", @@ -83,8 +83,8 @@ "reviewed": true, "intent": "Scan diagnostics and return InternalError immediately if any ParseError or UnknownVersion is found. Skip diagnostics with fixed=true. 
Return CheckFailure if any flagged policy violation (stale, unreviewed, req-changed, untracked) or error-severity diagnostic (MalformedHash, UnknownRequirement, RequirementCycle, SpanPastEof, InvalidSpan, OrphanedSource, DuplicateEntry, MissingRelatedEdge, ConflictingTriviality) is present per the CheckFlags, otherwise Clean.", "source_span": [ - 140, - 178 + 142, + 180 ], "tree_path": "fn.compute_exit_code", "source_hash": "sha256:4a2e15f9b73a85306d2b0b8e1e0f565bb1a114e24d21a2b02e84012aac384d0b", @@ -92,6 +92,18 @@ "related": { "liyi-check-exit-code": "sha256:2cb6ab36efd4724e3f7e7021bf36233633357e9fddbd1a231bc7b7ed07137d65" } + }, + { + "item": "format_github_actions", + "reviewed": true, + "intent": "Format a single diagnostic as a GitHub Actions workflow command (::notice, ::warning, or ::error) with file, line, and title parameters. Maps severity to annotation level, strips the repo root from file paths, escapes %, \\r, \\n per the Actions toolkit spec, uses the item name with a severity icon as the annotation title, and appends the intent text to the message body when present and non-sentinel (not =doc or =trivial).", + "source_span": [ + 259, + 305 + ], + "tree_path": "fn.format_github_actions", + "source_hash": "sha256:4b40f8d57fb98a81e056599bccfca5433e9329071c6f3b39d9b521075a191533", + "source_anchor": "pub fn format_github_actions(d: &Diagnostic, root: &std::path::Path) -> String {" } ] } diff --git a/docs/approve-impl.md b/docs/approve-impl.md index 0a994cd..3b97d29 100644 --- a/docs/approve-impl.md +++ b/docs/approve-impl.md @@ -2,8 +2,8 @@ # `liyi approve`: Implementation Plan -**Status:** Partially implemented (unreviewed-item approval is shipped; requirement-change review is planned) -**Design authority:** `docs/liyi-design.md` — *reviewed-semantics*, *fix-never-modifies-human-fields* +**Status:** Partially implemented (unreviewed-item approval is shipped; requirement-change and stale-reviewed approval are planned) +**Design authority:** 
`docs/liyi-design.md` — *reviewed-semantics*, *fix-never-modifies-human-fields*, *fix-semantic-drift-protection* --- @@ -11,7 +11,7 @@ -`liyi approve` is the human review gate. Agents infer intent, but only a human can confirm that an intent description is correct or that a code item still satisfies a changed requirement. Without this gate, agent-inferred specs accumulate unchecked, and requirement changes propagate silently. +`liyi approve` is the human review gate. Agents infer intent, but only a human can confirm that an intent description is correct, that a code item still satisfies a changed requirement, or that a previously reviewed item's intent still holds after the source code changed. Without this gate, agent-inferred specs accumulate unchecked, requirement changes propagate silently, and stale reviewed items linger with no structured path back to a clean state. --- @@ -31,6 +31,10 @@ The following constraints are normative for the implementation. **ReqChanged always demands human judgment.** Even if a requirement change appears cosmetic (rewording with no semantic impact), the human must confirm. The point of liyi is that humans don't trust their own recall — "I'm sure this is fine" is exactly the failure mode liyi prevents. No auto-fix path exists for stale related edges; only `liyi approve` (or manual sidecar editing) can refresh them. + +**Stale reviewed specs always demand human judgment.** When a reviewed item's source code changes, `liyi check --fix` refuses to auto-rehash — the spec remains stale until a human confirms via `liyi approve` (or manual sidecar editing) that the declared intent still describes the changed code. This is the complement of the ReqChanged gate: ReqChanged guards against requirement drift; StaleReviewed guards against implementation drift. No auto-fix path exists for stale reviewed specs. 
+ + --- ## Status quo (implemented) @@ -103,10 +107,11 @@ Extend `liyi approve` to surface **requirement-changed items** alongside unrevie #### Candidate types -The `ApprovalCandidate` struct gains a discriminant (or a new sibling type) to distinguish two review modes: +The `ApprovalCandidate` struct gains a discriminant (or a new sibling type) to distinguish three review modes: 1. **Unreviewed** (existing): agent-inferred intent, no human confirmation yet 2. **ReqChanged** (new): reviewed item whose related requirement text changed +3. **StaleReviewed** (new): reviewed item whose source code changed (`source_hash` mismatch, flagged by `liyi check` as "not auto-rehashed — reviewed") For ReqChanged candidates, the candidate struct carries additional context: @@ -153,13 +158,14 @@ liyi approve [paths...] --item NAME Filter to specific item --req-only Only show requirement-changed items (skip unreviewed) --unreviewed-only Only show unreviewed items (current behavior) + --stale-only Only show stale-reviewed items ``` -Without `--req-only` or `--unreviewed-only`, both kinds are interleaved (unreviewed first, then req-changed, or sorted by file). The progress bar shows the combined count. +Without `--req-only`, `--unreviewed-only`, or `--stale-only`, all three kinds are interleaved (unreviewed first, then stale-reviewed, then req-changed). The progress bar shows the combined count. #### Ordering -Present unreviewed items first (these are typically fewer and higher-priority), then ReqChanged items grouped by requirement (so all items affected by the same requirement change appear together, with the requirement diff shown once as a group header). 
+Present unreviewed items first (these are typically fewer and higher-priority), then StaleReviewed items sorted by file (so the human walks through changed code in file order), then ReqChanged items grouped by requirement (so all items affected by the same requirement change appear together, with the requirement diff shown once as a group header). ### Implementation notes @@ -176,6 +182,78 @@ Present unreviewed items first (these are typically fewer and higher-priority), --- +## Planned: stale-reviewed approval + +### Problem + +When a reviewed item's source code changes, `liyi check --fix` updates the `source_span` (if the item shifted) but refuses to rehash `source_hash` — the "not auto-rehashed — reviewed" diagnostic. The spec remains stale until a human confirms the intent still holds. Currently the only resolution paths are: + +1. Manual sidecar editing (set `source_hash` to `null`, run `--fix`) +2. Agent triage (write a triage report, run `liyi triage --apply` for cosmetic changes) + +Neither path provides the ergonomic, interactive review that `liyi approve` offers for unreviewed items. A human looking at `liyi check` output with several "not auto-rehashed — reviewed" warnings has no structured way to walk through each one, compare old code vs new code, and confirm or update the intent. + +### Design + +Extend `liyi approve` to surface **stale reviewed items** alongside unreviewed and ReqChanged items. The same TUI, same decision loop, broader scope. + + + + +#### Candidate collection + +`collect_approval_candidates` gains a pass over `liyi check` diagnostics (or re-runs the check logic internally) to find `DiagnosticKind::Stale` items where `reviewed == true` (or `@liyi:intent` in source). 
For each, it collects: + +- **Item metadata:** name, intent text, source span, spec index +- **Current source:** all lines, with the item's span highlighted +- **Old source from Git:** the source lines at the span recorded in the stale `source_hash`, recovered via `git show <commit>:<path>` or by walking git history to find the commit whose content matches the recorded hash. This enables a source diff +- **Current intent:** the human-reviewed intent that may or may not still apply + +#### TUI presentation for StaleReviewed + +The intent pane changes layout for StaleReviewed candidates: + +1. **Header:** item name, source file, line range, label "STALE — source changed since last review" +2. **Source diff:** old source (at time of last hash) → new source (current span), word-level or line-level diff with red/green highlighting. This is the key information — the human needs to see *what changed* in the code +3. **Intent pane:** the item's current intent text, displayed in full. The question is: "does this intent still describe the changed code?" +4. **Source pane:** full syntax-highlighted source code of the item's current span, scrollable + +#### Decisions for StaleReviewed + +| Key | Meaning for StaleReviewed | +|-----|---------------------------| +| `y` | Confirm — the intent still describes the changed code. Rehash `source_hash` and `source_anchor` to current, keep `reviewed: true` | +| `n` | Reject — the intent no longer holds. Leave stale as a todo marker | +| `s` | Skip — defer | +| `e` | Edit — update the intent to match the new code, then rehash. Set `reviewed: true` (the human authored the new intent directly) | + +#### Application logic + + + + +For StaleReviewed approvals: + +- **Yes:** recompute `source_hash` and `source_anchor` from the current source span. 
Keep `reviewed = true` and `intent` unchanged — the human confirmed the existing intent still holds +- **Edit(text):** update `intent` to the new text, recompute `source_hash` and `source_anchor`, set `reviewed = true` (the human directly authored the replacement intent) +- **No:** leave `source_hash` unchanged so `liyi check` continues to flag it as stale +- **Skip:** no mutation + +### Implementation notes + +- Source diff recovery: `lookup_prev_source` (new function, parallel to `lookup_prev_intent`) walks git history to find the source file content at the commit where the item's `source_hash` last matched. Falls back to showing just the current source if git history is unavailable +- The `similar` crate's line-level diff (already used for intent diffs) works for source diffs. Word-level diff may be too noisy for code; line-level with inline change highlighting is likely more readable +- `edit_intent_in_editor` reuses the existing flow but includes the source diff and current intent in the comment block + +### Edge cases + +- **Item both stale and ReqChanged:** the item appears in both categories. Present it as StaleReviewed first (the source change is the more fundamental concern); if the human approves the intent, the ReqChanged review follows. Alternatively, combine into a single review showing both the source diff and the requirement diff — but this risks information overload. Presenting separately is safer +- **`@liyi:intent` in source:** if the item has `@liyi:intent` in source (making it effectively reviewed), and the source changed, it should still surface as StaleReviewed. The source-level intent assertion may no longer match the code. `liyi check` already flags these as stale +- **Stale but cosmetically:** the source hash changed but the behavioral semantics didn't (e.g., variable rename, comment edit). The human sees the diff, confirms it's cosmetic, and approves. 
No shortcut — the whole point is that the human judges, not the tool +- **`--yes` batch mode:** allowed for consistency, but carries the same caution as ReqChanged batch approval — the human claims to have verified all stale intents still hold, which is a strong assertion + +--- + ## Non-goals - **Approving requirement text itself:** requirements are authored by humans in design docs. They don't go through `approve` — they're the *input* to the review process, not its subject diff --git a/docs/approve-impl.md.liyi.jsonc b/docs/approve-impl.md.liyi.jsonc index 0acd41f..7ab1772 100644 --- a/docs/approve-impl.md.liyi.jsonc +++ b/docs/approve-impl.md.liyi.jsonc @@ -29,6 +29,15 @@ ], "source_hash": "sha256:30ba3f1eea4b3e68d6c5b3926c643cd41d753636d9420329934d9dd43ae14de5", "source_anchor": "" + }, + { + "requirement": "stale-reviewed-demands-human-judgment", + "source_span": [ + 34, + 36 + ], + "source_hash": "sha256:0158af62360b393b12a0a34ed912c5fa7f75d313e1994536345e70910887d97a", + "source_anchor": "" } ] }